diff --git a/.github/ISSUE_TEMPLATE/proposal.md b/.github/ISSUE_TEMPLATE/proposal.md new file mode 100644 index 0000000000..45f0bff42f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/proposal.md @@ -0,0 +1,37 @@ +--- +name: Protocol Change Proposal +about: Create a proposal to request a change to the protocol + +--- + + + +# Protocol Change Proposal + +## Summary + + + +## Problem Definition + + + +## Proposal + + + +____ + +#### For Admin Use + +- [ ] Not duplicate issue +- [ ] Appropriate labels applied +- [ ] Appropriate contributors tagged +- [ ] Contributor assigned/self-assigned diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 5b08611ccb..4fa68cf13b 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -18,14 +18,14 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 - name: "Check generated mocks" run: | set -euo pipefail - readonly MOCKERY=2.12.3 # N.B. no leading "v" + readonly MOCKERY=2.14.0 # N.B. no leading "v" curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf - make mockery 2>/dev/null if ! 
git diff --stat --exit-code ; then diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 9a1d38ff03..64a4bc2436 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -44,7 +44,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: @@ -66,7 +66,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index f90f88cd3a..405f4c5175 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index bb671af9ce..5b7e655df3 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 with: diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index c90df00500..c98ed504df 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -22,7 +22,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 1c361667e5..8e0a4f4be9 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '^1.17' + go-version: '1.18' - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git 
a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index dd9e875134..815b482fe9 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -11,7 +11,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - uses: actions/checkout@v3 diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml deleted file mode 100644 index e2ba808617..0000000000 --- a/.github/workflows/linkchecker.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Check Markdown links -on: - schedule: - - cron: '* */24 * * *' -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: creachadair/github-action-markdown-link-check@master - with: - folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d5a720dfc8..d7ddfb2410 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,9 +14,8 @@ jobs: timeout-minutes: 8 steps: - uses: actions/setup-go@v3 - with: - go-version: 1.17.1 - - uses: actions/checkout@v3 + with: + go-version: '1.18' - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -25,8 +24,10 @@ jobs: go.sum - uses: golangci/golangci-lint-action@v3.2.0 with: - # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.45 + # Required: the version of golangci-lint is required and + # must be specified without patch version: we always use the + # latest patch version. 
+ version: v1.50.1 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index e6c2b927e4..1b7cdc3963 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.6.0 + - uses: bufbuild/buf-setup-action@v1.8.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml deleted file mode 100644 index 6eec3f46f5..0000000000 --- a/.github/workflows/proto.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Protobuf -# Protobuf runs buf (https://buf.build/) lint and check-breakage -# This workflow is only run when a .proto file has been modified -on: - pull_request: - paths: - - "**.proto" -jobs: - proto-lint: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v3 - - name: lint - run: make proto-lint - proto-breakage: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v3 - - name: check-breakage - run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7ddd7dee8a..194399586f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.18' - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 6a03003cc8..c30fbebd4a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v5 + - uses: actions/stale@v6 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull 
request has been automatically marked as stale because it has not had diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bcbe7d5d42..f6ac3a1975 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: @@ -57,7 +57,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "^1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: @@ -89,7 +89,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.17" + go-version: "^1.18" - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: diff --git a/.gitignore b/.gitignore index 1986b10c80..4583d098bf 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ docs/.vuepress/dist *.log abci-cli docs/node_modules/ +docs/.vuepress/public/rpc index.html.md scripts/wal2json/wal2json @@ -46,6 +47,8 @@ terraform.tfstate terraform.tfstate.backup terraform.tfstate.d profile\.out +test/app/grpc_client +test/loadtime/build test/e2e/build test/e2e/networks/*/ test/logs @@ -56,3 +59,11 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +*.aux +*.bbl +*.blg +*.pdf +*.gz +*.dvi +# Python virtual environments +.venv diff --git a/.golangci.yml b/.golangci.yml index 2bca6f0934..215344765e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,20 +2,12 @@ linters: enable: - asciicheck - bodyclose - - deadcode - depguard - dogsled - dupl - errcheck - exportloopref - # - funlen - # - gochecknoglobals - # - gochecknoinits - # - gocognit - goconst - # - gocritic - # - gocyclo - # - godox - gofmt - goimports - revive @@ -23,44 +15,36 @@ linters: - gosimple - govet - ineffassign - # - interfacer - - lll - # - maligned - # - misspell + - misspell - nakedret - nolintlint - prealloc - staticcheck - - 
structcheck + # - structcheck // to be fixed by golangci-lint - stylecheck - # - typecheck + - typecheck - unconvert - # - unparam - unused - - varcheck - # - whitespace - # - wsl issues: exclude-rules: - path: _test\.go linters: - gosec - - linters: - - lll - source: "https://" max-same-issues: 50 linters-settings: dogsled: max-blank-identifiers: 3 - maligned: - suggest-new: true - # govet: - # check-shadowing: true - revive: + golint: min-confidence: 0 misspell: locale: US ignore-words: - behaviour + maligned: + suggest-new: true + +run: + skip-files: + - libs/pubsub/query/query.peg.go diff --git a/.goreleaser.yml b/.goreleaser.yml index 339abb8aab..da1a648a03 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -25,4 +25,13 @@ checksum: algorithm: sha256 release: - name_template: "{{.Version}} (WARNING: BETA SOFTWARE)" + prerelease: auto + name_template: "{{.Version}}" + +archives: + - files: + - LICENSE + - README.md + - UPGRADING.md + - SECURITY.md + - CHANGELOG.md diff --git a/.markdownlint.yml b/.markdownlint.yml index 1637001cc2..80e3be4edb 100644 --- a/.markdownlint.yml +++ b/.markdownlint.yml @@ -1,8 +1,11 @@ -default: true, -MD007: { "indent": 4 } +default: true +MD001: false +MD007: {indent: 4} MD013: false -MD024: { siblings_only: true } +MD024: {siblings_only: true} MD025: false -MD033: { no-inline-html: false } -no-hard-tabs: false -whitespace: false +MD033: false +MD036: false +MD010: false +MD012: false +MD028: false diff --git a/.vscode/settings.json b/.vscode/settings.json index 3a42e525f1..2fdab83508 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -5,4 +5,5 @@ "--proto_path=${workspaceRoot}/third_party/proto" ] } + } diff --git a/CHANGELOG.md b/CHANGELOG.md index e37ab67cce..145eba32bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,108 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos). 
+## v0.34.23 + +*Nov 9, 2022* + +This release introduces some new Prometheus metrics to help in determining what +kinds of messages are consuming the most P2P bandwidth. This builds towards our +broader goal of optimizing Tendermint bandwidth consumption, and will give us +meaningful insights once we can establish these metrics for a number of chains. + +We now also return `Cache-Control` headers for select RPC endpoints to help +facilitate caching. + +Special thanks to external contributors on this release: @JayT106 + +### IMPROVEMENTS +- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new + Envelope type and associated methods for sending and receiving Envelopes + instead of raw bytes. This also adds new metrics, + `tendermint_p2p_message_send_bytes_total` and + `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of + each message type have been sent. +- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable + caching of RPC responses (@JayT106) + + The following RPC endpoints will return `Cache-Control` headers with a maximum + age of 1 day: + + - `/abci_info` + - `/block`, if `height` is supplied + - `/block_by_hash` + - `/block_results`, if `height` is supplied + - `/blockchain` + - `/check_tx` + - `/commit`, if `height` is supplied + - `/consensus_params`, if `height` is supplied + - `/genesis` + - `/genesis_chunked` + - `/tx` + - `/validators`, if `height` is supplied + +## v0.34.22 + +This release includes several bug fixes, [one of +which](https://github.com/tendermint/tendermint/pull/9518) we discovered while +building up a baseline for v0.34 against which to compare our upcoming v0.37 +release during our [QA process](./docs/qa/). 
+ +Special thanks to external contributors on this release: @RiccardoM + +### FEATURES + +- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support + HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters) + +### BUG FIXES + +- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483) + Calling `tendermint init` would incorrectly leave out the new `[storage]` + section delimiter in the generated configuration file - this has now been + fixed +- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent + peers who have errored being added to the peer set (@jmalicevic) +- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix + bug that caused the psql indexer to index empty blocks whenever one of the + transactions returned a non zero code. The relevant deduplication logic has + been moved within the kv indexer only (@cmwaters) +- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A + block sync stall was observed during our QA process whereby the node was + unable to make progress. Retrying block requests after a timeout fixes this. + +## v0.34.21 + +Release highlights include: + +- A new `[storage]` configuration section and flag `discard_abci_responses`, + which, if enabled, discards all ABCI responses except the latest one in order + to reduce disk space usage in the state store. When enabled, the + `block_results` RPC endpoint can no longer function and will return an error. +- A new CLI command, `reindex-event`, to re-index block and tx events to the + event sinks. You can run this command when the event store backend + dropped/disconnected or you want to replace the backend. When + `discard_abci_responses` is enabled, you will not be able to use this command. 
+ +Special thanks to external contributors on this release: @rootwarp & @animart + +### FEATURES + +- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters) +- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart) + +### IMPROVEMENTS + +- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI + responses except the last in order to save on storage space in the state + store (@samricotta) + +### BUG FIXES + +- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the + priority mempool +- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp) + ## v0.34.20 Special thanks to external contributors on this release: @joeabbey @yihuang @@ -438,7 +540,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters) - [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker) - [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker) -- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. 
See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam) +- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam) - [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker) - [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes) - [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters) @@ -452,7 +554,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes) - [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters) - [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section. 
-- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters) +- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters) ### IMPROVEMENTS @@ -532,7 +634,7 @@ This security release fixes: Tendermint 0.33.0 and above allow block proposers to include signatures for the wrong block. This may happen naturally if you start a network, have it run for some time and restart it **without changing the chainID**. (It is a -[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html) +[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html) to reuse chainIDs.) Correct block proposers will accidentally include signatures for the wrong block if they see these signatures, and then commits won't validate, making all proposed blocks invalid. A malicious validator (even with a minimal @@ -831,7 +933,7 @@ and a validator address plus a timestamp. Note we may remove the validator address & timestamp fields in the future (see ADR-25). `lite2` package has been added to solve `lite` issues and introduce weak -subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details. +subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md) for complete details. `lite` package is now deprecated and will be removed in v0.34 release. 
### BREAKING CHANGES: @@ -1192,7 +1294,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah *August 28, 2019* @climber73 wrote the [Writing a Tendermint Core application in Java -(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) +(gRPC)](https://github.com/tendermint/tendermint/blob/v0.32.x/docs/guides/java.md) guide. Special thanks to external contributors on this release: @@ -1225,7 +1327,7 @@ Special thanks to external contributors on this release: ### FEATURES: -- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md) +- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/v0.34.x/config/toml.go#L303) NOTE: It's not ready for a production yet. 
For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md) - [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele) - [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors` option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors). @@ -1542,7 +1644,7 @@ Special thanks to external contributors on this release: - [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation * Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or use `make build_c` / `make install_c` (full instructions can be found at - https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support) + ) * Use `boltdb` tag to compile Tendermint with bolt db - [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except when IP lookup fails) @@ -1766,7 +1868,7 @@ more details. - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients` - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`. - [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config. 
- - [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods. + - [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods. * Apps - [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a @@ -1819,7 +1921,7 @@ more details. - [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha) - [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) - [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use -- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md) +- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md) - [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints (@guagualvcha) - [p2p/conn] 
[\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection @@ -2517,7 +2619,7 @@ Special thanks to external contributors on this release: This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas. It also addresses some issues found via security audit, removes various unused functions from `libs/common`, and implements -[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md). +[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md). BREAKING CHANGES: @@ -2598,7 +2700,7 @@ BREAKING CHANGES: - [abci] Added address of the original proposer of the block to Header - [abci] Change ABCI Header to match Tendermint exactly - [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see - [ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)): + [ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)): - Remove PubKey from `Validator` (so it's just Address and Power) - Introduce `ValidatorUpdate` (with just PubKey and Power) - InitChain and EndBlock use ValidatorUpdate @@ -2620,7 +2722,7 @@ BREAKING CHANGES: - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!) - Add NextValidatorSet to State, changes on-disk representation of state - [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See - [ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)). + [ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)). 
- Remove ConsensusParams.BlockSize.MaxTxs - Introduce maximum sizes for all components of a block, including ChainID - [types] Updates to the block Header: @@ -2631,7 +2733,7 @@ BREAKING CHANGES: - [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit - [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See - [ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)): + [ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)): - format changed from DER to `r || s`, both little endian encoded as 32 bytes. - malleability removed by requiring `s` to be in canonical form. @@ -3431,7 +3533,7 @@ Also includes the Grand Repo-Merge of 2017. BREAKING CHANGES: - Config and Flags: - - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11), + - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/v0.10.0/config/config.go#L11), containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig` - This affects the following flags: - `--seeds` is now `--p2p.seeds` diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index b88d1cbd1f..1b36756d7f 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,10 +1,6 @@ # Unreleased Changes -## v0.34.21 - -Special thanks to external contributors on this release: - -Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). +## v0.34.24 ### BREAKING CHANGES diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 715019e90a..afb9da3de9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji. 
If the issue would benefit from thorough discussion, maintainers may request that you create a [Request For -Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion +Comment](https://github.com/tendermint/tendermint/tree/main/rfc). Discussion at the RFC stage will build collective understanding of the dimensions of the problems and help structure conversations around trade-offs. diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index ecb0c8066f..cc387db4b6 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,5 +1,5 @@ # stage 1 Generate Tendermint Binary -FROM golang:1.17-alpine as builder +FROM golang:1.18-alpine as builder RUN apk update && \ apk upgrade && \ apk --no-cache add make @@ -9,7 +9,7 @@ WORKDIR /tendermint RUN make build-linux # stage 2 -FROM golang:1.17-alpine +FROM golang:1.18-alpine LABEL maintainer="hello@tendermint.com" # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json diff --git a/DOCKER/README.md b/DOCKER/README.md index 5cd39446f6..d7ca6d37fe 100644 --- a/DOCKER/README.md +++ b/DOCKER/README.md @@ -6,7 +6,7 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm Official releases can be found [here](https://github.com/tendermint/tendermint/releases). -The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile). +The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile). Respective versioned files can be found (replace the Xs with the version number). 
@@ -20,9 +20,9 @@ Respective versioned files can be found Running linter" - @golangci-lint run + @go run github.com/golangci/golangci-lint/cmd/golangci-lint run .PHONY: lint DESTINATION = ./index.html.md diff --git a/SECURITY.md b/SECURITY.md index 351f5606c3..61f02e10a1 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,98 +2,146 @@ ## Reporting a Bug -As part of our [Coordinated Vulnerability Disclosure -Policy](https://tendermint.com/security), we operate a [bug -bounty](https://hackerone.com/tendermint). -See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in. +As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security), +we operate a [bug bounty][hackerone]. See the policy for more +details on submissions and rewards, and see "Example Vulnerabilities" (below) +for examples of the kinds of bugs we're most interested in. -### Guidelines +### Guidelines We require that all researchers: -* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups -* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data -* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed +* Use the bug bounty to disclose all vulnerabilities, and avoid posting + vulnerability information in public places, including Github Issues, Discord + channels, and Telegram groups +* Make every effort to avoid privacy violations, degradation of user experience, + disruption to production systems (including but not limited to the Cosmos + Hub), and destruction of data +* Keep any information about 
vulnerabilities that you’ve discovered confidential + between yourself and the Tendermint Core engineering team until the issue has + been resolved and disclosed * Avoid posting personally identifiable information, privately or publicly If you follow these guidelines when reporting an issue to us, we commit to: -* Not pursue or support any legal action related to your research on this vulnerability -* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion +* Not pursue or support any legal action related to your research on this + vulnerability +* Work with you to understand, resolve and ultimately disclose the issue in a + timely fashion -## Disclosure Process +## Disclosure Process Tendermint Core uses the following disclosure process: -1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS. -2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub. -3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible. -4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority. -5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc). -6. 24 hours following this notification, the fixes are applied publicly and new releases are issued. -7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases. -8. 
Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself. -9. Once the community is notified, we will pay out any relevant bug bounties to submitters. -10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it. - -This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible. - -### Example Timeline - -The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles. - -#### > 24 Hours Before Release Time - -1. Request CVE number (ADMIN) -2. Gather emails and other contact info for validators (COMMS LEAD) -3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG) -4. Write “Security Advisory” for forum (TENDERMINT LEAD) +1. Once a security report is received, the Tendermint Core team works to verify + the issue and confirm its severity level using CVSS. +2. The Tendermint Core team collaborates with the Gaia team to determine the + vulnerability’s potential impact on the Cosmos Hub. +3. Patches are prepared for eligible releases of Tendermint in private + repositories. See “Supported Releases” below for more information on which + releases are considered eligible. +4. 
If it is determined that a CVE-ID is required, we request a CVE through a CVE + Numbering Authority. +5. We notify the community that a security release is coming, to give users time + to prepare their systems for the update. Notifications can include forum + posts, tweets, and emails to partners and validators, including emails sent + to the [Tendermint Security Mailing List][tmsec-mailing]. +6. 24 hours following this notification, the fixes are applied publicly and new + releases are issued. +7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these + releases, and then themselves issue new releases. +8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we + notify the community, again, through the same channels as above. We also + publish a Security Advisory on Github and publish the CVE, as long as neither + the Security Advisory nor the CVE include any information on how to exploit + these vulnerabilities beyond what information is already available in the + patch itself. +9. Once the community is notified, we will pay out any relevant bug bounties to + submitters. +10. One week after the releases go out, we will publish a post with further + details on the vulnerability as well as our response to it. + +This process can take some time. Every effort will be made to handle the bug in +as timely a manner as possible, however it's important that we follow the +process described above to ensure that disclosures are handled consistently and +to keep Tendermint Core and its downstream dependent projects--including but not +limited to Gaia and the Cosmos Hub--as secure as possible. + +### Example Timeline + +The following is an example timeline for the triage and response. The required +roles and team members are described in parentheses after each task; however, +multiple people can play each role and each person may play multiple roles. + +#### 24+ Hours Before Release Time + +1. Request CVE number (ADMIN) +2. 
Gather emails and other contact info for validators (COMMS LEAD) +3. Create patches in a private security repo, and ensure that PRs are open + targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD) +4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG) +5. Write “Security Advisory” for forum (TENDERMINT LEAD) #### 24 Hours Before Release Time -1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD) -2. Post Tweet linking to forum post (COMMS LEAD) -3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD) -4. Send emails to validators or other users (PARTNERSHIPS LEAD) +1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD) +2. Post Tweet linking to forum post (COMMS LEAD) +3. Announce security advisory/link to post in various other social channels + (Telegram, Discord) (COMMS LEAD) +4. Send emails to validators or other users (PARTNERSHIPS LEAD) #### Release Time -1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD) +1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT + LEAD) 2. Cut Cosmos SDK release for eligible versions (COSMOS ENG) 3. Cut Gaia release for eligible versions (GAIA ENG) 4. Post “Security releases” on forum (TENDERMINT LEAD) 5. Post new Tweet linking to forum post (COMMS LEAD) -6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD) -7. Send emails to validators or other users (COMMS LEAD) -8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN) +6. Remind everyone via social channels (Telegram, Discord) that the release is + out (COMMS LEAD) +7. Send emails to validators or other users (COMMS LEAD) +8. Publish Security Advisory and CVE, if CVE has no sensitive information + (ADMIN) #### After Release Time 1. Write forum post with exploit details (TENDERMINT LEAD) -2. Approve pay-out on HackerOne for submitter (ADMIN) +2. 
Approve pay-out on HackerOne for submitter (ADMIN) #### 7 Days After Release Time -1. Publish CVE if it has not yet been published (ADMIN) +1. Publish CVE if it has not yet been published (ADMIN) 2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD) ## Supported Releases -The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running. +The Tendermint Core team commits to releasing security patch releases for both +the latest minor release as well for the major/minor release that the Cosmos Hub +is running. -If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports. +If you are running older versions of Tendermint Core, we encourage you to +upgrade at your earliest opportunity so that you can receive security patches +directly from the Tendermint repo. While you are welcome to backport security +patches to older versions for your own use, we will not publish or promote these +backports. ## Scope -The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: +The full scope of our bug bounty program is outlined on our +[Hacker One program page][hackerone]. 
Please also note that, in the interest of +the safety of our users and staff, a few things are explicitly excluded from +scope: -* Any third-party services -* Findings from physical testing, such as office access +* Any third-party services +* Findings from physical testing, such as office access * Findings derived from social engineering (e.g., phishing) -## Example Vulnerabilities +## Example Vulnerabilities -The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in! +The following is a list of examples of the kinds of vulnerabilities that we’re +most interested in. It is not exhaustive: there are other kinds of issues we may +also be interested in! ### Specification @@ -105,7 +153,8 @@ The following is a list of examples of the kinds of vulnerabilities that we’re Assuming less than 1/3 of the voting power is Byzantine (malicious): -* Validation of blockchain data structures, including blocks, block parts, votes, and so on +* Validation of blockchain data structures, including blocks, block parts, + votes, and so on * Execution of blocks * Validator set changes * Proposer round robin @@ -114,6 +163,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious): * A node halting (liveness failure) * Syncing new and old nodes +Assuming more than 1/3 the voting power is Byzantine: + +* Attacks that go unpunished (unhandled evidence) ### Networking @@ -139,7 +191,7 @@ Attacks may come through the P2P network or the RPC layer: ### Libraries -* Serialization (Amino) +* Serialization * Reading/Writing files and databases ### Cryptography @@ -150,5 +202,8 @@ Attacks may come through the P2P network or the RPC layer: ### Light Client -* Core verification +* Core verification * Bisection/sequential algorithms + +[hackerone]: https://hackerone.com/cosmos +[tmsec-mailing]: 
https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md index 98e81d7235..0ed354f6b8 100644 --- a/STYLE_GUIDE.md +++ b/STYLE_GUIDE.md @@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig * Make use of table driven testing where possible and not-cumbersome * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require) -* When using mocks, it is recommended to use Testify [mock] ( +* When using mocks, it is recommended to use Testify [mock]( ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration ## Errors diff --git a/UPGRADING.md b/UPGRADING.md index da2b4661bb..056a94b40c 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -1,6 +1,22 @@ # Upgrading Tendermint Core -This guide provides instructions for upgrading to specific versions of Tendermint Core. +This guide provides instructions for upgrading to specific versions of +Tendermint Core. + +## v0.34.20 + +### Feature: Priority Mempool + +This release backports an implementation of the Priority Mempool from the v0.35 +branch. This implementation of the mempool permits the application to set a +priority on each transaction during CheckTx, and during block selection the +highest-priority transactions are chosen (subject to the constraints on size +and gas cost). + +Operators can enable the priority mempool by setting `mempool.version` to +`"v1"` in the `config.toml`. For more technical details about the priority +mempool, see [ADR 067: Mempool +Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md). 
## v0.34.20 @@ -23,7 +39,7 @@ Refactor](https://github.com/tendermint/tendermint/blob/master/docs/architecture This release is not compatible with previous blockchains due to changes to the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol"). -Note also that Tendermint 0.34 also requires Go 1.15 or higher. +Note also that Tendermint 0.34 also requires Go 1.18 or higher. ### ABCI Changes @@ -33,7 +49,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher. were added to support the new State Sync feature. Previously, syncing a new node to a preexisting network could take days; but with State Sync, new nodes are able to join a network in a matter of seconds. - Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync) + Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync) if you want to learn more about State Sync, or if you'd like your application to use it. (If you don't want to support State Sync in your application, you can just implement these new ABCI methods as no-ops, leaving them empty.) @@ -49,7 +65,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher. Applications should be able to handle these evidence types (i.e., through slashing or other accountability measures). -* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15) +* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/crypto/keys.proto#L13-L15) (used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type. Note that since Tendermint only supports ed25519 validator keys, there's only one option in the `oneof`. For more, see "Protocol Buffers," below. @@ -64,12 +80,9 @@ directory. For more, see "Protobuf," below. 
### Blockchain Protocol -* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses. - As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from: - * `BeginBlock#Events` - * Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data, - GasWanted, GasUsed, Events)` responses - * `BeginBlock#Events` +* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from + `ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed` + fields. * Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input, to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962). @@ -159,7 +172,7 @@ The `bech32` package has moved to the Cosmos SDK: ### CLI The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API. -See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details. +See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details. ### Light Client @@ -173,6 +186,7 @@ Other user-relevant changes include: * The `Verifier` was broken up into two pieces: * Core verification logic (pure `VerifyX` functions) * `Client` object, which represents the complete light client +* The new light client stores headers and validator sets as `LightBlock`s * The RPC client can be found in the `/rpc` directory. * The HTTP(S) proxy is located in the `/proxy` directory. @@ -314,7 +328,7 @@ Evidence Params has been changed to include duration. 
### RPC Changes * `/validators` is now paginated (default: 30 vals per page) -* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results) +* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results) * Event suffix has been removed from the ID in event responses * IDs are now integers not `json-client-XYZ` @@ -433,11 +447,11 @@ the compilation tag: Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or use `make build_c` / `make install_c` (full instructions can be found at -) +) ## v0.31.0 -This release contains a breaking change to the behaviour of the pubsub system. +This release contains a breaking change to the behavior of the pubsub system. It also contains some minor breaking changes in the Go API and ABCI. There are no changes to the block or p2p protocols, so v0.31.0 should work fine with blockchains created from the v0.30 series. @@ -455,7 +469,7 @@ In this case, the WS client will receive an error with description: "error": { "code": -32000, "msg": "Server error", - "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)" + "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" } } @@ -508,14 +522,14 @@ due to changes in how various data structures are hashed. Any implementations of Tendermint blockchain verification, including lite clients, will need to be updated. 
For specific details: -* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees) -* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams) +* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md#merkle-trees) +* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md#consensusparams) There was also a small change to field ordering in the vote struct. Any implementations of an out-of-process validator (like a Key-Management Server) will need to be updated. For specific details: -* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes) +* [Vote](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md#votes) Finally, the proposer selection algorithm continues to evolve. See the [work-in-progress @@ -636,7 +650,7 @@ to `timeout_propose = "3s"`. ### RPC Changes -The default behaviour of `/abci_query` has been changed to not return a proof, +The default behavior of `/abci_query` has been changed to not return a proof, and the name of the parameter that controls this has been changed from `trusted` to `prove`. To get proofs with your queries, ensure you set `prove=true`. 
diff --git a/abci/README.md b/abci/README.md index 6399f59017..71cc787507 100644 --- a/abci/README.md +++ b/abci/README.md @@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g A detailed description of the ABCI methods and message types is contained in: -- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md) +- [The main spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md) - [A protobuf file](./types/types.proto) - [A Go interface](./types/application.go) diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 9dcb9a3cf3..f463579a51 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -781,13 +781,13 @@ func (_m *Client) String() string { return r0 } -type NewClientT interface { +type mockConstructorTestingTNewClient interface { mock.TestingT Cleanup(func()) } // NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t NewClientT) *Client { +func NewClient(t mockConstructorTestingTNewClient) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index dafa882378..3f912bbee7 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -2,7 +2,7 @@ package kvstore import ( "fmt" - "io/ioutil" + "os" "sort" "testing" @@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) { } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) { } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -114,12 +114,11 @@ func TestPersistentKVStoreInfo(t *testing.T) { if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) } - } // add a validator, remove a validator, update a validator func TestValUpdates(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } @@ -162,7 +161,7 @@ func TestValUpdates(t *testing.T) { makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3) - vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic + vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic vals2 = kvstore.Validators() valsEqual(t, vals1, vals2) @@ -181,7 +180,6 @@ func TestValUpdates(t *testing.T) { vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...) 
vals2 = kvstore.Validators() valsEqual(t, vals1, vals2) - } func makeApplyBlock( @@ -189,7 +187,8 @@ func makeApplyBlock( kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, - txs ...[]byte) { + txs ...[]byte, +) { // make and apply block height := int64(heightInt) hash := []byte("foo") @@ -207,7 +206,6 @@ func makeApplyBlock( kvstore.Commit() valsEqual(t, diff, resEndBlock.ValidatorUpdates) - } // order doesn't matter diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad020..4b70545b21 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -2,9 +2,8 @@ Package server is used to start a new ABCI server. It contains two server implementation: - * gRPC server - * socket server - + - gRPC server + - socket server */ package server diff --git a/behaviour/doc.go b/behaviour/doc.go index 40061e0958..7b00ae1eb3 100644 --- a/behaviour/doc.go +++ b/behaviour/doc.go @@ -8,35 +8,34 @@ There are four different behaviours a reactor can report. 1. bad message -type badMessage struct { - explanation string -} + type badMessage struct { + explanation string + } -This message will request the peer be stopped for an error +# This message will request the peer be stopped for an error 2. message out of order -type messageOutOfOrder struct { - explanation string -} + type messageOutOfOrder struct { + explanation string + } -This message will request the peer be stopped for an error +# This message will request the peer be stopped for an error 3. consesnsus Vote -type consensusVote struct { - explanation string -} + type consensusVote struct { + explanation string + } -This message will request the peer be marked as good +# This message will request the peer be marked as good 4. 
block part -type blockPart struct { - explanation string -} + type blockPart struct { + explanation string + } This message will request the peer be marked as good - */ package behaviour diff --git a/blockchain/msgs.go b/blockchain/msgs.go index 35868830b5..9b45e859c5 100644 --- a/blockchain/msgs.go +++ b/blockchain/msgs.go @@ -6,6 +6,7 @@ import ( "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/types" ) @@ -19,58 +20,6 @@ const ( BlockResponseMessageFieldKeySize ) -// EncodeMsg encodes a Protobuf message -func EncodeMsg(pb proto.Message) ([]byte, error) { - msg := bcproto.Message{} - - switch pb := pb.(type) { - case *bcproto.BlockRequest: - msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb} - case *bcproto.BlockResponse: - msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb} - case *bcproto.NoBlockResponse: - msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb} - case *bcproto.StatusRequest: - msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb} - case *bcproto.StatusResponse: - msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb} - default: - return nil, fmt.Errorf("unknown message type %T", pb) - } - - bz, err := proto.Marshal(&msg) - if err != nil { - return nil, fmt.Errorf("unable to marshal %T: %w", pb, err) - } - - return bz, nil -} - -// DecodeMsg decodes a Protobuf message. 
-func DecodeMsg(bz []byte) (proto.Message, error) { - pb := &bcproto.Message{} - - err := proto.Unmarshal(bz, pb) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *bcproto.Message_BlockRequest: - return msg.BlockRequest, nil - case *bcproto.Message_BlockResponse: - return msg.BlockResponse, nil - case *bcproto.Message_NoBlockResponse: - return msg.NoBlockResponse, nil - case *bcproto.Message_StatusRequest: - return msg.StatusRequest, nil - case *bcproto.Message_StatusResponse: - return msg.StatusResponse, nil - default: - return nil, fmt.Errorf("unknown message type %T", msg) - } -} - // ValidateMsg validates a message. func ValidateMsg(pb proto.Message) error { if pb == nil { @@ -108,3 +57,31 @@ func ValidateMsg(pb proto.Message) error { } return nil } + +// EncodeMsg encodes a Protobuf message +// +// Deprecated: Will be removed in v0.37. +func EncodeMsg(pb proto.Message) ([]byte, error) { + if um, ok := pb.(p2p.Wrapper); ok { + pb = um.Wrap() + } + bz, err := proto.Marshal(pb) + if err != nil { + return nil, fmt.Errorf("unable to marshal %T: %w", pb, err) + } + + return bz, nil +} + +// DecodeMsg decodes a Protobuf message. +// +// Deprecated: Will be removed in v0.37. 
+func DecodeMsg(bz []byte) (proto.Message, error) { + pb := &bcproto.Message{} + + err := proto.Unmarshal(bz, pb) + if err != nil { + return nil, err + } + return pb.Unwrap() +} diff --git a/blockchain/msgs_test.go b/blockchain/msgs_test.go index 1e90a58457..bba8fed73b 100644 --- a/blockchain/msgs_test.go +++ b/blockchain/msgs_test.go @@ -79,7 +79,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) { } } -// nolint:lll // ignore line length in tests +//nolint:lll // ignore line length in tests func TestBlockchainMessageVectors(t *testing.T) { block := types.MakeBlock(int64(3), factory.MakeData([]types.Tx{types.Tx("Hello World")}, nil), nil, nil) block.Version.Block = 11 // overwrite updated protocol version diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index 69e0b55c4a..e3923b0d9b 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -32,6 +32,7 @@ const ( maxTotalRequesters = 600 maxPendingRequests = maxTotalRequesters maxPendingRequestsPerPeer = 20 + requestRetrySeconds = 30 // Minimum recv rate to ensure we're receiving blocks from a peer fast // enough. If a peer is not sending us data at at least that rate, we @@ -410,6 +411,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) { } // for debugging purposes +// //nolint:unused func (pool *BlockPool) debug() string { pool.mtx.Lock() @@ -601,7 +603,7 @@ OUTER_LOOP: } peer = bpr.pool.pickIncrAvailablePeer(bpr.height) if peer == nil { - // log.Info("No peers available", "height", height) + bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height) time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_PEER_LOOP } @@ -611,6 +613,7 @@ OUTER_LOOP: bpr.peerID = peer.id bpr.mtx.Unlock() + to := time.NewTimer(requestRetrySeconds * time.Second) // Send request and wait. 
bpr.pool.sendRequest(bpr.height, peer.id) WAIT_LOOP: @@ -623,6 +626,11 @@ OUTER_LOOP: return case <-bpr.Quit(): return + case <-to.C: + bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID) + // Simulate a redo + bpr.reset() + continue OUTER_LOOP case peerID := <-bpr.redoCh: if peerID == bpr.peerID { bpr.reset() diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index c14d2d34a6..0ac278041d 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -5,6 +5,8 @@ import ( "reflect" "time" + "github.com/gogo/protobuf/proto" + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -144,21 +146,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 1000, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: bc.MaxMsgSize, + MessageType: &bcproto.Message{}, }, } } // AddPeer implements Reactor by sending our state to peer. func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ - Base: bcR.store.Base(), - Height: bcR.store.Height()}) - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return - } - - peer.Send(BlockchainChannel, msgBytes) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.StatusResponse{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }, + }, bcR.Logger) // it's OK if send fails. 
will try later in poolRoutine // peer is added to the pool once we receive the first @@ -182,75 +183,73 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest, bcR.Logger.Error("could not convert msg to protobuf", "err", err) return false } - - msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl}) - if err != nil { - bcR.Logger.Error("could not marshal msg", "err", err) - return false - } - - return src.TrySend(BlockchainChannel, msgBytes) - } - - bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) - - msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height}) - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return false + return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockResponse{Block: bl}, + }, bcR.Logger) } - return src.TrySend(BlockchainChannel, msgBytes) + return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, + }, bcR.Logger) } -// Receive implements Reactor by handling 4 types of messages (look below). 
-func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := bc.DecodeMsg(msgBytes) - if err != nil { - bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - bcR.Switch.StopPeerForError(src, err) +func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) { + if err := bc.ValidateMsg(e.Message); err != nil { + bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + bcR.Switch.StopPeerForError(e.Src, err) return } - if err = bc.ValidateMsg(msg); err != nil { - bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - bcR.Switch.StopPeerForError(src, err) - return - } + bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message) - bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) - - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *bcproto.BlockRequest: - bcR.respondToPeer(msg, src) + bcR.respondToPeer(msg, e.Src) case *bcproto.BlockResponse: bi, err := types.BlockFromProto(msg.Block) if err != nil { bcR.Logger.Error("Block content is invalid", "err", err) return } - bcR.pool.AddBlock(src.ID(), bi, len(msgBytes)) + bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size()) case *bcproto.StatusRequest: // Send peer our state. - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ - Height: bcR.store.Height(), - Base: bcR.store.Base(), - }) - if err != nil { - bcR.Logger.Error("could not convert msg to protobut", "err", err) - return - } - src.TrySend(BlockchainChannel, msgBytes) + p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.StatusResponse{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }, + }, bcR.Logger) case *bcproto.StatusResponse: // Got a peer status. Unverified. 
- bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) + bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height) case *bcproto.NoBlockResponse: - bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) + bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } } +func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &bcproto.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + uw, err := msg.Unwrap() + if err != nil { + panic(err) + } + bcR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: uw, + }) +} + // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { @@ -286,13 +285,10 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { if peer == nil { continue } - msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height}) - if err != nil { - bcR.Logger.Error("could not convert msg to proto", "err", err) - continue - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) + queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockRequest{Height: request.Height}, + }, bcR.Logger) if !queued { bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height) } @@ -304,7 +300,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { case <-statusUpdateTicker.C: // ask for status updates - go bcR.BroadcastStatusRequest() // nolint: errcheck + go bcR.BroadcastStatusRequest() //nolint: errcheck } } @@ -425,13 +421,9 @@ FOR_LOOP: // BroadcastStatusRequest broadcasts `BlockStore` base and height. 
func (bcR *BlockchainReactor) BroadcastStatusRequest() error { - bm, err := bc.EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - bcR.Logger.Error("could not convert msg to proto", "err", err) - return fmt.Errorf("could not convert msg to proto: %w", err) - } - - bcR.Switch.Broadcast(BlockchainChannel, bm) - + bcR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: BlockchainChannel, + Message: &bcproto.StatusRequest{}, + }) return nil } diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index 4d23c9f2ce..b74cdf1a10 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,6 +18,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mempool/mock" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -71,7 +73,9 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := store.NewBlockStore(blockDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) @@ -84,7 +88,9 @@ func newBlockchainReactor( // pool.height is determined from the store. 
fastSync := true db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { @@ -189,6 +195,25 @@ func TestNoBlockResponse(t *testing.T) { } } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config = cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(1, false, 30) + reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor + peer := p2p.CreateRandomPeer(false) + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &bcproto.StatusRequest{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(BlockchainChannel, peer, msg) + }) +} + // NOTE: This is too hard to test without // an easy way to add test peer to switch // or without significant refactoring of the module. diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index c4c61ec516..f6fb93ab8d 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -2,9 +2,10 @@ package v1 import ( "fmt" - "reflect" "time" + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/behaviour" bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" @@ -172,21 +173,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 2000, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: bc.MaxMsgSize, + MessageType: &bcproto.Message{}, }, } } // AddPeer implements Reactor by sending our state to peer. 
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ - Base: bcR.store.Base(), - Height: bcR.store.Height(), - }) - if err != nil { - bcR.Logger.Error("could not convert msg to protobuf", "err", err) - return - } - peer.Send(BlockchainChannel, msgBytes) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.StatusResponse{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }, + }, bcR.Logger) // it's OK if send fails. will try later in poolRoutine // peer is added to the pool once we receive the first @@ -206,35 +206,28 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest, bcR.Logger.Error("Could not send block message to peer", "err", err) return false } - msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi}) - if err != nil { - bcR.Logger.Error("unable to marshal msg", "err", err) - return false - } - return src.TrySend(BlockchainChannel, msgBytes) + return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockResponse{Block: pbbi}, + }, bcR.Logger) } bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height) - msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height}) - if err != nil { - bcR.Logger.Error("unable to marshal msg", "err", err) - return false - } - return src.TrySend(BlockchainChannel, msgBytes) + return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.NoBlockResponse{Height: msg.Height}, + }, bcR.Logger) } func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ - Base: bcR.store.Base(), - Height: bcR.store.Height(), - }) - if err != nil { - bcR.Logger.Error("unable to marshal msg", "err", err) - 
return false - } - - return src.TrySend(BlockchainChannel, msgBytes) + return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.StatusResponse{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }, + }, bcR.Logger) } // RemovePeer implements Reactor by removing peer from the pool. @@ -250,34 +243,27 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { } // Receive implements Reactor by handling 4 types of messages (look below). -func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := bc.DecodeMsg(msgBytes) - if err != nil { - bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err) - _ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error())) - return - } - - if err = bc.ValidateMsg(msg); err != nil { - bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - _ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error())) +func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) { + if err := bc.ValidateMsg(e.Message); err != nil { + bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + _ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error())) return } - bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) + bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message) - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *bcproto.BlockRequest: - if queued := bcR.sendBlockToPeer(msg, src); !queued { + if queued := bcR.sendBlockToPeer(msg, e.Src); !queued { // Unfortunately not queued since the queue is full. - bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height) + bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height) } case *bcproto.StatusRequest: // Send peer our state. 
- if queued := bcR.sendStatusResponseToPeer(msg, src); !queued { + if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued { // Unfortunately not queued since the queue is full. - bcR.Logger.Error("Could not send status message to peer", "src", src) + bcR.Logger.Error("Could not send status message to peer", "src", e.Src) } case *bcproto.BlockResponse: @@ -289,23 +275,23 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) msgForFSM := bcReactorMessage{ event: blockResponseEv, data: bReactorEventData{ - peerID: src.ID(), + peerID: e.Src.ID(), height: bi.Height, block: bi, - length: len(msgBytes), + length: msg.Size(), }, } - bcR.Logger.Info("Received", "src", src, "height", bi.Height) + bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height) bcR.messagesForFSMCh <- msgForFSM case *bcproto.NoBlockResponse: msgForFSM := bcReactorMessage{ event: noBlockResponseEv, data: bReactorEventData{ - peerID: src.ID(), + peerID: e.Src.ID(), height: msg.Height, }, } - bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) + bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height) bcR.messagesForFSMCh <- msgForFSM case *bcproto.StatusResponse: @@ -313,16 +299,33 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) msgForFSM := bcReactorMessage{ event: statusResponseEv, data: bReactorEventData{ - peerID: src.ID(), + peerID: e.Src.ID(), height: msg.Height, - length: len(msgBytes), + length: msg.Size(), }, } bcR.messagesForFSMCh <- msgForFSM default: - bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg))) + bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg)) + } +} + +func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &bcproto.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + uw, err := msg.Unwrap() + if err != nil { + panic(err) } 
+ bcR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: uw, + }) } // processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel @@ -492,11 +495,10 @@ func (bcR *BlockchainReactor) processBlock() error { // Implements bcRNotifier // sendStatusRequest broadcasts `BlockStore` height. func (bcR *BlockchainReactor) sendStatusRequest() { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - panic(err) - } - bcR.Switch.Broadcast(BlockchainChannel, msgBytes) + bcR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: BlockchainChannel, + Message: &bcproto.StatusRequest{}, + }) } // Implements bcRNotifier @@ -507,11 +509,10 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro return errNilPeerForBlockRequest } - msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) - if err != nil { - return err - } - queued := peer.TrySend(BlockchainChannel, msgBytes) + queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockRequest{Height: height}, + }, bcR.Logger) if !queued { return errSendQueueFull } @@ -534,8 +535,8 @@ func (bcR *BlockchainReactor) switchToConsensus() { // Called by FSM and pool: // - pool calls when it detects slow peer or when peer times out // - FSM calls when: -// - adding a block (addBlock) fails -// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks +// - adding a block (addBlock) fails +// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) { bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err) msgData := bcFsmMessage{ diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 7761d92f24..89e9ae9e73 100644 --- a/blockchain/v1/reactor_test.go +++ 
b/blockchain/v1/reactor_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -18,6 +19,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mempool/mock" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -103,7 +105,9 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := store.NewBlockStore(blockDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) @@ -116,7 +120,9 @@ func newBlockchainReactor( // pool.height is determined from the store. fastSync := true db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { @@ -346,6 +352,25 @@ outerFor: assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1) } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config = cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(1, false, 30) + reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10) + peer := p2p.CreateRandomPeer(false) + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &bcproto.StatusRequest{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(BlockchainChannel, peer, msg) + }) +} + 
//---------------------------------------------- // utility funcs diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index 4951573ce4..af9f037fe6 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -3,7 +3,6 @@ package v2 import ( "fmt" - bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/state" @@ -16,7 +15,7 @@ type iIO interface { sendBlockNotFound(height int64, peerID p2p.ID) error sendStatusResponse(base, height int64, peerID p2p.ID) error - broadcastStatusRequest() error + broadcastStatusRequest() trySwitchToConsensus(state state.State, skipWAL bool) bool } @@ -47,13 +46,10 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { if peer == nil { return fmt.Errorf("peer not found") } - msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { + if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockRequest{Height: height}, + }, sio.sw.Logger); !queued { return fmt.Errorf("send queue full") } return nil @@ -65,12 +61,10 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) return fmt.Errorf("peer not found") } - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base}) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { + if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.StatusRequest{}, + }, sio.sw.Logger); !queued { return fmt.Errorf("peer queue full") } @@ -91,11 +85,10 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error { return err } - msgBytes, err := 
bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb}) - if err != nil { - return err - } - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { + if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.BlockResponse{Block: bpb}, + }, sio.sw.Logger); !queued { return fmt.Errorf("peer queue full") } @@ -107,12 +100,10 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error { if peer == nil { return fmt.Errorf("peer not found") } - msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height}) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { + if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: BlockchainChannel, + Message: &bcproto.NoBlockResponse{Height: height}, + }, sio.sw.Logger); !queued { return fmt.Errorf("peer queue full") } @@ -127,14 +118,10 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool return ok } -func (sio *switchIO) broadcastStatusRequest() error { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - return err - } - +func (sio *switchIO) broadcastStatusRequest() { // XXX: maybe we should use an io specific peer list here - sio.sw.Broadcast(BlockchainChannel, msgBytes) - - return nil + sio.sw.BroadcastEnvelope(p2p.Envelope{ + ChannelID: BlockchainChannel, + Message: &bcproto.StatusRequest{}, + }) } diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index 9dea749d70..f428974858 100644 --- a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/behaviour" bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" @@ -215,7 +217,7 @@ type bcBlockResponse struct { priorityNormal time time.Time peerID p2p.ID - size int64 + size int 
block *types.Block } @@ -349,9 +351,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) { case <-doProcessBlockCh: r.processor.send(rProcessBlock{}) case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } + r.io.broadcastStatusRequest() // Events from peers. Closing the channel signals event loop termination. case event, ok := <-events: @@ -455,39 +455,31 @@ func (r *BlockchainReactor) Stop() error { } // Receive implements Reactor by handling different message types. -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := bc.DecodeMsg(msgBytes) - if err != nil { - r.logger.Error("error decoding message", - "src", src.ID(), "chId", chID, "msg", msg, "err", err) - _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) - return - } - - if err = bc.ValidateMsg(msg); err != nil { - r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) +func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) { + if err := bc.ValidateMsg(e.Message); err != nil { + r.logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + _ = r.reporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error())) return } - r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg) + r.logger.Debug("Receive", "src", e.Src.ID(), "chID", e.ChannelID, "msg", e.Message) - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *bcproto.StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil { - r.logger.Error("Could not send status message to peer", "src", src) + if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), e.Src.ID()); err != nil { + r.logger.Error("Could not send status message to peer", "src", e.Src) } case *bcproto.BlockRequest: block := 
r.store.LoadBlock(msg.Height) if block != nil { - if err = r.io.sendBlockToPeer(block, src.ID()); err != nil { + if err := r.io.sendBlockToPeer(block, e.Src.ID()); err != nil { r.logger.Error("Could not send block message to peer: ", err) } } else { - r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height) - peerID := src.ID() - if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil { + r.logger.Info("peer asking for a block we don't have", "src", e.Src, "height", msg.Height) + peerID := e.Src.ID() + if err := r.io.sendBlockNotFound(msg.Height, peerID); err != nil { r.logger.Error("Couldn't send block not found: ", err) } } @@ -495,7 +487,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *bcproto.StatusResponse: r.mtx.RLock() if r.events != nil { - r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} + r.events <- bcStatusResponse{peerID: e.Src.ID(), base: msg.Base, height: msg.Height} } r.mtx.RUnlock() @@ -508,10 +500,10 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { r.mtx.RLock() if r.events != nil { r.events <- bcBlockResponse{ - peerID: src.ID(), + peerID: e.Src.ID(), block: bi, - size: int64(len(msgBytes)), time: time.Now(), + size: msg.Size(), } } r.mtx.RUnlock() @@ -519,12 +511,29 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *bcproto.NoBlockResponse: r.mtx.RLock() if r.events != nil { - r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()} + r.events <- bcNoBlockResponse{peerID: e.Src.ID(), height: msg.Height, time: time.Now()} } r.mtx.RUnlock() } } +func (r *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &bcproto.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + uw, err := msg.Unwrap() + if err != nil { + panic(err) + } + r.ReceiveEnvelope(p2p.Envelope{ + ChannelID: 
chID, + Src: peer, + Message: uw, + }) +} + // AddPeer implements Reactor interface func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID()) @@ -559,6 +568,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 2000, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: bc.MaxMsgSize, + MessageType: &bcproto.Message{}, }, } } diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 660931e0ea..6ddf15936f 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -9,13 +9,13 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/behaviour" - bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -54,34 +54,19 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo { func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } +func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool { return true } +func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true } + func (mp mockPeer) Send(byte, []byte) bool { return true } func (mp mockPeer) TrySend(byte, []byte) bool { return true } func (mp mockPeer) Set(string, interface{}) {} func (mp mockPeer) Get(string) interface{} { return struct{}{} } -// nolint:unused // ignore -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -// nolint:unused // ignore -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -// nolint:unused // ignore -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - 
return ml.blocks[height] -} - -// nolint:unused // ignore -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} +func (mp mockPeer) SetRemovalFailed() {} +func (mp mockPeer) GetRemovalFailed() bool { return false } -type mockBlockApplier struct { -} +type mockBlockApplier struct{} // XXX: Add whitelist/blacklist? func (mba *mockBlockApplier) ApplyBlock( @@ -131,8 +116,7 @@ func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool return true } -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil +func (sio *mockSwitchIo) broadcastStatusRequest() { } type testReactorParams struct { @@ -160,7 +144,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { panic(fmt.Errorf("error start app: %w", err)) } db := dbm.NewMemDB() - stateStore := sm.NewStore(db) + stateStore := sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { panic(err) @@ -351,9 +337,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { // } func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) + channelID := byte(0x40) config := cfg.ResetTestRoot("blockchain_reactor_v2_test") defer os.RemoveAll(config.RootDir) @@ -369,7 +353,7 @@ func TestReactorHelperMode(t *testing.T) { type testEvent struct { peer string - event interface{} + event proto.Message } tests := []struct { @@ -381,10 +365,10 @@ func TestReactorHelperMode(t *testing.T) { name: "status request", params: params, msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, + {"P1", &bcproto.StatusRequest{}}, + {"P1", &bcproto.BlockRequest{Height: 13}}, + {"P1", 
&bcproto.BlockRequest{Height: 20}}, + {"P1", &bcproto.BlockRequest{Height: 22}}, }, }, } @@ -401,25 +385,27 @@ func TestReactorHelperMode(t *testing.T) { for i := 0; i < len(tt.msgs); i++ { step := tt.msgs[i] switch ev := step.event.(type) { - case bcproto.StatusRequest: + case *bcproto.StatusRequest: old := mockSwitch.numStatusResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: channelID, + Src: mockPeer{id: p2p.ID(step.peer)}, + Message: ev}) assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: + case *bcproto.BlockRequest: if ev.Height > params.startHeight { old := mockSwitch.numNoBlockResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: channelID, + Src: mockPeer{id: p2p.ID(step.peer)}, + Message: ev}) assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) } else { old := mockSwitch.numBlockResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: channelID, + Src: mockPeer{id: p2p.ID(step.peer)}, + Message: ev}) assert.Equal(t, old+1, mockSwitch.numBlockResponse) } } @@ -430,6 +416,34 @@ func TestReactorHelperMode(t *testing.T) { } } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config := cfg.ResetTestRoot("blockchain_reactor_v2_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) + params := testReactorParams{ + logger: log.TestingLogger(), + genDoc: genDoc, + privVals: privVals, + startHeight: 20, + mockA: true, + } + reactor := newTestReactor(params) + mockSwitch := &mockSwitchIo{switchedToConsensus: false} + reactor.io = mockSwitch + peer := 
p2p.CreateRandomPeer(false) + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &bcproto.StatusRequest{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(BlockchainChannel, peer, msg) + }) +} + func TestReactorSetSwitchNil(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_v2_test") defer os.RemoveAll(config.RootDir) @@ -472,7 +486,8 @@ type testApp struct { } func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) ( - *types.GenesisDoc, []types.PrivValidator) { + *types.GenesisDoc, []types.PrivValidator, +) { validators := make([]types.GenesisValidator, numValidators) privValidators := make([]types.PrivValidator, numValidators) for i := 0; i < numValidators; i++ { @@ -497,7 +512,8 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower func newReactorStore( genDoc *types.GenesisDoc, privVals []types.PrivValidator, - maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) { + maxBlockHeight int64, +) (*store.BlockStore, sm.State, *sm.BlockExecutor) { if len(privVals) != 1 { panic("only support one validator") } @@ -511,14 +527,19 @@ func newReactorStore( stateDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } db := dbm.NewMemDB() - stateStore = sm.NewStore(db) + stateStore = sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }, + ) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index dad0e737fa..119ca22e2d 
100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -52,11 +52,6 @@ func (rt *Routine) setLogger(logger log.Logger) { rt.logger = logger } -// nolint:unused -func (rt *Routine) setMetrics(metrics *Metrics) { - rt.metrics = metrics -} - func (rt *Routine) start() { rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name)) running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go index 75fe9d46dc..35166764fe 100644 --- a/blockchain/v2/scheduler.go +++ b/blockchain/v2/scheduler.go @@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) { } // CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { +func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now time.Time) error { peer := sc.peers[peerID] if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { @@ -379,7 +379,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now t height, pendingTime, now) } - peer.lastRate = size / now.Sub(pendingTime).Nanoseconds() + peer.lastRate = int64(size) / now.Sub(pendingTime).Nanoseconds() sc.setStateAtHeight(height, blockStateReceived) delete(sc.pendingBlocks, height) @@ -532,7 +532,7 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { return noOp, nil } - err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) + err = sc.markReceived(event.peerID, event.block.Height, event.block.Size(), event.time) if err != nil { sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, reason: err}, nil diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go index 762ffd2c53..bed01bafc6 100644 --- a/blockchain/v2/scheduler_test.go +++ b/blockchain/v2/scheduler_test.go 
@@ -853,7 +853,7 @@ func TestScMarkReceived(t *testing.T) { type args struct { peerID p2p.ID height int64 - size int64 + size int tm time.Time } tests := []struct { diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index 678f707918..67025ea5f5 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() - tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp") if err != nil { logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err) return diff --git a/cmd/tendermint/commands/debug/io.go b/cmd/tendermint/commands/debug/io.go index dcfff50c89..e5444a7359 100644 --- a/cmd/tendermint/commands/debug/io.go +++ b/cmd/tendermint/commands/debug/io.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -68,7 +67,6 @@ func zipDir(src, dest string) error { _, err = io.Copy(headerWriter, file) return err }) - } // copyFile copies a file from src to dest and returns an error upon failure. 
The @@ -111,5 +109,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) } diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index a2c7a5fe14..f1670b150e 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. - tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp") if err != nil { return fmt.Errorf("failed to create temporary directory: %w", err) } @@ -105,7 +104,7 @@ func killProc(pid uint64, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. 
- cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec + cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec outFile, err := os.Create(filepath.Join(dir, "stacktrace.out")) if err != nil { diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 226bfadc79..f29fd5a81e 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -3,7 +3,7 @@ package debug import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "os" "path" @@ -67,16 +67,17 @@ func copyConfig(home, dir string) error { func dumpProfile(dir, addr, profile string, debug int) error { endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug) - resp, err := http.Get(endpoint) // nolint: gosec + //nolint:gosec,nolintlint + resp, err := http.Get(endpoint) if err != nil { return fmt.Errorf("failed to query for %s profile: %w", profile, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) } diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 32cb709e79..4ccfd91b37 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -32,14 +32,17 @@ var ( // ReIndexEventCmd constructs a command to re-index events in a block height interval. 
var ReIndexEventCmd = &cobra.Command{ Use: "reindex-event", - Short: "reindex events to the event store backends", + Short: "Re-index events to the event store backends", Long: ` -reindex-event is an offline tooling to re-index block and tx events to the eventsinks, -you can run this command when the event store backend dropped/disconnected or you want to +reindex-event is an offline tooling to re-index block and tx events to the eventsinks. +You can run this command when the event store backend dropped/disconnected or you want to replace the backend. The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omit either or both arguments. + +Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you +want to use this command. `, Example: ` tendermint reindex-event diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 716f4167a5..87ff80ddcf 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -16,7 +18,6 @@ import ( "github.com/tendermint/tendermint/state/mocks" txmocks "github.com/tendermint/tendermint/state/txindex/mocks" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( diff --git a/cmd/tendermint/commands/reset.go b/cmd/tendermint/commands/reset.go index dd022060bd..86beb6a56f 100644 --- a/cmd/tendermint/commands/reset.go +++ b/cmd/tendermint/commands/reset.go @@ -29,7 +29,7 @@ var ResetStateCmd = 
&cobra.Command{ Short: "Remove all the data and WAL", PreRun: deprecateSnakeCase, RunE: func(cmd *cobra.Command, args []string) (err error) { - config, err = ParseConfig() + config, err = ParseConfig(cmd) if err != nil { return err } @@ -54,7 +54,7 @@ var ResetPrivValidatorCmd = &cobra.Command{ // XXX: this is totally unsafe. // it's only suitable for testnets. func resetAllCmd(cmd *cobra.Command, args []string) (err error) { - config, err = ParseConfig() + config, err = ParseConfig(cmd) if err != nil { return err } @@ -71,7 +71,7 @@ func resetAllCmd(cmd *cobra.Command, args []string) (err error) { // XXX: this is totally unsafe. // it's only suitable for testnets. func resetPrivValidator(cmd *cobra.Command, args []string) (err error) { - config, err = ParseConfig() + config, err = ParseConfig(cmd) if err != nil { return err } diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index 912e1b3898..7e7190fb55 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -77,7 +77,9 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, if err != nil { return nil, nil, err } - stateStore := state.NewStore(stateDB) + stateStore := state.NewStore(stateDB, state.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }) return blockStore, stateStore, nil } diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 2478f95a58..46c9ac7c7b 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -29,12 +29,25 @@ func registerFlagsRootCmd(cmd *cobra.Command) { // ParseConfig retrieves the default environment configuration, // sets up the Tendermint root and ensures that the root exists -func ParseConfig() (*cfg.Config, error) { +func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) if err != nil { return nil, err } + + var home string + if 
os.Getenv("TMHOME") != "" { + home = os.Getenv("TMHOME") + } else { + home, err = cmd.Flags().GetString(cli.HomeFlag) + if err != nil { + return nil, err + } + } + + conf.RootDir = home + conf.SetRoot(conf.RootDir) cfg.EnsureRoot(conf.RootDir) if err := conf.ValidateBasic(); err != nil { @@ -52,7 +65,7 @@ var RootCmd = &cobra.Command{ return nil } - config, err = ParseConfig() + config, err = ParseConfig(cmd) if err != nil { return err } diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index d1e5964b25..3de7620c21 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -18,9 +17,7 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" ) -var ( - defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") -) +var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") // clearConfig clears env vars, the given root dir, and resets viper. 
func clearConfig(dir string) { @@ -88,7 +85,6 @@ func TestRootHome(t *testing.T) { } func TestRootFlagsEnv(t *testing.T) { - // defaults defaults := cfg.DefaultConfig() defaultLogLvl := defaults.LogLevel @@ -116,7 +112,6 @@ func TestRootFlagsEnv(t *testing.T) { } func TestRootConfig(t *testing.T) { - // write non-default config nonDefaultLogLvl := "abc:debug" cvals := map[string]string{ @@ -140,7 +135,7 @@ func TestRootConfig(t *testing.T) { // XXX: path must match cfg.defaultConfigPath configFilePath := filepath.Join(defaultRoot, "config") - err := tmos.EnsureDir(configFilePath, 0700) + err := tmos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) // write the non-defaults to a different path @@ -168,5 +163,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0o600) } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index fd4408c5ab..7773f8daa6 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -63,6 +63,8 @@ func AddNodeFlags(cmd *cobra.Command) { "p2p.laddr", config.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") + cmd.Flags().String("p2p.external-address", + config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().String("p2p.unconditional_peer_ids", diff --git a/cmd/tendermint/commands/version.go b/cmd/tendermint/commands/version.go index d1a7fba582..d33a7c3a35 100644 --- a/cmd/tendermint/commands/version.go +++ b/cmd/tendermint/commands/version.go @@ -1,6 +1,7 @@ package commands import ( + "encoding/json" "fmt" "github.com/spf13/cobra" @@ -13,6 +14,25 @@ var VersionCmd = &cobra.Command{ Use: "version", Short: "Show version info", Run: func(cmd *cobra.Command, args []string) { - fmt.Println(version.TMCoreSemVer) + if verbose { + values, _ := json.MarshalIndent(struct { + Tendermint string `json:"tendermint"` + ABCI string `json:"abci"` + BlockProtocol uint64 `json:"block_protocol"` + P2PProtocol uint64 `json:"p2p_protocol"` + }{ + Tendermint: version.TMCoreSemVer, + ABCI: version.ABCIVersion, + BlockProtocol: version.BlockProtocol, + P2PProtocol: version.P2PProtocol, + }, "", " ") + fmt.Println(string(values)) + } else { + fmt.Println(version.TMCoreSemVer) + } }, } + +func init() { + VersionCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Show protocol and library versions") +} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 59e7a1b128..04665a88f9 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -18,6 +18,7 @@ func main() { cmd.InitFilesCmd, cmd.ProbeUpnpCmd, cmd.LightCmd, + cmd.ReIndexEventCmd, cmd.ReplayCmd, cmd.ReplayConsoleCmd, cmd.ResetAllCmd, diff --git a/config/config.go b/config/config.go index d052b40ae1..d7250e00e7 100644 --- a/config/config.go +++ b/config/config.go @@ -74,6 +74,7 @@ type Config struct { StateSync 
*StateSyncConfig `mapstructure:"statesync"` FastSync *FastSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` + Storage *StorageConfig `mapstructure:"storage"` TxIndex *TxIndexConfig `mapstructure:"tx_index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` } @@ -88,6 +89,7 @@ func DefaultConfig() *Config { StateSync: DefaultStateSyncConfig(), FastSync: DefaultFastSyncConfig(), Consensus: DefaultConsensusConfig(), + Storage: DefaultStorageConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), } @@ -103,6 +105,7 @@ func TestConfig() *Config { StateSync: TestStateSyncConfig(), FastSync: TestFastSyncConfig(), Consensus: TestConsensusConfig(), + Storage: TestStorageConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), } @@ -1071,11 +1074,41 @@ func (cfg *ConsensusConfig) ValidateBasic() error { } //----------------------------------------------------------------------------- +// StorageConfig + +// StorageConfig allows more fine-grained control over certain storage-related +// behavior. +type StorageConfig struct { + // Set to false to ensure ABCI responses are persisted. ABCI responses are + // required for `/block_results` RPC queries, and to reindex events in the + // command-line tool. + DiscardABCIResponses bool `mapstructure:"discard_abci_responses"` +} + +// DefaultStorageConfig returns the default configuration options relating to +// Tendermint storage optimization. +func DefaultStorageConfig() *StorageConfig { + return &StorageConfig{ + DiscardABCIResponses: false, + } +} + +// TestStorageConfig returns storage configuration that can be used for +// testing. 
+func TestStorageConfig() *StorageConfig { + return &StorageConfig{ + DiscardABCIResponses: false, + } +} + +// ----------------------------------------------------------------------------- // TxIndexConfig // Remember that Event has the following structure: // type: [ -// key: value, -// ... +// +// key: value, +// ... +// // ] // // CompositeKeys are constructed by `type.key` diff --git a/config/config_test.go b/config/config_test.go index 6a46933bcb..f19c7b1c7c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -27,7 +27,6 @@ func TestDefaultConfig(t *testing.T) { assert.Equal("/foo/bar", cfg.GenesisFile()) assert.Equal("/opt/data", cfg.DBDir()) assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) - } func TestConfigValidateBasic(t *testing.T) { @@ -140,8 +139,8 @@ func TestFastSyncConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) } +//nolint:lll func TestConsensusConfig_ValidateBasic(t *testing.T) { - // nolint: lll testcases := map[string]struct { modify func(*ConsensusConfig) expectErr bool @@ -166,6 +165,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, "DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true}, } + for desc, tc := range testcases { tc := tc // appease linter t.Run(desc, func(t *testing.T) { diff --git a/config/toml.go b/config/toml.go index ad40b1efcf..f6e9434d35 100644 --- a/config/toml.go +++ b/config/toml.go @@ -3,7 +3,7 @@ package config import ( "bytes" "fmt" - "io/ioutil" + "os" "path/filepath" "strings" "text/template" @@ -12,7 +12,7 @@ import ( ) // DefaultDirPerm is the default permissions used when creating directories. 
-const DefaultDirPerm = 0700 +const DefaultDirPerm = 0o700 var configTemplate *template.Template @@ -63,7 +63,7 @@ func WriteConfigFile(configFilePath string, config *Config) { panic(err) } - tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644) + tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644) } // Note: any changes to the comments/variables/mapstructure @@ -480,6 +480,17 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. 
+discard_abci_responses = {{ .Storage.DiscardABCIResponses}} + ####################################################### ### Transaction Indexer Configuration Options ### ####################################################### @@ -533,7 +544,7 @@ func ResetTestRoot(testName string) *Config { func ResetTestRootWithChainID(testName string, chainID string) *Config { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { panic(err) } @@ -560,11 +571,11 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config { chainID = "tendermint_test" } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0o644) } // we always overwrite the priv val - tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0o644) + tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0o644) config := TestConfig().SetRoot(rootDir) return config diff --git a/config/toml_test.go b/config/toml_test.go index f197106872..678cf6a9bb 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -23,7 +22,7 @@ func TestEnsureRoot(t *testing.T) { require := require.New(t) // setup temp dir for test - tmpDir, err := ioutil.TempDir("", "config-test") + tmpDir, err := os.MkdirTemp("", "config-test") require.Nil(err) defer os.RemoveAll(tmpDir) @@ -31,7 +30,7 @@ func TestEnsureRoot(t *testing.T) { EnsureRoot(tmpDir) // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + data, 
err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) require.Nil(err) if !checkConfig(string(data)) { @@ -52,7 +51,7 @@ func TestEnsureTestRoot(t *testing.T) { rootDir := cfg.RootDir // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) require.Nil(err) if !checkConfig(string(data)) { @@ -68,7 +67,7 @@ func checkConfig(configFile string) bool { var valid bool // list of words we expect in the config - var elems = []string{ + elems := []string{ "moniker", "seeds", "proxy_app", diff --git a/consensus/README.md b/consensus/README.md index 44a36012ff..15687b6269 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -1,3 +1,3 @@ # Consensus -See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/spec/tree/master/spec/reactors/consensus) for more information. +See the [consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/reactors/consensus) for more information. 
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index f47a2d229f..7a82ab4ffe 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -26,6 +26,7 @@ import ( mempoolv0 "github.com/tendermint/tendermint/mempool/v0" mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -50,7 +51,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i := 0; i < nValidators; i++ { logger := consensusLogger().With("test", "byzantine", "validator", i) stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) @@ -163,10 +166,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i, peer := range peerList { if i < len(peerList)/2 { bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + Message: &tmcons.Vote{Vote: prevote1.ToProto()}, + ChannelID: VoteChannel, + }, bcs.Logger) } else { bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + Message: &tmcons.Vote{Vote: prevote2.ToProto()}, + ChannelID: VoteChannel, + }, bcs.Logger) } } } else { @@ -420,7 +429,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // wait for someone in the big partition (B) to make a block 
<-blocksSubs[ind2].Out() - t.Log("A block has been committed. Healing partition") + t.Logf("A block has been committed. Healing partition") p2p.Connect2Switches(switches, ind0, ind1) p2p.Connect2Switches(switches, ind0, ind2) @@ -446,8 +455,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { case <-done: case <-tick.C: for i, reactor := range reactors { - t.Log(fmt.Sprintf("Consensus Reactor %v", i)) - t.Log(fmt.Sprintf("%v", reactor)) + t.Logf(fmt.Sprintf("Consensus Reactor %v", i)) + t.Logf(fmt.Sprintf("%v", reactor)) } t.Fatalf("Timed out waiting for all validators to commit first block") } @@ -512,18 +521,26 @@ func sendProposalAndParts( parts *types.PartSet, ) { // proposal - msg := &ProposalMessage{Proposal: proposal} - peer.Send(DataChannel, MustEncode(msg)) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.Proposal{Proposal: *proposal.ToProto()}, + }, cs.Logger) // parts for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) - msg := &BlockPartMessage{ - Height: height, // This tells peer that this part applies to us. - Round: round, // This tells peer that this part applies to us. - Part: part, + pp, err := part.ToProto() + if err != nil { + panic(err) // TODO: wbanfield better error handling } - peer.Send(DataChannel, MustEncode(msg)) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: height, // This tells peer that this part applies to us. + Round: round, // This tells peer that this part applies to us. 
+ Part: *pp, + }, + }, cs.Logger) } // votes @@ -531,9 +548,14 @@ func sendProposalAndParts( prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) cs.mtx.Unlock() - - peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) - peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{Vote: prevote.ToProto()}, + }, cs.Logger) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{Vote: precommit.ToProto()}, + }, cs.Logger) } //---------------------------------------- @@ -571,7 +593,10 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { br.reactor.RemovePeer(peer, reason) } -func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { - br.reactor.Receive(chID, peer, msgBytes) +func (br *ByzantineReactor) ReceiveEnvelope(e p2p.Envelope) { + br.reactor.ReceiveEnvelope(e) +} +func (br *ByzantineReactor) Receive(chID byte, p p2p.Peer, m []byte) { + br.reactor.Receive(chID, p, m) } func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/consensus/common_test.go b/consensus/common_test.go index 4722e16000..5b4320f8ef 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -4,8 +4,8 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "os" + "path" "path/filepath" "sort" "sync" @@ -15,8 +15,6 @@ import ( "github.com/go-kit/log/term" "github.com/stretchr/testify/require" - "path" - dbm "github.com/tendermint/tm-db" abcicli "github.com/tendermint/tendermint/abci/client" @@ -92,8 +90,8 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida func (vs *validatorStub) signVote( voteType tmproto.SignedMsgType, hash []byte, - header 
types.PartSetHeader) (*types.Vote, error) { - + header types.PartSetHeader, +) (*types.Vote, error) { pubKey, err := vs.PrivValidator.GetPubKey() if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) @@ -141,7 +139,8 @@ func signVotes( voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, - vss ...*validatorStub) []*types.Vote { + vss ...*validatorStub, +) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { votes[i] = signVote(vs, voteType, hash, header) @@ -428,7 +427,9 @@ func newStateWithConfigAndBlockStore( // Make State stateDB := blockDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := stateStore.Save(state); err != nil { // for save height 1's validators info panic(err) } @@ -450,7 +451,7 @@ func newStateWithConfigAndBlockStore( func loadPrivValidator(config *cfg.Config) *privval.FilePV { privValidatorKeyFile := config.PrivValidatorKeyFile() - ensureDir(filepath.Dir(privValidatorKeyFile), 0700) + ensureDir(filepath.Dir(privValidatorKeyFile), 0o700) privValidatorStateFile := config.PrivValidatorStateFile() privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) privValidator.Reset() @@ -477,7 +478,8 @@ func randState(nValidators int) (*State, []*validatorStub) { //------------------------------------------------------------------------------- func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration, - errorMessage string) { + errorMessage string, +) { select { case <-time.After(timeout): break @@ -655,7 +657,8 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) { } func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32, - voteType tmproto.SignedMsgType) { + voteType tmproto.SignedMsgType, +) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewVote event") @@ -711,21 +714,24 @@ func 
consensusLogger() log.Logger { } func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, - appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) { + appFunc func() abci.Application, configOpts ...func(*cfg.Config), +) ([]*State, cleanupFunc) { genDoc, privVals := randGenesisDoc(nValidators, false, 30) css := make([]*State, nValidators) logger := consensusLogger() configRootDirs := make([]string, 0, nValidators) for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) for _, opt := range configOpts { opt(thisConfig) } - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal app := appFunc() vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) @@ -756,11 +762,13 @@ func randConsensusNetWithPeers( configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal if i == 0 { peer0Config = thisConfig } @@ -768,11 +776,11 @@ func randConsensusNetWithPeers( if i < nValidators { privVal = privVals[i] } 
else { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") if err != nil { panic(err) } - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") if err != nil { panic(err) } @@ -894,7 +902,7 @@ func newCounter() abci.Application { } func newPersistentKVStore() abci.Application { - dir, err := ioutil.TempDir("", "persistent-kvstore") + dir, err := os.MkdirTemp("", "persistent-kvstore") if err != nil { panic(err) } diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index 907693c573..a9c1a1f199 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -94,7 +95,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw peers := sw.Peers().List() for _, peer := range peers { cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer) - peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + Message: &tmcons.Vote{Vote: precommit.ToProto()}, + ChannelID: VoteChannel, + }, cs.Logger) } }() } diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index a0cdae1dfa..c9e461d481 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) blockDB := dbm.NewMemDB() - stateStore := sm.NewStore(blockDB) + stateStore := sm.NewStore(blockDB, 
sm.StoreOptions{DiscardABCIResponses: false}) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) err := stateStore.Save(state) require.NoError(t, err) @@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, false, 10) app := NewCounterApplication() blockDB := dbm.NewMemDB() - stateStore := sm.NewStore(blockDB) + stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) err := stateStore.Save(state) require.NoError(t, err) diff --git a/consensus/msgs.go b/consensus/msgs.go index 4de96b5f40..c63af3ea84 100644 --- a/consensus/msgs.go +++ b/consensus/msgs.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/gogo/protobuf/proto" - cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmmath "github.com/tendermint/tendermint/libs/math" @@ -15,173 +14,155 @@ import ( "github.com/tendermint/tendermint/types" ) -// MsgToProto takes a consensus message type and returns the proto defined consensus message +// MsgToProto takes a consensus message type and returns the proto defined consensus message. +// +// TODO: This needs to be removed, but WALToProto depends on this. 
func MsgToProto(msg Message) (*tmcons.Message, error) { if msg == nil { return nil, errors.New("consensus: message is nil") } - var pb tmcons.Message - switch msg := msg.(type) { case *NewRoundStepMessage: - pb = tmcons.Message{ - Sum: &tmcons.Message_NewRoundStep{ - NewRoundStep: &tmcons.NewRoundStep{ - Height: msg.Height, - Round: msg.Round, - Step: uint32(msg.Step), - SecondsSinceStartTime: msg.SecondsSinceStartTime, - LastCommitRound: msg.LastCommitRound, - }, - }, - } + m := &tmcons.NewRoundStep{ + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, + } + return m.Wrap().(*tmcons.Message), nil + case *NewValidBlockMessage: pbPartSetHeader := msg.BlockPartSetHeader.ToProto() pbBits := msg.BlockParts.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_NewValidBlock{ - NewValidBlock: &tmcons.NewValidBlock{ - Height: msg.Height, - Round: msg.Round, - BlockPartSetHeader: pbPartSetHeader, - BlockParts: pbBits, - IsCommit: msg.IsCommit, - }, - }, + m := &tmcons.NewValidBlock{ + Height: msg.Height, + Round: msg.Round, + BlockPartSetHeader: pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.IsCommit, } + return m.Wrap().(*tmcons.Message), nil + case *ProposalMessage: pbP := msg.Proposal.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_Proposal{ - Proposal: &tmcons.Proposal{ - Proposal: *pbP, - }, - }, + m := &tmcons.Proposal{ + Proposal: *pbP, } + return m.Wrap().(*tmcons.Message), nil + case *ProposalPOLMessage: pbBits := msg.ProposalPOL.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_ProposalPol{ - ProposalPol: &tmcons.ProposalPOL{ - Height: msg.Height, - ProposalPolRound: msg.ProposalPOLRound, - ProposalPol: *pbBits, - }, - }, + m := &tmcons.ProposalPOL{ + Height: msg.Height, + ProposalPolRound: msg.ProposalPOLRound, + ProposalPol: *pbBits, } + return m.Wrap().(*tmcons.Message), nil + case *BlockPartMessage: parts, err := 
msg.Part.ToProto() if err != nil { return nil, fmt.Errorf("msg to proto error: %w", err) } - pb = tmcons.Message{ - Sum: &tmcons.Message_BlockPart{ - BlockPart: &tmcons.BlockPart{ - Height: msg.Height, - Round: msg.Round, - Part: *parts, - }, - }, + m := &tmcons.BlockPart{ + Height: msg.Height, + Round: msg.Round, + Part: *parts, } + return m.Wrap().(*tmcons.Message), nil + case *VoteMessage: vote := msg.Vote.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_Vote{ - Vote: &tmcons.Vote{ - Vote: vote, - }, - }, + m := &tmcons.Vote{ + Vote: vote, } + return m.Wrap().(*tmcons.Message), nil + case *HasVoteMessage: - pb = tmcons.Message{ - Sum: &tmcons.Message_HasVote{ - HasVote: &tmcons.HasVote{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - Index: msg.Index, - }, - }, + m := &tmcons.HasVote{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, } + return m.Wrap().(*tmcons.Message), nil + case *VoteSetMaj23Message: bi := msg.BlockID.ToProto() - pb = tmcons.Message{ - Sum: &tmcons.Message_VoteSetMaj23{ - VoteSetMaj23: &tmcons.VoteSetMaj23{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - BlockID: bi, - }, - }, + m := &tmcons.VoteSetMaj23{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, } + return m.Wrap().(*tmcons.Message), nil + case *VoteSetBitsMessage: bi := msg.BlockID.ToProto() bits := msg.Votes.ToProto() - vsb := &tmcons.Message_VoteSetBits{ - VoteSetBits: &tmcons.VoteSetBits{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - BlockID: bi, - }, + m := &tmcons.VoteSetBits{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, } if bits != nil { - vsb.VoteSetBits.Votes = *bits + m.Votes = *bits } - pb = tmcons.Message{ - Sum: vsb, - } + return m.Wrap().(*tmcons.Message), nil default: return nil, fmt.Errorf("consensus: message not recognized: %T", msg) } - - return &pb, nil } // MsgFromProto takes a consensus proto message and returns the native go 
type -func MsgFromProto(msg *tmcons.Message) (Message, error) { - if msg == nil { +func MsgFromProto(p *tmcons.Message) (Message, error) { + if p == nil { return nil, errors.New("consensus: nil message") } var pb Message + um, err := p.Unwrap() + if err != nil { + return nil, err + } - switch msg := msg.Sum.(type) { - case *tmcons.Message_NewRoundStep: - rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step)) + switch msg := um.(type) { + case *tmcons.NewRoundStep: + rs, err := tmmath.SafeConvertUint8(int64(msg.Step)) // deny message based on possible overflow if err != nil { return nil, fmt.Errorf("denying message due to possible overflow: %w", err) } pb = &NewRoundStepMessage{ - Height: msg.NewRoundStep.Height, - Round: msg.NewRoundStep.Round, + Height: msg.Height, + Round: msg.Round, Step: cstypes.RoundStepType(rs), - SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime, - LastCommitRound: msg.NewRoundStep.LastCommitRound, + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, } - case *tmcons.Message_NewValidBlock: - pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + case *tmcons.NewValidBlock: + pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader) if err != nil { return nil, fmt.Errorf("parts to proto error: %w", err) } pbBits := new(bits.BitArray) - pbBits.FromProto(msg.NewValidBlock.BlockParts) + pbBits.FromProto(msg.BlockParts) pb = &NewValidBlockMessage{ - Height: msg.NewValidBlock.Height, - Round: msg.NewValidBlock.Round, + Height: msg.Height, + Round: msg.Round, BlockPartSetHeader: *pbPartSetHeader, BlockParts: pbBits, - IsCommit: msg.NewValidBlock.IsCommit, + IsCommit: msg.IsCommit, } - case *tmcons.Message_Proposal: - pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + case *tmcons.Proposal: + pbP, err := types.ProposalFromProto(&msg.Proposal) if err != nil { return nil, fmt.Errorf("proposal msg to proto error: %w", err) 
} @@ -189,26 +170,26 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { pb = &ProposalMessage{ Proposal: pbP, } - case *tmcons.Message_ProposalPol: + case *tmcons.ProposalPOL: pbBits := new(bits.BitArray) - pbBits.FromProto(&msg.ProposalPol.ProposalPol) + pbBits.FromProto(&msg.ProposalPol) pb = &ProposalPOLMessage{ - Height: msg.ProposalPol.Height, - ProposalPOLRound: msg.ProposalPol.ProposalPolRound, + Height: msg.Height, + ProposalPOLRound: msg.ProposalPolRound, ProposalPOL: pbBits, } - case *tmcons.Message_BlockPart: - parts, err := types.PartFromProto(&msg.BlockPart.Part) + case *tmcons.BlockPart: + parts, err := types.PartFromProto(&msg.Part) if err != nil { return nil, fmt.Errorf("blockpart msg to proto error: %w", err) } pb = &BlockPartMessage{ - Height: msg.BlockPart.Height, - Round: msg.BlockPart.Round, + Height: msg.Height, + Round: msg.Round, Part: parts, } - case *tmcons.Message_Vote: - vote, err := types.VoteFromProto(msg.Vote.Vote) + case *tmcons.Vote: + vote, err := types.VoteFromProto(msg.Vote) if err != nil { return nil, fmt.Errorf("vote msg to proto error: %w", err) } @@ -216,36 +197,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { pb = &VoteMessage{ Vote: vote, } - case *tmcons.Message_HasVote: + case *tmcons.HasVote: pb = &HasVoteMessage{ - Height: msg.HasVote.Height, - Round: msg.HasVote.Round, - Type: msg.HasVote.Type, - Index: msg.HasVote.Index, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, } - case *tmcons.Message_VoteSetMaj23: - bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + case *tmcons.VoteSetMaj23: + bi, err := types.BlockIDFromProto(&msg.BlockID) if err != nil { return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err) } pb = &VoteSetMaj23Message{ - Height: msg.VoteSetMaj23.Height, - Round: msg.VoteSetMaj23.Round, - Type: msg.VoteSetMaj23.Type, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, BlockID: *bi, } - case *tmcons.Message_VoteSetBits: 
- bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + case *tmcons.VoteSetBits: + bi, err := types.BlockIDFromProto(&msg.BlockID) if err != nil { return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err) } bits := new(bits.BitArray) - bits.FromProto(&msg.VoteSetBits.Votes) + bits.FromProto(&msg.Votes) pb = &VoteSetBitsMessage{ - Height: msg.VoteSetBits.Height, - Round: msg.VoteSetBits.Round, - Type: msg.VoteSetBits.Type, + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, BlockID: *bi, Votes: bits, } @@ -262,6 +243,8 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { // MustEncode takes the reactors msg, makes it proto and marshals it // this mimics `MustMarshalBinaryBare` in that is panics on error +// +// Deprecated: Will be removed in v0.37. func MustEncode(msg Message) []byte { pb, err := MsgToProto(msg) if err != nil { diff --git a/consensus/msgs_test.go b/consensus/msgs_test.go index b1f32e67dd..62f723b1c1 100644 --- a/consensus/msgs_test.go +++ b/consensus/msgs_test.go @@ -80,17 +80,15 @@ func TestMsgToProto(t *testing.T) { Step: 1, SecondsSinceStartTime: 1, LastCommitRound: 2, - }, &tmcons.Message{ - Sum: &tmcons.Message_NewRoundStep{ - NewRoundStep: &tmcons.NewRoundStep{ - Height: 2, - Round: 1, - Step: 1, - SecondsSinceStartTime: 1, - LastCommitRound: 2, - }, - }, - }, false}, + }, (&tmcons.NewRoundStep{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }).Wrap().(*tmcons.Message), + + false}, {"successful NewValidBlockMessage", &NewValidBlockMessage{ Height: 1, @@ -98,92 +96,78 @@ func TestMsgToProto(t *testing.T) { BlockPartSetHeader: psh, BlockParts: bits, IsCommit: false, - }, &tmcons.Message{ - Sum: &tmcons.Message_NewValidBlock{ - NewValidBlock: &tmcons.NewValidBlock{ - Height: 1, - Round: 1, - BlockPartSetHeader: pbPsh, - BlockParts: pbBits, - IsCommit: false, - }, - }, - }, false}, + }, (&tmcons.NewValidBlock{ + Height: 1, + Round: 1, + BlockPartSetHeader: pbPsh, + 
BlockParts: pbBits, + IsCommit: false, + }).Wrap().(*tmcons.Message), + + false}, {"successful BlockPartMessage", &BlockPartMessage{ Height: 100, Round: 1, Part: &parts, - }, &tmcons.Message{ - Sum: &tmcons.Message_BlockPart{ - BlockPart: &tmcons.BlockPart{ - Height: 100, - Round: 1, - Part: *pbParts, - }, - }, - }, false}, + }, (&tmcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }).Wrap().(*tmcons.Message), + + false}, {"successful ProposalPOLMessage", &ProposalPOLMessage{ Height: 1, ProposalPOLRound: 1, ProposalPOL: bits, - }, &tmcons.Message{ - Sum: &tmcons.Message_ProposalPol{ - ProposalPol: &tmcons.ProposalPOL{ - Height: 1, - ProposalPolRound: 1, - ProposalPol: *pbBits, - }, - }}, false}, + }, (&tmcons.ProposalPOL{ + Height: 1, + ProposalPolRound: 1, + ProposalPol: *pbBits, + }).Wrap().(*tmcons.Message), + false}, {"successful ProposalMessage", &ProposalMessage{ Proposal: &proposal, - }, &tmcons.Message{ - Sum: &tmcons.Message_Proposal{ - Proposal: &tmcons.Proposal{ - Proposal: *pbProposal, - }, - }, - }, false}, + }, (&tmcons.Proposal{ + Proposal: *pbProposal, + }).Wrap().(*tmcons.Message), + + false}, {"successful VoteMessage", &VoteMessage{ Vote: vote, - }, &tmcons.Message{ - Sum: &tmcons.Message_Vote{ - Vote: &tmcons.Vote{ - Vote: pbVote, - }, - }, - }, false}, + }, (&tmcons.Vote{ + Vote: pbVote, + }).Wrap().(*tmcons.Message), + + false}, {"successful VoteSetMaj23", &VoteSetMaj23Message{ Height: 1, Round: 1, Type: 1, BlockID: bi, - }, &tmcons.Message{ - Sum: &tmcons.Message_VoteSetMaj23{ - VoteSetMaj23: &tmcons.VoteSetMaj23{ - Height: 1, - Round: 1, - Type: 1, - BlockID: pbBi, - }, - }, - }, false}, + }, (&tmcons.VoteSetMaj23{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + }).Wrap().(*tmcons.Message), + + false}, {"successful VoteSetBits", &VoteSetBitsMessage{ Height: 1, Round: 1, Type: 1, BlockID: bi, Votes: bits, - }, &tmcons.Message{ - Sum: &tmcons.Message_VoteSetBits{ - VoteSetBits: &tmcons.VoteSetBits{ - Height: 1, - Round: 1, - 
Type: 1, - BlockID: pbBi, - Votes: *pbBits, - }, - }, - }, false}, + }, (&tmcons.VoteSetBits{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + Votes: *pbBits, + }).Wrap().(*tmcons.Message), + + false}, {"failure", nil, &tmcons.Message{}, true}, } for _, tt := range testsCases { @@ -314,7 +298,7 @@ func TestWALMsgProto(t *testing.T) { } } -// nolint:lll //ignore line length for tests +//nolint:lll //ignore line length for tests func TestConsMsgsVectors(t *testing.T) { date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC) psh := types.PartSetHeader{ diff --git a/consensus/reactor.go b/consensus/reactor.go index d6b22786b0..e052f197af 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -8,7 +8,6 @@ import ( "time" "github.com/gogo/protobuf/proto" - cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" @@ -148,6 +147,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 6, SendQueueCapacity: 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: DataChannel, // maybe split between gossiping current block and catchup stuff @@ -156,6 +156,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 100, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteChannel, @@ -163,6 +164,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 100, RecvBufferCapacity: 100 * 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteSetBitsChannel, @@ -170,6 +172,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 2, RecvBufferCapacity: 1024, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, } } @@ -223,34 +226,37 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Peer state updates can happen in 
parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine // NOTE: blocks on consensus state for proposals, block parts, and votes -func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { +func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { if !conR.IsRunning() { - conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID) return } - - msg, err := decodeMsg(msgBytes) + m := e.Message + if wm, ok := m.(p2p.Wrapper); ok { + m = wm.Wrap() + } + msg, err := MsgFromProto(m.(*tmcons.Message)) if err != nil { - conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } if err = msg.ValidateBasic(); err != nil { - conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } - conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg) // Get peer states - ps, ok := src.Get(types.PeerStateKey).(*PeerState) + ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState) if !ok { - panic(fmt.Sprintf("Peer %v has no state", src)) + panic(fmt.Sprintf("Peer %v has no state", e.Src)) } - switch chID { + switch e.ChannelID { case StateChannel: switch msg := msg.(type) { case *NewRoundStepMessage: @@ -258,8 +264,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { initialHeight := conR.conS.state.InitialHeight conR.conS.mtx.Unlock() if err = msg.ValidateHeight(initialHeight); err != nil { - conR.Logger.Error("Peer sent us invalid 
msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } ps.ApplyNewRoundStepMessage(msg) @@ -278,7 +284,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // Peer claims to have a maj23 for some BlockID at H,R,S, err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) if err != nil { - conR.Switch.StopPeerForError(src, err) + conR.Switch.StopPeerForError(e.Src, err) return } // Respond with a VoteSetBitsMessage showing which votes we have. @@ -292,13 +298,19 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { default: panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") } - src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{ + eMsg := &tmcons.VoteSetBits{ Height: msg.Height, Round: msg.Round, Type: msg.Type, - BlockID: msg.BlockID, - Votes: ourVotes, - })) + BlockID: msg.BlockID.ToProto(), + } + if votes := ourVotes.ToProto(); votes != nil { + eMsg.Votes = *votes + } + p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteSetBitsChannel, + Message: eMsg, + }, conR.Logger) default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -311,13 +323,13 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { switch msg := msg.(type) { case *ProposalMessage: ps.SetHasProposal(msg.Proposal) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) - conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.Metrics.BlockParts.With("peer_id", 
string(e.Src.ID())).Add(1) + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -337,7 +349,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(msg.Vote) - cs.peerMsgQueue <- msgInfo{msg, src.ID()} + cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: // don't punish (leave room for soft upgrades) @@ -376,8 +388,25 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } default: - conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID)) + } +} + +func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &tmcons.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) } + uw, err := msg.Unwrap() + if err != nil { + panic(err) + } + conR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: uw, + }) } // SetEventBus sets event bus. 
@@ -430,29 +459,39 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() { func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { nrsMsg := makeRoundStepMessage(rs) - conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg)) + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: nrsMsg, + }) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { - csMsg := &NewValidBlockMessage{ + psh := rs.ProposalBlockParts.Header() + csMsg := &tmcons.NewValidBlock{ Height: rs.Height, Round: rs.Round, - BlockPartSetHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), + BlockPartSetHeader: psh.ToProto(), + BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), IsCommit: rs.Step == cstypes.RoundStepCommit, } - conR.Switch.Broadcast(StateChannel, MustEncode(csMsg)) + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: csMsg, + }) } // Broadcasts HasVoteMessage to peers that care. func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { - msg := &HasVoteMessage{ + msg := &tmcons.HasVote{ Height: vote.Height, Round: vote.Round, Type: vote.Type, Index: vote.ValidatorIndex, } - conR.Switch.Broadcast(StateChannel, MustEncode(msg)) + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: msg, + }) /* // TODO: Make this broadcast more selective. for _, peer := range conR.Switch.Peers().List() { @@ -463,7 +502,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { prs := ps.GetRoundState() if prs.Height == vote.Height { // TODO: Also filter on round? - peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg}) + e := p2p.Envelope{ + ChannelID: StateChannel, struct{ ConsensusMessage }{msg}, + Message: p, + } + p2p.TrySendEnvelopeShim(peer, e) //nolint: staticcheck } else { // Height doesn't match // TODO: check a field, maybe CatchupCommitRound? 
@@ -473,11 +516,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { */ } -func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { - nrsMsg = &NewRoundStepMessage{ +func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) { + nrsMsg = &tmcons.NewRoundStep{ Height: rs.Height, Round: rs.Round, - Step: rs.Step, + Step: uint32(rs.Step), SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.GetRound(), } @@ -487,7 +530,10 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.getRoundState() nrsMsg := makeRoundStepMessage(rs) - peer.Send(StateChannel, MustEncode(nrsMsg)) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: nrsMsg, + }, conR.Logger) } func (conR *Reactor) updateRoundStateRoutine() { @@ -526,13 +572,19 @@ OUTER_LOOP: if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { part := rs.ProposalBlockParts.GetPart(index) - msg := &BlockPartMessage{ - Height: rs.Height, // This tells peer that this part applies to us. - Round: rs.Round, // This tells peer that this part applies to us. - Part: part, + parts, err := part.ToProto() + if err != nil { + panic(err) } logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, MustEncode(msg)) { + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: rs.Height, // This tells peer that this part applies to us. + Round: rs.Round, // This tells peer that this part applies to us. 
+ Part: *parts, + }, + }, logger) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -578,9 +630,11 @@ OUTER_LOOP: if rs.Proposal != nil && !prs.Proposal { // Proposal: share the proposal metadata with peer. { - msg := &ProposalMessage{Proposal: rs.Proposal} logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, MustEncode(msg)) { + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()}, + }, logger) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) } @@ -590,13 +644,15 @@ OUTER_LOOP: // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). if 0 <= rs.Proposal.POLRound { - msg := &ProposalPOLMessage{ - Height: rs.Height, - ProposalPOLRound: rs.Proposal.POLRound, - ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), - } logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(DataChannel, MustEncode(msg)) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.ProposalPOL{ + Height: rs.Height, + ProposalPolRound: rs.Proposal.POLRound, + ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), + }, + }, logger) } continue OUTER_LOOP } @@ -633,13 +689,20 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt return } // Send the part - msg := &BlockPartMessage{ - Height: prs.Height, // Not our height, so it doesn't matter. - Round: prs.Round, // Not our height, so it doesn't matter. 
- Part: part, - } logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - if peer.Send(DataChannel, MustEncode(msg)) { + pp, err := part.ToProto() + if err != nil { + logger.Error("Could not convert part to proto", "index", index, "error", err) + return + } + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. + Part: *pp, + }, + }, logger) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } else { logger.Debug("Sending block part for catchup failed") @@ -798,12 +861,16 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }, + }, ps.logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -815,12 +882,15 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrecommitType, - BlockID: maj23, - })) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23.ToProto(), + }, + }, ps.logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -832,12 +902,16 @@ OUTER_LOOP: prs := 
ps.GetRoundState() if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.ProposalPOLRound, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.ProposalPOLRound, + Type: tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }, + }, ps.logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -852,12 +926,15 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round, - Type: tmproto.PrecommitType, - BlockID: commit.BlockID, - })) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: commit.Round, + Type: tmproto.PrecommitType, + BlockID: commit.BlockID.ToProto(), + }, + }, ps.logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -1071,9 +1148,13 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in // Returns true if vote was sent. 
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { if vote, ok := ps.PickVoteToSend(votes); ok { - msg := &VoteMessage{vote} ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - if ps.peer.Send(VoteChannel, MustEncode(msg)) { + if p2p.SendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{ + Vote: vote.ToProto(), + }, + }, ps.logger) { ps.SetHasVote(vote) return true } @@ -1439,15 +1520,6 @@ func init() { tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") } -func decodeMsg(bz []byte) (msg Message, err error) { - pb := &tmcons.Message{} - if err = proto.Unmarshal(bz, pb); err != nil { - return msg, err - } - - return MsgFromProto(pb) -} - //------------------------------------- // NewRoundStepMessage is sent for every step taken in the ConsensusState. diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 5d68cd9b70..851070ba67 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -33,6 +34,7 @@ import ( mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" p2pmock "github.com/tendermint/tendermint/p2p/mock" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" statemocks "github.com/tendermint/tendermint/state/mocks" @@ -138,7 +140,9 @@ func TestReactorWithEvidence(t *testing.T) { logger := consensusLogger() for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := 
stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) @@ -252,7 +256,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { }, css) } -func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { +func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) { N := 1 css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) defer cleanup() @@ -262,13 +266,45 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] peer = p2pmock.NewPeer(nil) - msg = MustEncode(&HasVoteMessage{Height: 1, - Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) reactor.InitPeer(peer) // simulate switch calling Receive before AddPeer + assert.NotPanics(t, func() { + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Src: peer, + Message: &tmcons.HasVote{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}, + }) + reactor.AddPeer(peer) + }) +} + +func TestLegacyReactorReceiveBasic(t *testing.T) { + N := 1 + css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) + defer cleanup() + reactors, _, eventBuses := startConsensusNet(t, css, N) + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + var ( + reactor = reactors[0] + peer = p2pmock.NewPeer(nil) + ) + + reactor.InitPeer(peer) + v := &tmcons.HasVote{ + Height: 1, + Round: 1, + Index: 1, + Type: tmproto.PrevoteType, + } + w := v.Wrap() + msg, err := proto.Marshal(w) + assert.NoError(t, err) + assert.NotPanics(t, func() { reactor.Receive(StateChannel, peer, msg) reactor.AddPeer(peer) @@ -285,15 +321,18 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] peer = p2pmock.NewPeer(nil) - msg = MustEncode(&HasVoteMessage{Height: 1, - Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) // 
we should call InitPeer here // simulate switch calling Receive before AddPeer assert.Panics(t, func() { - reactor.Receive(StateChannel, peer, msg) + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Src: peer, + Message: &tmcons.HasVote{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}, + }) }) } @@ -689,7 +728,7 @@ func capture() { // Ensure basic validation of structs is functioning func TestNewRoundStepMessageValidateBasic(t *testing.T) { - testCases := []struct { // nolint: maligned + testCases := []struct { expectErr bool messageRound int32 messageLastCommitRound int32 @@ -728,7 +767,7 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) { func TestNewRoundStepMessageValidateHeight(t *testing.T) { initialHeight := int64(10) - testCases := []struct { // nolint: maligned + testCases := []struct { //nolint: maligned expectErr bool messageLastCommitRound int32 messageHeight int64 @@ -878,7 +917,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) { invalidSignedMsgType tmproto.SignedMsgType = 0x03 ) - testCases := []struct { // nolint: maligned + testCases := []struct { //nolint: maligned expectErr bool messageRound int32 messageIndex int32 @@ -923,7 +962,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { }, } - testCases := []struct { // nolint: maligned + testCases := []struct { //nolint: maligned expectErr bool messageRound int32 messageHeight int64 diff --git a/consensus/replay.go b/consensus/replay.go index 9fd59a40eb..bed2a2c4da 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. 
- abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight) if err != nil { return nil, err } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 4bf7466abf..0145dfe928 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo if err != nil { tmos.Exit(err.Error()) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c830cc8303..b037c5cfa4 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -66,7 +65,8 @@ func TestMain(m *testing.M) { // wal writer when we need to, instead of with every message. 
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { + lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store, +) { logger := log.TestingLogger() state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) @@ -79,7 +79,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi ) cs.SetLogger(logger) - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) + bytes, _ := os.ReadFile(cs.config.WalFile()) t.Logf("====== WAL: \n\r%X\n", bytes) err := cs.Start() @@ -127,14 +127,18 @@ func TestWALCrash(t *testing.T) { initFn func(dbm.DB, *State, context.Context) heightToStop int64 }{ - {"empty block", + { + "empty block", func(stateDB dbm.DB, cs *State, ctx context.Context) {}, - 1}, - {"many non-empty blocks", + 1, + }, + { + "many non-empty blocks", func(stateDB dbm.DB, cs *State, ctx context.Context) { go sendTxs(ctx, cs) }, - 3}, + 3, + }, } for i, tc := range testCases { @@ -147,7 +151,8 @@ func TestWALCrash(t *testing.T) { } func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, - initFn func(dbm.DB, *State, context.Context), heightToStop int64) { + initFn func(dbm.DB, *State, context.Context), heightToStop int64, +) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -160,7 +165,9 @@ LOOP: logger := log.NewNopLogger() blockDB := dbm.NewMemDB() stateDB := blockDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) @@ -283,7 +290,8 @@ func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() } func (w *crashingWAL) SearchForEndHeight( 
height int64, - options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + options *WALSearchOptions, +) (rd io.ReadCloser, found bool, err error) { return w.next.SearchForEndHeight(height, options) } @@ -587,7 +595,7 @@ func TestHandshakeReplayNone(t *testing.T) { func TestMockProxyApp(t *testing.T) { sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange logger := log.TestingLogger() - var validTxs, invalidTxs = 0, 0 + validTxs, invalidTxs := 0, 0 txIndex := 0 assert.NotPanics(t, func() { @@ -635,7 +643,7 @@ func TestMockProxyApp(t *testing.T) { } func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") + walFile, err := os.CreateTemp("", "wal") if err != nil { panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) } @@ -694,7 +702,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) store.chain = chain store.commits = commits @@ -713,7 +723,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) stateDB1 := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB1) + stateStore := sm.NewStore(stateDB1, sm.StoreOptions{ + DiscardABCIResponses: false, + }) err := stateStore.Save(genesisState) require.NoError(t, err) buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode) @@ -789,7 +801,8 @@ func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp pro } func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store, - state sm.State, chain []*types.Block, nBlocks int, mode uint) { + state sm.State, chain []*types.Block, nBlocks int, mode uint, +) { // start a new app without handshake, 
play nBlocks blocks if err := proxyApp.Start(); err != nil { panic(err) @@ -826,7 +839,6 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store, default: panic(fmt.Sprintf("unknown mode %v", mode)) } - } func buildTMStateFromChain( @@ -835,7 +847,8 @@ func buildTMStateFromChain( state sm.State, chain []*types.Block, nBlocks int, - mode uint) sm.State { + mode uint, +) sm.State { // run the whole chain against this client to build up the tendermint state clientCreator := proxy.NewLocalClientCreator( kvstore.NewPersistentKVStoreApplication( @@ -892,7 +905,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, appVersion) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks @@ -976,8 +991,8 @@ func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl } func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, - privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { - + privVal types.PrivValidator, height int64, +) (*types.Block, *types.PartSet) { lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) if height > 1 { vote, _ := types.MakeVote( @@ -1065,8 +1080,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { case EndHeightMessage: // if its not the first one, we have a full block if thisBlockParts != nil { - var pbb = new(tmproto.Block) - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + pbb := new(tmproto.Block) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } @@ -1105,11 +1120,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, 
[]*types.Commit, error) { } } // grab the last block too - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + bz, err := io.ReadAll(thisBlockParts.GetReader()) if err != nil { panic(err) } - var pbb = new(tmproto.Block) + pbb := new(tmproto.Block) err = proto.Unmarshal(bz, pbb) if err != nil { panic(err) @@ -1153,9 +1168,12 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { func stateAndStore( config *cfg.Config, pubKey crypto.PubKey, - appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { + appVersion uint64, +) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion store := newMockBlockStore(config, state.ConsensusParams) @@ -1189,6 +1207,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] } + func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ @@ -1199,9 +1218,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } + func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return bs.commits[height-1] } + func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } @@ -1232,7 +1253,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 0x0) - stateStore := sm.NewStore(stateDB) + stateStore := 
sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) oldValAddr := state.Validators.Validators[0].Address diff --git a/consensus/state.go b/consensus/state.go index 7f0e9c3371..6b39cab210 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "runtime/debug" "sort" @@ -468,7 +468,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // SetProposal inputs a proposal. func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} } else { @@ -481,7 +480,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { // AddProposalBlockPart inputs a part of the proposal block. func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} } else { @@ -499,7 +497,6 @@ func (cs *State) SetProposalAndBlock( parts *types.PartSet, peerID p2p.ID, ) error { - if err := cs.SetProposal(proposal, peerID); err != nil { return err } @@ -821,7 +818,7 @@ func (cs *State) handleMsg(mi msgInfo) { // We unlock here to yield to any routines that need to read the the RoundState. // Previously, this code held the lock from the point at which the final block - // part was recieved until the block executed against the application. + // part was received until the block executed against the application. // This prevented the reactor from being able to retrieve the most updated // version of the RoundState. The reactor needs the updated RoundState to // gossip the now completed block. 
@@ -937,7 +934,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { default: panic(fmt.Sprintf("invalid timeout step: %v", ti.Step)) } - } func (cs *State) handleTxsAvailable() { @@ -1181,7 +1177,6 @@ func (cs *State) isProposalComplete() bool { } // if this is false the proposer is lying or we haven't received the POL yet return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() - } // Create the next block to propose and return it. Returns nil block upon error. @@ -1898,12 +1893,12 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } - var pbb = new(tmproto.Block) + pbb := new(tmproto.Block) err = proto.Unmarshal(bz, pbb) if err != nil { return added, err @@ -1968,7 +1963,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. // If it's otherwise invalid, punish peer. - // nolint: gocritic + //nolint: gocritic if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { if cs.privValidatorPubKey == nil { return false, errPubKeyIsNotSet @@ -2231,10 +2226,11 @@ func (cs *State) voteTime() time.Time { now := tmtime.Now() minVoteTime := now // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, - // even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/. + // even if cs.LockedBlock != nil. See https://github.com/tendermint/tendermint/tree/v0.34.x/spec/. 
timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond if cs.LockedBlock != nil { - // See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html + // See the BFT time spec + // https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md minVoteTime = cs.LockedBlock.Time.Add(timeIota) } else if cs.ProposalBlock != nil { minVoteTime = cs.ProposalBlock.Time.Add(timeIota) diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 1c449717bd..53712bf3dc 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisState(genDoc) if err != nil { return fmt.Errorf("failed to make genesis state: %w", err) diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 4ee8136091..5a4f4098f7 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -3,7 +3,6 @@ package consensus import ( "bytes" "crypto/rand" - "io/ioutil" "os" "path/filepath" @@ -27,7 +26,7 @@ const ( ) func TestWALTruncate(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") + walDir, err := os.MkdirTemp("", "wal") require.NoError(t, err) defer os.RemoveAll(walDir) @@ -109,7 +108,7 @@ func TestWALEncoderDecoder(t *testing.T) { } func TestWALWrite(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") + walDir, err := os.MkdirTemp("", "wal") require.NoError(t, err) defer os.RemoveAll(walDir) walFile := filepath.Join(walDir, "wal") @@ -177,7 +176,7 @@ func TestWALSearchForEndHeight(t *testing.T) { } func TestWALPeriodicSync(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") + walDir, err := os.MkdirTemp("", "wal") require.NoError(t, err) defer os.RemoveAll(walDir) @@ 
-269,18 +268,23 @@ func BenchmarkWalDecode512B(b *testing.B) { func BenchmarkWalDecode10KB(b *testing.B) { benchmarkWalDecode(b, 10*1024) } + func BenchmarkWalDecode100KB(b *testing.B) { benchmarkWalDecode(b, 100*1024) } + func BenchmarkWalDecode1MB(b *testing.B) { benchmarkWalDecode(b, 1024*1024) } + func BenchmarkWalDecode10MB(b *testing.B) { benchmarkWalDecode(b, 10*1024*1024) } + func BenchmarkWalDecode100MB(b *testing.B) { benchmarkWalDecode(b, 100*1024*1024) } + func BenchmarkWalDecode1GB(b *testing.B) { benchmarkWalDecode(b, 1024*1024*1024) } diff --git a/crypto/README.md b/crypto/README.md index 20346d7155..eaedbf7f2c 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g. ## Binary encoding -For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html). +For Binary encoding, please refer to the [Tendermint encoding specification](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/encoding.md). ## JSON Encoding diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go index bfc2193e93..99e2c3b3a2 100644 --- a/crypto/armor/armor.go +++ b/crypto/armor/armor.go @@ -3,9 +3,9 @@ package armor import ( "bytes" "fmt" - "io/ioutil" + "io" - "golang.org/x/crypto/openpgp/armor" // nolint: staticcheck + "golang.org/x/crypto/openpgp/armor" //nolint: staticcheck ) func EncodeArmor(blockType string, headers map[string]string, data []byte) string { @@ -31,7 +31,7 @@ func DecodeArmor(armorStr string) (blockType string, headers map[string]string, if err != nil { return "", nil, nil, err } - data, err = ioutil.ReadAll(block.Body) + data, err = io.ReadAll(block.Body) if err != nil { return "", nil, nil, err } diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go index 865c302170..fe50b34631 100644 --- a/crypto/merkle/doc.go +++ b/crypto/merkle/doc.go @@ -12,20 +12,19 @@ second pre-image attacks. 
Hence, use this library with caution. Otherwise you might run into similar issues as, e.g., in early Bitcoin: https://bitcointalk.org/?topic=102395 - * - / \ - / \ - / \ - / \ - * * - / \ / \ - / \ / \ - / \ / \ - * * * h6 - / \ / \ / \ - h0 h1 h2 h3 h4 h5 + * + / \ + / \ + / \ + / \ + * * + / \ / \ + / \ / \ + / \ / \ + * * * h6 + / \ / \ / \ + h0 h1 h2 h3 h4 h5 TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure. - */ package merkle diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index ab776216b0..842dc82018 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -85,8 +85,8 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { bz := new(bytes.Buffer) // Wrap to hash the KVPair. - encodeByteSlice(bz, op.key) // nolint: errcheck // does not error - encodeByteSlice(bz, vhash) // nolint: errcheck // does not error + encodeByteSlice(bz, op.key) //nolint: errcheck // does not error + encodeByteSlice(bz, vhash) //nolint: errcheck // does not error kvhash := leafHash(bz.Bytes()) if !bytes.Equal(kvhash, op.Proof.LeafHash) { diff --git a/crypto/merkle/tree.go b/crypto/merkle/tree.go index 466c434824..089c2f82ee 100644 --- a/crypto/merkle/tree.go +++ b/crypto/merkle/tree.go @@ -47,10 +47,10 @@ func HashFromByteSlices(items [][]byte) []byte { // // These preliminary results suggest: // -// 1. The performance of the HashFromByteSlice is pretty good -// 2. Go has low overhead for recursive functions -// 3. The performance of the HashFromByteSlice routine is dominated -// by the actual hashing of data +// 1. The performance of the HashFromByteSlice is pretty good +// 2. Go has low overhead for recursive functions +// 3. 
The performance of the HashFromByteSlice routine is dominated +// by the actual hashing of data // // Although this work is in no way exhaustive, point #3 suggests that // optimization of this routine would need to take an alternative diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 0fbd9ad2db..21073bea12 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -9,13 +9,13 @@ import ( "math/big" secp256k1 "github.com/btcsuite/btcd/btcec" - "golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format + "golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format "github.com/tendermint/tendermint/crypto" tmjson "github.com/tendermint/tendermint/libs/json" ) -//------------------------------------- +// ------------------------------------- const ( PrivKeyName = "tendermint/PrivKeySecp256k1" PubKeyName = "tendermint/PubKeySecp256k1" @@ -124,8 +124,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey { // used to reject malleable signatures // see: -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 -// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 +// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. 
diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 351be09644..71338fb6ff 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -1,14 +1,6 @@ module.exports = { theme: 'cosmos', title: 'Tendermint Core', - // locales: { - // "/": { - // lang: "en-US" - // }, - // "/ru/": { - // lang: "ru" - // } - // }, base: process.env.VUEPRESS_BASE, themeConfig: { repo: 'tendermint/tendermint', @@ -23,16 +15,12 @@ module.exports = { }, versions: [ { - "label": "v0.33", - "key": "v0.33" - }, - { - "label": "v0.34", + "label": "v0.34 (latest)", "key": "v0.34" }, { - "label": "v0.35", - "key": "v0.35" + "label": "v0.33", + "key": "v0.33" } ], topbar: { @@ -45,10 +33,8 @@ module.exports = { title: 'Resources', children: [ { - // TODO(creachadair): Figure out how to make this per-branch. - // See: https://github.com/tendermint/tendermint/issues/7908 title: 'RPC', - path: 'https://docs.tendermint.com/v0.35/rpc/', + path: (process.env.VUEPRESS_BASE ? process.env.VUEPRESS_BASE : '/')+'rpc/', static: true }, ] @@ -59,9 +45,9 @@ module.exports = { title: 'Help & Support', editLink: true, forum: { - title: 'Tendermint Forum', - text: 'Join the Tendermint forum to learn more', - url: 'https://forum.cosmos.network/c/tendermint', + title: 'Tendermint Discussions', + text: 'Join the Tendermint discussions to learn more', + url: 'https://github.com/tendermint/tendermint/discussions', bg: '#0B7E0B', logo: 'tendermint' }, @@ -72,7 +58,7 @@ module.exports = { }, footer: { question: { - text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' + text: 'Chat with Tendermint developers in Discord or reach out on GitHub to learn more.' 
}, logo: '/logo-bw.svg', textLink: { @@ -129,8 +115,8 @@ module.exports = { url: 'https://medium.com/@tendermint' }, { - title: 'Forum', - url: 'https://forum.cosmos.network/c/tendermint' + title: 'GitHub Discussions', + url: 'https://github.com/tendermint/tendermint/discussions' } ] }, diff --git a/docs/.vuepress/redirects b/docs/.vuepress/redirects index 15bd6111b5..f17d072aa6 100644 --- a/docs/.vuepress/redirects +++ b/docs/.vuepress/redirects @@ -1 +1,66 @@ -/master/ /v0.35/ +/redirects/master/ /main/ +/redirects/master/spec/core/state.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/state.md +/redirects/master/spec/core/encoding.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/encoding.md +/redirects/master/spec/core/genesis.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/genesis.md +/redirects/master/spec/core/data_structures.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/data_structures.md +/redirects/master/spec/core/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/readme.md +/redirects/master/spec/p2p/messages/pex.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/pex.md +/redirects/master/spec/p2p/messages/mempool.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/mempool.md +/redirects/master/spec/p2p/messages/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/README.md +/redirects/master/spec/p2p/messages/block-sync.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/block-sync.md +/redirects/master/spec/p2p/messages/state-sync.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/state-sync.md +/redirects/master/spec/p2p/messages/consensus.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/consensus.md +/redirects/master/spec/p2p/messages/evidence.html 
https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/evidence.md +/redirects/master/spec/p2p/peer.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/peer.md +/redirects/master/spec/p2p/connection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/connection.md +/redirects/master/spec/p2p/config.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/config.md +/redirects/master/spec/p2p/node.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/node.md +/redirects/master/spec/p2p/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/readme.md +/redirects/master/spec/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/README.md +/redirects/master/spec/ivy-proofs/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/ivy-proofs/README.md +/redirects/master/spec/consensus/proposer-selection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-selection.md +/redirects/master/spec/consensus/creating-proposal.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/creating-proposal.md +/redirects/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md +/redirects/master/spec/consensus/proposer-based-timestamp/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/README.md +/redirects/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md +/redirects/master/spec/consensus/proposer-based-timestamp/pbts_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts_001_draft.md 
+/redirects/master/spec/consensus/light-client/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/README.md +/redirects/master/spec/consensus/light-client/accountability.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/accountability.md +/redirects/master/spec/consensus/light-client/detection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/detection.md +/redirects/master/spec/consensus/light-client/verification.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/verification.md +/redirects/master/spec/consensus/consensus-paper/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/consensus-paper/README.md +/redirects/master/spec/consensus/signing.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md +/redirects/master/spec/consensus/consensus.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/consensus.md +/redirects/master/spec/consensus/evidence.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/evidence.md +/redirects/master/spec/consensus/bft-time.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md +/redirects/master/spec/consensus/wal.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/wal.md +/redirects/master/spec/consensus/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/readme.md +/redirects/master/spec/light-client/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/README.md +/redirects/master/spec/light-client/detection/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/README.md +/redirects/master/spec/light-client/detection/req-ibc-detection.html 
https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/req-ibc-detection.md +/redirects/master/spec/light-client/detection/detection_001_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_001_reviewed.md +/redirects/master/spec/light-client/detection/draft-functions.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/draft-functions.md +/redirects/master/spec/light-client/detection/discussions.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/discussions.md +/redirects/master/spec/light-client/detection/detection_003_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_003_reviewed.md +/redirects/master/spec/light-client/accountability/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/README.md +/redirects/master/spec/light-client/accountability/results/001indinv-apalache-report.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/results/001indinv-apalache-report.md +/redirects/master/spec/light-client/accountability/Synopsis.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/Synopsis.md +/redirects/master/spec/light-client/verification/verification_003_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_003_draft.md +/redirects/master/spec/light-client/verification/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/README.md +/redirects/master/spec/light-client/verification/verification_001_published.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_001_published.md +/redirects/master/spec/light-client/verification/verification_002_draft.html 
https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_002_draft.md +/redirects/master/spec/light-client/supervisor/supervisor_002_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/supervisor/supervisor_002_draft.md +/redirects/master/spec/light-client/supervisor/supervisor_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/supervisor/supervisor_001_draft.md +/redirects/master/spec/light-client/attacks/isolate-attackers_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/isolate-attackers_001_draft.md +/redirects/master/spec/light-client/attacks/notes-on-evidence-handling.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/notes-on-evidence-handling.md +/redirects/master/spec/light-client/attacks/isolate-attackers_002_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/isolate-attackers_002_reviewed.md +/redirects/master/spec/abci/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/README.md +/redirects/master/spec/abci/client-server.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/client-server.md +/redirects/master/spec/abci/apps.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md +/redirects/master/spec/abci/abci.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md +/redirects/master/spec/rpc/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/rpc/README.md +/redirects/master/spec/blockchain/state.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md +/redirects/master/spec/blockchain/encoding.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md +/redirects/master/spec/blockchain/blockchain.html 
https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md +/redirects/master/spec/blockchain/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/readme.md +/redirects/master/tutorials/go.html /v0.34/tutorials/go.html diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 04883e462a..a195b287f3 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -2,39 +2,38 @@ The documentation for Tendermint Core is hosted at: -- +- -built from the files in this (`/docs`) directory for -[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively. +built from the files in this (`/docs`) directory. ## How It Works -There is a CircleCI job listening for changes in the `/docs` directory, on both -the `master` branch. Any updates to files in this directory -on those branches will automatically trigger a website deployment. Under the hood, -the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo. +There is a [GitHub Action](../.github/workflows/docs-deployment.yml) that is +triggered by changes in the `/docs` directory on `main` as well as the branch of +each major supported version (e.g. `v0.34.x`). Any updates to files in this +directory on those branches will automatically trigger a website deployment. ## README -The [README.md](./README.md) is also the landing page for the documentation -on the website. During the Jenkins build, the current commit is added to the bottom -of the README. +The [README.md](./README.md) is also the landing page for the documentation on +the website. ## Config.js -The [config.js](./.vuepress/config.js) generates the sidebar and Table of Contents -on the website docs. Note the use of relative links and the omission of -file extensions. Additional features are available to improve the look -of the sidebar. +The [config.js](./.vuepress/config.js) generates the sidebar and Table of +Contents on the website docs. 
Note the use of relative links and the omission of +file extensions. Additional features are available to improve the look of the +sidebar. ## Links -**NOTE:** Strongly consider the existing links - both within this directory -and to the website docs - when moving or deleting files. +**NOTE:** Strongly consider the existing links - both within this directory and +to the website docs - when moving or deleting files. Links to directories _MUST_ end in a `/`. -Relative links should be used nearly everywhere, having discovered and weighed the following: +Relative links should be used nearly everywhere, having discovered and weighed +the following: ### Relative @@ -65,7 +64,8 @@ Make sure you are in the `docs` directory and run the following commands: rm -rf node_modules ``` -This command will remove old version of the visual theme and required packages. This step is optional. +This command will remove old version of the visual theme and required packages. +This step is optional. ```bash npm install @@ -79,17 +79,24 @@ npm run serve -Run `pre` and `post` hooks and start a hot-reloading web-server. See output of this command for the URL (it is often ). +Run `pre` and `post` hooks and start a hot-reloading web-server. See output of +this command for the URL (it is often ). -To build documentation as a static website run `npm run build`. You will find the website in `.vuepress/dist` directory. +To build documentation as a static website run `npm run build`. You will find +the website in `.vuepress/dist` directory. ## Search -We are using [Algolia](https://www.algolia.com) to power full-text search. This uses a public API search-only key in the `config.js` as well as a [tendermint.json](https://github.com/algolia/docsearch-configs/blob/master/configs/tendermint.json) configuration file that we can update with PRs. +We are using [Algolia](https://www.algolia.com) to power full-text search. 
This +uses a public API search-only key in the `config.js` as well as a +[tendermint.json](https://github.com/algolia/docsearch-configs/blob/master/configs/tendermint.json) +configuration file that we can update with PRs. ## Consistency -Because the build processes are identical (as is the information contained herein), this file should be kept in sync as -much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md). +Because the build processes are identical (as is the information contained +herein), this file should be kept in sync as much as possible with its +[counterpart in the Cosmos SDK +repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md). diff --git a/docs/README.md b/docs/README.md index 7cd5f68d4c..5041766a09 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,20 +14,29 @@ of a web-server, database, and supporting libraries for blockchain applications written in any programming language. Like a web-server serving web applications, Tendermint serves blockchain applications. -More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) -State Machine Replication (SMR) for arbitrary deterministic, finite state machines. +More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) State +Machine Replication (SMR) for arbitrary deterministic, finite state machines. For more background, see [What is Tendermint?](introduction/what-is-tendermint.md). -To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). +To get started quickly with an example application, see the [quick start +guide](introduction/quick-start.md). -To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/spec/tree/master/spec/abci). 
+To learn about application development on Tendermint, see the [Application +Blockchain +Interface](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci). For more details on using Tendermint, see the respective documentation for -[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). +[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and +[network deployments](networks/). -To find out about the Tendermint ecosystem you can go [here](https://github.com/tendermint/awesome#ecosystem). If you are a project that is using Tendermint you are welcome to make a PR to add your project to the list. +To find out about the Tendermint ecosystem you can go +[here](https://github.com/tendermint/awesome#ecosystem). If you are a project +that is using Tendermint you are welcome to make a PR to add your project to the +list. ## Contribute -To contribute to the documentation, see [this file](https://github.com/tendermint/tendermint/blob/master/docs/DOCS_README.md) for details of the build process and considerations when making changes. +To contribute to the documentation, see [this +file](https://github.com/tendermint/tendermint/blob/main/docs/DOCS_README.md) +for details of the build process and considerations when making changes. diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 1645c4f8ca..98a1005c99 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -138,7 +138,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in -Golang](https://github.com/tendermint/tendermint/tree/master/abci/server). See the +Golang](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/server). See the [list of other ABCI implementations](https://github.com/tendermint/awesome#ecosystem) for servers in other languages. 
@@ -325,7 +325,7 @@ But the ultimate flexibility comes from being able to write the application easily in any language. We have implemented the counter in a number of languages [see the -example directory](https://github.com/tendermint/tendermint/tree/master/abci/example). +example directory](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/example). To run the Node.js version, fist download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci): diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md index ec2822688c..761a9d85a1 100644 --- a/docs/app-dev/app-architecture.md +++ b/docs/app-dev/app-architecture.md @@ -55,6 +55,6 @@ Tendermint. See the following for more extensive documentation: - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028) -- [Tendermint RPC Docs](https://docs.tendermint.com/master/rpc/) +- [Tendermint RPC Docs](https://docs.tendermint.com/v0.34/rpc/) - [Tendermint in Production](../tendermint-core/running-in-production.md) - [ABCI spec](https://github.com/tendermint/spec/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci) diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 0019df311b..33925ec7b6 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -15,7 +15,7 @@ the block itself is never stored. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more details on `Events`, see the -[ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events) +[ABCI](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md#events) documentation. An `Event` has a composite key associated with it. 
A `compositeKey` is @@ -146,7 +146,7 @@ You can query for a paginated set of transaction by their events by calling the curl "localhost:26657/tx_search?query=\"message.sender='cosmos1...'\"&prove=true" ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/#/Info/tx_search) +Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search) for more information on query syntax and other options. ## Subscribing to Transactions @@ -165,7 +165,7 @@ a query to `/subscribe` RPC endpoint. } ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/#subscribe) for more information +Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#subscribe) for more information on query syntax and other options. ## Querying Blocks Events @@ -177,5 +177,5 @@ You can query for a paginated set of blocks by their events by calling the curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_changed > 0\"" ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/#/Info/block_search) +Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/block_search) for more information on query syntax and other options. diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md index 9cff48a2a4..0fe02af193 100644 --- a/docs/introduction/what-is-tendermint.md +++ b/docs/introduction/what-is-tendermint.md @@ -120,7 +120,7 @@ consensus engine, and provides a particular application state. ## ABCI Overview The [Application BlockChain Interface -(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci) +(ABCI)](https://github.com/tendermint/tendermint/tree/v0.34.x/abci) allows for Byzantine Fault Tolerant replication of applications written in any programming language. @@ -180,15 +180,15 @@ The application will be responsible for - Allowing clients to query the UTXO database. Tendermint is able to decompose the blockchain design by offering a very -simple API (ie. 
the ABCI) between the application process and consensus +simple API (i.e. the ABCI) between the application process and consensus process. The ABCI consists of 3 primary message types that get delivered from the core to the application. The application replies with corresponding response messages. -The messages are specified here: [ABCI Message -Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types). +The messages are specified in the [ABCI +specification](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md). The **DeliverTx** message is the work horse of the application. Each transaction in the blockchain is delivered with this message. The diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md index b11df37109..788945b680 100644 --- a/docs/networks/terraform-and-ansible.md +++ b/docs/networks/terraform-and-ansible.md @@ -14,7 +14,7 @@ testnets on those servers. ## Install NOTE: see the [integration bash -script](https://github.com/tendermint/tendermint/blob/master/networks/remote/integration.sh) +script](https://github.com/tendermint/tendermint/blob/v0.34.x/networks/remote/integration.sh) that can be run on a fresh DO droplet and will automatically spin up a 4 node testnet. The script more or less does everything described below. @@ -58,7 +58,7 @@ With the droplets created and running, let's setup Ansible. ## Ansible The playbooks in [the ansible -directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible) +directory](https://github.com/tendermint/tendermint/tree/v0.34.x/networks/remote/ansible) run ansible roles to configure the sentry node architecture. You must switch to this directory to run ansible (`cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible`). 
diff --git a/docs/package-lock.json b/docs/package-lock.json index 8bbdae8cc6..37dfc40e70 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -8876,9 +8876,9 @@ } }, "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" }, "node_modules/mississippi": { "version": "3.0.0", @@ -13045,9 +13045,9 @@ } }, "node_modules/url-parse": { - "version": "1.5.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz", - "integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==", + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", "dependencies": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -21113,9 +21113,9 @@ } }, "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" }, "mississippi": { "version": "3.0.0", @@ -24536,9 +24536,9 @@ } }, "url-parse": { - "version": "1.5.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz", - "integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==", + 
"version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" diff --git a/docs/qa/README.md b/docs/qa/README.md new file mode 100644 index 0000000000..cd1fba9a8d --- /dev/null +++ b/docs/qa/README.md @@ -0,0 +1,22 @@ +--- +order: 1 +parent: + title: Tendermint Quality Assurance + description: This is a report on the process followed and results obtained when running v0.34.x on testnets + order: 2 +--- + +# Tendermint Quality Assurance + +This directory keeps track of the process followed by the Tendermint Core team +for Quality Assurance before cutting a release. +This directory is to live in multiple branches. On each release branch, +the contents of this directory reflect the status of the process +at the time the Quality Assurance process was applied for that release. + +File [method](./method.md) keeps track of the process followed to obtain the results +used to decide if a release is passing the Quality Assurance process. +The results obtained in each release are stored in their own directory. +The following releases have undergone the Quality Assurance process: + +* [v0.34.x](./v034/), which was tested just before releasing v0.34.22 diff --git a/docs/qa/method.md b/docs/qa/method.md new file mode 100644 index 0000000000..cc4f82dfa4 --- /dev/null +++ b/docs/qa/method.md @@ -0,0 +1,214 @@ +--- +order: 1 +title: Method +--- + +# Method + +This document provides a detailed description of the QA process. +It is intended to be used by engineers reproducing the experimental setup for future tests of Tendermint. + +The (first iteration of the) QA process as described [in the RELEASES.md document][releases] +was applied to version v0.34.x in order to have a set of results acting as benchmarking baseline. 
+This baseline is then compared with results obtained in later versions. + +Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them: +_200 Node Test_, and _Rotating Nodes Test_. + +[releases]: https://github.com/tendermint/tendermint/blob/v0.37.x/RELEASES.md#large-scale-testnets + +## Software Dependencies + +### Infrastructure Requirements to Run the Tests + +* An account at Digital Ocean (DO), with a high droplet limit (>202) +* The machine to orchestrate the tests should have the following installed: + * A clone of the [testnet repository][testnet-repo] + * This repository contains all the scripts mentioned in the remainder of this section + * [Digital Ocean CLI][doctl] + * [Terraform CLI][Terraform] + * [Ansible CLI][Ansible] + +[testnet-repo]: https://github.com/interchainio/tendermint-testnet +[Ansible]: https://docs.ansible.com/ansible/latest/index.html +[Terraform]: https://www.terraform.io/docs +[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/ + +### Requirements for Result Extraction + +* Matlab or Octave +* [Prometheus][prometheus] server installed +* blockstore DB of one of the full nodes in the testnet +* Prometheus DB + +[prometheus]: https://prometheus.io/ + +## 200 Node Testnet + +### Running the test + +This section explains how the tests were carried out for reproducibility purposes. + +1. [If you haven't done it before] + Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. +2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change) +3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested. +4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet + * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9) +5. 
As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. + All nodes should be increasing their heights. +6. `ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node. + * Before running it, you need to edit the script to provide the IP address of a full node. + This node will receive all transactions from the load runner node. + * This script will take about 40 mins to run + * It is running 90-seconds-long experiments in a loop with different loads +7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine +8. Verify that the data was collected without errors + * at least one blockstore DB for a Tendermint validator + * the Prometheus database from the Prometheus node + * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) +9. **Run `make terraform-destroy`** + * Don't forget to type `yes`! Otherwise you're in trouble. + +### Result Extraction + +The method for extracting the results described here is highly manual (and exploratory) at this stage. +The Core team should improve it at every iteration to increase the amount of automation. + +#### Steps + +1. Unzip the blockstore into a directory +2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore + * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ > results/report.txt` + * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ --csv results/raw.csv` +3. 
File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate + * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, + copy its related lines to the filename that matches the number of connections. + * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`. +4. Generate file `report_tabbed.txt` by showing the contents of `report01.txt`, `report02.txt`, `report04.txt` side by side + * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections. +5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment. + The format of the `.dat` files is amenable to loading them as matrices in Octave + + ```bash + uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }')) + c=1 + for i in 01 02 04; do + for j in 0025 0050 0100 0200; do + echo $i $j $c "${uuids[$c]}" + filename=c${i}_r${j} + grep ${uuids[$c]} raw.csv > ${filename}.csv + cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' > ${filename}.dat + c=$(expr $c + 1) + done + done + ``` + +6. Enter Octave +7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet + + ```octave + conns = { "01"; "02"; "04" }; + rates = { "0025"; "0050"; "0100"; "0200" }; + for i = 1:length(conns) + for j = 1:length(rates) + filename = strcat("c", conns{i}, "_r", rates{j}, ".dat"); + load("-ascii", filename); + endfor + endfor + ``` + +8. Set variable release to the current release undergoing QA + + ```octave + release = "v0.34.x"; + ``` + +9. Generate a plot with all (or some) experiments, where the X axis is the experiment time, + and the y axis is the latency of transactions. + The following snippet plots all experiments. 
+ + ```octave + legends = {}; + hold off; + for i = 1:length(conns) + for j = 1:length(rates) + data_name = strcat("c", conns{i}, "_r", rates{j}); + l = strcat("c=", conns{i}, " r=", rates{j}); + m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, "."); + hold on; + legends(1, end+1) = l; + endfor + endfor + legend(legends, "location", "northeastoutside"); + xlabel("experiment time (s)"); + ylabel("latency (s)"); + t = sprintf("200-node testnet - %s", release); + title(t); + ``` + +10. Consider adjusting the axis, in case you want to compare your results to the baseline, for instance + + ```octave + axis([0, 100, 0, 30], "tic"); + ``` + +11. Use Octave's GUI menu to save the plot (e.g. as `.png`) + +12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary. + +13. To generate a latency vs throughput plot, using the raw CSV file generated + in step 2, follow the instructions for the [`latency_throughput.py`] script. + +[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md + +#### Extracting Prometheus Metrics + +1. Stop the prometheus server if it is running as a service (e.g. a `systemd` unit). +2. Unzip the prometheus database retrieved from the testnet, and move it to replace the + local prometheus database. +3. Start the prometheus server and make sure no error logs appear at start up. +4. Introduce the metrics you want to gather or plot. + +## Rotating Node Testnet + +### Running the test + +This section explains how the tests were carried out for reproducibility purposes. + +1. [If you haven't done it before] + Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`. +2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change) +3. Set variable `VERSION_TAG` to the git hash that is to be tested. +4. Run `make terraform-apply EPHEMERAL_SIZE=25` + * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests +5. 
Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet +6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric. + All nodes should be increasing their heights. +7. On a different shell, + * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` + * `X` and `Y` should reflect a load below the saturation point (see, e.g., + [this paragraph](./v034/README.md#finding-the-saturation-point) for further info) +8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. + * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length + of the experiment. +9. When the height of the chain reaches 3000, stop the `make runload` script +10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice) + after height 3000 was reached, stop `make rotate` +11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine +12. Verify that the data was collected without errors + * at least one blockstore DB for a Tendermint validator + * the Prometheus database from the Prometheus node + * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s) +13. **Run `make terraform-destroy`** + +Steps 8 to 10 are highly manual at the moment and will be improved in next iterations. + +### Result Extraction + +In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but: + +* The `results.txt` file contains only one experiment +* Therefore, no need for any `for` loops + +As for prometheus, the same method as for the 200 node experiment can be applied. 
diff --git a/docs/qa/v034/README.md b/docs/qa/v034/README.md new file mode 100644 index 0000000000..b07b102912 --- /dev/null +++ b/docs/qa/v034/README.md @@ -0,0 +1,278 @@ +--- +order: 1 +parent: + title: Tendermint Quality Assurance Results for v0.34.x + description: This is a report on the results obtained when running v0.34.x on testnets + order: 2 +--- + +# v0.34.x + +## 200 Node Testnet + +### Finding the Saturation Point + +The first goal when examining the results of the tests is identifying the saturation point. +The saturation point is a setup with a transaction load big enough to prevent the testnet +from being stable: the load runner tries to produce slightly more transactions than can +be processed by the testnet. + +The following table summarizes the results for v0.34.x, for the different experiments +(extracted from file [`v034_report_tabbed.txt`](./img/v034_report_tabbed.txt)). + +The X axis of this table is `c`, the number of connections created by the load runner process to the target node. +The Y axis of this table is `r`, the rate or number of transactions issued per second. + +| | c=1 | c=2 | c=4 | +| :--- | ----: | ----: | ----: | +| r=25 | 2225 | 4450 | 8900 | +| r=50 | 4450 | 8900 | 17800 | +| r=100 | 8900 | 17800 | 35600 | +| r=200 | 17800 | 35600 | 38660 | + +The table shows the number of 1024-byte-long transactions that were produced by the load runner, +and processed by Tendermint, during the 90 seconds of the experiment's duration. +Each cell in the table refers to an experiment with a particular number of websocket connections (`c`) +to a chosen validator, and the number of transactions per second that the load runner +tries to produce (`r`). Note that the overall load that the tool attempts to generate is $c \cdot r$. 
+ +We can see that the saturation point is beyond the diagonal that spans cells + +* `r=200,c=2` +* `r=100,c=4` + +given that the total transactions should be close to the product of the rate, the number of connections, +and the experiment time (89 seconds, since the last batch never gets sent). + +All experiments below the saturation diagonal (`r=200,c=4`) have in common that the total +number of transactions processed is noticeably less than the product $c \cdot r \cdot 89$, +which is the expected number of transactions when the system is able to deal well with the +load. +With `r=200,c=4`, we obtained 38660 whereas the theoretical number of transactions should +have been $200 \cdot 4 \cdot 89 = 71200$. + +At this point, we chose an experiment at the limit of the saturation diagonal, +in order to further study the performance of this release. +**The chosen experiment is `r=200,c=2`**. + +This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for `r=200,c=2`, +where we can see that the load stays close to 0 most of the time. + +![load-load-runner](./img/v034_r200c2_load-runner.png) + +### Examining latencies + +The method described [here](../method.md) allows us to plot the latencies of transactions +for all experiments. + +![all-latencies](./img/v034_200node_latencies.png) + +As we can see, even the experiments beyond the saturation diagonal managed to keep +transaction latency stable (i.e. not constantly increasing). +Our interpretation for this is that contention within Tendermint was propagated, +via the websockets, to the load runner, +hence the load runner could not produce the target load, but a fraction of it. + +Further examination of the Prometheus data (see below), showed that the mempool contained many transactions +at steady state, but did not grow much without quickly returning to this steady state. 
This demonstrates +that the transactions were able to be processed by the Tendermint network at least as quickly as they +were submitted to the mempool. Finally, the test script made sure that, at the end of an experiment, the +mempool was empty so that all transactions submitted to the chain were processed. + +Finally, the number of points present in the plot appears to be much less than expected given the +number of transactions in each experiment, particularly close to or above the saturation diagonal. +This is a visual effect of the plot; what appear to be points in the plot are actually potentially huge +clusters of points. To corroborate this, we have zoomed in the plot above by setting (carefully chosen) +tiny axis intervals. The cluster shown below looks like a single point in the plot above. + +![all-latencies-zoomed](./img/v034_200node_latencies_zoomed.png) + +The plot of latencies can we used as a baseline to compare with other releases. + +The following plot summarizes average latencies versus overall throughputs +across different numbers of WebSocket connections to the node into which +transactions are being loaded. + +![latency-vs-throughput](./img/v034_latency_throughput.png) + +### Prometheus Metrics on the Chosen Experiment + +As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`. +This section further examines key metrics for this experiment extracted from Prometheus data. + +#### Mempool Size + +The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous +at all full nodes. It did not exhibit any unconstrained growth. +The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools +at a given time. +The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round +at some nodes. 
+ +![mempool-cumulative](./img/v034_r200c2_mempool_size.png) + +The plot below shows evolution of the average over all full nodes, which oscillates between 1500 and 2000 +outstanding transactions. + +![mempool-avg](./img/v034_r200c2_mempool_size_avg.png) + +The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below). + +#### Peers + +The number of peers was stable at all nodes. +It was higher for the seed nodes (around 140) than for the rest (between 21 and 74). +The fact that non-seed nodes reach more than 50 peers is due to #9548. + +![peers](./img/v034_r200c2_peers.png) + +#### Consensus Rounds per Height + +Most heights took just one round, but some nodes needed to advance to round 1 at some point. + +![rounds](./img/v034_r200c2_rounds.png) + +#### Blocks Produced per Minute, Transactions Processed per Minute + +The blocks produced per minute are the slope of this plot. + +![heights](./img/v034_r200c2_heights.png) + +Over a period of 2 minutes, the height goes from 530 to 569. +This results in an average of 19.5 blocks produced per minute. + +The transactions processed per minute are the slope of this plot. + +![total-txs](./img/v034_r200c2_total-txs.png) + +Over a period of 2 minutes, the total goes from 64525 to 100125 transactions, +resulting in 17800 transactions per minute. However, we can see in the plot that +all transactions in the load are processed long before the two minutes. +If we adjust the time window when transactions are processed (approx. 105 seconds), +we obtain 20343 transactions per minute. + +#### Memory Resident Set Size + +Resident Set Size of all monitored processes is plotted below. + +![rss](./img/v034_r200c2_rss.png) + +The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth. 
+ +![rss-avg](./img/v034_r200c2_rss_avg.png) + +#### CPU utilization + +The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`, +as it usually appears in the +[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). + +![load1](./img/v034_r200c2_load1.png) + +It is contained in most cases below 5, which is generally considered acceptable load. + +### Test Result + +**Result: N/A** (v0.34.x is the baseline) + +Date: 2022-10-14 + +Version: 3ec6e424d6ae4c96867c2dcf8310572156068bb6 + +## Rotating Node Testnet + +For this testnet, we will use a load that can safely be considered below the saturation +point for the size of this testnet (between 13 and 38 full nodes): `c=4,r=800`. + +N.B.: The version of Tendermint used for these tests is affected by #9539. +However, the reduced load that reaches the mempools is orthogonal to functionality +we are focusing on here. + +### Latencies + +The plot of all latencies can be seen in the following plot. + +![rotating-all-latencies](./img/v034_rotating_latencies.png) + +We can observe there are some very high latencies, towards the end of the test. +Upon suspicion that they are duplicate transactions, we examined the latencies +raw file and discovered there are more than 100K duplicate transactions. + +The following plot shows the latencies file where all duplicate transactions have +been removed, i.e., only the first occurrence of a duplicate transaction is kept. + +![rotating-all-latencies-uniq](./img/v034_rotating_latencies_uniq.png) + +This problem, existing in `v0.34.x`, will need to be addressed, perhaps in the same way +we addressed it when running the 200 node test with high loads: increasing the `cache_size` +configuration parameter. + +### Prometheus Metrics + +The set of metrics shown here are less than for the 200 node experiment. +We are only interested in those for which the catch-up process (blocksync) may have an impact. 
+ +#### Blocks and Transactions per minute + +Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot. + +![rotating-heights](./img/v034_rotating_heights.png) + +Over a period of 5229 seconds, the height goes from 2 to 3638. +This results in an average of 41 blocks produced per minute. + +The following plot shows only the heights reported by ephemeral nodes +(which are also included in the plot above). Note that the _height_ metric +is only showed _once the node has switched to consensus_, hence the gaps +when nodes are killed, wiped out, started from scratch, and catching up. + +![rotating-heights-ephe](./img/v034_rotating_heights_ephe.png) + +The transactions processed per minute are the gradient of this plot. + +![rotating-total-txs](./img/v034_rotating_total-txs.png) + +The small lines we see periodically close to `y=0` are the transactions that +ephemeral nodes start processing when they are caught up. + +Over a period of 5229 minutes, the total goes from 0 to 387697 transactions, +resulting in 4449 transactions per minute. We can see some abrupt changes in +the plot's gradient. This will need to be investigated. + +#### Peers + +The plot below shows the evolution in peers throughout the experiment. +The periodic changes observed are due to the ephemeral nodes being stopped, +wiped out, and recreated. + +![rotating-peers](./img/v034_rotating_peers.png) + +The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes +are mostly at the lower part. + +#### Memory Resident Set Size + +The average Resident Set Size (RSS) over all processes seems stable, and slightly growing toward the end. +This might be related to the increased in transaction load observed above. + +![rotating-rss-avg](./img/v034_rotating_rss_avg.png) + +The memory taken by the validators and the ephemeral nodes (when they are up) is comparable. + +#### CPU utilization + +The plot shows metric `load1` for all nodes. 
+ +![rotating-load1](./img/v034_rotating_load1.png) + +It is contained under 5 most of the time, which is considered normal load. +The purple line, which follows a different pattern is the validator receiving all +transactions, via RPC, from the load runner process. + +### Test Result + +**Result: N/A** + +Date: 2022-10-10 + +Version: a28c987f5a604ff66b515dd415270063e6fb069d diff --git a/docs/qa/v034/img/v034_200node_latencies.png b/docs/qa/v034/img/v034_200node_latencies.png new file mode 100644 index 0000000000..afd1060caf Binary files /dev/null and b/docs/qa/v034/img/v034_200node_latencies.png differ diff --git a/docs/qa/v034/img/v034_200node_latencies_zoomed.png b/docs/qa/v034/img/v034_200node_latencies_zoomed.png new file mode 100644 index 0000000000..1ff9364422 Binary files /dev/null and b/docs/qa/v034/img/v034_200node_latencies_zoomed.png differ diff --git a/docs/qa/v034/img/v034_latency_throughput.png b/docs/qa/v034/img/v034_latency_throughput.png new file mode 100644 index 0000000000..3674fe47b4 Binary files /dev/null and b/docs/qa/v034/img/v034_latency_throughput.png differ diff --git a/docs/qa/v034/img/v034_r200c2_heights.png b/docs/qa/v034/img/v034_r200c2_heights.png new file mode 100644 index 0000000000..11f3bba432 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_heights.png differ diff --git a/docs/qa/v034/img/v034_r200c2_load-runner.png b/docs/qa/v034/img/v034_r200c2_load-runner.png new file mode 100644 index 0000000000..70211b0d21 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_load-runner.png differ diff --git a/docs/qa/v034/img/v034_r200c2_load1.png b/docs/qa/v034/img/v034_r200c2_load1.png new file mode 100644 index 0000000000..11012844dc Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_load1.png differ diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size.png b/docs/qa/v034/img/v034_r200c2_mempool_size.png new file mode 100644 index 0000000000..c5d690200a Binary files /dev/null and 
b/docs/qa/v034/img/v034_r200c2_mempool_size.png differ diff --git a/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png b/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png new file mode 100644 index 0000000000..bda399fe5d Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_mempool_size_avg.png differ diff --git a/docs/qa/v034/img/v034_r200c2_peers.png b/docs/qa/v034/img/v034_r200c2_peers.png new file mode 100644 index 0000000000..a0aea7ada3 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_peers.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rounds.png b/docs/qa/v034/img/v034_r200c2_rounds.png new file mode 100644 index 0000000000..215be100de Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rounds.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rss.png b/docs/qa/v034/img/v034_r200c2_rss.png new file mode 100644 index 0000000000..6d14dced0b Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rss.png differ diff --git a/docs/qa/v034/img/v034_r200c2_rss_avg.png b/docs/qa/v034/img/v034_r200c2_rss_avg.png new file mode 100644 index 0000000000..8dec67da29 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_rss_avg.png differ diff --git a/docs/qa/v034/img/v034_r200c2_total-txs.png b/docs/qa/v034/img/v034_r200c2_total-txs.png new file mode 100644 index 0000000000..177d5f1c31 Binary files /dev/null and b/docs/qa/v034/img/v034_r200c2_total-txs.png differ diff --git a/docs/qa/v034/img/v034_report_tabbed.txt b/docs/qa/v034/img/v034_report_tabbed.txt new file mode 100644 index 0000000000..2514954743 --- /dev/null +++ b/docs/qa/v034/img/v034_report_tabbed.txt @@ -0,0 +1,52 @@ +Experiment ID: 3d5cf4ef-1a1a-4b46-aa2d-da5643d2e81e │Experiment ID: 80e472ec-13a1-4772-a827-3b0c907fb51d │Experiment ID: 07aca6cf-c5a4-4696-988f-e3270fc6333b + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 25 │ Rate: 25 │ Rate: 25 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 2225 │ Total Valid Tx: 4450 │ Total Valid Tx: 8900 + 
Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 599.404362ms │ Minimum Latency: 448.145181ms │ Minimum Latency: 412.485729ms + Maximum Latency: 3.539686885s │ Maximum Latency: 3.237392049s │ Maximum Latency: 12.026665368s + Average Latency: 1.441485349s │ Average Latency: 1.441267946s │ Average Latency: 2.150192457s + Standard Deviation: 541.049869ms │ Standard Deviation: 525.040007ms │ Standard Deviation: 2.233852478s + │ │ +Experiment ID: 953dc544-dd40-40e8-8712-20c34c3ce45e │Experiment ID: d31fc258-16e7-45cd-9dc8-13ab87bc0b0a │Experiment ID: 15d90a7e-b941-42f4-b411-2f15f857739e + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 50 │ Rate: 50 │ Rate: 50 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 4450 │ Total Valid Tx: 8900 │ Total Valid Tx: 17800 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 482.046942ms │ Minimum Latency: 435.458913ms │ Minimum Latency: 510.746448ms + Maximum Latency: 3.761483455s │ Maximum Latency: 7.175583584s │ Maximum Latency: 6.551497882s + Average Latency: 1.450408183s │ Average Latency: 1.681673116s │ Average Latency: 1.738083875s + Standard Deviation: 587.560056ms │ Standard Deviation: 1.147902047s │ Standard Deviation: 943.46522ms + │ │ +Experiment ID: 9a0b9980-9ce6-4db5-a80a-65ca70294b87 │Experiment ID: df8fa4f4-80af-4ded-8a28-356d15018b43 │Experiment ID: d0e41c2c-89c0-4f38-8e34-ca07adae593a + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 100 │ Rate: 100 │ Rate: 100 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 8900 │ Total Valid Tx: 17800 │ Total Valid Tx: 35600 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 477.417219ms │ Minimum Latency: 564.29247ms │ Minimum Latency: 840.71089ms + Maximum Latency: 6.63744785s │ Maximum Latency: 6.988553219s │ Maximum Latency: 9.555312398s + Average 
Latency: 1.561216103s │ Average Latency: 1.76419063s │ Average Latency: 3.200941683s + Standard Deviation: 1.011333552s │ Standard Deviation: 1.068459423s │ Standard Deviation: 1.732346601s + │ │ +Experiment ID: 493df3ee-4a36-4bce-80f8-6d65da66beda │Experiment ID: 13060525-f04f-46f6-8ade-286684b2fe50 │Experiment ID: 1777cbd2-8c96-42e4-9ec7-9b21f2225e4d + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 200 │ Rate: 200 │ Rate: 200 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 17800 │ Total Valid Tx: 35600 │ Total Valid Tx: 38660 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 493.705261ms │ Minimum Latency: 955.090573ms │ Minimum Latency: 1.9485821s + Maximum Latency: 7.440921872s │ Maximum Latency: 10.086673491s │ Maximum Latency: 17.73103976s + Average Latency: 1.875510582s │ Average Latency: 3.438130099s │ Average Latency: 8.143862237s + Standard Deviation: 1.304336995s │ Standard Deviation: 1.966391574s │ Standard Deviation: 3.943140002s + diff --git a/docs/qa/v034/img/v034_rotating_heights.png b/docs/qa/v034/img/v034_rotating_heights.png new file mode 100644 index 0000000000..47913c282f Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_heights.png differ diff --git a/docs/qa/v034/img/v034_rotating_heights_ephe.png b/docs/qa/v034/img/v034_rotating_heights_ephe.png new file mode 100644 index 0000000000..981b93d6c4 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_heights_ephe.png differ diff --git a/docs/qa/v034/img/v034_rotating_latencies.png b/docs/qa/v034/img/v034_rotating_latencies.png new file mode 100644 index 0000000000..f0a54ed5b6 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_latencies.png differ diff --git a/docs/qa/v034/img/v034_rotating_latencies_uniq.png b/docs/qa/v034/img/v034_rotating_latencies_uniq.png new file mode 100644 index 0000000000..e5d694a16e Binary files /dev/null and 
b/docs/qa/v034/img/v034_rotating_latencies_uniq.png differ diff --git a/docs/qa/v034/img/v034_rotating_load1.png b/docs/qa/v034/img/v034_rotating_load1.png new file mode 100644 index 0000000000..e9c385b85e Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_load1.png differ diff --git a/docs/qa/v034/img/v034_rotating_peers.png b/docs/qa/v034/img/v034_rotating_peers.png new file mode 100644 index 0000000000..ab5c8732d3 Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_peers.png differ diff --git a/docs/qa/v034/img/v034_rotating_rss_avg.png b/docs/qa/v034/img/v034_rotating_rss_avg.png new file mode 100644 index 0000000000..9a4167320c Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_rss_avg.png differ diff --git a/docs/qa/v034/img/v034_rotating_total-txs.png b/docs/qa/v034/img/v034_rotating_total-txs.png new file mode 100644 index 0000000000..1ce5f47e9b Binary files /dev/null and b/docs/qa/v034/img/v034_rotating_total-txs.png differ diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index 933cce5403..e13e14ba90 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ b/docs/tendermint-core/how-to-read-logs.md @@ -67,7 +67,7 @@ Next follows a standard block creation cycle, where we enter a new round, propose a block, receive more than 2/3 of prevotes, then precommits and finally have a chance to commit a block. For details, please refer to [Byzantine Consensus -Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md). +Algorithm](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/consensus.md). ```sh I[10-04|13:54:30.393] enterNewRound(91/0). 
Current: 91/0/RoundStepNewHeight module=consensus diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index d6c2bd82dc..245b32cdcb 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -18,38 +18,40 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Tags** | **Description** | -| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | -| consensus_height | Gauge | | Height of the chain | -| consensus_validators | Gauge | | Number of validators | -| consensus_validators_power | Gauge | | Total voting power of all validators | -| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | | Number of rounds | -| consensus_num_txs | Gauge | | Number of transactions | -| consensus_total_txs | Gauge | | Total number of transactions committed | -| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | | /status sync_info number | -| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 
(syncing) | -| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | | Block size in bytes | -| p2p_peers | Gauge | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | -| mempool_failed_txs | counter | | number of failed transactions | -| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Tags** | **Description** | +|------------------------------------------|-----------|-------------------|------------------------------------------------------------------------| +| `consensus_height` | Gauge | | Height of the chain | +| `consensus_validators` | Gauge | | Number of validators | +| `consensus_validators_power` | Gauge | | Total voting power of all validators | +| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set | +| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator | +| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| `consensus_missing_validators` | Gauge | | Number of validators who did not sign | +| `consensus_missing_validators_power` | Gauge | | 
Total voting power of the missing validators | +| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign | +| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators | +| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| `consensus_rounds` | Gauge | | Number of rounds | +| `consensus_num_txs` | Gauge | | Number of transactions | +| `consensus_total_txs` | Gauge | | Total number of transactions committed | +| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer | +| `consensus_latest_block_height` | Gauge | | /status sync\_info number | +| `consensus_fast_syncing` | Gauge | | Either 0 (not fast syncing) or 1 (syncing) | +| `consensus_state_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) | +| `consensus_block_size_bytes` | Gauge | | Block size in bytes | +| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type | +| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type | +| `p2p_peers` | Gauge | | Number of peers node's connected to | +| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer | +| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer | +| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer | +| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id | +| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer | +| `mempool_size` | Gauge | | Number of uncommitted transactions | +| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes | +| `mempool_failed_txs` | Counter | | Number of failed 
transactions | +| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool | +| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms | ## Useful queries diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md index 56c2491c05..300078359a 100644 --- a/docs/tendermint-core/rpc.md +++ b/docs/tendermint-core/rpc.md @@ -6,6 +6,6 @@ order: 9 The RPC documentation is hosted here: -- [https://docs.tendermint.com/master/rpc/](https://docs.tendermint.com/master/rpc/) +- [https://docs.tendermint.com/v0.34/rpc/](https://docs.tendermint.com/v0.34/rpc/) To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/blob/v0.34.x/rpc/core). diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 41f40641e1..06c0d82c45 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -95,7 +95,7 @@ mechanisms. ### RPC Endpoints returning multiple entries are limited by default to return 30 -elements (100 max). See the [RPC Documentation](https://docs.tendermint.com/master/rpc/) +elements (100 max). See the [RPC Documentation](https://docs.tendermint.com/v0.34/rpc/) for more information. Rate-limiting and authentication are another key aspects to help protect diff --git a/docs/tendermint-core/subscription.md b/docs/tendermint-core/subscription.md index 067d0bf51b..d8723bdec3 100644 --- a/docs/tendermint-core/subscription.md +++ b/docs/tendermint-core/subscription.md @@ -31,7 +31,7 @@ method via Websocket along with a valid query. } ``` -Check out [API docs](https://docs.tendermint.com/master/rpc/) for +Check out [API docs](https://docs.tendermint.com/v0.34/rpc/) for more information on query syntax and other options. 
You can also use tags, given you had included them into DeliverTx @@ -43,7 +43,7 @@ transactions](./indexing-transactions.md) for details. When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. The list is the same Tendermint receives from ABCI application (see [EndBlock -section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in +section](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md#endblock) in the ABCI spec). Response: diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 602b0a6a5b..4df4801417 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -39,7 +39,7 @@ tendermint testnet --help The `genesis.json` file in `$TMHOME/config/` defines the initial TendermintCore state upon genesis of the blockchain ([see -definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.go)). +definition](https://github.com/tendermint/tendermint/blob/v0.34.x/types/genesis.go)). #### Fields @@ -49,7 +49,7 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g chain IDs, you will have a bad time. The ChainID must be less than 50 symbols. - `initial_height`: Height at which Tendermint should begin at. If a blockchain is conducting a network upgrade, starting from the stopped height brings uniqueness to previous heights. -- `consensus_params` [spec](https://github.com/tendermint/spec/blob/master/spec/core/state.md#consensusparams) +- `consensus_params` [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/data_structures.md#consensusparams) - `block` - `max_bytes`: Max block size, in bytes. - `max_gas`: Max gas per block. @@ -183,7 +183,7 @@ endpoints. Some take no arguments (like `/status`), while others specify the argument name and use `_` as a placeholder. 
-> TIP: Find the RPC Documentation [here](https://docs.tendermint.com/master/rpc/) +> TIP: Find the RPC Documentation [here](https://docs.tendermint.com/v0.34/rpc/) ### Formatting diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index a5b1d61af2..60c444476f 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -84,7 +84,7 @@ Hello, Tendermint Core Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). +file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -218,7 +218,7 @@ etc.) by Tendermint Core. Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). +specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). For the underlying key-value store we'll use [badger](https://github.com/dgraph-io/badger), which is an embeddable, @@ -337,7 +337,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery ``` The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). +[here](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci/). ## 1.4 Starting an application and a Tendermint Core instance in the same process @@ -610,7 +610,7 @@ go build To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. Please refer to [the official -guide](https://docs.tendermint.com/master/introduction/install.html). 
If you're +guide](https://docs.tendermint.com/v0.34/introduction/install.html). If you're installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). ```bash @@ -680,4 +680,4 @@ $ curl -s 'localhost:26657/abci_query?data="tendermint"' I hope everything went smoothly and your first, but hopefully not the last, Tendermint Core application is up and running. If not, please [open an issue on Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/master/). +deeper, read [the docs](https://docs.tendermint.com/v0.34/). diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 28a015b995..64a7b57104 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -87,7 +87,7 @@ Hello, Tendermint Core Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). +file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -221,7 +221,7 @@ etc.) by Tendermint Core. Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). +specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). For the underlying key-value store we'll use [badger](https://github.com/dgraph-io/badger), which is an embeddable, @@ -340,7 +340,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery ``` The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). +[here](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci/). 
## 1.4 Starting an application and a Tendermint Core instances @@ -468,7 +468,7 @@ go build To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. Please refer to [the official -guide](https://docs.tendermint.com/master/introduction/install.html). If you're +guide](https://docs.tendermint.com/v0.34/introduction/install.html). If you're installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). ```bash @@ -482,7 +482,7 @@ I[2019-07-16|18:20:36.482] Generated genesis file module=m Feel free to explore the generated files, which can be found at `/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/master/tendermint-core/configuration.html). +[here](https://docs.tendermint.com/v0.34/tendermint-core/configuration.html). We are ready to start our application: @@ -565,4 +565,4 @@ curl -s 'localhost:26657/abci_query?data="tendermint"' I hope everything went smoothly and your first, but hopefully not the last, Tendermint Core application is up and running. If not, please [open an issue on Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/master/). +deeper, read [the docs](https://docs.tendermint.com/v0.34/). diff --git a/docs/tutorials/java.md b/docs/tutorials/java.md index dbd005957b..526d3a2c68 100644 --- a/docs/tutorials/java.md +++ b/docs/tutorials/java.md @@ -115,7 +115,7 @@ Hello world. Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). +file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). 
This allows Tendermint Core to run applications written in any programming language. @@ -323,7 +323,7 @@ etc.) by Tendermint Core. Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). +specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). For the underlying key-value store we'll use [JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. @@ -467,7 +467,7 @@ public void query(RequestQuery req, StreamObserver responseObserv ``` The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). +[here](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci/). ## 1.4 Starting an application and a Tendermint Core instances @@ -559,7 +559,7 @@ I[2019-07-16|18:20:36.482] Generated genesis file module=m Feel free to explore the generated files, which can be found at `/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/master/tendermint-core/configuration.html). +[here](https://docs.tendermint.com/v0.34/tendermint-core/configuration.html). We are ready to start our application: @@ -625,6 +625,6 @@ $ curl -s 'localhost:26657/abci_query?data="tendermint"' I hope everything went smoothly and your first, but hopefully not the last, Tendermint Core application is up and running. If not, please [open an issue on Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/master/). +deeper, read [the docs](https://docs.tendermint.com/v0.34/). The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java). 
diff --git a/docs/tutorials/kotlin.md b/docs/tutorials/kotlin.md index 50f846e68a..c311e9d71e 100644 --- a/docs/tutorials/kotlin.md +++ b/docs/tutorials/kotlin.md @@ -115,7 +115,7 @@ Hello world. Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). +file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -314,7 +314,7 @@ etc.) by Tendermint Core. Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). +specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). For the underlying key-value store we'll use [JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. 
@@ -446,7 +446,7 @@ override fun query(req: RequestQuery, responseObserver: StreamObserver 0 { evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer) - msgBytes, err := encodeMsg(evis) + evp, err := evidenceListToProto(evis) if err != nil { panic(err) } - success := peer.Send(EvidenceChannel, msgBytes) + + success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: EvidenceChannel, + Message: evp, + }, evR.Logger) if !success { time.Sleep(peerRetryMessageIntervalMS * time.Millisecond) continue @@ -210,7 +229,7 @@ type PeerState interface { // encodemsg takes a array of evidence // returns the byte encoding of the List Message -func encodeMsg(evis []types.Evidence) ([]byte, error) { +func evidenceListToProto(evis []types.Evidence) (*tmproto.EvidenceList, error) { evi := make([]tmproto.Evidence, len(evis)) for i := 0; i < len(evis); i++ { ev, err := types.EvidenceToProto(evis[i]) @@ -222,19 +241,13 @@ func encodeMsg(evis []types.Evidence) ([]byte, error) { epl := tmproto.EvidenceList{ Evidence: evi, } - - return epl.Marshal() + return &epl, nil } -// decodemsg takes an array of bytes -// returns an array of evidence -func decodeMsg(bz []byte) (evis []types.Evidence, err error) { - lm := tmproto.EvidenceList{} - if err := lm.Unmarshal(bz); err != nil { - return nil, err - } +func evidenceListFromProto(m proto.Message) ([]types.Evidence, error) { + lm := m.(*tmproto.EvidenceList) - evis = make([]types.Evidence, len(lm.Evidence)) + evis := make([]types.Evidence, len(lm.Evidence)) for i := 0; i < len(lm.Evidence); i++ { ev, err := types.EvidenceFromProto(&lm.Evidence[i]) if err != nil { diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index c0a22be26e..8dbd58dcbf 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -9,6 +9,7 @@ import ( "github.com/fortytw2/leaktest" "github.com/go-kit/log/term" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -207,7 +208,10 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { // i.e. broadcastEvidenceRoutine finishes when peer is stopped defer leaktest.CheckTimeout(t, 10*time.Second)() - p.On("Send", evidence.EvidenceChannel, mock.AnythingOfType("[]uint8")).Return(false) + p.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == evidence.EvidenceChannel + })).Return(false) quitChan := make(<-chan struct{}) p.On("Quit").Return(quitChan) ps := peerState{2} @@ -366,8 +370,35 @@ func exampleVote(t byte) *types.Vote { ValidatorIndex: 56789, } } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config := cfg.TestConfig() + N := 1 + + stateDBs := make([]sm.Store, N) + val := types.NewMockPV() + stateDBs[0] = initializeValidatorState(val, 1) + + reactors, _ := makeAndConnectReactorsAndPools(config, stateDBs) + + var ( + reactor = reactors[0] + peer = &p2pmocks.Peer{} + ) + quitChan := make(<-chan struct{}) + peer.On("Quit").Return(quitChan) + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + e := &tmproto.EvidenceList{} + msg, err := proto.Marshal(e) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(evidence.EvidenceChannel, peer, msg) + }) +} -// nolint:lll //ignore line length for tests +//nolint:lll //ignore line length for tests func TestEvidenceVectors(t *testing.T) { val := &types.Validator{ diff --git a/evidence/services.go b/evidence/services.go index 274433cbe0..5c4d2e953e 100644 --- a/evidence/services.go +++ b/evidence/services.go @@ -4,7 +4,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name BlockStore +//go:generate ../scripts/mockery_generate.sh BlockStore type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta diff --git a/evidence/verify.go b/evidence/verify.go index f3eba53581..c20cb0a2de 100644 --- 
a/evidence/verify.go +++ b/evidence/verify.go @@ -102,13 +102,14 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. This involves // the following checks: -// - the common header from the full node has at least 1/3 voting power which is also present in -// the conflicting header's commit -// - 2/3+ of the conflicting validator set correctly signed the conflicting block -// - the nodes trusted header at the same height as the conflicting header has a different hash +// - the common header from the full node has at least 1/3 voting power which is also present in +// the conflicting header's commit +// - 2/3+ of the conflicting validator set correctly signed the conflicting block +// - the nodes trusted header at the same height as the conflicting header has a different hash // // CONTRACT: must run ValidateBasic() on the evidence before verifying -// must check that the evidence has not expired (i.e. is outside the maximum age threshold) +// +// must check that the evidence has not expired (i.e. is outside the maximum age threshold) func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { // In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single @@ -154,10 +155,10 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t // VerifyDuplicateVote verifies DuplicateVoteEvidence against the state of full node. 
This involves the // following checks: -// - the validator is in the validator set at the height of the evidence -// - the height, round, type and validator address of the votes must be the same -// - the block ID's must be different -// - The signatures must both be valid +// - the validator is in the validator set at the height of the evidence +// - the height, round, type and validator address of the votes must be the same +// - the block ID's must be different +// - The signatures must both be valid func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error { _, val := valSet.GetByAddress(e.VoteA.ValidatorAddress) if val == nil { diff --git a/go.mod b/go.mod index 77eb291913..2001d6107d 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,23 @@ module github.com/tendermint/tendermint -go 1.17 +go 1.18 require ( - github.com/BurntSushi/toml v1.2.0 + github.com/BurntSushi/toml v1.2.1 github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d - github.com/Workiva/go-datastructures v1.0.52 - github.com/adlio/schema v1.1.13 - github.com/btcsuite/btcd v0.21.0-beta - github.com/btcsuite/btcutil v1.0.2 + github.com/Workiva/go-datastructures v1.0.53 + github.com/adlio/schema v1.3.3 + github.com/btcsuite/btcd v0.22.1 + github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/bufbuild/buf v1.9.0 github.com/celestiaorg/nmt v0.11.0 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.5.1 - github.com/gogo/protobuf v1.3.2 + github.com/gofrs/uuid v4.3.0+incompatible github.com/golang/protobuf v1.5.2 + github.com/golangci/golangci-lint v1.50.1 github.com/google/orderedcode v0.0.1 github.com/gorilla/websocket v1.5.0 github.com/gtank/merlin v0.1.1 @@ -25,43 +27,48 @@ require ( github.com/ory/dockertest v3.3.5+incompatible github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 - github.com/rcrowley/go-metrics 
v0.0.0-20200313005456-10cdbea86bc0 + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rs/cors v1.8.2 - github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa + github.com/sasha-s/go-deadlock v0.3.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.5.0 - github.com/spf13/viper v1.12.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 +) + +require ( + github.com/google/uuid v1.3.0 github.com/tendermint/tm-db v0.6.6 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 - google.golang.org/grpc v1.48.0 + golang.org/x/crypto v0.1.0 + golang.org/x/net v0.1.0 + google.golang.org/grpc v1.50.1 ) +require github.com/vektra/mockery/v2 v2.14.0 + require ( - github.com/bufbuild/buf v1.4.0 - github.com/creachadair/taskgroup v0.3.2 - github.com/golangci/golangci-lint v1.47.2 - github.com/prometheus/common v0.34.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca - github.com/vektra/mockery/v2 v2.14.0 - google.golang.org/protobuf v1.28.0 + github.com/gogo/protobuf v1.3.2 + github.com/informalsystems/tm-load-test v1.0.0 + gonum.org/v1/gonum v0.12.0 + google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 ) require ( 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Abirdcfly/dupword v0.0.7 // indirect github.com/Antonboom/errname v0.1.7 // indirect github.com/Antonboom/nilnil v0.1.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect - 
github.com/Microsoft/go-winio v0.5.0 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/OpenPeeDeeP/depguard v1.1.0 // indirect + github.com/OpenPeeDeeP/depguard v1.1.1 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect - github.com/alingse/asasalint v0.0.10 // indirect + github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.3.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -70,38 +77,46 @@ require ( github.com/bombsimon/wsl/v3 v3.3.0 // indirect github.com/breml/bidichk v0.2.3 // indirect github.com/breml/errchkjson v0.3.0 // indirect + github.com/bufbuild/connect-go v1.0.0 // indirect + github.com/bufbuild/protocompile v0.1.0 // indirect github.com/butuzov/ireturn v0.1.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect - github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect - github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect + github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect + github.com/containerd/containerd v1.6.8 // indirect + github.com/containerd/continuity v0.3.0 // indirect + github.com/containerd/typeurl v1.0.2 // indirect github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/daixiang0/gci v0.4.3 // indirect + github.com/creachadair/taskgroup v0.3.2 + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.8.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect github.com/dgraph-io/ristretto 
v0.0.3-0.20200630154024-f66de99634de // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/docker v20.10.19+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/go-critic/go-critic v0.6.3 // indirect + github.com/go-chi/chi/v5 v5.0.7 // indirect + github.com/go-critic/go-critic v0.6.5 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect - github.com/go-toolsmith/astcopy v1.0.0 // indirect - github.com/go-toolsmith/astequal v1.0.1 // indirect + github.com/go-toolsmith/astcopy v1.0.2 // indirect + github.com/go-toolsmith/astequal v1.0.3 // indirect github.com/go-toolsmith/astfmt v1.0.0 // indirect github.com/go-toolsmith/astp v1.0.0 // indirect github.com/go-toolsmith/strparse v1.0.0 // indirect @@ -109,43 +124,42 @@ require ( github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - 
github.com/golang/snappy v0.0.3 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect github.com/golangci/misspell v0.3.5 // indirect - github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect + github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/btree v1.0.0 // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jgautheron/goconst v1.5.1 // indirect - 
github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect - github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.1 // indirect + github.com/kisielk/errcheck v1.6.2 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.15.6 // indirect + github.com/kkHAIKE/contextcheck v1.1.3 // indirect + github.com/klauspost/compress v1.15.11 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.6 // indirect @@ -155,50 +169,58 @@ require ( github.com/leonklingele/grouper v1.1.0 // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.6 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.0 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.2.1 // indirect - github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 // indirect + github.com/mgechev/revive v1.2.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect 
github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/buildkit v0.10.4 // indirect + github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/moricho/tparallel v0.2.1 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.8.1 // indirect + github.com/nishanths/exhaustive v0.8.3 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v1.0.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/runc v1.1.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/profile v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v1.0.5 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect - github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quasilyte/go-ruleguard v0.3.18 // indirect + github.com/quasilyte/gogrep 
v0.0.0-20220828223005-86e4605de09f // indirect github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rs/zerolog v1.27.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.2.3 // indirect + github.com/ryancurrah/gomodguard v1.2.4 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.12.0 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + github.com/securego/gosec/v2 v2.13.1 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/sivchari/containedctx v1.0.2 // indirect - github.com/sivchari/nosnakecase v1.5.0 // indirect + github.com/sivchari/nosnakecase v1.7.0 // indirect github.com/sivchari/tenv v1.7.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect @@ -209,39 +231,43 @@ require ( github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.4.0 // indirect - github.com/subosito/gotenv v1.4.0 // indirect - github.com/sylvia7788/contextcheck v1.0.4 // indirect + github.com/subosito/gotenv v1.4.1 // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect + github.com/timonwong/loggercheck v0.9.3 // 
indirect + github.com/tomarrell/wrapcheck/v2 v2.7.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/ultraware/funlen v0.0.3 // indirect github.com/ultraware/whitespace v0.0.5 // indirect github.com/uudashr/gocognit v1.0.6 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.2 // indirect + gitlab.com/bosi/decorder v0.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect - go.uber.org/atomic v1.9.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect + go.opentelemetry.io/otel v1.11.0 // indirect + go.opentelemetry.io/otel/metric v0.32.3 // indirect + go.opentelemetry.io/otel/trace v1.11.0 // indirect + go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect - golang.org/x/sys v0.0.0-20220702020025-31831981b65f // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 // indirect - google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + go.uber.org/zap v1.23.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect + golang.org/x/mod v0.6.0 // indirect + golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect + golang.org/x/sys v0.1.0 // indirect + golang.org/x/term v0.1.0 // indirect + golang.org/x/text v0.4.0 // indirect + golang.org/x/tools 
v0.2.0 // indirect + google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.3.2 // indirect - mvdan.cc/gofumpt v0.3.1 // indirect + honnef.co/go/tools v0.3.3 // indirect + mvdan.cc/gofumpt v0.4.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect diff --git a/go.sum b/go.sum index f3b821ac2a..cfcf0c706f 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ 4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -15,7 +15,6 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= @@ -24,44 +23,30 @@ cloud.google.com/go 
v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod 
h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
+github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q= +github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk= github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= @@ -70,46 +55,44 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 
h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 h1:V9xVvhKbLt7unNEGAruK1xXglyc668Pq3Xx0MNTNqpo= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0/go.mod h1:n/vLeA7V+QY84iYAGwMkkUUp9ooeuftMEvaDrSVch+Q= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= 
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= -github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= +github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI= github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= -github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98= +github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= +github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod 
h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -120,40 +103,39 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/alingse/asasalint v0.0.10 h1:qqGPDTV0ff0tWHN/nnIlSdjlU/EwRPaUY4SfpE1rnms= -github.com/alingse/asasalint v0.0.10/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= @@ -165,12 +147,16 @@ github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw= github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd 
v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= @@ -178,20 +164,24 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.4.0 h1:GqE3a8CMmcFvWPzuY3Mahf9Kf3S9XgZ/ORpfYFzO+90= -github.com/bufbuild/buf v1.4.0/go.mod h1:mwHG7klTHnX+rM/ym8LXGl7vYpVmnwT96xWoRB4H5QI= +github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8= +github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE= +github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo= +github.com/bufbuild/connect-go 
v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= +github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= +github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= -github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= github.com/celestiaorg/nmt v0.11.0 h1:iqTaNwnVzM3njBmPklpHzb3A4Xy/JKahoclRPbAzxNc= github.com/celestiaorg/nmt v0.11.0/go.mod h1:NN3W8EEoospv8EHCw50DDNWwPLpJkFHoEFiqCEcNCH4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -199,16 +189,19 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb 
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M= -github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU= +github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI= +github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -217,40 +210,51 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= +github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.3.0 
h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= -github.com/cpuguy83/go-md2man v1.0.10 
h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ= -github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/daixiang0/gci v0.8.1 
h1:T4xpSC+hmsi4CSyuYfIJdMZAr9o7xZmHpQVygMghGZ4= +github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -258,6 +262,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= @@ -265,10 +270,17 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KP github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution 
v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64= +github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -276,6 +288,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -284,24 +297,19 @@ github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod 
h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -312,28 +320,28 @@ github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIg github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.3 
h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw= -github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k= +github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8= +github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo= +github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -345,19 +353,23 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= 
-github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0= +github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc= -github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= +github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= @@ -374,20 +386,24 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= 
+github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= +github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -401,8 +417,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -423,33 +437,32 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod 
h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw= -github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.50.1 h1:C829clMcZXEORakZlwpk7M4iDw2XiwxxKaG504SZ9zY= +github.com/golangci/golangci-lint v1.50.1/go.mod h1:AQjHBopYS//oB8xs0y0M/dtxdKHkdhl0RvmjUct0/4w= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI= -github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= 
+github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -462,16 +475,14 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -480,49 +491,35 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= 
-github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil 
v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= @@ -535,34 +532,35 @@ github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3 github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 
h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= 
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -571,56 +569,51 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= 
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= 
-github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/informalsystems/tm-load-test v1.0.0 
h1:e1IeUw8701HWCMuOM1vLM/XcpH2Lrb88GNWdFAPDmmA= +github.com/informalsystems/tm-load-test v1.0.0/go.mod h1:WVaSKaQdfZK3v0C74EMzn7//+3aeCZF8wkIKBz2/M74= github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4= github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo= -github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753 h1:uFlcJKZPLQd7rmOY/RrvBuUaYmAFnlFHKLivhO6cOy8= -github.com/jhump/protoreflect v1.12.1-0.20220417024638-438db461d753/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod 
h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -632,12 +625,11 @@ github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -645,31 +637,27 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s= -github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c= +github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw= +github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= -github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -677,7 +665,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -686,7 +673,6 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod 
h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= @@ -695,23 +681,25 @@ github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKi github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= 
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= @@ -723,157 +711,170 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 
h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4= -github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0= +github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= +github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod 
h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4= +github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= +github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils 
v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= 
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo= -github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= +github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.5 
h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod 
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml 
v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= -github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -883,101 +884,110 @@ github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM= -github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA= +github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY= +github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= -github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA= -github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4= +github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= +github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 
h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls= -github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls= -github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid 
v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= -github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= +github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod 
h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= -github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.20.0 h1:K6CXjqqtSYSsuyRDDC7Sjn6vTMLiSJa4ZmDkiokoqtw= +github.com/sashamelentyev/usestdlibvars v1.20.0/go.mod h1:0GaP+ecfZMXShS0A94CJn6aEuPRILv8h/VuWI9n1ygg= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg= -github.com/securego/gosec/v2 
v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM= +github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -986,12 +996,13 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= github.com/sivchari/containedctx v1.0.2/go.mod 
h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= -github.com/sivchari/nosnakecase v1.5.0 h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M= -github.com/sivchari/nosnakecase v1.5.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE= github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1008,20 +1019,20 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 
-github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1030,21 +1041,25 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= 
+github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1053,23 +1068,21 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= -github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= -github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= -github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= 
github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/tendermint v0.34.14/go.mod h1:FrwVm3TvsVicI9Z7FlucHV6Znfd5KBc/Lpp69cCwtk0= +github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM= github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= @@ -1080,17 +1093,18 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI= +github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M= -github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= -github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tomarrell/wrapcheck/v2 v2.7.0 h1:J/F8DbSKJC83bAvC6FoZaRjZiZ/iKoueSdrEkmGeacA= +github.com/tomarrell/wrapcheck/v2 v2.7.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= @@ -1100,54 +1114,40 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
-github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= -github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vektra/mockery/v2 v2.14.0 h1:KZ1p5Hrn8tiY+LErRMr14HHle6khxo+JKOXLBW/yfqs= github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod 
h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= -gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= -gitlab.com/bosi/decorder v0.2.2 h1:LRfb3lP6mZWjUzpMOCLTVjcnl/SqZWBWmKNqQvMocQs= -gitlab.com/bosi/decorder v0.2.2/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= +gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= 
-go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= -go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1156,19 +1156,26 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io 
v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= +go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= +go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c= +go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc= +go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= +go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -1178,37 +1185,33 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1222,11 +1225,11 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp 
v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1253,24 +1256,27 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1278,7 +1284,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1286,7 +1291,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1305,25 +1309,16 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1335,20 +1330,13 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1356,9 +1344,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1366,6 +1354,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1382,7 +1371,6 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1392,6 +1380,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1404,7 +1393,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1418,9 +1406,9 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1437,49 +1425,34 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f h1:xdsejrW/0Wf2diT5CPp3XmKUNbr7Xvw8kYilQ+6qjRY= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1488,22 +1461,23 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1517,25 +1491,26 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1551,7 +1526,6 @@ golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWc golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1560,9 +1534,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1573,8 +1544,8 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1588,33 +1559,29 @@ golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0t golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod 
h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 h1:NHLFZ56qCjD+0hYY3kE5Wl40Z7q4Gn9Ln/7YU0lsGko= -golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= 
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1632,44 +1599,25 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod 
h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1690,8 +1638,6 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod 
h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1707,60 +1653,25 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod 
h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= -google.golang.org/genproto 
v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= 
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1772,21 +1683,12 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.50.1 
h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1799,27 +1701,24 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1830,18 +1729,19 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1849,10 +1749,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= -honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= +honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= @@ -1865,3 +1765,4 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index 3e3814d5e9..ff6056fc56 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -1,7 +1,6 @@ package autofile import ( - "io/ioutil" "os" "path/filepath" "syscall" @@ -24,7 +23,7 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := ioutil.TempDir("", "sighup_test") + dir, err := os.MkdirTemp("", 
"sighup_test") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(dir) @@ -49,7 +48,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) // Move into a different temporary directory - otherDir, err := ioutil.TempDir("", "sighup_test_other") + otherDir, err := os.MkdirTemp("", "sighup_test_other") require.NoError(t, err) defer os.RemoveAll(otherDir) err = os.Chdir(otherDir) @@ -79,7 +78,7 @@ func TestSIGHUP(t *testing.T) { } // The current directory should be empty - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) } @@ -87,7 +86,7 @@ func TestSIGHUP(t *testing.T) { // // Manually modify file permissions, close, and reopen using autofile: // // We expect the file permissions to be changed back to the intended perms. // func TestOpenAutoFilePerms(t *testing.T) { -// file, err := ioutil.TempFile("", "permission_test") +// file, err := os.CreateTemp("", "permission_test") // require.NoError(t, err) // err = file.Close() // require.NoError(t, err) @@ -113,7 +112,7 @@ func TestSIGHUP(t *testing.T) { func TestAutoFileSize(t *testing.T) { // First, create an AutoFile writing to a tempfile dir - f, err := ioutil.TempFile("", "sighup_test") + f, err := os.CreateTemp("", "sighup_test") require.NoError(t, err) err = f.Close() require.NoError(t, err) diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index 0981923eb4..a57c59b606 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -2,7 +2,6 @@ package autofile import ( "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -17,7 +16,7 @@ import ( func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group { testID := tmrand.Str(12) testDir := "_test_" + testID - err := tmos.EnsureDir(testDir, 0700) + err := tmos.EnsureDir(testDir, 0o700) require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" @@ -122,7 +121,7 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := 
ioutil.TempDir("", "rotate_test") + dir, err := os.MkdirTemp("", "rotate_test") require.NoError(t, err) defer os.RemoveAll(dir) err = os.Chdir(dir) @@ -151,21 +150,21 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) // Read g.Head.Path+"000" - body1, err := ioutil.ReadFile(g.Head.Path + ".000") + body1, err := os.ReadFile(g.Head.Path + ".000") assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path - body2, err := ioutil.ReadFile(g.Head.Path) + body2, err := os.ReadFile(g.Head.Path) assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } // Make sure there are no files in the current, temporary directory - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go index d96ad3f47c..706305300e 100644 --- a/libs/cli/flags/log_level.go +++ b/libs/cli/flags/log_level.go @@ -17,7 +17,8 @@ const ( // all other modules). 
// // Example: -// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +// +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { if lvl == "" { return nil, errors.New("empty log level") diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 4b87bd60be..fb90bb0bb5 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -19,7 +18,7 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0o600) } // RunWithArgs executes the given command with the specified command line args diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 0cb3223446..87539278c6 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -2,7 +2,7 @@ package cli import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" "testing" @@ -27,8 +27,11 @@ func TestSetupEnv(t *testing.T) { {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, // and that cli overrides env... 
- {[]string{"--foobar", "important"}, - map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + { + []string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, + "important", + }, } for idx, tc := range cases { @@ -55,7 +58,7 @@ func TestSetupEnv(t *testing.T) { } func tempDir() string { - cdir, err := ioutil.TempDir("", "test-cli") + cdir, err := os.MkdirTemp("", "test-cli") if err != nil { panic(err) } diff --git a/libs/clist/clist.go b/libs/clist/clist.go index 5579b1d0f2..2e4171b1c7 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -24,7 +24,6 @@ import ( const MaxLength = int(^uint(0) >> 1) /* - CElement is an element of a linked-list Traversal from a CElement is goroutine-safe. @@ -41,7 +40,6 @@ the for-loop. Use sync.Cond when you need serial access to the "condition". In our case our condition is if `next != nil || removed`, and there's no reason to serialize that condition for goroutines waiting on NextWait() (since it's just a read operation). - */ type CElement struct { mtx tmsync.RWMutex diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index d10a1e5ae9..7b10478fd9 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -68,6 +68,7 @@ func TestSmall(t *testing.T) { // This test is quite hacky because it relies on SetFinalizer // which isn't guaranteed to run at all. + //nolint:unused,deadcode func _TestGCFifo(t *testing.T) { if runtime.GOARCH != "amd64" { @@ -117,6 +118,7 @@ func _TestGCFifo(t *testing.T) { // This test is quite hacky because it relies on SetFinalizer // which isn't guaranteed to run at all. +// //nolint:unused,deadcode func _TestGCRandom(t *testing.T) { if runtime.GOARCH != "amd64" { diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go index c7ba932821..fdc168d18d 100644 --- a/libs/flowrate/flowrate.go +++ b/libs/flowrate/flowrate.go @@ -39,10 +39,10 @@ type Monitor struct { // weight of each sample in the exponential moving average (EMA) calculation. 
// The exact formulas are: // -// sampleTime = currentTime - prevSampleTime -// sampleRate = byteCount / sampleTime -// weight = 1 - exp(-sampleTime/windowSize) -// newRate = weight*sampleRate + (1-weight)*oldRate +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate // // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, // respectively. diff --git a/libs/json/doc.go b/libs/json/doc.go index d5ef4047f3..1b92c0db62 100644 --- a/libs/json/doc.go +++ b/libs/json/doc.go @@ -13,12 +13,12 @@ // compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit // precision): // -// int32(32) // Output: 32 -// uint32(32) // Output: 32 -// int64(64) // Output: "64" -// uint64(64) // Output: "64" -// int(64) // Output: "64" -// uint(64) // Output: "64" +// int32(32) // Output: 32 +// uint32(32) // Output: 32 +// int64(64) // Output: "64" +// uint64(64) // Output: "64" +// int(64) // Output: "64" +// uint(64) // Output: "64" // // Encoding of other scalars follows encoding/json: // @@ -50,7 +50,7 @@ // Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero // times emitted as "0001-01-01T00:00:00Z" as with encoding/json): // -// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) +// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) // // Output: "2020-06-08T14:21:28.000000123Z" // time.Time{} // Output: "0001-01-01T00:00:00Z" // (*time.Time)(nil) // Output: null @@ -95,5 +95,4 @@ // // Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} // // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} -// package json diff --git a/libs/log/filter.go b/libs/log/filter.go index e39a85dcbf..4b7ed981cd 100644 --- a/libs/log/filter.go +++ b/libs/log/filter.go @@ -69,18 +69,19 @@ func (l 
*filter) Error(msg string, keyvals ...interface{}) { // Allow*With methods, it is used as the logger's level. // // Examples: -// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) -// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" // -// logger = log.NewFilter(logger, log.AllowError(), -// log.AllowInfoWith("module", "crypto"), -// log.AllowNoneWith("user", "Sam")) -// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) +// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" // -// logger = log.NewFilter(logger, -// log.AllowError(), -// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) -// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" +// logger = log.NewFilter(logger, log.AllowError(), +// log.AllowInfoWith("module", "crypto"), +// log.AllowNoneWith("user", "Sam")) +// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// +// logger = log.NewFilter(logger, +// log.AllowError(), +// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" func (l *filter) With(keyvals ...interface{}) Logger { keyInAllowedKeyvals := false diff --git a/libs/log/logger.go b/libs/log/logger.go index 9b1a65d42a..34aca8af5d 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -22,9 +22,9 @@ type Logger interface { // // If w implements the following interface, so does the returned writer. 
// -// interface { -// Fd() uintptr -// } +// interface { +// Fd() uintptr +// } func NewSyncWriter(w io.Writer) io.Writer { return kitlog.NewSyncWriter(w) } diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go index cbe29d9942..8427febf18 100644 --- a/libs/log/tm_logger_test.go +++ b/libs/log/tm_logger_test.go @@ -2,7 +2,7 @@ package log_test import ( "bytes" - "io/ioutil" + "io" "strings" "testing" @@ -90,11 +90,11 @@ func TestError(t *testing.T) { } func BenchmarkTMLoggerSimple(b *testing.B) { - benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) + benchmarkRunner(b, log.NewTMLogger(io.Discard), baseInfoMessage) } func BenchmarkTMLoggerContextual(b *testing.B) { - benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) + benchmarkRunner(b, log.NewTMLogger(io.Discard), withInfoMessage) } func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index 391ae478a4..492c1c12ea 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -65,7 +65,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { switch keyvals[i] { case kitlevel.Key(): excludeIndexes = append(excludeIndexes, i) - switch keyvals[i+1].(type) { // nolint:gocritic + switch keyvals[i+1].(type) { //nolint:gocritic case string: lvl = keyvals[i+1].(string) case kitlevel.Value: diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index 4b82455f13..9642d03646 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -3,7 +3,7 @@ package log_test import ( "bytes" "errors" - "io/ioutil" + "io" "math" "regexp" "testing" @@ -62,16 +62,16 @@ func TestTMFmtLogger(t *testing.T) { } func BenchmarkTMFmtLoggerSimple(b *testing.B) { - benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), baseMessage) } func BenchmarkTMFmtLoggerContextual(b 
*testing.B) { - benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), withMessage) } func TestTMFmtLoggerConcurrency(t *testing.T) { t.Parallel() - testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) + testConcurrency(t, log.NewTMFmtLogger(io.Discard), 10000) } func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { @@ -83,7 +83,7 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log } } -//nolint: errcheck // ignore errors +//nolint:errcheck // ignore errors var ( baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } diff --git a/libs/os/os.go b/libs/os/os.go index a24e1ba7ff..16aa3e68bb 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "syscall" @@ -62,11 +61,11 @@ func FileExists(filePath string) bool { } func ReadFile(filePath string) ([]byte, error) { - return ioutil.ReadFile(filePath) + return os.ReadFile(filePath) } func MustReadFile(filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) if err != nil { Exit(fmt.Sprintf("MustReadFile failed: %v", err)) return nil @@ -75,7 +74,7 @@ func MustReadFile(filePath string) []byte { } func WriteFile(filePath string, contents []byte, mode os.FileMode) error { - return ioutil.WriteFile(filePath, contents, mode) + return os.WriteFile(filePath, contents, mode) } func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 88bf1412c3..7c43476e88 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -3,7 +3,6 @@ package os import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +11,7 @@ import ( ) func TestCopyFile(t *testing.T) { - 
tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") if err != nil { t.Fatal(err) } @@ -29,7 +28,7 @@ func TestCopyFile(t *testing.T) { if _, err := os.Stat(copyfile); os.IsNotExist(err) { t.Fatal("copy should exist") } - data, err := ioutil.ReadFile(copyfile) + data, err := os.ReadFile(copyfile) if err != nil { t.Fatal(err) } @@ -40,35 +39,35 @@ func TestCopyFile(t *testing.T) { } func TestEnsureDir(t *testing.T) { - tmp, err := ioutil.TempDir("", "ensure-dir") + tmp, err := os.MkdirTemp("", "ensure-dir") require.NoError(t, err) defer os.RemoveAll(tmp) // Should be possible to create a new directory. - err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + err = EnsureDir(filepath.Join(tmp, "dir"), 0o755) require.NoError(t, err) require.DirExists(t, filepath.Join(tmp, "dir")) // Should succeed on existing directory. - err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + err = EnsureDir(filepath.Join(tmp, "dir"), 0o755) require.NoError(t, err) // Should fail on file. - err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0o644) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "file"), 0755) + err = EnsureDir(filepath.Join(tmp, "file"), 0o755) require.Error(t, err) // Should allow symlink to dir. err = os.Symlink(filepath.Join(tmp, "dir"), filepath.Join(tmp, "linkdir")) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "linkdir"), 0755) + err = EnsureDir(filepath.Join(tmp, "linkdir"), 0o755) require.NoError(t, err) // Should error on symlink to file. err = os.Symlink(filepath.Join(tmp, "file"), filepath.Join(tmp, "linkfile")) require.NoError(t, err) - err = EnsureDir(filepath.Join(tmp, "linkfile"), 0755) + err = EnsureDir(filepath.Join(tmp, "linkfile"), 0o755) require.Error(t, err) } @@ -76,7 +75,7 @@ func TestEnsureDir(t *testing.T) { // the origin is positively a non-directory and that it is ready for copying. 
// See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate") + tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") if err != nil { t.Fatal(err) } @@ -84,12 +83,12 @@ func TestTrickedTruncation(t *testing.T) { originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0o755); err != nil { t.Fatal(err) } // 1. Sanity check. - readWAL, err := ioutil.ReadFile(originalWALPath) + readWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } @@ -104,7 +103,7 @@ func TestTrickedTruncation(t *testing.T) { } // 3. Check the WAL's content - reReadWAL, err := ioutil.ReadFile(originalWALPath) + reReadWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 914a080de9..321e775c88 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -12,26 +12,25 @@ // // Example: // -// q, err := query.New("account.name='John'") -// if err != nil { -// return err -// } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) -// if err != nil { -// return err -// } -// -// for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Cancelled(): -// return subscription.Err() -// } -// } +// q, err := query.New("account.name='John'") +// if err != nil { +// return err +// } +// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) +// defer cancel() +// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) +// if err != nil { +// return err +// } // +// for { +// select { 
+// case msg <- subscription.Out(): +// // handle msg.Data() and msg.Events() +// case <-subscription.Cancelled(): +// return subscription.Err() +// } +// } package pubsub import ( diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index cf6903ccfc..35023d5650 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -1,6 +1,6 @@ // Package query provides a parser for a custom query format: // -// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// abci.invoice.number=22 AND abci.invoice.owner=Ivan // // See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. // More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics diff --git a/libs/pubsub/query/query.peg.go b/libs/pubsub/query/query.peg.go index 98f8f4ed1e..27a708f67a 100644 --- a/libs/pubsub/query/query.peg.go +++ b/libs/pubsub/query/query.peg.go @@ -1,4 +1,3 @@ -//nolint package query import ( diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 8f90e177ac..eca2c5dfb9 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -43,7 +43,6 @@ func (s *Subscription) Out() <-chan Message { return s.out } -// nolint: misspell // Cancelled returns a channel that's closed when the subscription is // terminated and supposed to be used in a select statement. func (s *Subscription) Cancelled() <-chan struct{} { @@ -54,7 +53,8 @@ func (s *Subscription) Cancelled() <-chan struct{} { // If the channel is closed, Err returns a non-nil error explaining why: // - ErrUnsubscribed if the subscriber choose to unsubscribe, // - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, +// and the channel returned by Out became full, +// // After Err returns a non-nil error, successive calls to Err return the same // error. 
func (s *Subscription) Err() error { diff --git a/libs/rand/random.go b/libs/rand/random.go index 41d04a4406..e86581d8b2 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -48,7 +48,7 @@ func (r *Rand) init() { } func (r *Rand) reset(seed int64) { - r.rand = mrand.New(mrand.NewSource(seed)) // nolint:gosec // G404: Use of weak random number generator + r.rand = mrand.New(mrand.NewSource(seed)) //nolint:gosec,nolintlint // G404: Use of weak random number generator } //---------------------------------------- diff --git a/libs/tempfile/tempfile_test.go b/libs/tempfile/tempfile_test.go index 9d07f806bd..ccff9f42e0 100644 --- a/libs/tempfile/tempfile_test.go +++ b/libs/tempfile/tempfile_test.go @@ -5,7 +5,6 @@ package tempfile import ( "bytes" "fmt" - "io/ioutil" "os" testing "testing" @@ -18,16 +17,16 @@ func TestWriteFileAtomic(t *testing.T) { var ( data = []byte(tmrand.Str(tmrand.Intn(2048))) old = tmrand.Bytes(tmrand.Intn(2048)) - perm os.FileMode = 0600 + perm os.FileMode = 0o600 ) - f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + f, err := os.CreateTemp("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0o600); err != nil { t.Fatal(err) } @@ -35,7 +34,7 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } - rData, err := ioutil.ReadFile(f.Name()) + rData, err := os.ReadFile(f.Name()) if err != nil { t.Fatal(err) } @@ -69,7 +68,7 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { firstFileRand := randWriteFileSuffix() atomicWriteFileRand = defaultSeed fname := "/tmp/" + atomicWriteFilePrefix + firstFileRand - f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) + f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777) defer os.Remove(fname) // Defer here, in case there is a panic in WriteFileAtomic. 
defer os.Remove(fileToWrite) @@ -77,14 +76,14 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { require.NoError(t, err) _, err = f.WriteString(testString) require.NoError(t, err) - err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777) require.NoError(t, err) // Check that the first atomic file was untouched - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten") // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") @@ -113,7 +112,7 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) + f, err := os.OpenFile(fname, atomicWriteFileFlag, 0o777) require.Nil(t, err) _, err = f.WriteString(fmt.Sprintf(testString, i)) require.NoError(t, err) @@ -124,21 +123,21 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { // Defer here, in case there is a panic in WriteFileAtomic. 
defer os.Remove(fileToWrite) - err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0o777) require.NoError(t, err) // Check that all intermittent atomic file were untouched atomicWriteFileRand = defaultSeed for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.Nil(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.Nil(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") } diff --git a/light/client.go b/light/client.go index 51330c77c9..8a0500cbfe 100644 --- a/light/client.go +++ b/light/client.go @@ -284,16 +284,16 @@ func (c *Client) restoreTrustedLightBlock() error { // if options.Height: // -// 1) ahead of trustedLightBlock.Height => fetch light blocks (same height as +// 1. ahead of trustedLightBlock.Height => fetch light blocks (same height as // trustedLightBlock) from primary provider and check it's hash matches the // trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks // before) // -// 2) equals trustedLightBlock.Height => check options.Hash matches the +// 2. equals trustedLightBlock.Height => check options.Hash matches the // trustedLightBlock's hash (if not, remove trustedLightBlock and all the light blocks // before) // -// 3) behind trustedLightBlock.Height => remove all the light blocks between +// 3. 
behind trustedLightBlock.Height => remove all the light blocks between // options.Height and trustedLightBlock.Height, update trustedLightBlock, then // check options.Hash matches the trustedLightBlock's hash (if not, remove // trustedLightBlock and all the light blocks before) @@ -395,10 +395,10 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp // TrustedLightBlock returns a trusted light block at the given height (0 - the latest). // // It returns an error if: -// - there are some issues with the trusted store, although that should not -// happen normally; -// - negative height is passed; -// - header has not been verified yet and is therefore not in the store +// - there are some issues with the trusted store, although that should not +// happen normally; +// - negative height is passed; +// - header has not been verified yet and is therefore not in the store // // Safe for concurrent use by multiple goroutines. func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) { @@ -506,12 +506,13 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // headers are not adjacent, verifySkipping is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. // Intermediate headers are not saved to database. 
-// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md +// https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md // // If the header, which is older than the currently trusted header, is // requested and the light client does not have it, VerifyHeader will perform: -// a) verifySkipping verification if nearest trusted header is found & not expired -// b) backwards verification in all other cases +// +// a) verifySkipping verification if nearest trusted header is found & not expired +// b) backwards verification in all other cases // // It returns ErrOldHeaderExpired if the latest trusted header expired. // @@ -980,12 +981,12 @@ func (c *Client) backwards( // lightBlockFromPrimary retrieves the lightBlock from the primary provider // at the specified height. This method also handles provider behavior as follows: // -// 1. If the provider does not respond or does not have the block, it tries again -// with a different provider -// 2. If all providers return the same error, the light client forwards the error to -// where the initial request came from -// 3. If the provider provides an invalid light block, is deemed unreliable or returns -// any other error, the primary is permanently dropped and is replaced by a witness. +// 1. If the provider does not respond or does not have the block, it tries again +// with a different provider +// 2. If all providers return the same error, the light client forwards the error to +// where the initial request came from +// 3. If the provider provides an invalid light block, is deemed unreliable or returns +// any other error, the primary is permanently dropped and is replaced by a witness. 
func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) { c.providerMutex.Lock() l, err := c.primary.LightBlock(ctx, height) diff --git a/light/detector.go b/light/detector.go index 881242135c..1fd21f41eb 100644 --- a/light/detector.go +++ b/light/detector.go @@ -109,7 +109,9 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig // // 1: errConflictingHeaders -> there may have been an attack on this light client // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one -// Note: In the case of an invalid header we remove the witness +// +// Note: In the case of an invalid header we remove the witness +// // 3: nil -> the hashes of the two headers match func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, witness provider.Provider, witnessIndex int) { @@ -275,16 +277,16 @@ func (c *Client) handleConflictingHeaders( // it has received from another and preforms verifySkipping at the heights of each of the intermediate // headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen. // -// 1. The light client verifies a header that is different to the intermediate header in the trace. This -// is the bifurcation point and the light client can create evidence from it -// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we -// return the error and remove the witness +// 1. The light client verifies a header that is different to the intermediate header in the trace. This +// is the bifurcation point and the light client can create evidence from it +// 2. The source stops responding, doesn't have the block or sends an invalid header in which case we +// return the error and remove the witness // // CONTRACT: -// 1. Trace can not be empty len(trace) > 0 -// 2. 
The last block in the trace can not be of a lower height than the target block -// trace[len(trace)-1].Height >= targetBlock.Height -// 3. The +// 1. Trace can not be empty len(trace) > 0 +// 2. The last block in the trace can not be of a lower height than the target block +// trace[len(trace)-1].Height >= targetBlock.Height +// 3. The func (c *Client) examineConflictingHeaderAgainstTrace( ctx context.Context, trace []*types.LightBlock, diff --git a/light/doc.go b/light/doc.go index 700bbeb6cf..2fa6fa72ba 100644 --- a/light/doc.go +++ b/light/doc.go @@ -63,38 +63,38 @@ This package provides three major things: Example usage: - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) - if err != nil { - // handle error - } - - c, err := NewHTTPClient( - chainID, - TrustOptions{ - Period: 504 * time.Hour, // 21 days - Height: 100, - Hash: header.Hash(), - }, - "http://localhost:26657", - []string{"http://witness1:26657"}, - dbs.New(db, ""), - ) - if err != nil { - // handle error - } - - h, err := c.TrustedHeader(100) - if err != nil { - // handle error - } - fmt.Println("header", h) + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + if err != nil { + // handle error + } + + c, err := NewHTTPClient( + chainID, + TrustOptions{ + Period: 504 * time.Hour, // 21 days + Height: 100, + Hash: header.Hash(), + }, + "http://localhost:26657", + []string{"http://witness1:26657"}, + dbs.New(db, ""), + ) + if err != nil { + // handle error + } + + h, err := c.TrustedHeader(100) + if err != nil { + // handle error + } + fmt.Println("header", h) Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. See -https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md +https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/verification.md for details. 
There are two methods of verification: sequential and bisection @@ -118,10 +118,10 @@ as a wrapper, which verifies all the headers, using a light client connected to some other node. See -https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html +https://docs.tendermint.com/v0.34/tendermint-core/light-client-protocol.html for usage example. Or see -https://github.com/tendermint/spec/tree/master/spec/consensus/light-client +https://github.com/tendermint/tendermint/tree/v0.34.x/spec/consensus/light-client for the full spec */ package light diff --git a/light/example_test.go b/light/example_test.go index b599778b86..f49b34a5de 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -3,7 +3,6 @@ package light_test import ( "context" "fmt" - "io/ioutil" stdlog "log" "os" "testing" @@ -25,7 +24,7 @@ func ExampleClient_Update() { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-example") + dbDir, err := os.MkdirTemp("", "light-client-example") if err != nil { stdlog.Fatal(err) } @@ -93,7 +92,7 @@ func ExampleClient_VerifyLightBlockAtHeight() { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-example") + dbDir, err := os.MkdirTemp("", "light-client-example") if err != nil { stdlog.Fatal(err) } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index 665fcbe707..f529cae484 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -216,6 +216,6 @@ func validateHeight(height int64) (*int64, error) { // exponential backoff (with jitter) // 0.5s -> 2s -> 4.5s -> 8s -> 12.5 with 1s variation func backoffTimeout(attempt uint16) time.Duration { - // nolint:gosec // G404: Use of weak random number generator + //nolint:gosec // G404: Use of weak random number generator return time.Duration(500*attempt*attempt)*time.Millisecond + 
time.Duration(rand.Intn(1000))*time.Millisecond } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 7aacbed10b..169d026f26 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -21,21 +21,21 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "health": rpcserver.NewRPCFunc(makeHealthFunc(c), ""), "status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""), "net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""), - "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"), - "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""), - "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""), - "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"), - "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"), - "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"), - "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"), + "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()), + "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()), + "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()), + "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")), + "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()), + "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")), + "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")), "data_commitment": rpcserver.NewRPCFunc(makeDataCommitmentFunc(c), "beginBlock,endBlock"), - "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"), + "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()), "tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"), "block_search": 
rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"), - "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"), + "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")), "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""), "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""), - "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"), + "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")), "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"), "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""), @@ -46,7 +46,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { // abci API "abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"), - "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""), + "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()), // evidence API "broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"), @@ -63,7 +63,7 @@ func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) -// nolint: interfacer +//nolint:interfacer func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { return c.Status(ctx.Context()) @@ -276,8 +276,8 @@ type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, - height int64, prove bool) (*ctypes.ResultABCIQuery, error) { - + height int64, prove bool, + ) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ Height: height, Prove: 
prove, @@ -295,7 +295,7 @@ func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) -// nolint: interfacer +//nolint:interfacer func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.BroadcastEvidence(ctx.Context(), ev) diff --git a/light/rpc/client.go b/light/rpc/client.go index a4ee6b3fcc..2ebcff8c5e 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -27,7 +27,8 @@ var errNegOrZeroHeight = errors.New("negative or zero height") type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error) // LightClient is an interface that contains functionality needed by Client from the light client. -//go:generate mockery --case underscore --name LightClient +// +//go:generate ../../scripts/mockery_generate.sh LightClient type LightClient interface { ChainID() string Update(ctx context.Context, now time.Time) (*types.LightBlock, error) diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index 9dfdcd001d..fabf73b01e 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/light/verifier.go b/light/verifier.go index 0b0a4926b1..2ec02e8773 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -19,13 +19,13 @@ var ( // VerifyNonAdjacent verifies non-adjacent untrustedHeader against // trustedHeader. 
It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) -// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) -// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals) -// signed correctly (if not, ErrNewValSetCantBeTrusted is returned) -// d) more than 2/3 of untrustedVals have signed h2 -// (otherwise, ErrInvalidHeader is returned) -// e) headers are non-adjacent. +// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) +// c) trustLevel ([1/3, 1]) of trustedHeaderVals (or trustedHeaderNextVals) +// signed correctly (if not, ErrNewValSetCantBeTrusted is returned) +// d) more than 2/3 of untrustedVals have signed h2 +// (otherwise, ErrInvalidHeader is returned) +// e) headers are non-adjacent. // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. @@ -81,12 +81,12 @@ func VerifyNonAdjacent( // VerifyAdjacent verifies directly adjacent untrustedHeader against // trustedHeader. It ensures that: // -// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) -// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) -// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash -// d) more than 2/3 of new validators (untrustedVals) have signed h2 -// (otherwise, ErrInvalidHeader is returned) -// e) headers are adjacent. +// a) trustedHeader can still be trusted (if not, ErrOldHeaderExpired is returned) +// b) untrustedHeader is valid (if not, ErrInvalidHeader is returned) +// c) untrustedHeader.ValidatorsHash equals trustedHeader.NextValidatorsHash +// d) more than 2/3 of new validators (untrustedVals) have signed h2 +// (otherwise, ErrInvalidHeader is returned) +// e) headers are adjacent. // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. 
@@ -212,12 +212,12 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time // VerifyBackwards verifies an untrusted header with a height one less than // that of an adjacent trusted header. It ensures that: // -// a) untrusted header is valid -// b) untrusted header has a time before the trusted header -// c) that the LastBlockID hash of the trusted header is the same as the hash -// of the trusted header +// a) untrusted header is valid +// b) untrusted header has a time before the trusted header +// c) that the LastBlockID hash of the trusted header is the same as the hash +// of the trusted header // -// For any of these cases ErrInvalidHeader is returned. +// For any of these cases ErrInvalidHeader is returned. func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { if err := untrustedHeader.ValidateBasic(); err != nil { return ErrInvalidHeader{err} diff --git a/mempool/v0/reactor.go b/mempool/v0/reactor.go index 3fc8506418..52ed185142 100644 --- a/mempool/v0/reactor.go +++ b/mempool/v0/reactor.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + "github.com/gogo/protobuf/proto" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/clist" "github.com/tendermint/tendermint/libs/log" @@ -134,6 +136,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ID: mempool.MempoolChannel, Priority: 5, RecvMessageCapacity: batchMsg.Size(), + MessageType: &protomem.Message{}, }, } } @@ -154,32 +157,56 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. 
-func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := memR.decodeMsg(msgBytes) - if err != nil { - memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - memR.Switch.StopPeerForError(src, err) - return - } - memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - - txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)} - if src != nil { - txInfo.SenderP2PID = src.ID() - } +func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + switch msg := e.Message.(type) { + case *protomem.Txs: + protoTxs := msg.GetTxs() + if len(protoTxs) == 0 { + memR.Logger.Error("received empty txs from peer", "src", e.Src) + return + } + txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)} + if e.Src != nil { + txInfo.SenderP2PID = e.Src.ID() + } - for _, tx := range msg.Txs { - err = memR.mempool.CheckTx(tx, nil, txInfo) - if errors.Is(err, mempool.ErrTxInCache) { - memR.Logger.Debug("Tx already exists in cache", "tx", tx.String()) - } else if err != nil { - memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err) + var err error + for _, tx := range protoTxs { + ntx := types.Tx(tx) + err = memR.mempool.CheckTx(ntx, nil, txInfo) + if errors.Is(err, mempool.ErrTxInCache) { + memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) + } else if err != nil { + memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err) + } } + default: + memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) + return } // broadcasting happens from go routines per peer } +func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &protomem.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + uw, err := 
msg.Unwrap() + if err != nil { + panic(err) + } + memR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: uw, + }) +} + // PeerState describes the state of a peer. type PeerState interface { GetHeight() int64 @@ -234,18 +261,10 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // https://github.com/tendermint/tendermint/issues/5796 if _, ok := memTx.senders.Load(peerID); !ok { - msg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}}, - }, - } - - bz, err := msg.Marshal() - if err != nil { - panic(err) - } - - success := peer.Send(mempool.MempoolChannel, bz) + success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: mempool.MempoolChannel, + Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, + }, memR.Logger) if !success { time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue @@ -264,35 +283,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } -func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) { - msg := protomem.Message{} - err := msg.Unmarshal(bz) - if err != nil { - return TxsMessage{}, err - } - - var message TxsMessage - - if i, ok := msg.Sum.(*protomem.Message_Txs); ok { - txs := i.Txs.GetTxs() - - if len(txs) == 0 { - return message, errors.New("empty TxsMessage") - } - - decoded := make([]types.Tx, len(txs)) - for j, tx := range txs { - decoded[j] = types.Tx(tx) - } - - message = TxsMessage{ - Txs: decoded, - } - return message, nil - } - return message, fmt.Errorf("msg type: %T is not supported", msg) -} - // TxsMessage is a Message containing transactions. 
type TxsMessage struct { Txs []types.Tx diff --git a/mempool/v0/reactor_test.go b/mempool/v0/reactor_test.go index 4250836549..c629b0aeb2 100644 --- a/mempool/v0/reactor_test.go +++ b/mempool/v0/reactor_test.go @@ -10,6 +10,7 @@ import ( "github.com/fortytw2/leaktest" "github.com/go-kit/log/term" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -264,6 +265,10 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { }) } +// TODO: This test tests that we don't panic and are able to generate new +// PeerIDs for each peer we add. It seems as though we should be able to test +// this in a much more direct way. +// https://github.com/tendermint/tendermint/issues/9639 func TestDontExhaustMaxActiveIDs(t *testing.T) { config := cfg.TestConfig() const N = 1 @@ -279,11 +284,41 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { for i := 0; i < mempool.MaxActiveIDs+1; i++ { peer := mock.NewPeer(nil) - reactor.Receive(mempool.MempoolChannel, peer, []byte{0x1, 0x2, 0x3}) + reactor.ReceiveEnvelope(p2p.Envelope{ + ChannelID: mempool.MempoolChannel, + Src: peer, + Message: &memproto.Message{}, // This uses the wrong message type on purpose to stop the peer as in an error state in the reactor. + }, + ) reactor.AddPeer(peer) } } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config := cfg.TestConfig() + const N = 1 + reactors := makeAndConnectReactors(config, N) + var ( + reactor = reactors[0] + peer = mock.NewPeer(nil) + ) + defer func() { + err := reactor.Stop() + assert.NoError(t, err) + }() + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &memproto.Txs{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(mempool.MempoolChannel, peer, msg) + }) +} + // mempoolLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist). 
func mempoolLogger() log.Logger { diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index 0deee0fbba..20db6d2947 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -9,6 +9,7 @@ import ( "time" "github.com/creachadair/taskgroup" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/clist" diff --git a/mempool/v1/mempool_bench_test.go b/mempool/v1/mempool_bench_test.go index bad8ec8ab9..a26501275d 100644 --- a/mempool/v1/mempool_bench_test.go +++ b/mempool/v1/mempool_bench_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/mempool" ) diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go index 4da51bab8f..00fe65e797 100644 --- a/mempool/v1/reactor.go +++ b/mempool/v1/reactor.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + "github.com/gogo/protobuf/proto" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/clist" "github.com/tendermint/tendermint/libs/log" @@ -133,6 +135,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ID: mempool.MempoolChannel, Priority: 5, RecvMessageCapacity: batchMsg.Size(), + MessageType: &protomem.Message{}, }, } } @@ -153,28 +156,54 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. 
-func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := memR.decodeMsg(msgBytes) - if err != nil { - memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - memR.Switch.StopPeerForError(src, err) +func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + switch msg := e.Message.(type) { + case *protomem.Txs: + protoTxs := msg.GetTxs() + if len(protoTxs) == 0 { + memR.Logger.Error("received empty txs from peer", "src", e.Src) + return + } + txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)} + if e.Src != nil { + txInfo.SenderP2PID = e.Src.ID() + } + + var err error + for _, tx := range protoTxs { + ntx := types.Tx(tx) + err = memR.mempool.CheckTx(ntx, nil, txInfo) + if errors.Is(err, mempool.ErrTxInCache) { + memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) + } else if err != nil { + memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err) + } + } + default: + memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) return } - memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)} - if src != nil { - txInfo.SenderP2PID = src.ID() + // broadcasting happens from go routines per peer +} + +func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &protomem.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) } - for _, tx := range msg.Txs { - err = memR.mempool.CheckTx(tx, nil, txInfo) - if err == mempool.ErrTxInCache { - memR.Logger.Debug("Tx already exists in cache", "tx", tx.String()) - } else if err != nil { - memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err) - } + uw, err := msg.Unwrap() + if 
err != nil { + panic(err) } - // broadcasting happens from go routines per peer + memR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: uw, + }) } // PeerState describes the state of a peer. @@ -233,18 +262,10 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // NOTE: Transaction batching was disabled due to // https://github.com/tendermint/tendermint/issues/5796 if !memTx.HasPeer(peerID) { - msg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}}, - }, - } - - bz, err := msg.Marshal() - if err != nil { - panic(err) - } - - success := peer.Send(mempool.MempoolChannel, bz) + success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: mempool.MempoolChannel, + Message: &protomem.Txs{Txs: [][]byte{memTx.tx}}, + }, memR.Logger) if !success { time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue @@ -268,37 +289,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { //----------------------------------------------------------------------------- // Messages -func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) { - msg := protomem.Message{} - err := msg.Unmarshal(bz) - if err != nil { - return TxsMessage{}, err - } - - var message TxsMessage - - if i, ok := msg.Sum.(*protomem.Message_Txs); ok { - txs := i.Txs.GetTxs() - - if len(txs) == 0 { - return message, errors.New("empty TxsMessage") - } - - decoded := make([]types.Tx, len(txs)) - for j, tx := range txs { - decoded[j] = types.Tx(tx) - } - - message = TxsMessage{ - Txs: decoded, - } - return message, nil - } - return message, fmt.Errorf("msg type: %T is not supported", msg) -} - -//------------------------------------- - // TxsMessage is a Message containing transactions. 
type TxsMessage struct { Txs []types.Tx diff --git a/mempool/v1/reactor_test.go b/mempool/v1/reactor_test.go index a911220164..74f9f469ff 100644 --- a/mempool/v1/reactor_test.go +++ b/mempool/v1/reactor_test.go @@ -8,10 +8,12 @@ import ( "time" "github.com/go-kit/log/term" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/p2p/mock" cfg "github.com/tendermint/tendermint/config" @@ -93,6 +95,35 @@ func TestMempoolVectors(t *testing.T) { } } +func TestLegacyReactorReceiveBasic(t *testing.T) { + config := cfg.TestConfig() + // if there were more than two reactors, the order of transactions could not be + // asserted in waitForTxsOnReactors (due to transactions gossiping). If we + // replace Connect2Switches (full mesh) with a func, which connects first + // reactor to others and nothing else, this test should also pass with >2 reactors. + const N = 1 + reactors := makeAndConnectReactors(config, N) + var ( + reactor = reactors[0] + peer = mock.NewPeer(nil) + ) + defer func() { + err := reactor.Stop() + assert.NoError(t, err) + }() + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &memproto.Txs{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(mempool.MempoolChannel, peer, msg) + }) +} + func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { reactors := make([]*Reactor, n) logger := mempoolLogger() diff --git a/networks/local/README.md b/networks/local/README.md index dcb31ae713..de7057ee79 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,3 +1,3 @@ # Local Cluster with Docker Compose -See the [docs](https://docs.tendermint.com/master/networks/docker-compose.html). +See the [docs](https://docs.tendermint.com/v0.34/networks/docker-compose.html). 
diff --git a/networks/remote/README.md b/networks/remote/README.md index 8f2e047363..f5d0685a3e 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](https://docs.tendermint.com/master/networks/terraform-and-ansible.html). +See the [docs](https://docs.tendermint.com/v0.34/networks/terraform-and-ansible.html). diff --git a/node/doc.go b/node/doc.go index 08f3fa2586..3a145c573a 100644 --- a/node/doc.go +++ b/node/doc.go @@ -6,35 +6,34 @@ Adding new p2p.Reactor(s) To add a new p2p.Reactor, use the CustomReactors option: - node, err := NewNode( - config, - privVal, - nodeKey, - clientCreator, - genesisDocProvider, - dbProvider, - metricsProvider, - logger, - CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}), - ) + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}), + ) Replacing existing p2p.Reactor(s) To replace the built-in p2p.Reactor, use the CustomReactors option: - node, err := NewNode( - config, - privVal, - nodeKey, - clientCreator, - genesisDocProvider, - dbProvider, - metricsProvider, - logger, - CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}), - ) + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}), + ) The list of existing reactors can be found in CustomReactors documentation. 
- */ package node diff --git a/node/node.go b/node/node.go index 22d1471cac..eb0b77e427 100644 --- a/node/node.go +++ b/node/node.go @@ -53,7 +53,7 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port + _ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port _ "github.com/lib/pq" // provide the psql db driver ) @@ -69,6 +69,8 @@ type DBContext struct { // DBProvider takes a DBContext and returns an instantiated DB. type DBProvider func(*DBContext) (dbm.DB, error) +const readHeaderTimeout = 10 * time.Second + // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { @@ -144,12 +146,12 @@ type fastSyncReactor interface { // WARNING: using any name from the below list of the existing reactors will // result in replacing it with the custom one. 
// -// - MEMPOOL -// - BLOCKCHAIN -// - CONSENSUS -// - EVIDENCE -// - PEX -// - STATESYNC +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -271,7 +273,6 @@ func createAndStartIndexerService( eventBus *types.EventBus, logger log.Logger, ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { - var ( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer @@ -303,7 +304,7 @@ func createAndStartIndexerService( blockIndexer = &blockidxnull.BlockerIndexer{} } - indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus) + indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) indexerService.SetLogger(logger.With("module", "txindex")) if err := indexerService.Start(); err != nil { @@ -320,8 +321,8 @@ func doHandshake( genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, - consensusLogger log.Logger) error { - + consensusLogger log.Logger, +) error { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) @@ -371,7 +372,6 @@ func createMempoolAndMempoolReactor( memplMetrics *mempl.Metrics, logger log.Logger, ) (mempl.Mempool, p2p.Reactor) { - switch config.Mempool.Version { case cfg.MempoolV1: mp := mempoolv1.NewTxMempool( @@ -422,14 +422,16 @@ func createMempoolAndMempoolReactor( } func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { - + stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger, +) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { return nil, nil, err } evidenceLogger := logger.With("module", 
"evidence") - evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }), blockStore) if err != nil { return nil, nil, err } @@ -443,8 +445,8 @@ func createBlockchainReactor(config *cfg.Config, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, fastSync bool, - logger log.Logger) (bcReactor p2p.Reactor, err error) { - + logger log.Logger, +) (bcReactor p2p.Reactor, err error) { switch config.FastSync.Version { case "v0": bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) @@ -470,8 +472,8 @@ func createConsensusReactor(config *cfg.Config, csMetrics *cs.Metrics, waitSync bool, eventBus *types.EventBus, - consensusLogger log.Logger) (*cs.Reactor, *cs.State) { - + consensusLogger log.Logger, +) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, state.Copy(), @@ -573,8 +575,8 @@ func createSwitch(config *cfg.Config, evidenceReactor *evidence.Reactor, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, - p2pLogger log.Logger) *p2p.Switch { - + p2pLogger log.Logger, +) *p2p.Switch { sw := p2p.NewSwitch( config.P2P, transport, @@ -596,8 +598,8 @@ func createSwitch(config *cfg.Config, } func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) { - + p2pLogger log.Logger, nodeKey *p2p.NodeKey, +) (pex.AddrBook, error) { addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) @@ -623,8 +625,8 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, } func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - + sw *p2p.Switch, logger log.Logger, +) *pex.Reactor { // TODO 
persistent peers ? so we can have their DNS addrs saved pexReactor := pex.NewReactor(addrBook, &pex.ReactorConfig{ @@ -646,7 +648,8 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, // startStateSync starts an asynchronous state sync process, then switches to fast sync mode. func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, - stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { + stateStore sm.Store, blockStore *store.BlockStore, state sm.State, +) error { ssR.Logger.Info("Starting state sync") if stateProvider == nil { @@ -708,14 +711,16 @@ func NewNode(config *cfg.Config, dbProvider DBProvider, metricsProvider MetricsProvider, logger log.Logger, - options ...Option) (*Node, error) { - + options ...Option, +) (*Node, error) { blockStore, stateDB, err := initDBs(config, dbProvider) if err != nil { return nil, err } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }) state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { @@ -889,6 +894,7 @@ func NewNode(config *cfg.Config, if config.RPC.PprofListenAddress != "" { go func() { logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + //nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) }() } @@ -1203,7 +1209,6 @@ func (n *Node) startRPC() ([]net.Listener, error) { } return listeners, nil - } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics @@ -1217,6 +1222,7 @@ func (n *Node) startPrometheusServer(addr string) *http.Server { promhttp.HandlerOpts{MaxRequestsInFlight: 
n.config.Instrumentation.MaxOpenConnections}, ), ), + ReadHeaderTimeout: readHeaderTimeout, } go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { @@ -1375,9 +1381,7 @@ func makeNodeInfo( //------------------------------------------------------------------------------ -var ( - genesisDocKey = []byte("genesisDoc") -) +var genesisDocKey = []byte("genesisDoc") // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the // database, or creates one using the given genesisDocProvider. On success this also @@ -1399,7 +1403,9 @@ func LoadStateFromDBOrGenesisDocProvider( return sm.State{}, nil, err } } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err diff --git a/node/node_test.go b/node/node_test.go index 8a4a44dc7a..953870a811 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -235,7 +235,9 @@ func TestCreateProposalBlock(t *testing.T) { var height int64 = 1 state, stateDB, privVals := state(1, height) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) maxBytes := 16384 var partSize uint32 = 256 maxEvidenceBytes := int64(maxBytes / 2) @@ -340,7 +342,9 @@ func TestMaxProposalBlockSize(t *testing.T) { var height int64 = 1 state, stateDB, _ := state(1, height) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) var maxBytes int64 = 16384 var partSize uint32 = 256 state.ConsensusParams.Block.MaxBytes = maxBytes @@ -464,7 +468,9 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := 
stateStore.Save(s); err != nil { panic(err) } diff --git a/p2p/README.md b/p2p/README.md index 9ba7303fa1..549f1da938 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -4,8 +4,8 @@ The p2p package provides an abstraction around peer-to-peer communication. Docs: -- [Connection](https://docs.tendermint.com/master/spec/p2p/connection.html) for details on how connections and multiplexing work -- [Peer](https://docs.tendermint.com/master/spec/p2p/node.html) for details on peer ID, handshakes, and peer exchange -- [Node](https://docs.tendermint.com/master/spec/p2p/node.html) for details about different types of nodes and how they should work -- [Pex](https://docs.tendermint.com/master/spec/reactors/pex/pex.html) for details on peer discovery and exchange -- [Config](https://docs.tendermint.com/master/spec/p2p/config.html) for details on some config option +- [Connection](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/connection.md) for details on how connections and multiplexing work +- [Peer](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/node.md) for details on peer ID, handshakes, and peer exchange +- [Node](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/node.md) for details about different types of nodes and how they should work +- [Pex](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/reactors/pex/pex.md) for details on peer discovery and exchange +- [Config](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/config.md) for details on some config option diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index 86b0d980a0..87e145fb20 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -44,9 +44,25 @@ type Reactor interface { // copying. // // CONTRACT: msgBytes are not nil. + // + // Only one of Receive or ReceiveEnvelope are called per message. If ReceiveEnvelope + // is implemented, it will be used, otherwise the switch will fallback to + // using Receive. 
+ // Deprecated: Reactors looking to receive data from a peer should implement ReceiveEnvelope. + // Receive will be deprecated in favor of ReceiveEnvelope in v0.37. Receive(chID byte, peer Peer, msgBytes []byte) } +type EnvelopeReceiver interface { + // ReceiveEnvelope is called by the switch when an envelope is received from any connected + // peer on any of the channels registered by the reactor. + // + // Only one of Receive or ReceiveEnvelope are called per message. If ReceiveEnvelope + // is implemented, it will be used, otherwise the switch will fallback to + // using Receive. Receive will be replaced by ReceiveEnvelope in a future version + ReceiveEnvelope(Envelope) +} + //-------------------------------------- type BaseReactor struct { @@ -67,5 +83,6 @@ func (br *BaseReactor) SetSwitch(sw *Switch) { func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } func (*BaseReactor) AddPeer(peer Peer) {} func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} +func (*BaseReactor) ReceiveEnvelope(e Envelope) {} func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/p2p/conn/conn_notgo110.go b/p2p/conn/conn_notgo110.go index 21dffad2c2..96f9a2a7ea 100644 --- a/p2p/conn/conn_notgo110.go +++ b/p2p/conn/conn_notgo110.go @@ -10,9 +10,13 @@ import ( // Only Go1.10 has a proper net.Conn implementation that // has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// +// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// // lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 +// +// https://github.com/tendermint/tendermint/issues/851 +// // so for go versions < Go1.10 use our custom net.Conn creator // that doesn't return an `Unimplemented error` for net.Conn. 
// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04 diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 44ff838939..187f579222 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -62,6 +62,7 @@ The byte id and the relative priorities of each `Channel` are configured upon initialization of the connection. There are two methods for sending messages: + func (m MConnection) Send(chID byte, msgBytes []byte) bool {} func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} @@ -723,6 +724,7 @@ type ChannelDescriptor struct { SendQueueCapacity int RecvBufferCapacity int RecvMessageCapacity int + MessageType proto.Message } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index c41a46c48e..f59df3dc8b 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -51,7 +51,7 @@ func TestMConnectionSendFlushStop(t *testing.T) { clientConn := createTestMConnection(client) err := clientConn.Start() require.Nil(t, err) - defer clientConn.Stop() // nolint:errcheck // ignore for tests + defer clientConn.Stop() //nolint:errcheck // ignore for tests msg := []byte("abc") assert.True(t, clientConn.Send(0x01, msg)) @@ -89,7 +89,7 @@ func TestMConnectionSend(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests msg := []byte("Ant-Man") assert.True(t, mconn.Send(0x01, msg)) @@ -128,12 +128,12 @@ func TestMConnectionReceive(t *testing.T) { mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn1.Start() require.Nil(t, err) - defer mconn1.Stop() // nolint:errcheck // ignore for tests + defer mconn1.Stop() //nolint:errcheck // ignore for tests mconn2 := createTestMConnection(server) err = mconn2.Start() 
require.Nil(t, err) - defer mconn2.Stop() // nolint:errcheck // ignore for tests + defer mconn2.Stop() //nolint:errcheck // ignore for tests msg := []byte("Cyclops") assert.True(t, mconn2.Send(0x01, msg)) @@ -156,7 +156,7 @@ func TestMConnectionStatus(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests status := mconn.Status() assert.NotNil(t, status) @@ -179,7 +179,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) go func() { @@ -218,7 +218,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests // sending 3 pongs in a row (abuse) protoWriter := protoio.NewDelimitedWriter(server) @@ -273,7 +273,7 @@ func TestMConnectionMultiplePings(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 @@ -322,7 +322,7 @@ func TestMConnectionPingPongs(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) 
go func() { @@ -380,7 +380,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests if err := client.Close(); err != nil { t.Error(err) @@ -492,8 +492,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnRcv := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() // nolint:errcheck // ignore for tests - defer mconnServer.Stop() // nolint:errcheck // ignore for tests + defer mconnClient.Stop() //nolint:errcheck // ignore for tests + defer mconnServer.Stop() //nolint:errcheck // ignore for tests mconnServer.onReceive = func(chID byte, msgBytes []byte) { chOnRcv <- struct{}{} @@ -528,8 +528,8 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { chOnErr := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() // nolint:errcheck // ignore for tests - defer mconnServer.Stop() // nolint:errcheck // ignore for tests + defer mconnClient.Stop() //nolint:errcheck // ignore for tests + defer mconnServer.Stop() //nolint:errcheck // ignore for tests // send msg with unknown msg type _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) @@ -545,7 +545,7 @@ func TestMConnectionTrySend(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + defer mconn.Stop() //nolint:errcheck // ignore for tests msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) @@ -564,7 +564,7 @@ func TestMConnectionTrySend(t *testing.T) { assert.Equal(t, "TrySend", <-resultCh) } -// nolint:lll //ignore line 
length for tests +//nolint:lll //ignore line length for tests func TestConnVectors(t *testing.T) { testCases := []struct { diff --git a/p2p/errors.go b/p2p/errors.go index 3650a7a0a8..4fc915292f 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -145,6 +145,13 @@ func (e ErrTransportClosed) Error() string { return "transport has been closed" } +// ErrPeerRemoval is raised when attempting to remove a peer results in an error. +type ErrPeerRemoval struct{} + +func (e ErrPeerRemoval) Error() string { + return "peer removal failed" +} + //------------------------------------------------------------------- type ErrNetAddressNoID struct { diff --git a/p2p/fuzz.go b/p2p/fuzz.go index 0ada85ecc0..e41e989bb9 100644 --- a/p2p/fuzz.go +++ b/p2p/fuzz.go @@ -103,7 +103,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error { func (fc *FuzzedConnection) randomDuration() time.Duration { maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) - return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) // nolint: gas + return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) //nolint: gas } // implements the fuzz (delay, kill conn) diff --git a/p2p/key.go b/p2p/key.go index f0f3dfd030..b99ad62630 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" - "io/ioutil" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -70,7 +70,7 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { // LoadNodeKey loads NodeKey located in filePath. 
func LoadNodeKey(filePath string) (*NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) + jsonBytes, err := os.ReadFile(filePath) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (nodeKey *NodeKey) SaveAs(filePath string) error { if err != nil { return err } - err = ioutil.WriteFile(filePath, jsonBytes, 0600) + err = os.WriteFile(filePath, jsonBytes, 0o600) if err != nil { return err } diff --git a/p2p/metrics.go b/p2p/metrics.go index 675dd9c7c7..7c80658e5d 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -1,6 +1,11 @@ package p2p import ( + "fmt" + "reflect" + "regexp" + "sync" + "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" "github.com/go-kit/kit/metrics/prometheus" @@ -13,6 +18,13 @@ const ( MetricsSubsystem = "p2p" ) +var ( + // valueToLabelRegexp is used to find the golang package name and type name + // so that the name can be turned into a prometheus label where the characters + // in the label do not include prometheus special characters such as '*' and '.'. + valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) +) + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. @@ -25,6 +37,10 @@ type Metrics struct { PeerPendingSendBytes metrics.Gauge // Number of transactions submitted by each peer. NumTxs metrics.Gauge + // Number of bytes of each message type received. + MessageReceiveBytesTotal metrics.Counter + // Number of bytes of each message type sent. + MessageSendBytesTotal metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -58,7 +74,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_pending_send_bytes", - Help: "Number of pending bytes to be sent to a given peer.", + Help: "Pending bytes to be sent to a given peer.", }, append(labels, "peer_id")).With(labelsAndValues...), NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -66,16 +82,64 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "num_txs", Help: "Number of transactions submitted by each peer.", }, append(labels, "peer_id")).With(labelsAndValues...), + MessageReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "message_receive_bytes_total", + Help: "Number of bytes of each message type received.", + }, append(labels, "message_type")).With(labelsAndValues...), + MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "message_send_bytes_total", + Help: "Number of bytes of each message type sent.", + }, append(labels, "message_type")).With(labelsAndValues...), } } -// NopMetrics returns no-op Metrics. 
func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), - PeerReceiveBytesTotal: discard.NewCounter(), - PeerSendBytesTotal: discard.NewCounter(), - PeerPendingSendBytes: discard.NewGauge(), - NumTxs: discard.NewGauge(), + Peers: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), + NumTxs: discard.NewGauge(), + MessageReceiveBytesTotal: discard.NewCounter(), + MessageSendBytesTotal: discard.NewCounter(), + } +} + +type metricsLabelCache struct { + mtx *sync.RWMutex + messageLabelNames map[reflect.Type]string +} + +// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang +// type that is passed in. +// This method uses a map on the Metrics struct so that each label name only needs +// to be produced once to prevent expensive string operations. +func (m *metricsLabelCache) ValueToMetricLabel(i interface{}) string { + t := reflect.TypeOf(i) + m.mtx.RLock() + + if s, ok := m.messageLabelNames[t]; ok { + m.mtx.RUnlock() + return s + } + m.mtx.RUnlock() + + s := t.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + l := fmt.Sprintf("%s_%s", ss[1], ss[2]) + m.mtx.Lock() + defer m.mtx.Unlock() + m.messageLabelNames[t] = l + return l +} + +func newMetricsLabelCache() *metricsLabelCache { + return &metricsLabelCache{ + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index 59f6e0f4aa..31ce856237 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -42,9 +42,11 @@ func NewPeer(ip net.IP) *Peer { return mp } -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } +func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error +func (mp *Peer) TrySendEnvelope(e 
p2p.Envelope) bool { return true } +func (mp *Peer) SendEnvelope(e p2p.Envelope) bool { return true } +func (mp *Peer) TrySend(_ byte, _ []byte) bool { return true } +func (mp *Peer) Send(_ byte, _ []byte) bool { return true } func (mp *Peer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{ DefaultNodeID: mp.addr.ID, @@ -68,3 +70,5 @@ func (mp *Peer) RemoteIP() net.IP { return mp.ip } func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } func (mp *Peer) CloseConn() error { return nil } +func (mp *Peer) SetRemovalFailed() {} +func (mp *Peer) GetRemovalFailed() bool { return false } diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index 0389a7d190..8443ac8f11 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -22,4 +22,5 @@ func NewReactor() *Reactor { func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } func (r *Reactor) AddPeer(peer p2p.Peer) {} func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} +func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) {} func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index 94ec169f97..92f0106e16 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -53,6 +53,20 @@ func (_m *Peer) Get(_a0 string) interface{} { return r0 } +// GetRemovalFailed provides a mock function with given fields: +func (_m *Peer) GetRemovalFailed() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // ID provides a mock function with given fields: func (_m *Peer) ID() p2p.ID { ret := _m.Called() @@ -109,6 +123,34 @@ func (_m *Peer) IsRunning() bool { return r0 } +// SendEnvelope provides a mock function with given fields: _a0 +func (_m *Peer) SendEnvelope(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) + + var r0 bool + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// TrySendEnvelope provides a mock function with given fields: _a0 +func (_m *Peer) TrySendEnvelope(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) + + var r0 bool + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // NodeInfo provides a mock function with given fields: func (_m *Peer) NodeInfo() p2p.NodeInfo { ret := _m.Called() @@ -244,6 +286,11 @@ func (_m *Peer) SetLogger(_a0 log.Logger) { _m.Called(_a0) } +// SetRemovalFailed provides a mock function with given fields: +func (_m *Peer) SetRemovalFailed() { + _m.Called() +} + // SocketAddr provides a mock function with given fields: func (_m *Peer) SocketAddr() *p2p.NetAddress { ret := _m.Called() diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 1bceb4a101..9c317f8a16 100644 --- a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -33,7 +33,7 @@ func TestNodeInfoValidate(t *testing.T) { }{ { "Too Many Channels", - func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, // nolint: gocritic + func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, //nolint: 
gocritic true, }, {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, diff --git a/p2p/peer.go b/p2p/peer.go index 21ebac6782..36b58f0e4f 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -3,8 +3,11 @@ package p2p import ( "fmt" "net" + "reflect" "time" + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -12,7 +15,7 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) -//go:generate mockery --case underscore --name Peer +//go:generate ../scripts/mockery_generate.sh Peer const metricsTickerDuration = 10 * time.Second @@ -34,11 +37,68 @@ type Peer interface { Status() tmconn.ConnectionStatus SocketAddr() *NetAddress // actual address of the socket + // Deprecated: entities looking to act as peers should implement SendEnvelope instead. + // Send will be removed in v0.37. Send(byte, []byte) bool + + // Deprecated: entities looking to act as peers should implement TrySendEnvelope instead. + // TrySend will be removed in v0.37. TrySend(byte, []byte) bool Set(string, interface{}) Get(string) interface{} + + SetRemovalFailed() + GetRemovalFailed() bool +} + +type EnvelopeSender interface { + SendEnvelope(Envelope) bool + TrySendEnvelope(Envelope) bool +} + +// EnvelopeSendShim implements a shim to allow the legacy peer type that +// does not implement SendEnvelope to be used in places where envelopes are +// being sent. If the peer implements the *Envelope methods, then they are used, +// otherwise, the message is marshaled and dispatched to the legacy *Send. +// +// Deprecated: Will be removed in v0.37. 
+func SendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool { + if es, ok := p.(EnvelopeSender); ok { + return es.SendEnvelope(e) + } + msg := e.Message + if w, ok := msg.(Wrapper); ok { + msg = w.Wrap() + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + lg.Error("marshaling message to send", "error", err) + return false + } + return p.Send(e.ChannelID, msgBytes) +} + +// TrySendEnvelopeShim implements a shim to allow the legacy peer type that +// does not implement TrySendEnvelope to be used in places where envelopes are +// being sent. If the peer implements the *Envelope methods, then they are used, +// otherwise, the message is marshaled and dispatched to the legacy *Send. +// +// Deprecated: Will be removed in v0.37. +func TrySendEnvelopeShim(p Peer, e Envelope, lg log.Logger) bool { + if es, ok := p.(EnvelopeSender); ok { + return es.TrySendEnvelope(e) + } + msg := e.Message + if w, ok := msg.(Wrapper); ok { + msg = w.Wrap() + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + lg.Error("marshaling message to send", "error", err) + return false + } + return p.TrySend(e.ChannelID, msgBytes) } //---------------------------------------------------------- @@ -117,6 +177,10 @@ type peer struct { metrics *Metrics metricsTicker *time.Ticker + mlc *metricsLabelCache + + // When removal of a peer fails, we set this flag + removalAttemptFailed bool } type PeerOption func(*peer) @@ -126,8 +190,10 @@ func newPeer( mConfig tmconn.MConnConfig, nodeInfo NodeInfo, reactorsByCh map[byte]Reactor, + msgTypeByChID map[byte]proto.Message, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), + mlc *metricsLabelCache, options ...PeerOption, ) *peer { p := &peer{ @@ -137,12 +203,14 @@ func newPeer( Data: cmap.NewCMap(), metricsTicker: time.NewTicker(metricsTickerDuration), metrics: NopMetrics(), + mlc: mlc, } p.mconn = createMConnection( pc.conn, p, reactorsByCh, + msgTypeByChID, chDescs, onPeerError, mConfig, @@ -188,7 +256,7 @@ func (p
*peer) OnStart() error { } // FlushStop mimics OnStop but additionally ensures that all successful -// .Send() calls will get flushed before closing the connection. +// SendEnvelope() calls will get flushed before closing the connection. // NOTE: it is not safe to call this method more than once. func (p *peer) FlushStop() { p.metricsTicker.Stop() @@ -241,12 +309,39 @@ func (p *peer) Status() tmconn.ConnectionStatus { return p.mconn.Status() } +// SendEnvelope sends the message in the envelope on the channel specified by the +// envelope. Returns false if the connection times out trying to place the message +// onto its internal queue. +// Using SendEnvelope allows for tracking the message bytes sent and received by message type +// as a metric which Send cannot support. +func (p *peer) SendEnvelope(e Envelope) bool { + if !p.IsRunning() { + return false + } else if !p.hasChannel(e.ChannelID) { + return false + } + msg := e.Message + metricLabelValue := p.mlc.ValueToMetricLabel(msg) + if w, ok := msg.(Wrapper); ok { + msg = w.Wrap() + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + p.Logger.Error("marshaling message to send", "error", err) + return false + } + res := p.Send(e.ChannelID, msgBytes) + if res { + p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) + } + return res +} + // Send msg bytes to the channel identified by chID byte. Returns false if the // send queue is full after timeout, specified by MConnection. +// SendEnvelope replaces Send which will be deprecated in a future release. func (p *peer) Send(chID byte, msgBytes []byte) bool { if !p.IsRunning() { - // see Switch#Broadcast, where we fetch the list of peers and loop over - // them - while we're looping, one peer may be removed and stopped. 
return false } else if !p.hasChannel(chID) { return false @@ -262,8 +357,38 @@ func (p *peer) Send(chID byte, msgBytes []byte) bool { return res } +// TrySendEnvelope attempts to sends the message in the envelope on the channel specified by the +// envelope. Returns false immediately if the connection's internal queue is full +// Using TrySendEnvelope allows for tracking the message bytes sent and received by message type +// as a metric which TrySend cannot support. +func (p *peer) TrySendEnvelope(e Envelope) bool { + if !p.IsRunning() { + // see Switch#Broadcast, where we fetch the list of peers and loop over + // them - while we're looping, one peer may be removed and stopped. + return false + } else if !p.hasChannel(e.ChannelID) { + return false + } + msg := e.Message + metricLabelValue := p.mlc.ValueToMetricLabel(msg) + if w, ok := msg.(Wrapper); ok { + msg = w.Wrap() + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + p.Logger.Error("marshaling message to send", "error", err) + return false + } + res := p.TrySend(e.ChannelID, msgBytes) + if res { + p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) + } + return res +} + // TrySend msg bytes to the channel identified by chID byte. Immediately returns // false if the send queue is full. +// TrySendEnvelope replaces TrySend which will be deprecated in a future release. func (p *peer) TrySend(chID byte, msgBytes []byte) bool { if !p.IsRunning() { return false @@ -316,6 +441,14 @@ func (p *peer) CloseConn() error { return p.peerConn.conn.Close() } +func (p *peer) SetRemovalFailed() { + p.removalAttemptFailed = true +} + +func (p *peer) GetRemovalFailed() bool { + return p.removalAttemptFailed +} + //--------------------------------------------------- // methods only used for testing // TODO: can we remove these? 
@@ -370,6 +503,7 @@ func createMConnection( conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, + msgTypeByChID map[byte]proto.Message, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), config tmconn.MConnConfig, @@ -382,12 +516,33 @@ func createMConnection( // which does onPeerError. panic(fmt.Sprintf("Unknown channel %X", chID)) } + mt := msgTypeByChID[chID] + msg := proto.Clone(mt) + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(fmt.Errorf("unmarshaling message: %s into type: %s", err, reflect.TypeOf(mt))) + } labels := []string{ "peer_id", string(p.ID()), "chID", fmt.Sprintf("%#x", chID), } + if w, ok := msg.(Unwrapper); ok { + msg, err = w.Unwrap() + if err != nil { + panic(fmt.Errorf("unwrapping message: %s", err)) + } + } p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) - reactor.Receive(chID, p, msgBytes) + p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) + if nr, ok := reactor.(EnvelopeReceiver); ok { + nr.ReceiveEnvelope(Envelope{ + ChannelID: chID, + Src: p, + Message: msg, + }) + } else { + reactor.Receive(chID, p, msgBytes) + } } onError := func(r interface{}) { diff --git a/p2p/peer_set.go b/p2p/peer_set.go index 38dff7a9fb..30bcc4d32f 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -47,6 +47,9 @@ func (ps *PeerSet) Add(peer Peer) error { if ps.lookup[peer.ID()] != nil { return ErrSwitchDuplicatePeerID{peer.ID()} } + if peer.GetRemovalFailed() { + return ErrPeerRemoval{} + } index := len(ps.list) // Appending is safe even with other goroutines @@ -107,6 +110,12 @@ func (ps *PeerSet) Remove(peer Peer) bool { item := ps.lookup[peer.ID()] if item == nil { + // Removing the peer has failed so we set a flag to mark that a removal was attempted. + // This can happen when the peer add routine from the switch is running in + // parallel to the receive routine of MConn. 
+ // There is an error within MConn but the switch has not actually added the peer to the peer set yet. + // Setting this flag will prevent a peer from being added to a node's peer set afterwards. + peer.SetRemovalFailed() return false } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index b61b43f10a..6501dd77a5 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -18,20 +18,24 @@ type mockPeer struct { id ID } -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() ID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } +func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error +func (mp *mockPeer) TrySendEnvelope(e Envelope) bool { return true } +func (mp *mockPeer) SendEnvelope(e Envelope) bool { return true } +func (mp *mockPeer) TrySend(_ byte, _ []byte) bool { return true } +func (mp *mockPeer) Send(_ byte, _ []byte) bool { return true } +func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } +func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } +func (mp *mockPeer) ID() ID { return mp.id } +func (mp *mockPeer) IsOutbound() bool { return false } +func (mp *mockPeer) IsPersistent() bool { return 
true } +func (mp *mockPeer) Get(s string) interface{} { return s } +func (mp *mockPeer) Set(string, interface{}) {} +func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } +func (mp *mockPeer) SocketAddr() *NetAddress { return nil } +func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (mp *mockPeer) CloseConn() error { return nil } +func (mp *mockPeer) SetRemovalFailed() {} +func (mp *mockPeer) GetRemovalFailed() bool { return false } // Returns a mock peer func newMockPeer(ip net.IP) *mockPeer { diff --git a/p2p/peer_test.go b/p2p/peer_test.go index f8808f14d4..f021267cf5 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,6 +15,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/p2p/conn" @@ -70,7 +72,7 @@ func TestPeerSend(t *testing.T) { }) assert.True(p.CanSend(testCh)) - assert.True(p.Send(testCh, []byte("Asylum"))) + assert.True(SendEnvelopeShim(p, Envelope{ChannelID: testCh, Message: &p2p.Message{}}, p.Logger)) } func createOutboundPeerAndPerformHandshake( @@ -82,6 +84,9 @@ func createOutboundPeerAndPerformHandshake( {ID: testCh, Priority: 1}, } reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} + msgTypeByChID := map[byte]proto.Message{ + testCh: &p2p.Message{}, + } pk := ed25519.GenPrivKey() pc, err := testOutboundPeerConn(addr, config, false, pk) if err != nil { @@ -94,7 +99,7 @@ func createOutboundPeerAndPerformHandshake( return nil, err } - p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) + p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, 
msgTypeByChID, chDescs, func(p Peer, r interface{}) {}, newMetricsLabelCache()) p.SetLogger(log.TestingLogger().With("peer", addr)) return p, nil } diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index ad41d55626..14ba0e2219 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -3,7 +3,6 @@ package pex import ( "encoding/hex" "fmt" - "io/ioutil" "math" "net" "os" @@ -718,7 +717,7 @@ func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetA } func createTempFileName(prefix string) string { - f, err := ioutil.TempFile("", prefix) + f, err := os.CreateTemp("", prefix) if err != nil { panic(err) } diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go index e98a9e97e0..33763c084c 100644 --- a/p2p/pex/known_address.go +++ b/p2p/pex/known_address.go @@ -94,17 +94,16 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int { } /* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: +An address is bad if the address in question is a New address, has not been tried in the last +minute, and meets one of the following criteria: - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. +1) It claims to be from the future +2) It hasn't been seen in over a week +3) It has failed at least three times and never succeeded +4) It has failed ten times in the last week +All addresses that meet these criteria are assumed to be worthless and not +worth keeping hold of. 
*/ func (ka *knownAddress) isBad() bool { // Is Old --> good diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index a13e3170e0..bcc8c4cce8 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -184,6 +184,7 @@ func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, + MessageType: &tmp2p.Message{}, }, } } @@ -236,16 +237,10 @@ func (r *Reactor) logErrAddrBook(err error) { } // Receive implements Reactor by handling incoming PEX messages. -func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) +func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) { + r.Logger.Debug("Received message", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *tmp2p.PexRequest: // NOTE: this is a prime candidate for amplification attacks, @@ -255,8 +250,8 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { // If we're a seed and this is an inbound peer, // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) + if r.config.SeedMode && !e.Src.IsOutbound() { + id := string(e.Src.ID()) v := r.lastReceivedRequests.Get(id) if v != nil { // FlushStop/StopPeer are already @@ -266,36 +261,36 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { r.lastReceivedRequests.Set(id, time.Now()) // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) + r.SendAddrs(e.Src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) go func() { // In a go-routine so it doesn't block .Receive. 
- src.FlushStop() - r.Switch.StopPeerGracefully(src) + e.Src.FlushStop() + r.Switch.StopPeerGracefully(e.Src) }() } else { // Check we're not receiving requests too frequently. - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + if err := r.receiveRequest(e.Src); err != nil { + r.Switch.StopPeerForError(e.Src, err) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) return } - r.SendAddrs(src, r.book.GetSelection()) + r.SendAddrs(e.Src, r.book.GetSelection()) } case *tmp2p.PexAddrs: // If we asked for addresses, add them to the book addrs, err := p2p.NetAddressesFromProto(msg.Addrs) if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + r.Switch.StopPeerForError(e.Src, err) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) return } - err = r.ReceiveAddrs(addrs, src) + err = r.ReceiveAddrs(addrs, e.Src) if err != nil { - r.Switch.StopPeerForError(src, err) + r.Switch.StopPeerForError(e.Src, err) if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) + r.book.MarkBad(e.Src.SocketAddr(), defaultBanTime) } return } @@ -305,6 +300,23 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { } } +func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &tmp2p.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + um, err := msg.Unwrap() + if err != nil { + panic(err) + } + r.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: um, + }) +} + // enforces a minimum amount of time between requests func (r *Reactor) receiveRequest(src Peer) error { id := string(src.ID()) @@ -348,7 +360,10 @@ func (r *Reactor) RequestAddrs(p Peer) { } r.Logger.Debug("Request addrs", "from", p) r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) + p2p.SendEnvelopeShim(p, p2p.Envelope{ //nolint: staticcheck 
+ ChannelID: PexChannel, + Message: &tmp2p.PexRequest{}, + }, r.Logger) } // ReceiveAddrs adds the given addrs to the addrbook if theres an open @@ -406,7 +421,11 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // SendAddrs sends addrs to the peer. func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)})) + e := p2p.Envelope{ + ChannelID: PexChannel, + Message: &tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)}, + } + p2p.SendEnvelopeShim(p, e, r.Logger) //nolint: staticcheck } // SetEnsurePeersPeriod sets period to ensure peers connected. @@ -763,43 +782,3 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { book.MarkAttempt(addr) } } - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.Message{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.Message_PexRequest{PexRequest: pb} - case *tmp2p.PexAddrs: - msg.Sum = &tmp2p.Message_PexAddrs{PexAddrs: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.Message{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.Message_PexRequest: - return msg.PexRequest, nil - case *tmp2p.Message_PexAddrs: - return msg.PexAddrs, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 4ed1254efb..b39587ebda 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -3,7 +3,6 @@ package pex import ( "encoding/hex" "fmt" 
- "io/ioutil" "os" "path/filepath" "testing" @@ -20,9 +19,7 @@ import ( tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -59,21 +56,21 @@ func TestPEXReactorAddRemovePeer(t *testing.T) { } // --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) +// +// pex_reactor_test.go:411: expected all switches to be connected to at +// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => +// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) // // EXPLANATION: peers are getting rejected because in switch#addPeer we check // if any peer (who we already connected to) has the same IP. Even though local // peers have different IP addresses, they all have the same underlying remote // IP: 127.0.0.1. -// func TestPEXReactorRunning(t *testing.T) { N := 3 switches := make([]*p2p.Switch, N) // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) @@ -132,12 +129,11 @@ func TestPEXReactorReceive(t *testing.T) { r.RequestAddrs(peer) size := book.Size() - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) - r.Receive(PexChannel, peer, msg) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.Equal(t, size+1, book.Size()) - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
+ r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) } func TestPEXReactorRequestMessageAbuse(t *testing.T) { @@ -156,20 +152,19 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { require.True(t, book.HasAddress(peerAddr)) id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) // first time creates the entry - r.Receive(PexChannel, peer, msg) + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.True(t, r.lastReceivedRequests.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // next time sets the last time value - r.Receive(PexChannel, peer, msg) + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.True(t, r.lastReceivedRequests.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: &tmp2p.PexRequest{}}) assert.False(t, r.lastReceivedRequests.Has(id)) assert.False(t, sw.Peers().Has(peer.ID())) assert.True(t, book.IsBanned(peerAddr)) @@ -193,29 +188,29 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} // receive some addrs. 
should clear the request - r.Receive(PexChannel, peer, msg) + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.False(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) + r.ReceiveEnvelope(p2p.Envelope{ChannelID: PexChannel, Src: peer, Message: msg}) assert.False(t, sw.Peers().Has(peer.ID())) assert.True(t, book.IsBanned(peer.SocketAddr())) } func TestCheckSeeds(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) // 1. test creating peer with no seeds works peerSwitch := testCreateDefaultPeer(dir, 0) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests + peerSwitch.Stop() //nolint:errcheck // ignore for tests // 2. create seed seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) @@ -223,43 +218,47 @@ func TestCheckSeeds(t *testing.T) { // 3. test create peer with online seed works peerSwitch = testCreatePeerWithSeed(dir, 2, seed) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests + peerSwitch.Stop() //nolint:errcheck // ignore for tests // 4. 
test create peer with all seeds having unresolvable DNS fails badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests + peerSwitch.Stop() //nolint:errcheck // ignore for tests // 5. test create peer with one good seed address succeeds badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, + seed.NetAddress().String(), + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests + peerSwitch.Stop() //nolint:errcheck // ignore for tests } func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) // 1. create seed seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) require.Nil(t, seed.Start()) - defer seed.Stop() // nolint:errcheck // ignore for tests + defer seed.Stop() //nolint:errcheck // ignore for tests // 2. create usual peer with only seed configured. peer := testCreatePeerWithSeed(dir, 1, seed) require.Nil(t, peer.Start()) - defer peer.Stop() // nolint:errcheck // ignore for tests + defer peer.Stop() //nolint:errcheck // ignore for tests // 3. 
check that the peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) @@ -267,25 +266,25 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) // 1. create peer peerSwitch := testCreateDefaultPeer(dir, 1) require.Nil(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + defer peerSwitch.Stop() //nolint:errcheck // ignore for tests // 2. Create seed which knows about the peer peerAddr := peerSwitch.NetAddress() seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) require.Nil(t, seed.Start()) - defer seed.Stop() // nolint:errcheck // ignore for tests + defer seed.Stop() //nolint:errcheck // ignore for tests // 3. create another peer with only seed configured. secondPeer := testCreatePeerWithSeed(dir, 3, seed) require.Nil(t, secondPeer.Start()) - defer secondPeer.Stop() // nolint:errcheck // ignore for tests + defer secondPeer.Stop() //nolint:errcheck // ignore for tests // 4. 
check that the second peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) @@ -296,7 +295,7 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { func TestPEXReactorSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) @@ -308,13 +307,13 @@ func TestPEXReactorSeedMode(t *testing.T) { sw.SetAddrBook(book) err = sw.Start() require.NoError(t, err) - defer sw.Stop() // nolint:errcheck // ignore for tests + defer sw.Stop() //nolint:errcheck // ignore for tests assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + defer peerSwitch.Stop() //nolint:errcheck // ignore for tests // 1. Test crawlPeers dials the peer pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) @@ -335,7 +334,7 @@ func TestPEXReactorSeedMode(t *testing.T) { func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) @@ -347,13 +346,13 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { sw.SetAddrBook(book) err = sw.Start() require.NoError(t, err) - defer sw.Stop() // nolint:errcheck // ignore for tests + defer sw.Stop() //nolint:errcheck // ignore for tests assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + defer peerSwitch.Stop() //nolint:errcheck // ignore for tests err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) require.NoError(t, err) @@ -373,7 +372,7 @@ func 
TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) @@ -409,7 +408,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { switches := make([]*p2p.Switch, N) // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") require.Nil(t, err) defer os.RemoveAll(dir) @@ -487,14 +486,34 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { pexR.RequestAddrs(peer) size := book.Size() - msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) - pexR.Receive(PexChannel, peer, msg) + msg := &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}} + pexR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: PexChannel, + Src: peer, + Message: msg, + }) assert.Equal(t, size, book.Size()) pexR.AddPeer(peer) assert.Equal(t, size, book.Size()) } +func TestLegacyReactorReceiveBasic(t *testing.T) { + pexR, _ := createReactor(&ReactorConfig{}) + peer := p2p.CreateRandomPeer(false) + + pexR.InitPeer(peer) + pexR.AddPeer(peer) + m := &tmp2p.PexAddrs{} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + pexR.Receive(PexChannel, peer, msg) + }) +} + func TestPEXReactorDialPeer(t *testing.T) { pexR, book := createReactor(&ReactorConfig{}) defer teardownReactor(book) @@ -619,7 +638,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) book.SetLogger(log.TestingLogger()) for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests + book.AddAddress(knownAddrs[j], srcAddrs[j]) 
//nolint:errcheck // ignore for tests book.MarkGood(knownAddrs[j].ID) } sw.SetAddrBook(book) @@ -646,7 +665,7 @@ func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { func createReactor(conf *ReactorConfig) (r *Reactor, book AddrBook) { // directory to store address book - dir, err := ioutil.TempDir("", "pex_reactor") + dir, err := os.MkdirTemp("", "pex_reactor") if err != nil { panic(err) } @@ -677,7 +696,6 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } func TestPexVectors(t *testing.T) { - addr := tmp2p.NetAddress{ ID: "1", IP: "127.0.0.1", @@ -696,7 +714,9 @@ func TestPexVectors(t *testing.T) { for _, tc := range testCases { tc := tc - bz := mustEncode(tc.msg) + w := tc.msg.(p2p.Wrapper).Wrap() + bz, err := proto.Marshal(w) + require.NoError(t, err) require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } diff --git a/p2p/switch.go b/p2p/switch.go index fa87cbccd7..9a298c72a0 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -6,9 +6,9 @@ import ( "sync" "time" + "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p/conn" @@ -69,16 +69,17 @@ type PeerFilterFunc func(IPeerSet, Peer) error type Switch struct { service.BaseService - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - reconnecting *cmap.CMap - nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey - addrBook AddrBook + config *config.P2PConfig + reactors map[string]Reactor + chDescs []*conn.ChannelDescriptor + reactorsByCh map[byte]Reactor + msgTypeByChID map[byte]proto.Message + peers *PeerSet + dialing *cmap.CMap + reconnecting *cmap.CMap + nodeInfo NodeInfo // our 
node info + nodeKey *NodeKey // our node privkey + addrBook AddrBook // peers addresses with whom we'll maintain constant connection persistentPeersAddrs []*NetAddress unconditionalPeerIDs map[ID]struct{} @@ -91,6 +92,7 @@ type Switch struct { rng *rand.Rand // seed for randomizing dial times and orders metrics *Metrics + mlc *metricsLabelCache } // NetAddress returns the address the switch is listening on. @@ -108,11 +110,13 @@ func NewSwitch( transport Transport, options ...SwitchOption, ) *Switch { + sw := &Switch{ config: cfg, reactors: make(map[string]Reactor), chDescs: make([]*conn.ChannelDescriptor, 0), reactorsByCh: make(map[byte]Reactor), + msgTypeByChID: make(map[byte]proto.Message), peers: NewPeerSet(), dialing: cmap.NewCMap(), reconnecting: cmap.NewCMap(), @@ -121,6 +125,7 @@ func NewSwitch( filterTimeout: defaultFilterTimeout, persistentPeersAddrs: make([]*NetAddress, 0), unconditionalPeerIDs: make(map[ID]struct{}), + mlc: newMetricsLabelCache(), } // Ensure we have a completely undeterministic PRNG. @@ -164,6 +169,7 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { } sw.chDescs = append(sw.chDescs, chDesc) sw.reactorsByCh[chID] = reactor + sw.msgTypeByChID[chID] = chDesc.MessageType } sw.reactors[name] = reactor reactor.SetSwitch(sw) @@ -182,6 +188,7 @@ func (sw *Switch) RemoveReactor(name string, reactor Reactor) { } } delete(sw.reactorsByCh, chDesc.ID) + delete(sw.msgTypeByChID, chDesc.ID) } delete(sw.reactors, name) reactor.SetSwitch(nil) @@ -255,14 +262,49 @@ func (sw *Switch) OnStop() { //--------------------------------------------------------------------- // Peers +// BroadcastEnvelope runs a go routine for each attempted send, which will block trying +// to send for defaultSendTimeoutSeconds. Returns a channel which receives +// success values for each attempted send (false if times out). Channel will be +// closed once msg bytes are sent to all peers (or time out). 
+// BroadcastEnvelope sends to the peers using the SendEnvelope method. +// +// NOTE: BroadcastEnvelope uses goroutines, so order of broadcast may not be preserved. +func (sw *Switch) BroadcastEnvelope(e Envelope) chan bool { + sw.Logger.Debug("Broadcast", "channel", e.ChannelID) + + peers := sw.peers.List() + var wg sync.WaitGroup + wg.Add(len(peers)) + successChan := make(chan bool, len(peers)) + + for _, peer := range peers { + go func(p Peer) { + defer wg.Done() + success := SendEnvelopeShim(p, e, sw.Logger) + successChan <- success + }(peer) + } + + go func() { + wg.Wait() + close(successChan) + }() + + return successChan +} + // Broadcast runs a go routine for each attempted send, which will block trying // to send for defaultSendTimeoutSeconds. Returns a channel which receives // success values for each attempted send (false if times out). Channel will be // closed once msg bytes are sent to all peers (or time out). +// Broadcast sends to the peers using the Send method. // // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. +// +// Deprecated: code looking to broadcast data to all peers should use BroadcastEnvelope. +// Broadcast will be removed in 0.37. func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", log.NewLazySprintf("%X", msgBytes)) + sw.Logger.Debug("Broadcast", "channel", chID) peers := sw.peers.List() var wg sync.WaitGroup @@ -370,6 +412,10 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { // https://github.com/tendermint/tendermint/issues/3338 if sw.peers.Remove(peer) { sw.metrics.Peers.Add(float64(-1)) + } else { + // Removal of the peer has failed. The function above sets a flag within the peer to mark this. + // We keep this message here as information to the developer. 
+ sw.Logger.Debug("error on peer removal", ",", "peer", peer.ID()) } } @@ -379,8 +425,8 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { // to the PEX/Addrbook to find the peer with the addr again // NOTE: this will keep trying even if the handshake or auth fails. // TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already +// - ie. if we're getting ErrDuplicatePeer we can stop +// because the addrbook got us the peer back already func (sw *Switch) reconnectToPeer(addr *NetAddress) { if sw.reconnecting.Has(string(addr.ID)) { return @@ -619,11 +665,13 @@ func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { func (sw *Switch) acceptRoutine() { for { p, err := sw.transport.Accept(peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - reactorsByCh: sw.reactorsByCh, - metrics: sw.metrics, - isPersistent: sw.IsPeerPersistent, + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + mlc: sw.mlc, + isPersistent: sw.IsPeerPersistent, }) if err != nil { switch err := err.(type) { @@ -722,11 +770,13 @@ func (sw *Switch) addOutboundPeerWithConfig( } p, err := sw.transport.Dial(*addr, peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - isPersistent: sw.IsPeerPersistent, - reactorsByCh: sw.reactorsByCh, - metrics: sw.metrics, + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + isPersistent: sw.IsPeerPersistent, + reactorsByCh: sw.reactorsByCh, + msgTypeByChID: sw.msgTypeByChID, + metrics: sw.metrics, + mlc: sw.mlc, }) if err != nil { if e, ok := err.(ErrRejected); ok { @@ -824,6 +874,12 @@ func (sw *Switch) addPeer(p Peer) error { // so that if Receive errors, we will find the peer and remove it. // Add should not err since we already checked peers.Has(). 
if err := sw.peers.Add(p); err != nil { + switch err.(type) { + case ErrPeerRemoval: + sw.Logger.Error("Error starting peer ", + " err ", "Peer has already errored and removal was attempted.", + "peer", p.ID()) + } return err } sw.metrics.Peers.Add(float64(1)) diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 36420d333c..5695d03dd6 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,11 +24,10 @@ import ( "github.com/tendermint/tendermint/libs/log" tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p/conn" + p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -36,9 +36,8 @@ func init() { } type PeerMessage struct { - PeerID ID - Bytes []byte - Counter int + Contents proto.Message + Counter int } type TestReactor struct { @@ -70,16 +69,34 @@ func (tr *TestReactor) AddPeer(peer Peer) {} func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { +func (tr *TestReactor) ReceiveEnvelope(e Envelope) { if tr.logMessages { tr.mtx.Lock() defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) + // fmt.Printf("Received: %X, %X\n", e.ChannelID, e.Message) + tr.msgsReceived[e.ChannelID] = append(tr.msgsReceived[e.ChannelID], PeerMessage{Contents: e.Message, Counter: tr.msgsCounter}) tr.msgsCounter++ } } +func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { + msg 
:= &p2pproto.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + um, err := msg.Unwrap() + if err != nil { + panic(err) + } + + tr.ReceiveEnvelope(Envelope{ + ChannelID: chID, + Src: peer, + Message: um, + }) +} + func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { tr.mtx.Lock() defer tr.mtx.Unlock() @@ -99,16 +116,17 @@ func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc func initSwitchFunc(i int, sw *Switch) *Switch { sw.SetAddrBook(&AddrBookMock{ Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) + OurAddrs: make(map[string]struct{}), + }) // Make two reactors of two channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, + {ID: byte(0x00), Priority: 10, MessageType: &p2pproto.Message{}}, + {ID: byte(0x01), Priority: 10, MessageType: &p2pproto.Message{}}, }, true)) sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, + {ID: byte(0x02), Priority: 10, MessageType: &p2pproto.Message{}}, + {ID: byte(0x03), Priority: 10, MessageType: &p2pproto.Message{}}, }, true)) return sw @@ -135,31 +153,47 @@ func TestSwitches(t *testing.T) { } // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - + ch0Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } + ch1Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "1", + }, + }, + } + ch2Msg := &p2pproto.PexAddrs{ + Addrs: []p2pproto.NetAddress{ + { + ID: "2", + }, + }, + } + s1.BroadcastEnvelope(Envelope{ChannelID: byte(0x00), Message: ch0Msg}) + s1.BroadcastEnvelope(Envelope{ChannelID: byte(0x01), Message: ch1Msg}) + 
s1.BroadcastEnvelope(Envelope{ChannelID: byte(0x02), Message: ch2Msg}) assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second) assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("foo").(*TestReactor), 200*time.Millisecond, 5*time.Second) assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) + s2.Reactor("bar").(*TestReactor), 200*time.Millisecond, 5*time.Second) } func assertMsgReceivedWithTimeout( t *testing.T, - msgBytes []byte, + msg proto.Message, channel byte, reactor *TestReactor, checkPeriod, @@ -170,9 +204,13 @@ func assertMsgReceivedWithTimeout( select { case <-ticker.C: msgs := reactor.getMsgs(channel) + expectedBytes, err := proto.Marshal(msgs[0].Contents) + require.NoError(t, err) + gotBytes, err := proto.Marshal(msg) + require.NoError(t, err) if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) + if !bytes.Equal(expectedBytes, gotBytes) { + t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msg, msgs[0].Counter) } return } @@ -400,7 +438,7 @@ func TestSwitchStopPeerForError(t *testing.T) { resp, err := http.Get(s.URL) require.NoError(t, err) defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) + buf, _ := io.ReadAll(resp.Body) return string(buf) } @@ -429,7 +467,10 @@ func TestSwitchStopPeerForError(t *testing.T) { // send messages to the peer from sw1 p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) + SendEnvelopeShim(p, Envelope{ + ChannelID: 0x1, + Message: &p2pproto.Message{}, + }, sw1.Logger) // stop sw2. 
this should cause the p to fail, // which results in calling StopPeerForError internally @@ -681,9 +722,11 @@ func (et errorTransport) NetAddress() NetAddress { func (et errorTransport) Accept(c peerConfig) (Peer, error) { return nil, et.acceptErr } + func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { panic("not implemented") } + func (errorTransport) Cleanup(Peer) { panic("not implemented") } @@ -824,7 +867,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) { // Send random message from foo channel to another for i := 0; i < b.N; i++ { chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) + successChan := s1.BroadcastEnvelope(Envelope{ChannelID: chID}) for s := range successChan { if s { numSuccess++ @@ -836,3 +879,15 @@ func BenchmarkSwitchBroadcast(b *testing.B) { b.Logf("success: %v, failure: %v", numSuccess, numFailure) } + +func TestSwitchRemovalErr(t *testing.T) { + sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + return initSwitchFunc(i, sw) + }) + assert.Equal(t, len(sw1.Peers().List()), 1) + p := sw1.Peers().List()[0] + + sw2.StopPeerForError(p, fmt.Errorf("peer should error")) + + assert.Equal(t, sw2.peers.Add(p).Error(), ErrPeerRemoval{}.Error()) +} diff --git a/p2p/test_util.go b/p2p/test_util.go index 4e56f0193c..1d9a4883cb 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -149,8 +149,10 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error { MConnConfig(sw.config), ni, sw.reactorsByCh, + sw.msgTypeByChID, sw.chDescs, sw.StopPeerForError, + sw.mlc, ) if err = sw.addPeer(p); err != nil { diff --git a/p2p/transport.go b/p2p/transport.go index 1257f38b30..416c946942 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -8,6 +8,7 @@ import ( "golang.org/x/net/netutil" + "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" @@ -47,9 +48,11 @@ type peerConfig struct { // 
isPersistent allows you to set a function, which, given socket address // (for outbound peers) OR self-reported address (for inbound peers), tells // if the peer is persistent or not. - isPersistent func(*NetAddress) bool - reactorsByCh map[byte]Reactor - metrics *Metrics + isPersistent func(*NetAddress) bool + reactorsByCh map[byte]Reactor + msgTypeByChID map[byte]proto.Message + metrics *Metrics + mlc *metricsLabelCache } // Transport emits and connects to Peers. The implementation of Peer is left to @@ -519,8 +522,10 @@ func (mt *MultiplexTransport) wrapPeer( mt.mConfig, ni, cfg.reactorsByCh, + cfg.msgTypeByChID, cfg.chDescs, cfg.onPeerError, + cfg.mlc, PeerMetrics(cfg.metrics), ) diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 7638de4cb2..adaab39955 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -79,8 +79,8 @@ func TestTransportMultiplexConnFilter(t *testing.T) { } _, err = mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { + if e, ok := err.(ErrRejected); ok { + if !e.IsFiltered() { t.Errorf("expected peer to be filtered, got %v", err) } } else { @@ -386,8 +386,8 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { } _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsNodeInfoInvalid() { + if e, ok := err.(ErrRejected); ok { + if !e.IsNodeInfoInvalid() { t.Errorf("expected NodeInfo to be invalid, got %v", err) } } else { @@ -425,8 +425,8 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { } _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsAuthFailure() { + if e, ok := err.(ErrRejected); ok { + if !e.IsAuthFailure() { t.Errorf("expected auth failure, got %v", err) } } else { @@ -453,8 +453,8 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { t.Logf("connection failed: %v", err) - if err, ok := err.(ErrRejected); ok { - if 
!err.IsAuthFailure() { + if e, ok := err.(ErrRejected); ok { + if !e.IsAuthFailure() { t.Errorf("expected auth failure, got %v", err) } } else { @@ -490,8 +490,8 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { }() _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsIncompatible() { + if e, ok := err.(ErrRejected); ok { + if !e.IsIncompatible() { t.Errorf("expected to reject incompatible, got %v", err) } } else { @@ -517,8 +517,8 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { }() if err := <-errc; err != nil { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { + if e, ok := err.(ErrRejected); ok { + if !e.IsSelf() { t.Errorf("expected to reject self, got: %v", err) } } else { @@ -529,8 +529,8 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { } _, err := mt.Accept(peerConfig{}) - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { + if e, ok := err.(ErrRejected); ok { + if !e.IsSelf() { t.Errorf("expected to reject self, got: %v", err) } } else { diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go index 65caf38a23..c3adfd5d17 100644 --- a/p2p/trust/metric_test.go +++ b/p2p/trust/metric_test.go @@ -72,6 +72,7 @@ func TestTrustMetricCopyNilPointer(t *testing.T) { } // XXX: This test fails non-deterministically +// //nolint:unused,deadcode func _TestTrustMetricStopPause(t *testing.T) { // The TestTicker will provide manual control over diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index df0f14a044..c583d58aad 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -5,7 +5,6 @@ package trust import ( "fmt" - "io/ioutil" "os" "testing" @@ -17,7 +16,7 @@ import ( ) func TestTrustMetricStoreSaveLoad(t *testing.T) { - dir, err := ioutil.TempDir("", "trust_test") + dir, err := os.MkdirTemp("", "trust_test") require.NoError(t, err) defer os.Remove(dir) diff --git a/p2p/types.go b/p2p/types.go index b11765bb51..7a741bd295 100644 --- a/p2p/types.go +++ 
b/p2p/types.go @@ -1,8 +1,40 @@ package p2p import ( + "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/p2p/conn" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) type ChannelDescriptor = conn.ChannelDescriptor type ConnectionStatus = conn.ConnectionStatus + +// Envelope contains a message with sender routing info. +type Envelope struct { + Src Peer // sender (empty if outbound) + Message proto.Message // message payload + ChannelID byte +} + +// Unwrapper is a Protobuf message that can contain a variety of inner messages +// (e.g. via oneof fields). If a Channel's message type implements Unwrapper, the +// p2p layer will automatically unwrap inbound messages so that reactors do not have to do this themselves. +type Unwrapper interface { + proto.Message + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} + +// Wrapper is a companion type to Unwrapper. It is a Protobuf message that can contain a variety of inner messages. The p2p layer will automatically wrap outbound messages so that the reactors do not have to do it themselves. +type Wrapper interface { + proto.Message + + // Wrap will take the underlying message and wrap it in its wrapper type. 
+ Wrap() proto.Message +} + +var ( + _ Wrapper = &tmp2p.PexRequest{} + _ Wrapper = &tmp2p.PexAddrs{} +) diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index c00530acae..0df5a24cfd 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -10,7 +10,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "strconv" @@ -202,7 +202,7 @@ func localIPv4() (net.IP, error) { } func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) // nolint: gosec + r, err := http.Get(rootURL) //nolint: gosec if err != nil { return } @@ -299,7 +299,6 @@ type statusInfo struct { } func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - message := "\r\n" + "" @@ -312,7 +311,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { return } var envelope Envelope - data, err := ioutil.ReadAll(response.Body) + data, err := io.ReadAll(response.Body) if err != nil { return } @@ -350,7 +349,8 @@ func (n *upnpNAT) AddPortMapping( externalPort, internalPort int, description string, - timeout int) (mappedExternalPort int, err error) { + timeout int, +) (mappedExternalPort int, err error) { // A single concatenation would break ARM compilation. 
message := "\r\n" + "" + strconv.Itoa(externalPort) @@ -374,7 +374,7 @@ func (n *upnpNAT) AddPortMapping( // TODO: check response to see if the port was forwarded // log.Println(message, response) // JAE: - // body, err := ioutil.ReadAll(response.Body) + // body, err := io.ReadAll(response.Body) // fmt.Println(string(body), err) mappedExternalPort = externalPort _ = response @@ -382,7 +382,6 @@ func (n *upnpNAT) AddPortMapping( } func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - message := "\r\n" + "" + strconv.Itoa(externalPort) + "" + protocol + "" + diff --git a/privval/doc.go b/privval/doc.go index 7695ffe9d0..63e1d071da 100644 --- a/privval/doc.go +++ b/privval/doc.go @@ -1,13 +1,12 @@ /* - Package privval provides different implementations of the types.PrivValidator. -FilePV +# FilePV FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state. -SignerListenerEndpoint +# SignerListenerEndpoint SignerListenerEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. @@ -15,15 +14,14 @@ SignerListenerEndpoint listens for the external KMS process to dial in. SignerListenerEndpoint takes a listener, which determines the type of connection (ie. encrypted over tcp, or unencrypted over unix). -SignerDialerEndpoint +# SignerDialerEndpoint SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. -SignerClient +# SignerClient SignerClient handles remote validator connections that provide signing services. In production, it's recommended to wrap it with RetrySignerClient to avoid termination in case of temporary errors. 
- */ package privval diff --git a/privval/file.go b/privval/file.go index fa33bb0ae6..9d0ddaf182 100644 --- a/privval/file.go +++ b/privval/file.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -64,7 +64,7 @@ func (pvKey FilePVKey) Save() { panic(err) } - if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0600); err != nil { + if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600); err != nil { panic(err) } } @@ -90,7 +90,6 @@ type FilePVLastSignState struct { // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { - if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) } @@ -133,7 +132,7 @@ func (lss *FilePVLastSignState) Save() { if err != nil { panic(err) } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600) if err != nil { panic(err) } @@ -188,7 +187,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV { // If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. 
func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { tmos.Exit(err.Error()) } @@ -206,7 +205,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { tmos.Exit(err.Error()) } @@ -384,8 +383,8 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error // Persist height/round/step and signature func (pv *FilePV) saveSigned(height int64, round int32, step int8, - signBytes []byte, sig []byte) { - + signBytes []byte, sig []byte, +) { pv.LastSignState.Height = height pv.LastSignState.Round = round pv.LastSignState.Step = step diff --git a/privval/file_test.go b/privval/file_test.go index 69bd768745..76d06eab7c 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -3,7 +3,6 @@ package privval import ( "encoding/base64" "fmt" - "io/ioutil" "os" "testing" "time" @@ -23,9 +22,9 @@ import ( func TestGenLoadValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) @@ -41,9 +40,9 @@ func TestGenLoadValidator(t *testing.T) { } func TestResetValidator(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, 
err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) @@ -72,9 +71,9 @@ func TestResetValidator(t *testing.T) { func TestLoadOrGenValidator(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) tempKeyFilePath := tempKeyFile.Name() @@ -159,9 +158,9 @@ func TestUnmarshalValidatorKey(t *testing.T) { func TestSignVote(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) @@ -169,10 +168,14 @@ func TestSignVote(t *testing.T) { randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + block1 := types.BlockID{ + Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}, + } + block2 := types.BlockID{ + Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}, + } height, round := int64(10), int32(1) voteType := tmproto.PrevoteType @@ -212,9 +215,9 @@ func TestSignVote(t *testing.T) { func TestSignProposal(t *testing.T) { assert := assert.New(t) - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", 
"priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) @@ -222,10 +225,14 @@ func TestSignProposal(t *testing.T) { randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + block1 := types.BlockID{ + Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}, + } + block2 := types.BlockID{ + Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}, + } height, round := int64(10), int32(1) // sign a proposal for first time @@ -260,9 +267,9 @@ func TestSignProposal(t *testing.T) { } func TestDifferByTimestamp(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") require.Nil(t, err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) @@ -321,7 +328,8 @@ func TestDifferByTimestamp(t *testing.T) { } func newVote(addr types.Address, idx int32, height int64, round int32, - typ tmproto.SignedMsgType, blockID types.BlockID) *types.Vote { + typ tmproto.SignedMsgType, blockID types.BlockID, +) *types.Vote { return &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, diff --git a/privval/msgs_test.go b/privval/msgs_test.go index bf532bd7b9..afefa0e77c 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -57,7 +57,7 @@ func exampleProposal() *types.Proposal { } } -// nolint:lll // ignore line length for tests +//nolint:lll // ignore line length for tests func TestPrivvalVectors(t *testing.T) { pk := 
ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() ppk, err := cryptoenc.PubKeyToProto(pk) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 5e95ec10ce..08a285bdf8 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -1,7 +1,6 @@ package privval import ( - "io/ioutil" "net" "os" "testing" @@ -29,7 +28,7 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket func testUnixAddr() (string, error) { - f, err := ioutil.TempFile("", "tendermint-privval-test-*") + f, err := os.CreateTemp("", "tendermint-privval-test-*") if err != nil { return "", err } diff --git a/proto/README.md b/proto/README.md new file mode 100644 index 0000000000..ebecd82d14 --- /dev/null +++ b/proto/README.md @@ -0,0 +1,23 @@ +# Protocol Buffers + +This section defines the types and messages shared across implementations. The definitions of the data structures are located in the [core/data_structures](../spec/core/data_structures.md) for the core data types and ABCI definitions are located in the [ABCI](../spec/abci/README.md) section. + +## Process of Updates + +The `.proto` files within this section are core to the protocol and updates must be treated as such. + +### Steps + +1. Make an issue with the proposed change. + - Within the issue, members from both the Tendermint-go and Tendermint-rs teams will leave comments. If there is not consensus on the change an [RFC](../rfc/README.md) may be requested. + 1a. Submission of an RFC as a pull request should be made to facilitate further discussion. + 1b. Merge the RFC. +2. Make the necessary changes to the `.proto` file(s), [core data structures](../spec/core/data_structures.md) and/or [ABCI protocol](../spec/abci/apps.md). +3. Open issues within Tendermint-go and Tendermint-rs repos. This is used to notify the teams that a change occurred in the spec. + 1.
Tag the issue with a spec version label. This will notify the team the changed has been made on master but has not entered a release. + +### Versioning + +The spec repo aims to be versioned. Once it has been versioned, updates to the protobuf files will live on master. After a certain amount of time, decided on by Tendermint-go and Tendermint-rs team leads, a release will be made on the spec repo. The spec may contain minor releases as well, depending on the implementation these changes may lead to a breaking change. If so, the implementation team should open an issue within the spec repo requiring a major release of the spec. + +If the steps above were followed each implementation should have issues tagged with a spec change label. Once all issues have been completed the team should signify their readiness for release. diff --git a/proto/tendermint/blockchain/message.go b/proto/tendermint/blockchain/message.go new file mode 100644 index 0000000000..1d11e41764 --- /dev/null +++ b/proto/tendermint/blockchain/message.go @@ -0,0 +1,73 @@ +package blockchain + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &StatusRequest{} +var _ p2p.Wrapper = &StatusResponse{} +var _ p2p.Wrapper = &NoBlockResponse{} +var _ p2p.Wrapper = &BlockResponse{} +var _ p2p.Wrapper = &BlockRequest{} + +const ( + BlockResponseMessagePrefixSize = 4 + BlockResponseMessageFieldKeySize = 1 +) + +func (m *BlockRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockRequest{BlockRequest: m} + return bm +} + +func (m *BlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_BlockResponse{BlockResponse: m} + return bm +} + +func (m *NoBlockResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_NoBlockResponse{NoBlockResponse: m} + return bm +} + +func (m *StatusRequest) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_StatusRequest{StatusRequest: m} + return bm 
+} + +func (m *StatusResponse) Wrap() proto.Message { + bm := &Message{} + bm.Sum = &Message_StatusResponse{StatusResponse: m} + return bm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped blockchain +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_BlockRequest: + return m.GetBlockRequest(), nil + + case *Message_BlockResponse: + return m.GetBlockResponse(), nil + + case *Message_NoBlockResponse: + return m.GetNoBlockResponse(), nil + + case *Message_StatusRequest: + return m.GetStatusRequest(), nil + + case *Message_StatusResponse: + return m.GetStatusResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/consensus/message.go b/proto/tendermint/consensus/message.go new file mode 100644 index 0000000000..51ac3b48f5 --- /dev/null +++ b/proto/tendermint/consensus/message.go @@ -0,0 +1,109 @@ +package consensus + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &VoteSetBits{} +var _ p2p.Wrapper = &VoteSetMaj23{} +var _ p2p.Wrapper = &Vote{} +var _ p2p.Wrapper = &ProposalPOL{} +var _ p2p.Wrapper = &Proposal{} +var _ p2p.Wrapper = &NewValidBlock{} +var _ p2p.Wrapper = &NewRoundStep{} +var _ p2p.Wrapper = &HasVote{} +var _ p2p.Wrapper = &BlockPart{} + +func (m *VoteSetBits) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetBits{VoteSetBits: m} + return cm + +} + +func (m *VoteSetMaj23) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_VoteSetMaj23{VoteSetMaj23: m} + return cm +} + +func (m *HasVote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_HasVote{HasVote: m} + return cm +} + +func (m *Vote) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Vote{Vote: m} + return cm +} + +func (m *BlockPart) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_BlockPart{BlockPart: m} + return 
cm +} + +func (m *ProposalPOL) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_ProposalPol{ProposalPol: m} + return cm +} + +func (m *Proposal) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_Proposal{Proposal: m} + return cm +} + +func (m *NewValidBlock) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewValidBlock{NewValidBlock: m} + return cm +} + +func (m *NewRoundStep) Wrap() proto.Message { + cm := &Message{} + cm.Sum = &Message_NewRoundStep{NewRoundStep: m} + return cm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped consensus +// proto message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_NewRoundStep: + return m.GetNewRoundStep(), nil + + case *Message_NewValidBlock: + return m.GetNewValidBlock(), nil + + case *Message_Proposal: + return m.GetProposal(), nil + + case *Message_ProposalPol: + return m.GetProposalPol(), nil + + case *Message_BlockPart: + return m.GetBlockPart(), nil + + case *Message_Vote: + return m.GetVote(), nil + + case *Message_HasVote: + return m.GetHasVote(), nil + + case *Message_VoteSetMaj23: + return m.GetVoteSetMaj23(), nil + + case *Message_VoteSetBits: + return m.GetVoteSetBits(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go new file mode 100644 index 0000000000..e2e6c42a7d --- /dev/null +++ b/proto/tendermint/mempool/message.go @@ -0,0 +1,30 @@ +package mempool + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &Txs{} +var _ p2p.Unwrapper = &Message{} + +// Wrap implements the p2p Wrapper interface and wraps a mempool message. +func (m *Txs) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_Txs{Txs: m} + return mm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool +// message. 
+func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_Txs: + return m.GetTxs(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go new file mode 100644 index 0000000000..0fc2e2b108 --- /dev/null +++ b/proto/tendermint/p2p/pex.go @@ -0,0 +1,32 @@ +package p2p + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" +) + +func (m *PexAddrs) Wrap() proto.Message { + pm := &Message{} + pm.Sum = &Message_PexAddrs{PexAddrs: m} + return pm +} + +func (m *PexRequest) Wrap() proto.Message { + pm := &Message{} + pm.Sum = &Message_PexRequest{PexRequest: m} + return pm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped PEX +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_PexRequest: + return msg.PexRequest, nil + case *Message_PexAddrs: + return msg.PexAddrs, nil + default: + return nil, fmt.Errorf("unknown pex message: %T", msg) + } +} diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada4..6b57ca1ae8 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -199,6 +199,58 @@ func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { return 0 } +type ABCIResponsesInfo struct { + AbciResponses *ABCIResponses `protobuf:"bytes,1,opt,name=abci_responses,json=abciResponses,proto3" json:"abci_responses,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ABCIResponsesInfo) Reset() { *m = ABCIResponsesInfo{} } +func (m *ABCIResponsesInfo) String() string { return proto.CompactTextString(m) } +func (*ABCIResponsesInfo) ProtoMessage() {} +func (*ABCIResponsesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{3} +} +func (m *ABCIResponsesInfo) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ABCIResponsesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponsesInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponsesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponsesInfo.Merge(m, src) +} +func (m *ABCIResponsesInfo) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponsesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponsesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponsesInfo proto.InternalMessageInfo + +func (m *ABCIResponsesInfo) GetAbciResponses() *ABCIResponses { + if m != nil { + return m.AbciResponses + } + return nil +} + +func (m *ABCIResponsesInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + type Version struct { Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` @@ -208,7 +260,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{3} + return fileDescriptor_ccfacf933f22bf93, []int{4} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -284,7 +336,7 @@ func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_ccfacf933f22bf93, []int{4} + return fileDescriptor_ccfacf933f22bf93, []int{5} } func (m *State) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,6 +467,7 @@ func init() { proto.RegisterType((*ABCIResponses)(nil), 
"tendermint.state.ABCIResponses") proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") + proto.RegisterType((*ABCIResponsesInfo)(nil), "tendermint.state.ABCIResponsesInfo") proto.RegisterType((*Version)(nil), "tendermint.state.Version") proto.RegisterType((*State)(nil), "tendermint.state.State") } @@ -422,55 +475,58 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 763 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34, - 0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03, - 0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff, - 0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10, - 0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c, - 0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08, - 0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d, - 0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84, - 0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9, - 0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70, - 0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43, - 0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0, - 0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 
0x6d, 0x48, 0xc5, - 0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80, - 0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd, - 0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb, - 0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba, - 0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89, - 0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6, - 0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e, - 0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f, - 0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c, - 0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69, - 0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41, - 0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8, - 0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7, - 0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6, - 0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71, - 0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07, - 0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89, - 0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc, - 0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56, - 0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9, - 0xf6, 0x1d, 0x78, 
0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9, - 0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35, - 0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57, - 0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83, - 0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd, - 0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f, - 0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72, - 0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8, - 0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07, - 0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab, - 0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e, - 0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4, - 0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00, + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44, + 0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1, + 0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c, + 0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0, + 0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d, + 0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46, + 0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 
0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93, + 0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c, + 0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8, + 0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88, + 0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41, + 0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57, + 0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84, + 0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62, + 0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82, + 0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0, + 0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42, + 0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac, + 0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29, + 0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65, + 0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2, + 0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85, + 0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd, + 0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e, + 0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9, + 0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6, + 0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 
0x21, 0x75, 0xa8, 0xfc, + 0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85, + 0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd, + 0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8, + 0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7, + 0xd0, 0x5a, 0x44, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2, + 0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b, + 0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9, + 0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0, + 0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30, + 0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f, + 0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b, + 0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12, + 0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8, + 0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3, + 0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2, + 0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1, + 0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42, + 0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93, + 0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a, + 0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf, + 0x5b, 0xdb, 
0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57, + 0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba, + 0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9, + 0x2e, 0xa2, 0x07, 0x00, 0x00, } func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { @@ -612,6 +668,46 @@ func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ABCIResponsesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponsesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponsesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.AbciResponses != nil { + { + size, err := m.AbciResponses.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Version) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -747,12 +843,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err10 != nil { - return 0, err10 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err11 != nil { + return 0, err11 } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) + i -= 
n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) i-- dAtA[i] = 0x2a { @@ -854,6 +950,22 @@ func (m *ConsensusParamsInfo) Size() (n int) { return n } +func (m *ABCIResponsesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AbciResponses != nil { + l = m.AbciResponses.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + func (m *Version) Size() (n int) { if m == nil { return 0 @@ -1291,6 +1403,111 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *ABCIResponsesInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponsesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponsesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciResponses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AbciResponses == nil { + m.AbciResponses = &ABCIResponses{} + } + if err := m.AbciResponses.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Version) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 919da91e52..f3fdc0ef39 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -32,6 +32,11 @@ message ConsensusParamsInfo { int64 last_height_changed = 2; } +message ABCIResponsesInfo { + ABCIResponses abci_responses = 1; + int64 height = 2; +} + message Version { tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false]; string software = 2; diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go new file mode 100644 index 0000000000..66f583674f --- /dev/null +++ b/proto/tendermint/statesync/message.go @@ -0,0 +1,58 @@ +package statesync + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/p2p" +) + +var _ p2p.Wrapper = &ChunkRequest{} +var _ p2p.Wrapper = &ChunkResponse{} +var _ p2p.Wrapper = &SnapshotsRequest{} +var _ p2p.Wrapper = &SnapshotsResponse{} + +func (m *SnapshotsResponse) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_SnapshotsResponse{SnapshotsResponse: m} + return sm +} + +func (m *SnapshotsRequest) Wrap() proto.Message { + sm := 
&Message{} + sm.Sum = &Message_SnapshotsRequest{SnapshotsRequest: m} + return sm +} + +func (m *ChunkResponse) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_ChunkResponse{ChunkResponse: m} + return sm +} + +func (m *ChunkRequest) Wrap() proto.Message { + sm := &Message{} + sm.Sum = &Message_ChunkRequest{ChunkRequest: m} + return sm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped state sync +// proto message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_ChunkRequest: + return m.GetChunkRequest(), nil + + case *Message_ChunkResponse: + return m.GetChunkResponse(), nil + + case *Message_SnapshotsRequest: + return m.GetSnapshotsRequest(), nil + + case *Message_SnapshotsResponse: + return m.GetSnapshotsResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/proxy/app_conn.go b/proxy/app_conn.go index d302b5affa..f4bb888b72 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -5,7 +5,7 @@ import ( "github.com/tendermint/tendermint/abci/types" ) -//go:generate mockery --case underscore --name AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot +//go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level diff --git a/proxy/client.go b/proxy/client.go index e78e827abe..68498d574c 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -11,6 +11,8 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/app" ) +//go:generate ../scripts/mockery_generate.sh ClientCreator + // ClientCreator creates new ABCI clients. type ClientCreator interface { // NewABCIClient returns a new ABCI client. 
diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index 7803a31893..eccf74fc31 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index e33bc4b3ac..05e23dd433 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 5cbd2dfd49..544ab765ef 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index 6f2c81bb47..e3d5cb6cda 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go index e4b924ab52..eced0aeff6 100644 --- a/proxy/mocks/client_creator.go +++ b/proxy/mocks/client_creator.go @@ -1,10 +1,9 @@ -// Code generated by mockery v1.1.1. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks import ( mock "github.com/stretchr/testify/mock" - abcicli "github.com/tendermint/tendermint/abci/client" ) @@ -35,3 +34,18 @@ func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { return r0, r1 } + +type mockConstructorTestingTNewClientCreator interface { + mock.TestingT + Cleanup(func()) +} + +// NewClientCreator creates a new instance of ClientCreator. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewClientCreator(t mockConstructorTestingTNewClientCreator) *ClientCreator { + mock := &ClientCreator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/proxy/multi_app_conn_test.go b/proxy/multi_app_conn_test.go index 34b0d0830e..a7e77148e1 100644 --- a/proxy/multi_app_conn_test.go +++ b/proxy/multi_app_conn_test.go @@ -53,7 +53,7 @@ func TestAppConns_Failure(t *testing.T) { }() quitCh := make(chan struct{}) - var recvQuitCh <-chan struct{} // nolint:gosimple + var recvQuitCh <-chan struct{} //nolint:gosimple recvQuitCh = quitCh clientCreatorMock := &mocks.ClientCreator{} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 9a64d97f16..a317d65e14 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -39,24 +39,24 @@ the example for more details. Example: - c, err := New("http://192.168.1.10:26657", "/websocket") - if err != nil { - // handle error - } + c, err := New("http://192.168.1.10:26657", "/websocket") + if err != nil { + // handle error + } - // call Start/Stop if you're subscribing to events - err = c.Start() - if err != nil { - // handle error - } - defer c.Stop() + // call Start/Stop if you're subscribing to events + err = c.Start() + if err != nil { + // handle error + } + defer c.Stop() - res, err := c.Status() - if err != nil { - // handle error - } + res, err := c.Status() + if err != nil { + // handle error + } - // handle result + // handle result */ type HTTP struct { remote string diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index c97311c810..4c0534868f 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -1,7 +1,6 @@ package client_test import ( - "io/ioutil" "os" "testing" @@ -14,7 +13,7 @@ var node *nm.Node func TestMain(m *testing.M) { // start a tendermint node (and kvstore) in the background to test against 
- dir, err := ioutil.TempDir("/tmp", "rpc-client-test") + dir, err := os.MkdirTemp("/tmp", "rpc-client-test") if err != nil { panic(err) } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 34aad0f3de..cfd1fef26a 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -9,7 +9,7 @@ import ( ) // ABCIQuery queries the application for some information. -// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query +// More: https://docs.tendermint.com/v0.34/rpc/#/ABCI/abci_query func ABCIQuery( ctx *rpctypes.Context, path string, @@ -31,7 +31,7 @@ func ABCIQuery( } // ABCIInfo gets some info about the application. -// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info +// More: https://docs.tendermint.com/v0.34/rpc/#/ABCI/abci_info func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := GetEnvironment().ProxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index e389d3f082..f7bc4bbaf5 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -18,7 +18,7 @@ import ( // BlockchainInfo gets block headers for minHeight <= height <= maxHeight. // Block headers are returned in descending order (highest first). -// More: https://docs.tendermint.com/master/rpc/#/Info/blockchain +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/blockchain func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { // maximum 20 block metas const limit int64 = 20 @@ -81,7 +81,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. 
-// More: https://docs.tendermint.com/master/rpc/#/Info/block +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/block func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { height, err := getHeight(GetEnvironment().BlockStore.Height(), heightPtr) if err != nil { @@ -97,7 +97,7 @@ func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) } // BlockByHash gets block by hash. -// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/block_by_hash func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { env := GetEnvironment() block := env.BlockStore.LoadBlockByHash(hash) @@ -111,7 +111,7 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. -// More: https://docs.tendermint.com/master/rpc/#/Info/commit +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/commit func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { env := GetEnvironment() height, err := getHeight(env.BlockStore.Height(), heightPtr) @@ -207,11 +207,12 @@ func hashDataRoots(blocks []*ctypes.ResultBlock) []byte { // BlockResults gets ABCIResults at a given height. // If no height is provided, it will fetch results for the latest block. +// When DiscardABCIResponses is enabled, an error will be returned. // // Results are for the height of the block containing the txs. 
// Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] -// More: https://docs.tendermint.com/master/rpc/#/Info/block_results +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/block_results func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { env := GetEnvironment() height, err := getHeight(env.BlockStore.Height(), heightPtr) diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 19082f2cbc..9735cd3b78 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -85,12 +85,14 @@ func TestBlockResults(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, } - env := &Environment{} - env.StateStore = sm.NewStore(dbm.NewMemDB()) - err := env.StateStore.SaveABCIResponses(100, results) + globalEnv = &Environment{} + globalEnv.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) + err := globalEnv.StateStore.SaveABCIResponses(100, results) require.NoError(t, err) - env.BlockStore = mockBlockStore{height: 100} - SetEnvironment(env) + globalEnv.BlockStore = mockBlockStore{height: 100} + SetEnvironment(globalEnv) testCases := []struct { height int64 diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 29797ac9c2..4d66d8d826 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -14,7 +14,7 @@ import ( // validators are sorted by their voting power - this is the canonical order // for the validators in the set as used in computing their Merkle root. // -// More: https://docs.tendermint.com/master/rpc/#/Info/validators +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/validators func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. 
height, err := getHeight(latestUncommittedHeight(), heightPtr) @@ -47,7 +47,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in // DumpConsensusState dumps consensus state. // UNSTABLE -// More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/dump_consensus_state func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. peers := GetEnvironment().P2PPeers.Peers().List() @@ -80,7 +80,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState // ConsensusState returns a concise summary of the consensus state. // UNSTABLE -// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/consensus_state func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := GetEnvironment().ConsensusState.GetRoundStateSimpleJSON() @@ -89,7 +89,7 @@ func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. -// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/consensus_params func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. diff --git a/rpc/core/doc.go b/rpc/core/doc.go index 77ace4e2cf..495aed0cf8 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -2,7 +2,7 @@ Package core defines the Tendermint RPC endpoints. Tendermint ships with its own JSONRPC library - -https://github.com/tendermint/tendermint/tree/master/rpc/jsonrpc. 
+https://github.com/tendermint/tendermint/tree/v0.34.x/rpc/jsonrpc. ## Get the list diff --git a/rpc/core/events.go b/rpc/core/events.go index 91c7e62a44..b457cdc8a9 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -19,7 +19,7 @@ const ( ) // Subscribe for events via WebSocket. -// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe +// More: https://docs.tendermint.com/v0.34/rpc/#/Websocket/subscribe func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() env := GetEnvironment() @@ -106,7 +106,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er } // Unsubscribe from events via WebSocket. -// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe +// More: https://docs.tendermint.com/v0.34/rpc/#/Websocket/unsubscribe func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env := GetEnvironment() @@ -123,7 +123,7 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe } // UnsubscribeAll from all events via WebSocket. -// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all +// More: https://docs.tendermint.com/v0.34/rpc/#/Websocket/unsubscribe_all func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env := GetEnvironment() diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 373e7c442d..ac5b8f7a3a 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -10,7 +10,7 @@ import ( ) // BroadcastEvidence broadcasts evidence of the misbehavior. 
-// More: https://docs.tendermint.com/master/rpc/#/Info/broadcast_evidence +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/broadcast_evidence func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { return nil, errors.New("no evidence was provided") diff --git a/rpc/core/health.go b/rpc/core/health.go index 97ea56865c..1a11544a8f 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -7,7 +7,7 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. -// More: https://docs.tendermint.com/master/rpc/#/Info/health +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/health func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 276fa4f53e..a21fb189cb 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -18,7 +18,7 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +// More: https://docs.tendermint.com/v0.34/rpc/#/Tx/broadcast_tx_async func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { err := GetEnvironment().Mempool.CheckTx(tx, nil, mempl.TxInfo{}) @@ -30,7 +30,7 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca // BroadcastTxSync returns with the response from CheckTx. Does not wait for // DeliverTx result. 
-// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync +// More: https://docs.tendermint.com/v0.34/rpc/#/Tx/broadcast_tx_sync func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := GetEnvironment().Mempool.CheckTx(tx, func(res *abci.Response) { @@ -60,7 +60,7 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas } // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit +// More: https://docs.tendermint.com/v0.34/rpc/#/Tx/broadcast_tx_commit func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := ctx.RemoteAddr() env := GetEnvironment() @@ -150,7 +150,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. -// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/unconfirmed_txs func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit := validatePerPage(limitPtr) @@ -165,7 +165,7 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfi } // NumUnconfirmedTxs gets number of unconfirmed transactions. -// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/num_unconfirmed_txs func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { env := GetEnvironment() return &ctypes.ResultUnconfirmedTxs{ @@ -176,7 +176,7 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err // CheckTx checks the transaction without executing it. 
The transaction won't // be added to the mempool either. -// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx +// More: https://docs.tendermint.com/v0.34/rpc/#/Tx/check_tx func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { res, err := GetEnvironment().ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { diff --git a/rpc/core/net.go b/rpc/core/net.go index fd80d63877..a984407229 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -11,7 +11,7 @@ import ( ) // NetInfo returns network info. -// More: https://docs.tendermint.com/master/rpc/#/Info/net_info +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/net_info func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { env := GetEnvironment() peersList := env.P2PPeers.Peers().List() @@ -95,7 +95,7 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, uncondit } // Genesis returns genesis file. -// More: https://docs.tendermint.com/master/rpc/#/Info/genesis +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/genesis func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { env := GetEnvironment() if len(env.genChunks) > 1 { diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 4c3d2e6215..9907310f16 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -17,22 +17,22 @@ var Routes = map[string]*rpc.RPCFunc{ "health": rpc.NewRPCFunc(Health, ""), "status": rpc.NewRPCFunc(Status, ""), "net_info": rpc.NewRPCFunc(NetInfo, ""), - "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"), - "genesis": rpc.NewRPCFunc(Genesis, ""), - "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk"), - "block": rpc.NewRPCFunc(Block, "height"), - "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash"), - "block_results": rpc.NewRPCFunc(BlockResults, "height"), - "commit": rpc.NewRPCFunc(Commit, "height"), + "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), + "genesis": 
rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()), + "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()), + "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), + "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()), + "block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")), + "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), "data_commitment": rpc.NewRPCFunc(DataCommitment, "beginBlock,endBlock"), - "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), - "tx": rpc.NewRPCFunc(Tx, "hash,prove"), + "check_tx": rpc.NewRPCFunc(CheckTx, "tx", rpc.Cacheable()), + "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), "block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"), - "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), + "validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), - "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), + "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), @@ -43,7 +43,7 @@ var Routes = map[string]*rpc.RPCFunc{ // abci API "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), - "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), + "abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()), // evidence API "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"), diff --git a/rpc/core/status.go b/rpc/core/status.go index 37a07b0238..80c7f4ec91 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -12,7 +12,7 @@ import ( // Status returns Tendermint status including node info, pubkey, latest block // hash, app 
hash, block height and time. -// More: https://docs.tendermint.com/master/rpc/#/Info/status +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/status func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 9016916db2..7ffe435bbb 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -62,7 +62,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error // list of transactions (maximum ?per_page entries) and the total count. // NOTE: proveTx isn't respected but is left in the function signature to // conform to the endpoint exposed by Tendermint -// More: https://docs.tendermint.com/master/rpc/#/Info/tx_search +// More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search func TxSearch( ctx *rpctypes.Context, query string, diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index fbfe073e01..8fbf9fa4ff 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -217,7 +217,7 @@ func (c *Client) Call( defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("failed to read response body: %w", err) } @@ -265,7 +265,7 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque defer httpResponse.Body.Close() - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 4b82ff1eb4..03134dff58 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ 
b/rpc/jsonrpc/client/http_json_client_test.go @@ -1,7 +1,7 @@ package client import ( - "io/ioutil" + "io" "log" "net/http" "net/http/httptest" @@ -21,7 +21,7 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) { defer tsTLS.Close() // This silences a TLS handshake error, caused by the dialer just immediately // disconnecting, which we can just ignore. - tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0) for _, testURL := range []string{ts.URL, tsTLS.URL} { u, err := newParsedURL(testURL) diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go index 3f376ddb0f..e5d5a5c587 100644 --- a/rpc/jsonrpc/client/http_uri_client.go +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -3,7 +3,7 @@ package client import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -52,8 +52,8 @@ func NewURI(remote string) (*URIClient, error) { // Call issues a POST form HTTP request. func (c *URIClient) Call(ctx context.Context, method string, - params map[string]interface{}, result interface{}) (interface{}, error) { - + params map[string]interface{}, result interface{}, +) (interface{}, error) { values, err := argsToURLValues(params) if err != nil { return nil, fmt.Errorf("failed to encode params: %w", err) @@ -76,7 +76,7 @@ func (c *URIClient) Call(ctx context.Context, method string, } defer resp.Body.Close() - responseBytes, err := ioutil.ReadAll(resp.Body) + responseBytes, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("read response body: %w", err) } diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index 44bf5f0987..5a4839b045 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -30,7 +30,7 @@ const ( // the remote server. // // WSClient is safe for concurrent use by multiple goroutines. 
-type WSClient struct { // nolint: maligned +type WSClient struct { //nolint: maligned conn *websocket.Conn Address string // IP:PORT or /path/to/socket @@ -89,8 +89,10 @@ func NewWS(remoteAddr, endpoint string, options ...func(*WSClient)) (*WSClient, if err != nil { return nil, err } - // default to ws protocol, unless wss is explicitly specified - if parsedURL.Scheme != protoWSS { + // default to ws protocol, unless wss or https is specified + if parsedURL.Scheme == protoHTTPS { + parsedURL.Scheme = protoWSS + } else if parsedURL.Scheme != protoWSS { parsedURL.Scheme = protoWS } @@ -265,7 +267,7 @@ func (c *WSClient) dial() error { Proxy: http.ProxyFromEnvironment, } rHeader := http.Header{} - conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) // nolint:bodyclose + conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) //nolint:bodyclose if err != nil { return err } diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index 2e6403806c..b4ac8f83b9 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -72,7 +72,7 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) { defer s.Close() c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + defer c.Stop() //nolint:errcheck // ignore for tests wg.Add(1) go callWgDoneOnResult(t, c, &wg) @@ -104,7 +104,7 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { s := httptest.NewServer(h) c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + defer c.Stop() //nolint:errcheck // ignore for tests wg.Add(2) go callWgDoneOnResult(t, c, &wg) @@ -132,7 +132,7 @@ func TestWSClientReconnectFailure(t *testing.T) { s := httptest.NewServer(h) c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + defer c.Stop() //nolint:errcheck // 
ignore for tests go func() { for { @@ -181,7 +181,7 @@ func TestNotBlockingOnStop(t *testing.T) { timeout := 2 * time.Second s := httptest.NewServer(&myHandler{}) c := startClient(t, "//"+s.Listener.Addr().String()) - c.Call(context.Background(), "a", make(map[string]interface{})) // nolint:errcheck // ignore for tests + c.Call(context.Background(), "a", make(map[string]interface{})) //nolint:errcheck // ignore for tests // Let the readRoutine get around to blocking time.Sleep(time.Second) passCh := make(chan struct{}) diff --git a/rpc/jsonrpc/doc.go b/rpc/jsonrpc/doc.go index b014fe38dd..d6dcae0db7 100644 --- a/rpc/jsonrpc/doc.go +++ b/rpc/jsonrpc/doc.go @@ -1,7 +1,7 @@ // HTTP RPC server supporting calls via uri params, jsonrpc over HTTP, and jsonrpc over // websockets // -// Client Requests +// # Client Requests // // Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. // @@ -9,12 +9,12 @@ // // As a GET request, it would have URI encoded parameters, and look like: // -// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' +// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' // // Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. // This should also work: // -// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 +// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 // // A GET request to `/` returns a list of available endpoints. // For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. @@ -23,20 +23,19 @@ // // As a POST request, we use JSONRPC. 
For instance, the same request would have this as the body: // -// { -// "jsonrpc": "2.0", -// "id": "anything", -// "method": "hello_world", -// "params": { -// "name": "my_world", -// "num": 5 -// } -// } +// { +// "jsonrpc": "2.0", +// "id": "anything", +// "method": "hello_world", +// "params": { +// "name": "my_world", +// "num": 5 +// } +// } // // With the above saved in file `data.json`, we can make the request with // -// curl --data @data.json http://localhost:8008 -// +// curl --data @data.json http://localhost:8008 // // WebSocket (JSONRPC) // @@ -44,42 +43,42 @@ // Websocket connections are available at their own endpoint, typically `/websocket`, // though this is configurable when starting the server. // -// Server Definition +// # Server Definition // // Define some types and routes: // -// type ResultStatus struct { -// Value string -// } +// type ResultStatus struct { +// Value string +// } // // Define some routes // -// var Routes = map[string]*rpcserver.RPCFunc{ -// "status": rpcserver.NewRPCFunc(Status, "arg"), -// } +// var Routes = map[string]*rpcserver.RPCFunc{ +// "status": rpcserver.NewRPCFunc(Status, "arg"), +// } // // An rpc function: // -// func Status(v string) (*ResultStatus, error) { -// return &ResultStatus{v}, nil -// } +// func Status(v string) (*ResultStatus, error) { +// return &ResultStatus{v}, nil +// } // // Now start the server: // -// mux := http.NewServeMux() -// rpcserver.RegisterRPCFuncs(mux, Routes) -// wm := rpcserver.NewWebsocketManager(Routes) -// mux.HandleFunc("/websocket", wm.WebsocketHandler) -// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{}) -// if err != nil { panic(err) } -// go rpcserver.Serve(listener, mux, logger) +// mux := http.NewServeMux() +// rpcserver.RegisterRPCFuncs(mux, Routes) +// wm := rpcserver.NewWebsocketManager(Routes) +// mux.HandleFunc("/websocket", wm.WebsocketHandler) +// logger := 
log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{}) +// if err != nil { panic(err) } +// go rpcserver.Serve(listener, mux, logger) // // Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) // Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. // Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. // -// Examples +// # Examples // -// - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) +// - [Tendermint](https://github.com/tendermint/tendermint/blob/v0.34.x/rpc/core/routes.go) package jsonrpc diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 84956bae95..c322dfcea9 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -7,8 +7,10 @@ import ( "encoding/json" "fmt" "net/http" + "net/url" "os" "os/exec" + "strings" "testing" "time" @@ -37,9 +39,7 @@ const ( testVal = "acbd" ) -var ( - ctx = context.Background() -) +var ctx = context.Background() type ResultEcho struct { Value string `json:"value"` @@ -57,6 +57,10 @@ type ResultEchoDataBytes struct { Value tmbytes.HexBytes `json:"value"` } +type ResultEchoWithDefault struct { + Value int `json:"value"` +} + // Define some routes var Routes = map[string]*server.RPCFunc{ "echo": server.NewRPCFunc(EchoResult, "arg"), @@ -64,6 +68,7 @@ var Routes = map[string]*server.RPCFunc{ "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"), "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"), "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), + "echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")), } func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { @@ -86,6 +91,14 @@ func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDat return &ResultEchoDataBytes{v}, nil } +func 
EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) { + val := -1 + if v != nil { + val = *v + } + return &ResultEchoWithDefault{val}, nil +} + func TestMain(m *testing.M) { setup() code := m.Run() @@ -199,26 +212,47 @@ func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.Hex return result.Value, nil } +func echoWithDefaultViaHTTP(cl client.Caller, v *int) (int, error) { + params := map[string]interface{}{} + if v != nil { + params["arg"] = *v + } + result := new(ResultEchoWithDefault) + if _, err := cl.Call(ctx, "echo_default", params, result); err != nil { + return 0, err + } + return result.Value, nil +} + func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { val := testVal got, err := echoViaHTTP(cl, val) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got, val) val2 := randBytes(t) got2, err := echoBytesViaHTTP(cl, val2) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got2, val2) val3 := tmbytes.HexBytes(randBytes(t)) got3, err := echoDataBytesViaHTTP(cl, val3) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got3, val3) val4 := tmrand.Intn(10000) got4, err := echoIntViaHTTP(cl, val4) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, got4, val4) + + got5, err := echoWithDefaultViaHTTP(cl, nil) + require.NoError(t, err) + assert.Equal(t, got5, -1) + + val6 := tmrand.Intn(10000) + got6, err := echoWithDefaultViaHTTP(cl, &val6) + require.NoError(t, err) + assert.Equal(t, got6, val6) } func echoViaWS(cl *client.WSClient, val string) (string, error) { @@ -233,7 +267,6 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) { msg := <-cl.ResponsesCh if msg.Error != nil { return "", err - } result := new(ResultEcho) err = json.Unmarshal(msg.Result, result) @@ -255,7 +288,6 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { msg := <-cl.ResponsesCh if msg.Error != nil { return []byte{}, msg.Error - } result := 
new(ResultEchoBytes) err = json.Unmarshal(msg.Result, result) @@ -399,6 +431,74 @@ func TestWSClientPingPong(t *testing.T) { time.Sleep(6 * time.Second) } +func TestJSONRPCCaching(t *testing.T) { + httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1) + cl, err := client.DefaultHTTPClient(httpAddr) + require.NoError(t, err) + + // Not supplying the arg should result in not caching + params := make(map[string]interface{}) + req, err := types.MapToRequest(types.JSONRPCIntID(1000), "echo_default", params) + require.NoError(t, err) + + res1, err := rawJSONRPCRequest(t, cl, httpAddr, req) + defer func() { _ = res1.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "", res1.Header.Get("Cache-control")) + + // Supplying the arg should result in caching + params["arg"] = tmrand.Intn(10000) + req, err = types.MapToRequest(types.JSONRPCIntID(1001), "echo_default", params) + require.NoError(t, err) + + res2, err := rawJSONRPCRequest(t, cl, httpAddr, req) + defer func() { _ = res2.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control")) +} + +func rawJSONRPCRequest(t *testing.T, cl *http.Client, url string, req interface{}) (*http.Response, error) { + reqBytes, err := json.Marshal(req) + require.NoError(t, err) + + reqBuf := bytes.NewBuffer(reqBytes) + httpReq, err := http.NewRequest(http.MethodPost, url, reqBuf) + require.NoError(t, err) + + httpReq.Header.Set("Content-type", "application/json") + + return cl.Do(httpReq) +} + +func TestURICaching(t *testing.T) { + httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1) + cl, err := client.DefaultHTTPClient(httpAddr) + require.NoError(t, err) + + // Not supplying the arg should result in not caching + args := url.Values{} + res1, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args) + defer func() { _ = res1.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "", res1.Header.Get("Cache-control")) + + // Supplying the arg should 
result in caching + args.Set("arg", fmt.Sprintf("%d", tmrand.Intn(10000))) + res2, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args) + defer func() { _ = res2.Body.Close() }() + require.NoError(t, err) + assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control")) +} + +func rawURIRequest(t *testing.T, cl *http.Client, url string, args url.Values) (*http.Response, error) { + req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(args.Encode())) + require.NoError(t, err) + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return cl.Do(req) +} + func randBytes(t *testing.T) []byte { n := tmrand.Intn(10) + 2 buf := make([]byte, n) diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 28dfcbf8a9..db162f17ab 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "reflect" "sort" @@ -19,7 +19,7 @@ import ( // jsonrpc calls grab the given method's function info and runs reflect.Call func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { res := types.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err), @@ -55,6 +55,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han requests = []types.RPCRequest{request} } + // Set the default response cache to true unless + // 1. Any RPC request error. + // 2. Any RPC request doesn't allow to be cached. + // 3. Any RPC request has the height argument and the value is 0 (the default). 
+ cache := true for _, request := range requests { request := request @@ -72,11 +77,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han responses, types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), ) + cache = false continue } rpcFunc, ok := funcMap[request.Method] - if !ok || rpcFunc.ws { + if !ok || (rpcFunc.ws) { responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + cache = false continue } ctx := &types.Context{JSONReq: &request, HTTPReq: r} @@ -88,11 +95,16 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han responses, types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ) + cache = false continue } args = append(args, fnArgs...) } + if cache && !rpcFunc.cacheableWithArgs(args) { + cache = false + } + returns := rpcFunc.f.Call(args) result, err := unreflectResult(returns) if err != nil { @@ -103,7 +115,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han } if len(responses) > 0 { - if wErr := WriteRPCResponseHTTP(w, responses...); wErr != nil { + var wErr error + if cache { + wErr = WriteCacheableRPCResponseHTTP(w, responses...) + } else { + wErr = WriteRPCResponseHTTP(w, responses...) 
+ } + if wErr != nil { logger.Error("failed to write responses", "res", responses, "err", wErr) } } @@ -128,7 +146,6 @@ func mapParamsToArgs( params map[string]json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.argNames)) for i, argName := range rpcFunc.argNames { argType := rpcFunc.args[i+argsOffset] @@ -153,7 +170,6 @@ func arrayParamsToArgs( params []json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { - if len(rpcFunc.argNames) != len(params) { return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) @@ -176,8 +192,9 @@ func arrayParamsToArgs( // array. // // Example: -// rpcFunc.args = [rpctypes.Context string] -// rpcFunc.argNames = ["arg"] +// +// rpcFunc.args = [rpctypes.Context string] +// rpcFunc.argNames = ["arg"] func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { const argsOffset = 1 @@ -237,5 +254,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st buf.WriteString("") w.Header().Set("Content-Type", "text/html") w.WriteHeader(200) - w.Write(buf.Bytes()) // nolint: errcheck + w.Write(buf.Bytes()) //nolint: errcheck } diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index a5c14e59a2..44caedd3e1 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -3,7 +3,7 @@ package server import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -18,7 +18,8 @@ import ( func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return 
"block", nil }, "height", Cacheable("height")), } mux := http.NewServeMux() buf := new(bytes.Buffer) @@ -66,7 +67,7 @@ func TestRPCParams(t *testing.T) { defer res.Body.Close() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -113,7 +114,7 @@ func TestJSONRPCID(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -143,7 +144,7 @@ func TestRPCNotification(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) res.Body.Close() require.Nil(t, err, "reading from the body should not give back an error") require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") @@ -179,7 +180,7 @@ func TestRPCNotificationInBatch(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -227,3 +228,52 @@ func TestUnknownRPCPath(t *testing.T) { require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404") res.Body.Close() } + +func TestRPCResponseCache(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`) + req, _ := http.NewRequest("Get", "http://localhost/", body) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) 
+ res := rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-control")) + + _, err := io.ReadAll(res.Body) + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height. + body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height, but as empty set of parameters. 
+ body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") +} diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index f653e6cc6d..617e1bbdc6 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -53,10 +53,11 @@ func DefaultConfig() *Config { func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { logger.Info("serve", "msg", log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr())) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.Serve(listener) logger.Info("RPC HTTP server stopped", "err", err) @@ -78,10 +79,11 @@ func ServeTLS( logger.Info("serve tls", "msg", log.NewLazySprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, + Handler: RecoverAndLogHandler(maxBytesHandler{h: 
handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.ServeTLS(listener, certFile, keyFile) @@ -115,6 +117,22 @@ func WriteRPCResponseHTTPError( // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{}, res...) +} + +// WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes +// it to w. Adds cache-control to the response header and sets the expiry to +// one day. +func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...) +} + +type httpHeader struct { + name string + value string +} + +func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error { var v interface{} if len(res) == 1 { v = res[0] @@ -127,6 +145,9 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error return fmt.Errorf("json marshal: %w", err) } w.Header().Set("Content-Type", "application/json") + for _, header := range headers { + w.Header().Set(header.name, header.value) + } w.WriteHeader(200) _, err = w.Write(jsonBytes) return err @@ -164,7 +185,6 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // Without this, Chrome & Firefox were retrying aborted ajax requests, // at least to my localhost. 
if e := recover(); e != nil { - // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index c662e070f1..e1c499200f 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -102,7 +102,7 @@ func TestServeTLS(t *testing.T) { defer res.Body.Close() assert.Equal(t, http.StatusOK, res.StatusCode) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) require.NoError(t, err) assert.Equal(t, []byte("some body"), body) } @@ -112,14 +112,15 @@ func TestWriteRPCResponseHTTP(t *testing.T) { // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + err := WriteCacheableRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control")) assert.Equal(t, `{ "jsonrpc": "2.0", "id": -1, @@ -135,7 +136,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { types.NewRPCSuccessResponse(id, &sampleResult{"world"})) require.NoError(t, err) resp = w.Result() - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) _ = resp.Body.Close() require.NoError(t, err) @@ -167,7 +168,7 @@ func TestWriteRPCResponseHTTPError(t *testing.T) { types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) _ = 
resp.Body.Close() require.NoError(t, err) assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 6609cb8372..e99a1b0ac7 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -63,7 +63,14 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit } return } - if err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(dummyID, result)); err != nil { + + resp := types.NewRPCSuccessResponse(dummyID, result) + if rpcFunc.cacheableWithArgs(args) { + err = WriteCacheableRPCResponseHTTP(w, resp) + } else { + err = WriteRPCResponseHTTP(w, resp) + } + if err != nil { logger.Error("failed to write response", "res", result, "err", err) return } diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index e5855c3140..8a5053666c 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -23,40 +23,96 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) } -// Function introspection +type Option func(*RPCFunc) + +// Cacheable enables returning a cache control header from RPC functions to +// which it is applied. +// +// `noCacheDefArgs` is a list of argument names that, if omitted or set to +// their defaults when calling the RPC function, will skip the response +// caching. +func Cacheable(noCacheDefArgs ...string) Option { + return func(r *RPCFunc) { + r.cacheable = true + r.noCacheDefArgs = make(map[string]interface{}) + for _, arg := range noCacheDefArgs { + r.noCacheDefArgs[arg] = nil + } + } +} + +// Ws enables WebSocket communication. 
+func Ws() Option { + return func(r *RPCFunc) { + r.ws = true + } +} // RPCFunc contains the introspected type information for a function type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - ws bool // websocket only + f reflect.Value // underlying rpc function + args []reflect.Type // type of each function arg + returns []reflect.Type // type of each return arg + argNames []string // name of each argument + cacheable bool // enable cache control + ws bool // enable websocket communication + noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache } // NewRPCFunc wraps a function for introspection. // f is the function, args are comma separated argument names -func NewRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, false) +func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { + return newRPCFunc(f, args, options...) } // NewWSRPCFunc wraps a function for introspection and use in the websockets. -func NewWSRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, true) +func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { + options = append(options, Ws()) + return newRPCFunc(f, args, options...) +} + +// cacheableWithArgs returns whether or not a call to this function is cacheable, +// given the specified arguments. 
+func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool { + if !f.cacheable { + return false + } + // Skip the context variable common to all RPC functions + for i := 1; i < len(f.args); i++ { + // f.argNames does not include the context variable + argName := f.argNames[i-1] + if _, hasDefault := f.noCacheDefArgs[argName]; hasDefault { + // Argument with default value was not supplied + if i >= len(args) { + return false + } + // Argument with default value is set to its zero value + if args[i].IsZero() { + return false + } + } + } + return true } -func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc { +func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { var argNames []string if args != "" { argNames = strings.Split(args, ",") } - return &RPCFunc{ + + r := &RPCFunc{ f: reflect.ValueOf(f), args: funcArgTypes(f), returns: funcReturnTypes(f), argNames: argNames, - ws: ws, } + + for _, opt := range options { + opt(r) + } + + return r } // return a function's argument types diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go index ca7dd3de94..33eb0a6c9d 100644 --- a/rpc/jsonrpc/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -215,15 +215,17 @@ func (resp RPCResponse) String() string { } // From the JSON-RPC 2.0 spec: +// // If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. +// error/Invalid Request), it MUST be Null. func RPCParseError(err error) RPCResponse { return NewRPCErrorResponse(nil, -32700, "Parse error. Invalid JSON", err.Error()) } // From the JSON-RPC 2.0 spec: +// // If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. +// error/Invalid Request), it MUST be Null. 
func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) } @@ -276,9 +278,12 @@ type Context struct { // RemoteAddr returns the remote address (usually a string "IP:port"). // If neither HTTPReq nor WSConn is set, an empty string is returned. // HTTP: -// http.Request#RemoteAddr +// +// http.Request#RemoteAddr +// // WS: -// result of GetRemoteAddr +// +// result of GetRemoteAddr func (ctx *Context) RemoteAddr() string { if ctx.HTTPReq != nil { return ctx.HTTPReq.RemoteAddr @@ -291,10 +296,13 @@ func (ctx *Context) RemoteAddr() string { // Context returns the request's context. // The returned context is always non-nil; it defaults to the background context. // HTTP: -// The context is canceled when the client's connection closes, the request -// is canceled (with HTTP/2), or when the ServeHTTP method returns. +// +// The context is canceled when the client's connection closes, the request +// is canceled (with HTTP/2), or when the ServeHTTP method returns. +// // WS: -// The context is canceled when the client's connections closes. +// +// The context is canceled when the client's connections closes. func (ctx *Context) Context() context.Context { if ctx.HTTPReq != nil { return ctx.HTTPReq.Context() @@ -307,7 +315,6 @@ func (ctx *Context) Context() context.Context { //---------------------------------------- // SOCKETS -// // Determine if its a unix or tcp socket. 
// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port // TODO: deprecate diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 5ddb0a5f83..1819dc8cc6 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -51,10 +51,10 @@ info: ws ws://localhost:26657/websocket > { "jsonrpc": "2.0", "method": "subscribe", "params": ["tm.event='NewBlock'"], "id": 1 } - version: "Master" + version: "v0.34" license: name: Apache 2.0 - url: https://github.com/tendermint/tendermint/blob/master/LICENSE + url: https://github.com/tendermint/tendermint/blob/main/LICENSE servers: - url: https://rpc.cosmos.network description: Cosmos mainnet node to interact with the Tendermint RPC @@ -83,7 +83,7 @@ paths: description: | If you want to be sure that the transaction is included in a block, you can subscribe for the result using JSONRPC via a websocket. See - https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + https://docs.tendermint.com/v0.34/app-dev/subscribing-to-events-via-websocket.html If you haven't received anything after a couple of blocks, resend it. If the same happens again, send it to some other node. A few reasons why it could happen: @@ -95,7 +95,7 @@ paths: Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + https://docs.tendermint.com/v0.34/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. parameters: - in: query @@ -127,7 +127,7 @@ paths: description: | If you want to be sure that the transaction is included in a block, you can subscribe for the result using JSONRPC via a websocket. See - https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + https://docs.tendermint.com/v0.34/app-dev/subscribing-to-events-via-websocket.html If you haven't received anything after a couple of blocks, resend it. If the same happens again, send it to some other node. 
A few reasons why it could happen: @@ -139,7 +139,7 @@ paths: 3. node can be offline Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + https://docs.tendermint.com/v0.34/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. parameters: - in: query @@ -172,7 +172,7 @@ paths: IMPORTANT: use only for testing and development. In production, use BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction result using JSONRPC via a websocket. See - https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + https://docs.tendermint.com/v0.34/app-dev/subscribing-to-events-via-websocket.html CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout waiting for tx to commit. @@ -181,7 +181,7 @@ paths: will contain a non-OK ABCI code. Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + https://docs.tendermint.com/v0.34/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. parameters: - in: query @@ -214,8 +214,11 @@ paths: The transaction won't be added to the mempool. Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + https://docs.tendermint.com/v0.34/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. parameters: - in: query name: tx @@ -623,9 +626,12 @@ paths: tags: - Info description: | - Get block headers for minHeight <= height maxHeight. + Get block headers for minHeight <= height <= maxHeight. At most 20 items will be returned. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Block headers, returned in descending order (highest first). @@ -655,6 +661,9 @@ paths: - Info description: | Get Block. 
+ + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Block informations. @@ -684,6 +693,9 @@ paths: - Info description: | Get Block By Hash. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Block informations. @@ -704,7 +716,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch informations regarding the latest block. + description: height to return. If no height is provided, it will fetch information regarding the latest block. schema: type: integer default: 0 @@ -712,7 +724,11 @@ paths: tags: - Info description: | - Get block_results. + Get block_results. When the `discard_abci_responses` storage flag is + enabled, this endpoint will return an error. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Block results. @@ -742,6 +758,9 @@ paths: - Info description: | Get Commit. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: | @@ -790,6 +809,9 @@ paths: - Info description: | Get Validators. Validators are sorted by voting power. + + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: Commit results. @@ -811,6 +833,9 @@ paths: - Info description: | Get genesis. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Genesis results. @@ -889,6 +914,9 @@ paths: - Info description: | Get consensus parameters. 
+ + If the `height` field is set to a non-default value, upon success, the + `Cache-Control` header will be set with the default maximum age. responses: "200": description: consensus parameters results. @@ -1109,14 +1137,14 @@ paths: parameters: - in: query name: hash - description: transaction Hash to retrive + description: hash of transaction to retrieve required: true schema: type: string example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" - in: query name: prove - description: Include proofs of the transactions inclusion in the block + description: Include proofs of the transaction's inclusion in the block required: false schema: type: boolean @@ -1125,7 +1153,10 @@ paths: tags: - Info description: | - Get a trasasction + Get a transaction + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Get a transaction` @@ -1141,12 +1172,15 @@ paths: $ref: "#/components/schemas/ErrorResponse" /abci_info: get: - summary: Get some info about the application. + summary: Get info about the application. operationId: abci_info tags: - ABCI description: | - Get some info about the application. + Get info about the application. + + Upon success, the `Cache-Control` header will be set with the default + maximum age. responses: "200": description: Get some info about the application. diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 481a98fe94..2d6f40e638 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -1,3 +1,15 @@ #!/bin/sh +# +# Invoke Mockery v2 to update generated mocks for the given type. +# +# This script runs a locally-installed "mockery" if available, otherwise it +# runs the published Docker container. This legerdemain is so that the CI build +# and a local build can work off the same script. +# +if ! 
which mockery ; then + mockery() { + docker run --rm -v "$PWD":/w --workdir=/w vektra/mockery:v2.12.3 + } +fi -go run github.com/vektra/mockery/v2@v2.12.3 --disable-version-string --case underscore --name $* +mockery --disable-version-string --case underscore --name "$@" diff --git a/scripts/qa/reporting/README.md b/scripts/qa/reporting/README.md new file mode 100644 index 0000000000..088332837a --- /dev/null +++ b/scripts/qa/reporting/README.md @@ -0,0 +1,48 @@ +# Reporting Scripts + +This directory contains just one utility script at present that is used in +reporting/QA. + +## Latency vs Throughput Plotting + +[`latency_throughput.py`](./latency_throughput.py) is a Python script that uses +[matplotlib] to plot a graph of transaction latency vs throughput rate based on +the CSV output generated by the [loadtime reporting +tool](../../../test/loadtime/cmd/report/). + +### Setup + +Execute the following within this directory (the same directory as the +`latency_throughput.py` file). + +```bash +# Create a virtual environment into which to install your dependencies +python3 -m venv .venv + +# Activate the virtual environment +source .venv/bin/activate + +# Install dependencies listed in requirements.txt +pip install -r requirements.txt + +# Show usage instructions and parameters +./latency_throughput.py --help +``` + +### Running + +```bash +# Do the following while ensuring that the virtual environment is activated (see +# the Setup steps). +# +# This will generate a plot in a PNG file called 'tm034.png' in the current +# directory based on the reporting tool CSV output in the "raw.csv" file. The +# '-t' flag overrides the default title at the top of the plot. 
+ +./latency_throughput.py \ + -t 'Tendermint v0.34.x Latency vs Throughput' \ + ./tm034.png \ + /path/to/csv/files/raw.csv +``` + +[matplotlib]: https://matplotlib.org/ diff --git a/scripts/qa/reporting/latency_throughput.py b/scripts/qa/reporting/latency_throughput.py new file mode 100755 index 0000000000..2cdab72ac7 --- /dev/null +++ b/scripts/qa/reporting/latency_throughput.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +A simple script to parse the CSV output from the loadtime reporting tool (see +https://github.com/tendermint/tendermint/tree/main/test/loadtime/cmd/report). + +Produces a plot of average transaction latency vs total transaction throughput +according to the number of load testing tool WebSocket connections to the +Tendermint node. +""" + +import argparse +import csv +import logging +import sys +import matplotlib.pyplot as plt +import numpy as np + +DEFAULT_TITLE = "Tendermint latency vs throughput" + + +def main(): + parser = argparse.ArgumentParser( + description="Renders a latency vs throughput diagram " + "for a set of transactions provided by the loadtime reporting tool", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-t', + '--title', + default=DEFAULT_TITLE, + help='Plot title') + parser.add_argument('output_image', + help='Output image file (in PNG format)') + parser.add_argument( + 'input_csv_file', + nargs='+', + help="CSV input file from which to read transaction data " + "- must have been generated by the loadtime reporting tool") + args = parser.parse_args() + + logging.basicConfig(format='%(levelname)s\t%(message)s', + stream=sys.stdout, + level=logging.INFO) + plot_latency_vs_throughput(args.input_csv_file, + args.output_image, + title=args.title) + + +def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): + avg_latencies, throughput_rates = process_input_files(input_files, ) + + fig, ax = plt.subplots() + + connections = sorted(avg_latencies.keys()) + for c in 
connections: + tr = np.array(throughput_rates[c]) + al = np.array(avg_latencies[c]) + label = '%d connection%s' % (c, '' if c == 1 else 's') + ax.plot(tr, al, 'o-', label=label) + + ax.set_title(title) + ax.set_xlabel('Throughput rate (tx/s)') + ax.set_ylabel('Average transaction latency (s)') + + plt.legend(loc='upper left') + plt.savefig(output_image) + + +def process_input_files(input_files): + # Experimental data from which we will derive the latency vs throughput + # statistics + experiments = {} + + for input_file in input_files: + logging.info('Reading %s...' % input_file) + + with open(input_file, 'rt') as inf: + reader = csv.DictReader(inf) + for tx in reader: + experiments = process_tx(experiments, tx) + + return compute_experiments_stats(experiments) + + +def process_tx(experiments, tx): + exp_id = tx['experiment_id'] + # Block time is nanoseconds from the epoch - convert to seconds + block_time = float(tx['block_time']) / (10**9) + # Duration is also in nanoseconds - convert to seconds + duration = float(tx['duration_ns']) / (10**9) + connections = int(tx['connections']) + rate = int(tx['rate']) + + if exp_id not in experiments: + experiments[exp_id] = { + 'connections': connections, + 'rate': rate, + 'block_time_min': block_time, + # We keep track of the latency associated with the minimum block + # time to estimate the start time of the experiment + 'block_time_min_duration': duration, + 'block_time_max': block_time, + 'total_latencies': duration, + 'tx_count': 1, + } + logging.info('Found experiment %s with rate=%d, connections=%d' % + (exp_id, rate, connections)) + else: + # Validation + for field in ['connections', 'rate']: + val = int(tx[field]) + if val != experiments[exp_id][field]: + raise Exception( + 'Found multiple distinct values for field ' + '"%s" for the same experiment (%s): %d and %d' % + (field, exp_id, val, experiments[exp_id][field])) + + if block_time < experiments[exp_id]['block_time_min']: + experiments[exp_id]['block_time_min'] 
= block_time + experiments[exp_id]['block_time_min_duration'] = duration + if block_time > experiments[exp_id]['block_time_max']: + experiments[exp_id]['block_time_max'] = block_time + + experiments[exp_id]['total_latencies'] += duration + experiments[exp_id]['tx_count'] += 1 + + return experiments + + +def compute_experiments_stats(experiments): + """Compute average latency vs throughput rate statistics from the given + experiments""" + stats = {} + + # Compute average latency and throughput rate for each experiment + for exp_id, exp in experiments.items(): + conns = exp['connections'] + avg_latency = exp['total_latencies'] / exp['tx_count'] + exp_start_time = exp['block_time_min'] - exp['block_time_min_duration'] + exp_duration = exp['block_time_max'] - exp_start_time + throughput_rate = exp['tx_count'] / exp_duration + if conns not in stats: + stats[conns] = [] + + stats[conns].append({ + 'avg_latency': avg_latency, + 'throughput_rate': throughput_rate, + }) + + # Sort stats for each number of connections in order of increasing + # throughput rate, and then extract average latencies and throughput rates + # as separate data series. 
+ conns = sorted(stats.keys()) + avg_latencies = {} + throughput_rates = {} + for c in conns: + stats[c] = sorted(stats[c], key=lambda s: s['throughput_rate']) + avg_latencies[c] = [] + throughput_rates[c] = [] + for s in stats[c]: + avg_latencies[c].append(s['avg_latency']) + throughput_rates[c].append(s['throughput_rate']) + logging.info('For %d connection(s): ' + 'throughput rate = %.6f tx/s\t' + 'average latency = %.6fs' % + (c, s['throughput_rate'], s['avg_latency'])) + + return (avg_latencies, throughput_rates) + + +if __name__ == "__main__": + main() diff --git a/scripts/qa/reporting/requirements.txt b/scripts/qa/reporting/requirements.txt new file mode 100644 index 0000000000..4486cd522e --- /dev/null +++ b/scripts/qa/reporting/requirements.txt @@ -0,0 +1,11 @@ +contourpy==1.0.5 +cycler==0.11.0 +fonttools==4.37.4 +kiwisolver==1.4.4 +matplotlib==3.6.1 +numpy==1.23.4 +packaging==21.3 +Pillow==9.2.0 +pyparsing==3.0.9 +python-dateutil==2.8.2 +six==1.16.0 diff --git a/spec/README.md b/spec/README.md new file mode 100644 index 0000000000..e7563175ce --- /dev/null +++ b/spec/README.md @@ -0,0 +1,93 @@ +--- +order: 1 +title: Overview +parent: + title: Spec + order: 7 +--- + +# Tendermint Spec + +This is a markdown specification of the Tendermint blockchain. +It defines the base data structures, how they are validated, +and how they are communicated over the network. + +If you find discrepancies between the spec and the code that +do not have an associated issue or pull request on github, +please submit them to our [bug bounty](https://tendermint.com/security)! 
+ +## Contents + +- [Overview](#overview) + +### Data Structures + +- [Encoding and Digests](./core/encoding.md) +- [Blockchain](./core/data_structures.md) +- [State](./core/state.md) + +### Consensus Protocol + +- [Consensus Algorithm](./consensus/consensus.md) +- [Creating a proposal](./consensus/creating-proposal.md) +- [Time](./consensus/bft-time.md) +- [Light-Client](./consensus/light-client/README.md) + +### P2P and Network Protocols + +- [The Base P2P Layer](./p2p/node.md): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections +- [Peer Exchange (PEX)](./p2p/messages/pex.md): gossip known peer addresses so peers can find each other +- [Block Sync](./p2p/messages/block-sync.md): gossip blocks so peers can catch up quickly +- [Consensus](./p2p/messages/consensus.md): gossip votes and block parts so new blocks can be committed +- [Mempool](./p2p/messages/mempool.md): gossip transactions so they get included in blocks +- [Evidence](./p2p/messages/evidence.md): sending invalid evidence will stop the peer + +### RPC + +- [RPC SPEC](./rpc/README.md): Specification of the Tendermint remote procedure call interface. + +### Software + +- [ABCI](./abci/README.md): Details about interactions between the + application and consensus engine over ABCI +- [Write-Ahead Log](./consensus/wal.md): Details about how the consensus + engine preserves data and recovers from crash failures + +## Overview + +Tendermint provides Byzantine Fault Tolerant State Machine Replication using +hash-linked batches of transactions. Such transaction batches are called "blocks". +Hence, Tendermint defines a "blockchain". + +Each block in Tendermint has a unique index - its Height. +Height's in the blockchain are monotonic. +Each block is committed by a known set of weighted Validators. +Membership and weighting within this validator set may change over time. 
+Tendermint guarantees the safety and liveness of the blockchain +so long as less than 1/3 of the total weight of the Validator set +is malicious or faulty. + +A commit in Tendermint is a set of signed messages from more than 2/3 of +the total weight of the current Validator set. Validators take turns proposing +blocks and voting on them. Once enough votes are received, the block is considered +committed. These votes are included in the _next_ block as proof that the previous block +was committed - they cannot be included in the current block, as that block has already been +created. + +Once a block is committed, it can be executed against an application. +The application returns results for each of the transactions in the block. +The application can also return changes to be made to the validator set, +as well as a cryptographic digest of its latest state. + +Tendermint is designed to enable efficient verification and authentication +of the latest state of the blockchain. To achieve this, it embeds +cryptographic commitments to certain information in the block "header". +This information includes the contents of the block (eg. the transactions), +the validator set committing the block, as well as the various results returned by the application. +Note, however, that block execution only occurs _after_ a block is committed. +Thus, application results can only be included in the _next_ block. + +Also note that information like the transaction results and the validator set are never +directly included in the block - only their cryptographic digests (Merkle roots) are. +Hence, verification of a block requires a separate data structure to store this information. +We call this the `State`. Block verification also requires access to the previous block. 
diff --git a/spec/abci/README.md b/spec/abci/README.md new file mode 100644 index 0000000000..6c71393ce0 --- /dev/null +++ b/spec/abci/README.md @@ -0,0 +1,27 @@ +--- +order: 1 +parent: + title: ABCI + order: 2 +--- + +# ABCI + +ABCI stands for "**A**pplication **B**lock**c**hain **I**nterface". +ABCI is the interface between Tendermint (a state-machine replication engine) +and your application (the actual state machine). It consists of a set of +_methods_, each with a corresponding `Request` and `Response`message type. +To perform state-machine replication, Tendermint calls the ABCI methods on the +ABCI application by sending the `Request*` messages and receiving the `Response*` messages in return. + +All ABCI messages and methods are defined in [protocol buffers](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/abci/types.proto). +This allows Tendermint to run with applications written in many programming languages. + +This specification is split as follows: + +- [Methods and Types](./abci.md) - complete details on all ABCI methods and + message types +- [Applications](./apps.md) - how to manage ABCI application state and other + details about building ABCI applications +- [Client and Server](./client-server.md) - for those looking to implement their + own ABCI application servers diff --git a/spec/abci/abci.md b/spec/abci/abci.md new file mode 100644 index 0000000000..55c82a36ba --- /dev/null +++ b/spec/abci/abci.md @@ -0,0 +1,775 @@ +--- +order: 1 +title: Method and Types +--- + +# Methods and Types + +## Connections + +ABCI applications can run either within the _same_ process as the Tendermint +state-machine replication engine, or as a _separate_ process from the state-machine +replication engine. When run within the same process, Tendermint will call the ABCI +application methods directly as Go method calls. 
+ +When Tendermint and the ABCI application are run as separate processes, Tendermint +opens four connections to the application for ABCI methods. The connections each +handle a subset of the ABCI method calls. These subsets are defined as follows: + +#### **Consensus** connection + +* Driven by a consensus protocol and is responsible for block execution. +* Handles the `InitChain`, `BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit` method +calls. + +#### **Mempool** connection + +* For validating new transactions, before they're shared or included in a block. +* Handles the `CheckTx` calls. + +#### **Info** connection + +* For initialization and for queries from the user. +* Handles the `Info` and `Query` calls. + +#### **Snapshot** connection + +* For serving and restoring [state sync snapshots](apps.md#state-sync). +* Handles the `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` calls. + +Additionally, there is a `Flush` method that is called on every connection, +and an `Echo` method that is just for debugging. + +More details on managing state across connections can be found in the section on +[ABCI Applications](apps.md). + +## Errors + +The `Query`, `CheckTx` and `DeliverTx` methods include a `Code` field in their `Response*`. +This field is meant to contain an application-specific response code. +A response code of `0` indicates no error. Any other response code +indicates to Tendermint that an error occurred. + +These methods also return a `Codespace` string to Tendermint. This field is +used to disambiguate `Code` values returned by different domains of the +application. The `Codespace` is a namespace for the `Code`. + +The `Echo`, `Info`, `InitChain`, `BeginBlock`, `EndBlock`, `Commit` methods +do not return errors. An error in any of these methods represents a critical +issue that Tendermint has no reasonable way to handle. 
If there is an error in one +of these methods, the application must crash to ensure that the error is safely +handled by an operator. + +The handling of non-zero response codes by Tendermint is described below + +### CheckTx + +The `CheckTx` ABCI method controls what transactions are considered for inclusion in a block. +When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated +transaction will be not be added to Tendermint's mempool or it will be removed if +it is already included. + +### DeliverTx + +The `DeliverTx` ABCI method delivers transactions from Tendermint to the application. +When Tendermint recieves a `ResponseDeliverTx` with a non-zero `Code`, the response code is logged. +The transaction was already included in a block, so the `Code` does not influence +Tendermint consensus. + +### Query + +The `Query` ABCI method query queries the application for information about application state. +When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is +returned directly to the client that initiated the query. + +## Events + +The `CheckTx`, `BeginBlock`, `DeliverTx`, `EndBlock` methods include an `Events` +field in their `Response*`. Applications may respond to these ABCI methods with a set of events. +Events allow applications to associate metadata about ABCI method execution with the +transactions and blocks this metadata relates to. +Events returned via these ABCI methods do not impact Tendermint consensus in any way +and instead exist to power subscriptions and queries of Tendermint state. + +An `Event` contains a `type` and a list of `EventAttributes`, which are key-value +string pairs denoting metadata about what happened during the method's execution. +`Event` values can be used to index transactions and blocks according to what happened +during their execution. Note that the set of events returned for a block from +`BeginBlock` and `EndBlock` are merged. 
In case both methods return the same +key, only the value defined in `EndBlock` is used. + +Each event has a `type` which is meant to categorize the event for a particular +`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate +`type` values, where each distinct entry is meant to categorize attributes for a +particular event. Every key and value in an event's attributes must be UTF-8 +encoded strings along with the event type itself. + +```protobuf +message Event { + string type = 1; + repeated EventAttribute attributes = 2; +} +``` + +The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The +index flag notifies the Tendermint indexer to index the attribute. The value of +the `index` flag is non-deterministic and may vary across different nodes in the network. + +```protobuf +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} +``` + +Example: + +```go + abci.ResponseDeliverTx{ + // ... 
+ Events: []abci.Event{ + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: true}, + }, + }, + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: false}, + }, + }, + { + Type: "validator.slashed", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("reason"), Value: []byte("..."), Index: true}, + }, + }, + // ... + }, +} +``` + +## EvidenceType + +Tendermint's security model relies on the use of "evidence". Evidence is proof of +malicious behaviour by a network participant. It is the responsibility of Tendermint +to detect such malicious behaviour. When malicious behavior is detected, Tendermint +will gossip evidence of the behavior to other nodes and commit the evidence to +the chain once it is verified by all validators. This evidence will then be +passed it on to the application through the ABCI. It is the responsibility of the +application to handle the evidence and exercise punishment. + +EvidenceType has the following protobuf format: + +```proto +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} +``` + +There are two forms of evidence: Duplicate Vote and Light Client Attack. 
More +information can be found in either [data structures](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/data_structures.md) +or [accountability](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/) + +## Determinism + +ABCI applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus engine. This means block execution +over the Consensus Connection must be strictly deterministic: given the same +ordered set of requests, all nodes will compute identical responses, for all +BeginBlock, DeliverTx, EndBlock, and Commit. This is critical, because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that applications not be exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The application must only change its state based on input +from block execution (BeginBlock, DeliverTx, EndBlock, Commit), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +* Hardware failures + * Cosmic rays, overheating, etc. +* Node-dependent state + * Random numbers + * Time +* Underspecification + * Library version changes + * Race conditions + * Floating point numbers + * JSON serialization + * Iterating through hash-tables/maps/dictionaries +* External Sources + * Filesystem + * Network calls (eg. some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for original discussion. 
+ +Note that some methods (`Query, CheckTx, DeliverTx`) return +explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is +intended for the literal output from the application's logger, while the +`Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + +## Block Execution + +The first time a new blockchain is started, Tendermint calls +`InitChain`. From then on, the following sequence of methods is executed for each +block: + +`BeginBlock, [DeliverTx], EndBlock, Commit` + +where one `DeliverTx` is called for each transaction in the block. +The result is an updated application state. +Cryptographic commitments to the results of DeliverTx, EndBlock, and +Commit are included in the header of the next block. + +## State Sync + +State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying +state machine snapshots instead of replaying historical blocks. For more details, see the +[state sync section](../spec/p2p/messages/state-sync.md). + +New nodes will discover and request snapshots from other nodes in the P2P network. +A Tendermint node that receives a request for snapshots from a peer will call +`ListSnapshots` on its application to retrieve any local state snapshots. After receiving + snapshots from peers, the new node will offer each snapshot received from a peer +to its local application via the `OfferSnapshot` method. + +Snapshots may be quite large and are thus broken into smaller "chunks" that can be +assembled into the whole snapshot. Once the application accepts a snapshot and +begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. +The node providing "chunks" will fetch them from its local application using +the `LoadSnapshotChunk` method. 
+ +As the new node receives "chunks" it will apply them sequentially to the local +application with `ApplySnapshotChunk`. When all chunks have been applied, the application +`AppHash` is retrieved via an `Info` query. The `AppHash` is then compared to +the blockchain's `AppHash` which is verified via [light client verification](../spec/light-client/verification/README.md). + +## Messages + +### Echo + +* **Request**: + * `Message (string)`: A string to echo back +* **Response**: + * `Message (string)`: The input string +* **Usage**: + * Echo a string to test an abci client/server implementation + +### Flush + +* **Usage**: + * Signals that messages queued on the client should be flushed to + the server. It is called periodically by the client + implementation to ensure asynchronous requests are actually + sent, and is called immediately to make a synchronous request, + which returns when the Flush response comes back. + +### Info + +* **Request**: + + | Name | Type | Description | Field Number | + |---------------|--------|------------------------------------------|--------------| + | version | string | The Tendermint software semantic version | 1 | + | block_version | uint64 | The Tendermint Block Protocol version | 2 | + | p2p_version | uint64 | The Tendermint P2P Protocol version | 3 | + | abci_version | string | The Tendermint ABCI semantic version | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |---------------------|--------|--------------------------------------------------|--------------| + | data | string | Some arbitrary information | 1 | + | version | string | The application software semantic version | 2 | + | app_version | uint64 | The application protocol version | 3 | + | last_block_height | int64 | Latest block for which the app has called Commit | 4 | + | last_block_app_hash | bytes | Latest result of Commit | 5 | + +* **Usage**: + * Return information about the application state. 
+ * Used to sync Tendermint with the application during a handshake
+ that happens on startup.
+ * The returned `app_version` will be included in the Header of every block.
+ * Tendermint expects `last_block_app_hash` and `last_block_height` to
+ be updated during `Commit`, ensuring that `Commit` is never
+ called twice for the same block height.
+
+> Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x.
+
+### InitChain
+
+* **Request**:
+
+ | Name | Type | Description | Field Number |
+ |------------------|--------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------|--------------|
+ | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Genesis time | 1 |
+ | chain_id | string | ID of the blockchain. | 2 |
+ | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters. | 3 |
+ | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial genesis validators, sorted by voting power. | 4 |
+ | app_state_bytes | bytes | Serialized initial application state. JSON bytes. | 5 |
+ | initial_height | int64 | Height of the initial block (typically `1`). | 6 |
+
+* **Response**:
+
+ | Name | Type | Description | Field Number |
+ |------------------|----------------------------------------------|--------------------------------------------------|--------------|
+ | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional). | 1 |
+ | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 |
+ | app_hash | bytes | Initial application hash. | 3 |
+
+* **Usage**:
+ * Called once upon genesis. 
+ * If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + * If ResponseInitChain.Validators is not empty, it will be the initial + validator set (regardless of what is in RequestInitChain.Validators). + * This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). + +### Query + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | data | bytes | Raw query bytes. Can be used with or in lieu of Path. | 1 | + | path | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...` | 2 | + | height | int64 | The block height for which you want the query (default=0 returns data for the latest committed block). 
Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 3 | + | prove | bool | Return Merkle proof with response if possible | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | code | uint32 | Response code. | 1 | + | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | index | int64 | The index of the key in the tree. | 5 | + | key | bytes | The key of the matching data. | 6 | + | value | bytes | The value of the matching data. | 7 | + | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | + | height | int64 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | + | codespace | string | Namespace for the `code`. | 10 | + +* **Usage**: + * Query for data from the application at current or past height. + * Optionally return Merkle proof. + * Merkle proof includes self-describing `type` field to support many types + of Merkle trees and encoding formats. + +### BeginBlock + +* **Request**: + + | Name | Type | Description | Field Number | + |----------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------| + | hash | bytes | The block's hash. 
This can be derived from the block header. | 1 |
+ | header | [Header](../core/data_structures.md#header) | The block header. | 2 |
+ | last_commit_info | [LastCommitInfo](#lastcommitinfo) | Info about the last commit, including the round, and the list of validators and which ones signed the last block. | 3 |
+ | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 4 |
+
+* **Response**:
+
+ | Name | Type | Description | Field Number |
+ |--------|---------------------------|--------------------------------------|--------------|
+ | events | repeated [Event](#events) | Type & Key-Value events for indexing | 1 |
+
+* **Usage**:
+ * Signals the beginning of a new block.
+ * Called prior to any `DeliverTx` method calls.
+ * The header contains the height, timestamp, and more - it exactly matches the
+ Tendermint block header. We may seek to generalize this in the future.
+ * The `LastCommitInfo` and `ByzantineValidators` can be used to determine
+ rewards and punishments for the validators.
+
+### CheckTx
+
+* **Request**:
+
+ | Name | Type | Description | Field Number |
+ |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+ | tx | bytes | The request transaction bytes | 1 |
+ | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the transaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction. | 2 |
+
+* **Response**:
+
+ | Name | Type | Description | Field Number |
+ |------------|---------------------------|-----------------------------------------------------------------------|--------------|
+ | code | uint32 | Response code. 
| 1 |
+ | data | bytes | Result bytes, if any. | 2 |
+ | log | string | The output of the application's logger. **May be non-deterministic.** | 3 |
+ | info | string | Additional information. **May be non-deterministic.** | 4 |
+ | gas_wanted | int64 | Amount of gas requested for transaction. | 5 |
+ | gas_used | int64 | Amount of gas consumed by transaction. | 6 |
+ | events | repeated [Event](#events) | Type & Key-Value events for indexing transactions (eg. by account). | 7 |
+ | codespace | string | Namespace for the `code`. | 8 |
+ | sender | string | The transaction's sender (e.g. the signer) | 9 |
+ | priority | int64 | The transaction's priority (for mempool ordering) | 10 |
+
+* **Usage**:
+
+ * Technically optional - not involved in processing blocks.
+ * Guardian of the mempool: every node runs `CheckTx` before letting a
+ transaction into its local mempool.
+ * The transaction may come from an external user or another node.
+ * `CheckTx` validates the transaction against the current state of the application,
+ for example, checking signatures and account balances, but does not apply any
+ of the state changes described in the transaction (for instance, by
+ not running code in a virtual machine).
+ * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to
+ other nodes or included in a proposal block.
+ * Tendermint attributes no other value to the response code.
+
+### DeliverTx
+
+* **Request**:
+
+ | Name | Type | Description | Field Number |
+ |------|-------|--------------------------------|--------------|
+ | tx | bytes | The request transaction bytes. | 1 |
+
+* **Response**:
+
+ | Name | Type | Description | Field Number |
+ |------------|---------------------------|-----------------------------------------------------------------------|--------------|
+ | code | uint32 | Response code. | 1 |
+ | data | bytes | Result bytes, if any. | 2 |
+ | log | string | The output of the application's logger. 
**May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | + | events | repeated [Event](#events) | Type & Key-Value events for indexing transactions (eg. by account). | 7 | + | codespace | string | Namespace for the `code`. | 8 | + +* **Usage**: + * [**Required**] The core method of the application. + * When `DeliverTx` is called, the application must execute the transaction in full before returning control to Tendermint. + * `ResponseDeliverTx.Code == 0` only if the transaction is fully valid. + +### EndBlock + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + | height | int64 | Height of the block just executed. | 1 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-------------------------|----------------------------------------------|-----------------------------------------------------------------|--------------| + | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 1 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical time, size, and other parameters. | 2 | + | events | repeated [Event](#events) | Type & Key-Value events for indexing | 3 | + +* **Usage**: + * Signals the end of a block. + * Called after all the transactions for the current block have been delivered, prior to the block's `Commit` message. + * Optional `validator_updates` triggered by block `H`. These updates affect validation + for blocks `H+1`, `H+2`, and `H+3`. + * Heights following a validator update are affected in the following way: + * `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. 
+ * `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. + * `H+3`: `LastCommitInfo` is changed to include the altered validator set. + * `consensus_param_updates` returned for block `H` apply to the consensus + params for block `H+1`. For more information on the consensus parameters, + see the [application spec entry on consensus parameters](../spec/abci/apps.md#consensus-parameters). + +### Commit + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + + Commit signals the application to persist application state. It takes no parameters. +* **Response**: + + | Name | Type | Description | Field Number | + |---------------|-------|------------------------------------------------------------------------|--------------| + | data | bytes | The Merkle root hash of the application state. | 2 | + | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 3 | + +* **Usage**: + * Signal the application to persist the application state. + * Return an (optional) Merkle root hash of the application state + * `ResponseCommit.Data` is included as the `Header.AppHash` in the next block + * it may be empty + * Later calls to `Query` can return proofs about the application state anchored + in this Merkle root hash + * Note developers can return whatever they want here (could be nothing, or a + constant string, etc.), so long as it is deterministic - it must not be a + function of anything that did not come from the + BeginBlock/DeliverTx/EndBlock methods. + * Use `RetainHeight` with caution! If all nodes in the network remove historical + blocks then this data is permanently lost, and no new nodes will be able to + join the network and bootstrap. Historical blocks may also be required for + other purposes, e.g. auditing, replay of non-persisted heights, light client + verification, and so on. 
+ +### ListSnapshots + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + + Empty request asking the application for a list of snapshots. + +* **Response**: + + | Name | Type | Description | Field Number | + |-----------|--------------------------------|--------------------------------|--------------| + | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1 | + +* **Usage**: + * Used during state sync to discover available snapshots on peers. + * See `Snapshot` data type for details. + +### LoadSnapshotChunk + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|-----------------------------------------------------------------------|--------------| + | height | uint64 | The height of the snapshot the chunks belongs to. | 1 | + | format | uint32 | The application-specific format of the snapshot the chunk belongs to. | 2 | + | chunk | uint32 | The chunk index, starting from `0` for the initial chunk. | 3 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | chunk | bytes | The binary chunk contents, in an arbitray format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | + +* **Usage**: + * Used during state sync to retrieve snapshot chunks from peers. + +### OfferSnapshot + +* **Request**: + + | Name | Type | Description | Field Number | + |----------|-----------------------|--------------------------------------------------------------------------|--------------| + | snapshot | [Snapshot](#snapshot) | The snapshot offered for restoration. | 1 | + | app_hash | bytes | The light client-verified app hash for this height, from the blockchain. 
| 2 | + +* **Response**: + + | Name | Type | Description | Field Number | + |--------|-------------------|-----------------------------------|--------------| + | result | [Result](#result) | The result of the snapshot offer. | 1 | + +#### Result + +```proto + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Snapshot is accepted, start applying chunks. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + REJECT = 3; // Reject this specific snapshot, try others. + REJECT_FORMAT = 4; // Reject all snapshots with this `format`, try others. + REJECT_SENDER = 5; // Reject all snapshots from all senders of this snapshot, try others. + } +``` + +* **Usage**: + * `OfferSnapshot` is called when bootstrapping a node using state sync. The application may + accept or reject snapshots as appropriate. Upon accepting, Tendermint will retrieve and + apply snapshot chunks via `ApplySnapshotChunk`. The application may also choose to reject a + snapshot in the chunk response, in which case it should be prepared to accept further + `OfferSnapshot` calls. + * Only `AppHash` can be trusted, as it has been verified by the light client. Any other data + can be spoofed by adversaries, so applications should employ additional verification schemes + to avoid denial-of-service attacks. The verified `AppHash` is automatically checked against + the restored application at the end of snapshot restoration. + * For more information, see the `Snapshot` data type or the [state sync section](../spec/p2p/messages/state-sync.md). + +### ApplySnapshotChunk + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|-----------------------------------------------------------------------------|--------------| + | index | uint32 | The chunk index, starting from `0`. Tendermint applies chunks sequentially. | 1 | + | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. 
| 2 | + | sender | string | The P2P ID of the node who sent this chunk. | 3 | + +* **Response**: + + | Name | Type | Description | Field Number | + |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | result | Result (see below) | The result of applying this chunk. | 1 | + | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | + | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | + +```proto + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // The chunk was accepted. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + RETRY = 3; // Reapply this chunk, combine with `RefetchChunks` and `RejectSenders` as appropriate. + RETRY_SNAPSHOT = 4; // Restart this snapshot from `OfferSnapshot`, reusing chunks unless instructed otherwise. + REJECT_SNAPSHOT = 5; // Reject this snapshot, try a different one. + } +``` + +* **Usage**: + * The application can choose to refetch chunks and/or ban P2P peers as appropriate. Tendermint + will not do this unless instructed by the application. + * The application may want to verify each chunk, e.g. by attaching chunk hashes in + `Snapshot.Metadata` and/or incrementally verifying contents against `AppHash`. 
+
+ * When all chunks have been accepted, Tendermint will make an ABCI `Info` call to verify that
+ `LastBlockAppHash` and `LastBlockHeight` match the expected values, and record the
+ `AppVersion` in the node state. It then switches to fast sync or consensus and joins the
+ network.
+ * If Tendermint is unable to retrieve the next chunk after some time (e.g. because no suitable
+ peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`.
+ The application should be prepared to reset and accept it or abort as appropriate.
+
+## Data Types
+
+Most of the data structures used in ABCI are shared [common data structures](../spec/core/data_structures.md). In certain cases, ABCI uses different data structures which are documented here:
+
+### Validator
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |---------|-------|---------------------------------------------------------------------|--------------|
+ | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 |
+ | power | int64 | Voting power of the validator | 3 |
+
+* **Usage**:
+ * Validator identified by address
+ * Used in RequestBeginBlock as part of VoteInfo
+ * Does not include PubKey to avoid sending potentially large quantum pubkeys
+ over the ABCI
+
+### ValidatorUpdate
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |---------|--------------------------------------------------|-------------------------------|--------------|
+ | pub_key | [Public Key](../core/data_structures.md#pub_key) | Public key of the validator | 1 |
+ | power | int64 | Voting power of the validator | 2 |
+
+* **Usage**:
+ * Validator identified by PubKey
+ * Used to tell Tendermint to update the validator set
+
+### VoteInfo
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |-------------------|-------------------------|--------------------------------------------------------------|--------------|
+ | validator | 
[Validator](#validator) | A validator | 1 | + | signed_last_block | bool | Indicates whether or not the validator signed the last block | 2 | + +* **Usage**: + * Indicates whether a validator signed the last block, allowing for rewards + based on validator availability + +### Evidence + +* **Fields**: + + | Name | Type | Description | Field Number | + |--------------------|--------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|--------------| + | type | [EvidenceType](#evidencetype) | Type of the evidence. An enum of possible evidence's. | 1 | + | validator | [Validator](#validator) | The offending validator | 2 | + | height | int64 | Height when the offense occurred | 3 | + | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Time of the block that was committed at the height that the offense occurred | 4 | + | total_voting_power | int64 | Total voting power of the validator set at height `Height` | 5 | + +#### EvidenceType + +* **Fields** + + EvidenceType is an enum with the listed fields: + + | Name | Field Number | + |---------------------|--------------| + | UNKNOWN | 0 | + | DUPLICATE_VOTE | 1 | + | LIGHT_CLIENT_ATTACK | 2 | + +### LastCommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|--------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the total amount of rounds it took to come to consensus for the current block. | 1 | + | votes | repeated [VoteInfo](#voteinfo) | List of validators addresses in the last validator set with their voting power and whether or not they signed a vote. 
| 2 |
+
+### ConsensusParams
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |-----------|---------------------------------------------------------------|------------------------------------------------------------------------------|--------------|
+ | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 |
+ | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 |
+ | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 |
+ | version | [VersionParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 |
+
+### ProofOps
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+ | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against. | 1 |
+
+### ProofOp
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |------|--------|------------------------------------------------|--------------|
+ | type | string | Type of Merkle proof and how it's encoded. | 1 |
+ | key | bytes | Key in the Merkle tree that this proof is for. | 2 |
+ | data | bytes | Encoded Merkle proof for the key. 
| 3 |
+
+### Snapshot
+
+* **Fields**:
+
+ | Name | Type | Description | Field Number |
+ |----------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+ | height | uint64 | The height at which the snapshot was taken (after commit). | 1 |
+ | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. Tendermint does not interpret this. | 2 |
+ | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 |
+ | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. Tendermint does not interpret the hash, it only compares them. | 4 |
+ | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 |
+
+* **Usage**:
+ * Used for state sync snapshots, see the [state sync section](../spec/p2p/messages/state-sync.md) for details.
+ * A snapshot is considered identical across nodes only if _all_ fields are equal (including
+ `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot.
+ * When sent across the network, a snapshot message can be at most 4 MB.
diff --git a/spec/abci/apps.md b/spec/abci/apps.md
new file mode 100644
index 0000000000..d16073d457
--- /dev/null
+++ b/spec/abci/apps.md
@@ -0,0 +1,671 @@
+---
+order: 2
+title: Applications
+---
+
+# Applications
+
+Please ensure you've first read the spec for [ABCI Methods and Types](abci.md)
+
+Here we cover the following components of ABCI applications:
+
+- [Connection State](#connection-state) - the interplay between ABCI connections and application state
+ and the differences between `CheckTx` and `DeliverTx`. 
+- [Transaction Results](#transaction-results) - rules around transaction + results and validity +- [Validator Set Updates](#validator-updates) - how validator sets are + changed during `InitChain` and `EndBlock` +- [Query](#query) - standards for using the `Query` method and proofs about the + application state +- [Crash Recovery](#crash-recovery) - handshake protocol to synchronize + Tendermint and the application on startup. +- [State Sync](#state-sync) - rapid bootstrapping of new nodes by restoring state machine snapshots + +## Connection State + +Since Tendermint maintains four concurrent ABCI connections, it is typical +for an application to maintain a distinct state for each, and for the states to +be synchronized during `Commit`. + +### Concurrency + +In principle, each of the four ABCI connections operate concurrently with one +another. This means applications need to ensure access to state is +thread safe. In practice, both the +[default in-process ABCI client](https://github.com/tendermint/tendermint/blob/v0.34.4/abci/client/local_client.go#L18) +and the +[default Go ABCI +server](https://github.com/tendermint/tendermint/blob/v0.34.4/abci/server/socket_server.go#L32) +use global locks across all connections, so they are not +concurrent at all. This means if your app is written in Go, and compiled in-process with Tendermint +using the default `NewLocalClient`, or run out-of-process using the default `SocketServer`, +ABCI messages from all connections will be linearizable (received one at a +time). + +The existence of this global mutex means Go application developers can get +thread safety for application state by routing *all* reads and writes through the ABCI +system. Thus it may be *unsafe* to expose application state directly to an RPC +interface, and unless explicit measures are taken, all queries should be routed through the ABCI Query method. + +### BeginBlock + +The BeginBlock request can be used to run some code at the beginning of +every block. 
It also allows Tendermint to send the current block hash +and header to the application, before it sends any of the transactions. + +The app should remember the latest height and header (ie. from which it +has run a successful Commit) so that it can tell Tendermint where to +pick up from when it restarts. See information on the Handshake, below. + +### Commit + +Application state should only be persisted to disk during `Commit`. + +Before `Commit` is called, Tendermint locks and flushes the mempool so that no new messages will +be received on the mempool connection. This provides an opportunity to safely update all four connection +states to the latest committed state at once. + +When `Commit` completes, it unlocks the mempool. + +WARNING: if the ABCI app logic processing the `Commit` message sends a +`/broadcast_tx_sync` or `/broadcast_tx_commit` and waits for the response +before proceeding, it will deadlock. Executing those `broadcast_tx` calls +involves acquiring a lock that is held during the `Commit` call, so it's not +possible. If you make the call to the `broadcast_tx` endpoints concurrently, +that's no problem, it just can't be part of the sequential logic of the +`Commit` function. + +### Consensus Connection + +The Consensus Connection should maintain a `DeliverTxState` - the working state +for block execution. It should be updated by the calls to `BeginBlock`, `DeliverTx`, +and `EndBlock` during block execution and committed to disk as the "latest +committed state" during `Commit`. + +Updates made to the `DeliverTxState` by each method call must be readable by each subsequent method - +ie. the updates are linearizable. + +### Mempool Connection + +The mempool Connection should maintain a `CheckTxState` +to sequentially process pending transactions in the mempool that have +not yet been committed. It should be initialized to the latest committed state +at the end of every `Commit`. 
+ +Before calling `Commit`, Tendermint will lock and flush the mempool connection, +ensuring that all existing CheckTx are responded to and no new ones can begin. +The `CheckTxState` may be updated concurrently with the `DeliverTxState`, as +messages may be sent concurrently on the Consensus and Mempool connections. + +After `Commit`, while still holding the mempool lock, CheckTx is run again on all transactions that remain in the +node's local mempool after filtering those included in the block. +An additional `Type` parameter is made available to the CheckTx function that +indicates whether an incoming transaction is new (`CheckTxType_New`), or a +recheck (`CheckTxType_Recheck`). + +Finally, after re-checking transactions in the mempool, Tendermint will unlock +the mempool connection. New transactions are once again able to be processed through CheckTx. + +Note that CheckTx is just a weak filter to keep invalid transactions out of the block chain. +CheckTx doesn't have to check everything that affects transaction validity; the +expensive things can be skipped. It's weak because a Byzantine node doesn't +care about CheckTx; it can propose a block full of invalid transactions if it wants. + +#### Replay Protection + +To prevent old transactions from being replayed, CheckTx must implement +replay protection. + +It is possible for old transactions to be sent to the application. So +it is important CheckTx implements some logic to handle them. + +### Query Connection + +The Info Connection should maintain a `QueryState` for answering queries from the user, +and for initialization when Tendermint first starts up (both described further +below). +It should always contain the latest committed state associated with the +latest committed block. + +`QueryState` should be set to the latest `DeliverTxState` at the end of every `Commit`, +after the full block has been processed and the state committed to disk. +Otherwise it should never be modified. 
+ +Tendermint Core currently uses the Query connection to filter peers upon +connecting, according to IP address or node ID. For instance, +returning non-OK ABCI response to either of the following queries will +cause Tendermint to not connect to the corresponding peer: + +- `p2p/filter/addr/`, where `` is an IP address. +- `p2p/filter/id/`, where `` is the hex-encoded node ID (the hash of + the node's p2p pubkey). + +Note: these query formats are subject to change! + +### Snapshot Connection + +The Snapshot Connection is optional, and is only used to serve state sync snapshots for other nodes +and/or restore state sync snapshots to a local node being bootstrapped. + +For more information, see [the state sync section of this document](#state-sync). + +## Transaction Results + +The `Info` and `Log` fields are non-deterministic values for debugging/convenience purposes +that are otherwise ignored. + +The `Data` field must be strictly deterministic, but can be arbitrary data. + +### Gas + +Ethereum introduced the notion of `gas` as an abstract representation of the +cost of resources used by nodes when processing transactions. Every operation in the +Ethereum Virtual Machine uses some amount of gas, and gas can be accepted at a market-variable price. +Users propose a maximum amount of gas for their transaction; if the tx uses less, they get +the difference credited back. Tendermint adopts a similar abstraction, +though uses it only optionally and weakly, allowing applications to define +their own sense of the cost of execution. + +In Tendermint, the [ConsensusParams.Block.MaxGas](../proto/types/params.proto) limits the amount of `gas` that can be used in a block. +The default value is `-1`, meaning no limit, or that the concept of gas is +meaningless. + +Responses contain a `GasWanted` and `GasUsed` field. The former is the maximum +amount of gas the sender of a tx is willing to use, and the latter is how much it actually +used. 
Applications should enforce that `GasUsed <= GasWanted` - ie. tx execution +should halt before it can use more resources than it requested. + +When `MaxGas > -1`, Tendermint enforces the following rules: + +- `GasWanted <= MaxGas` for all txs in the mempool +- `(sum of GasWanted in a block) <= MaxGas` when proposing a block + +If `MaxGas == -1`, no rules about gas are enforced. + +Note that Tendermint does not currently enforce anything about Gas in the consensus, only the mempool. +This means it does not guarantee that committed blocks satisfy these rules! +It is the application's responsibility to return non-zero response codes when gas limits are exceeded. + +The `GasUsed` field is ignored completely by Tendermint. That said, applications should enforce: + +- `GasUsed <= GasWanted` for any given transaction +- `(sum of GasUsed in a block) <= MaxGas` for every block + +In the future, we intend to add a `Priority` field to the responses that can be +used to explicitly prioritize txs in the mempool for inclusion in a block +proposal. See [#1861](https://github.com/tendermint/tendermint/issues/1861). + +### CheckTx + +If `Code != 0`, it will be rejected from the mempool and hence +not broadcasted to other peers and not included in a proposal block. + +`Data` contains the result of the CheckTx transaction execution, if any. It is +semantically meaningless to Tendermint. + +`Events` include any events for the execution, though since the transaction has not +been committed yet, they are effectively ignored by Tendermint. + +### DeliverTx + +DeliverTx is the workhorse of the blockchain. Tendermint sends the +DeliverTx requests asynchronously but in order, and relies on the +underlying socket protocol (ie. TCP) to ensure they are received by the +app in order. They have already been ordered in the global consensus by +the Tendermint protocol. + +If DeliverTx returns `Code != 0`, the transaction will be considered invalid, +though it is still included in the block. 
+ +DeliverTx also returns a [Code, Data, and Log](../../proto/abci/types.proto#L189-L191). + +`Data` contains the result of the CheckTx transaction execution, if any. It is +semantically meaningless to Tendermint. + +Both the `Code` and `Data` are included in a structure that is hashed into the +`LastResultsHash` of the next block header. + +`Events` include any events for the execution, which Tendermint will use to index +the transaction by. This allows transactions to be queried according to what +events took place during their execution. + +## Updating the Validator Set + +The application may set the validator set during InitChain, and may update it during +EndBlock. + +Note that the maximum total power of the validator set is bounded by +`MaxTotalVotingPower = MaxInt64 / 8`. Applications are responsible for ensuring +they do not make changes to the validator set that cause it to exceed this +limit. + +Additionally, applications must ensure that a single set of updates does not contain any duplicates - +a given public key can only appear once within a given update. If an update includes +duplicates, the block execution will fail irrecoverably. + +### InitChain + +The `InitChain` method can return a list of validators. +If the list is empty, Tendermint will use the validators loaded in the genesis +file. +If the list returned by `InitChain` is not empty, Tendermint will use its contents as the validator set. +This way the application can set the initial validator set for the +blockchain. 
+ +### EndBlock + +Updates to the Tendermint validator set can be made by returning +`ValidatorUpdate` objects in the `ResponseEndBlock`: + +```protobuf +message ValidatorUpdate { + tendermint.crypto.keys.PublicKey pub_key + int64 power +} + +message PublicKey { + oneof { + ed25519 bytes = 1; + } +``` + +The `pub_key` currently supports only one type: + +- `type = "ed25519"` + +The `power` is the new voting power for the validator, with the +following rules: + +- power must be non-negative +- if power is 0, the validator must already exist, and will be removed from the + validator set +- if power is non-0: + - if the validator does not already exist, it will be added to the validator + set with the given power + - if the validator does already exist, its power will be adjusted to the given power +- the total power of the new validator set must not exceed MaxTotalVotingPower + +Note the updates returned in block `H` will only take effect at block `H+2`. + +## Consensus Parameters + +ConsensusParams enforce certain limits in the blockchain, like the maximum size +of blocks, amount of gas used in a block, and the maximum acceptable age of +evidence. They can be set in InitChain and updated in EndBlock. + +### BlockParams.MaxBytes + +The maximum size of a complete Protobuf encoded block. +This is enforced by Tendermint consensus. + +This implies a maximum transaction size that is this MaxBytes, less the expected size of +the header, the validator set, and any included evidence in the block. + +Must have `0 < MaxBytes < 100 MB`. + +### BlockParams.MaxGas + +The maximum of the sum of `GasWanted` that will be allowed in a proposed block. +This is *not* enforced by Tendermint consensus. +It is left to the app to enforce (ie. if txs are included past the +limit, they should return non-zero codes). It is used by Tendermint to limit the +txs included in a proposed block. + +Must have `MaxGas >= -1`. +If `MaxGas == -1`, no limit is enforced. 
+ +### EvidenceParams.MaxAgeDuration + +This is the maximum age of evidence in time units. +This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeNumBlocks` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeDuration > 0`. + +### EvidenceParams.MaxAgeNumBlocks + +This is the maximum age of evidence in blocks. +This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more +than `MaxAgeDuration` ago), the block will be rejected (validators won't vote +for it). + +Must have `MaxAgeNumBlocks > 0`. + +### EvidenceParams.MaxNum + +This is the maximum number of evidence that can be committed to a single block. + +The product of this and the `MaxEvidenceBytes` must not exceed the size of +a block minus it's overhead ( ~ `MaxBytes`). + +Must have `MaxNum > 0`. + +### Updates + +The application may set the ConsensusParams during InitChain, and update them during +EndBlock. If the ConsensusParams is empty, it will be ignored. Each field +that is not empty will be applied in full. For instance, if updating the +Block.MaxBytes, applications must also set the other Block fields (like +Block.MaxGas), even if they are unchanged, as they will otherwise cause the +value to be updated to 0. + +#### InitChain + +ResponseInitChain includes a ConsensusParams. +If ConsensusParams is nil, Tendermint will use the params loaded in the genesis +file. If ConsensusParams is not nil, Tendermint will use it. +This way the application can determine the initial consensus params for the +blockchain. + +#### EndBlock + +ResponseEndBlock includes a ConsensusParams. +If ConsensusParams nil, Tendermint will do nothing. +If ConsensusParam is not nil, Tendermint will use it. +This way the application can update the consensus params over time. 
+ +Note the updates returned in block `H` will take effect right away for block +`H+1`. + +## Query + +Query is a generic method with lots of flexibility to enable diverse sets +of queries on application state. Tendermint makes use of Query to filter new peers +based on ID and IP, and exposes Query to the user over RPC. + +Note that calls to Query are not replicated across nodes, but rather query the +local node's state - hence they may return stale reads. For reads that require +consensus, use a transaction. + +The most important use of Query is to return Merkle proofs of the application state at some height +that can be used for efficient application-specific light-clients. + +Note Tendermint has technically no requirements from the Query +message for normal operation - that is, the ABCI app developer need not implement +Query functionality if they do not wish too. + +### Query Proofs + +The Tendermint block header includes a number of hashes, each providing an +anchor for some type of proof about the blockchain. The `ValidatorsHash` enables +quick verification of the validator set, the `DataHash` gives quick +verification of the transactions included in the block, etc. + +The `AppHash` is unique in that it is application specific, and allows for +application-specific Merkle proofs about the state of the application. +While some applications keep all relevant state in the transactions themselves +(like Bitcoin and its UTXOs), others maintain a separated state that is +computed deterministically *from* transactions, but is not contained directly in +the transactions themselves (like Ethereum contracts and accounts). +For such applications, the `AppHash` provides a much more efficient way to verify light-client proofs. + +ABCI applications can take advantage of more efficient light-client proofs for +their state as follows: + +- return the Merkle root of the deterministic application state in +`ResponseCommit.Data`. 
This Merkle root will be included as the `AppHash` in the next block. +- return efficient Merkle proofs about that application state in `ResponseQuery.Proof` + that can be verified using the `AppHash` of the corresponding block. + +For instance, this allows an application's light-client to verify proofs of +absence in the application state, something which is much less efficient to do using the block hash. + +Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees, +where the leaves of one tree are the root hashes of others. To support this, and +the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure: + +```protobuf +message ProofOps { + repeated ProofOp ops +} + +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} +``` + +Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`. +This allows ABCI to support many different kinds of Merkle trees, encoding +formats, and proofs (eg. of presence and absence) just by varying the `type`. +The `data` contains the actual encoded proof, encoded according to the `type`. +When verifying the full proof, the root hash for one ProofOp is the value being +verified for the next ProofOp in the list. The root hash of the final ProofOp in +the list should match the `AppHash` being verified against. + +### Peer Filtering + +When Tendermint connects to a peer, it sends two queries to the ABCI application +using the following paths, with no additional data: + +- `/p2p/filter/addr/`, where `` denote the IP address and + the port of the connection +- `p2p/filter/id/`, where `` is the peer node ID (ie. the + pubkey.Address() for the peer's PubKey) + +If either of these queries return a non-zero ABCI code, Tendermint will refuse +to connect to the peer. + +### Paths + +Queries are directed at paths, and may optionally include additional data. 
+ +The expectation is for there to be some number of high level paths +differentiating concerns, like `/p2p`, `/store`, and `/app`. Currently, +Tendermint only uses `/p2p`, for filtering peers. For more advanced use, see the +implementation of +[Query in the Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/v0.23.1/baseapp/baseapp.go#L333). + +## Crash Recovery + +On startup, Tendermint calls the `Info` method on the Info Connection to get the latest +committed state of the app. The app MUST return information consistent with the +last block it succesfully completed Commit for. + +If the app succesfully committed block H, then `last_block_height = H` and `last_block_app_hash = `. If the app +failed during the Commit of block H, then `last_block_height = H-1` and +`last_block_app_hash = `. + +We now distinguish three heights, and describe how Tendermint syncs itself with +the app. + +```md +storeBlockHeight = height of the last block Tendermint saw a commit for +stateBlockHeight = height of the last block for which Tendermint completed all + block processing and saved all ABCI results to disk +appBlockHeight = height of the last block for which ABCI app succesfully + completed Commit + +``` + +Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight` +Note also Tendermint never calls Commit on an ABCI app twice for the same height. + +The procedure is as follows. + +First, some simple start conditions: + +If `appBlockHeight == 0`, then call InitChain. + +If `storeBlockHeight == 0`, we're done. + +Now, some sanity checks: + +If `storeBlockHeight < appBlockHeight`, error +If `storeBlockHeight < stateBlockHeight`, panic +If `storeBlockHeight > stateBlockHeight+1`, panic + +Now, the meat: + +If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`, +replay all blocks in full from `appBlockHeight` to `storeBlockHeight`. +This happens if we completed processing the block, but the app forgot its height. 
+ +If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done. +This happens if we crashed at an opportune spot. + +If `storeBlockHeight == stateBlockHeight+1` +This happens if we started processing the block but didn't finish. + +If `appBlockHeight < stateBlockHeight` + replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`, + and replay the block at `storeBlockHeight` using the WAL. +This happens if the app forgot the last block it committed. + +If `appBlockHeight == stateBlockHeight`, + replay the last block (storeBlockHeight) in full. +This happens if we crashed before the app finished Commit + +If `appBlockHeight == storeBlockHeight` + update the state using the saved ABCI responses but dont run the block against the real app. +This happens if we crashed after the app finished Commit but before Tendermint saved the state. + +## State Sync + +A new node joining the network can simply join consensus at the genesis height and replay all +historical blocks until it is caught up. However, for large chains this can take a significant +amount of time, often on the order of days or weeks. + +State sync is an alternative mechanism for bootstrapping a new node, where it fetches a snapshot +of the state machine at a given height and restores it. Depending on the application, this can +be several orders of magnitude faster than replaying blocks. + +Note that state sync does not currently backfill historical blocks, so the node will have a +truncated block history - users are advised to consider the broader network implications of this in +terms of block availability and auditability. This functionality may be added in the future. + +For details on the specific ABCI calls and types, see the [methods and types section](abci.md). + +### Taking Snapshots + +Applications that want to support state syncing must take state snapshots at regular intervals. How +this is accomplished is entirely up to the application. 
A snapshot consists of some metadata and +a set of binary chunks in an arbitrary format: + +- `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given + height has been committed, and must not contain data from any later heights. + +- `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot + formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use + this when restoring to choose whether to accept or reject a snapshot. + +- `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary + data, and should be less than 16 MB; 10 MB is a good starting point. + +- `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is + the same across nodes when downloading chunks. + +- `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other + necessary info. + +For a snapshot to be considered the same across nodes, all of these fields must be identical. When +sent across the network, snapshot metadata messages are limited to 4 MB. + +When a new node is running state sync and discovering snapshots, Tendermint will query an existing +application via the ABCI `ListSnapshots` method to discover available snapshots, and load binary +snapshot chunks via `LoadSnapshotChunk`. The application is free to choose how to implement this +and which formats to use, but must provide the following guarantees: + +- **Consistent:** A snapshot must be taken at a single isolated height, unaffected by + concurrent writes. This can be accomplished by using a data store that supports ACID + transactions with snapshot isolation. + +- **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, + for example by running in a separate thread. 
+ +- **Deterministic:** A snapshot taken at the same height in the same format must be identical + (at the byte level) across nodes, including all metadata. This ensures good availability of + chunks, and that they fit together across nodes. + +A very basic approach might be to use a datastore with MVCC transactions (such as RocksDB), +start a transaction immediately after block commit, and spawn a new thread which is passed the +transaction handle. This thread can then export all data items, serialize them using e.g. +Protobuf, hash the byte stream, split it into chunks, and store the chunks in the file system +along with some metadata - all while the blockchain is applying new blocks in parallel. + +A more advanced approach might include incremental verification of individual chunks against the +chain app hash, parallel or batched exports, compression, and so on. + +Old snapshots should be removed after some time - generally only the last two snapshots are needed +(to prevent the last one from being removed while a node is restoring it). + +### Bootstrapping a Node + +An empty node can be state synced by setting the configuration option `statesync.enabled = +true`. The node also needs the chain genesis file for basic chain info, and configuration for +light client verification of the restored snapshot: a set of Tendermint RPC servers, and a +trusted header hash and corresponding height from a trusted source, via the `statesync` +configuration section. + +Once started, the node will connect to the P2P network and begin discovering snapshots. These +will be offered to the local application via the `OfferSnapshot` ABCI method. Once a snapshot +is accepted Tendermint will fetch and apply the snapshot chunks. After all chunks have been +successfully applied, Tendermint verifies the app's `AppHash` against the chain using the light +client, then switches the node to normal consensus operation. 
+ +#### Snapshot Discovery + +When the empty node join the P2P network, it asks all peers to report snapshots via the +`ListSnapshots` ABCI call (limited to 10 per node). After some time, the node picks the most +suitable snapshot (generally prioritized by height, format, and number of peers), and offers it +to the application via `OfferSnapshot`. The application can choose a number of responses, +including accepting or rejecting it, rejecting the offered format, rejecting the peer who sent +it, and so on. Tendermint will keep discovering and offering snapshots until one is accepted or +the application aborts. + +#### Snapshot Restoration + +Once a snapshot has been accepted via `OfferSnapshot`, Tendermint begins downloading chunks from +any peers that have the same snapshot (i.e. that have identical metadata fields). Chunks are +spooled in a temporary directory, and then given to the application in sequential order via +`ApplySnapshotChunk` until all chunks have been accepted. + +The method for restoring snapshot chunks is entirely up to the application. + +During restoration, the application can respond to `ApplySnapshotChunk` with instructions for how +to continue. This will typically be to accept the chunk and await the next one, but it can also +ask for chunks to be refetched (either the current one or any number of previous ones), P2P peers +to be banned, snapshots to be rejected or retried, and a number of other responses - see the ABCI +reference for details. + +If Tendermint fails to fetch a chunk after some time, it will reject the snapshot and try a +different one via `OfferSnapshot` - the application can choose whether it wants to support +restarting restoration, or simply abort with an error. + +#### Snapshot Verification + +Once all chunks have been accepted, Tendermint issues an `Info` ABCI call to retrieve the +`LastBlockAppHash`. This is compared with the trusted app hash from the chain, retrieved and +verified using the light client. 
Tendermint also checks that `LastBlockHeight` corresponds to the +height of the snapshot. + +This verification ensures that an application is valid before joining the network. However, the +snapshot restoration may take a long time to complete, so applications may want to employ additional +verification during the restore to detect failures early. This might e.g. include incremental +verification of each chunk against the app hash (using bundled Merkle proofs), checksums to +protect against data corruption by the disk or network, and so on. However, it is important to +note that the only trusted information available is the app hash, and all other snapshot metadata +can be spoofed by adversaries. + +Apps may also want to consider state sync denial-of-service vectors, where adversaries provide +invalid or harmful snapshots to prevent nodes from joining the network. The application can +counteract this by asking Tendermint to ban peers. As a last resort, node operators can use +P2P configuration options to whitelist a set of trusted peers that can provide valid snapshots. + +#### Transition to Consensus + +Once the snapshots have all been restored, Tendermint gathers additional information necessary for +bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers) +from the genesis file and light client RPC servers. It also fetches and records the `AppVersion` +from the ABCI application. + +Once the state machine has been restored and Tendermint has gathered this additional +information, it transitions to block sync (if enabled) to fetch any remaining blocks up the chain +head, and then transitions to regular consensus operation. At this point the node operates like +any other node, apart from having a truncated block history at the height of the restored snapshot. 
diff --git a/spec/abci/client-server.md b/spec/abci/client-server.md new file mode 100644 index 0000000000..07621d7189 --- /dev/null +++ b/spec/abci/client-server.md @@ -0,0 +1,113 @@ +--- +order: 3 +title: Client and Server +--- + +# Client and Server + +This section is for those looking to implement their own ABCI Server, perhaps in +a new programming language. + +You are expected to have read [ABCI Methods and Types](./abci.md) and [ABCI +Applications](./apps.md). + +## Message Protocol + +The message protocol consists of pairs of requests and responses defined in the +[protobuf file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). + +Some messages have no fields, while others may include byte-arrays, strings, integers, +or custom protobuf types. + +For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview). + +For each request, a server should respond with the corresponding +response, where the order of requests is preserved in the order of +responses. + +## Server Implementations + +To use ABCI in your programming language of choice, there must be a ABCI +server in that language. Tendermint supports three implementations of the ABCI, written in Go: + +- In-process ([Golang](https://github.com/tendermint/tendermint/tree/v0.34.x/abci), [Rust](https://github.com/tendermint/rust-abci)) +- ABCI-socket +- GRPC + +The latter two can be tested using the `abci-cli` by setting the `--abci` flag +appropriately (ie. to `socket` or `grpc`). + +See examples, in various stages of maintenance, in +[Go](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/server), +[JavaScript](https://github.com/tendermint/js-abci), +[C++](https://github.com/mdyring/cpp-tmsp), and +[Java](https://github.com/jTendermint/jabci). + +### In Process + +The simplest implementation uses function calls within Golang. 
+This means ABCI applications written in Golang can be compiled with Tendermint Core and run as a single binary. + +### GRPC + +If GRPC is available in your language, this is the easiest approach, +though it will have significant performance overhead. + +To get started with GRPC, copy in the [protobuf +file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto) +and compile it using the GRPC plugin for your language. For instance, +for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. +See the [grpc documentation for more details](http://www.grpc.io/docs/). +`protoc` will autogenerate all the necessary code for ABCI client and +server in your language, including whatever interface your application +must satisfy to be used by the ABCI server for handling requests. + +Note the length-prefixing used in the socket implementation (TSP) does not apply for GRPC. + +### TSP + +Tendermint Socket Protocol is an asynchronous, raw socket server which provides ordered message passing over unix or tcp. +Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers) + +If GRPC is not available in your language, or you require higher +performance, or otherwise enjoy programming, you may implement your own +ABCI server using the Tendermint Socket Protocol. The first step is still to auto-generate the relevant data +types and codec in your language using `protoc`. In addition to being proto3 encoded, messages coming over +the socket are length-prefixed to facilitate use as a streaming protocol. proto3 doesn't have an +official length-prefix standard, so we use our own. The first byte in +the prefix represents the length of the Big Endian encoded length. The +remaining bytes in the prefix are the Big Endian encoded length. 
+ +For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4 +bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3 +encoded ABCI message is 65535 bytes long, the length-prefixed message +would be like 0x02FFFF.... + +The benefit of using this `varint` encoding over the old version (where integers were encoded as `` is that +it is the standard way to encode integers in Protobuf. It is also generally shorter. + +As noted above, this prefixing does not apply for GRPC. + +An ABCI server must also be able to support multiple connections, as +Tendermint uses four connections. + +### Async vs Sync + +The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages. +This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward +transactions to the app before it's finished processing previous ones. + +Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other +messages are sent synchronously. + +## Client + +There are currently two use-cases for an ABCI client. One is a testing +tool, as in the `abci-cli`, which allows ABCI requests to be sent via +command line. The other is a consensus engine, such as Tendermint Core, +which makes requests to the application every time a new transaction is +received or a block is committed. + +It is unlikely that you will need to implement a client. For details of +our client, see +[here](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/client). 
diff --git a/spec/blockchain/blockchain.md b/spec/blockchain/blockchain.md new file mode 100644 index 0000000000..fcc080ee72 --- /dev/null +++ b/spec/blockchain/blockchain.md @@ -0,0 +1,3 @@ +# Blockchain + +Deprecated see [core/data_structures.md](../core/data_structures.md) diff --git a/spec/blockchain/encoding.md b/spec/blockchain/encoding.md new file mode 100644 index 0000000000..aa2c9ab3f3 --- /dev/null +++ b/spec/blockchain/encoding.md @@ -0,0 +1,3 @@ +# Encoding + +Deprecated see [core/encoding.md](../core/encoding.md) diff --git a/spec/blockchain/readme.md b/spec/blockchain/readme.md new file mode 100644 index 0000000000..10ad466907 --- /dev/null +++ b/spec/blockchain/readme.md @@ -0,0 +1,14 @@ +--- +order: 1 +parent: + title: Blockchain + order: false +--- + +# Blockchain + +This section describes the core types and functionality of the Tendermint protocol implementation. + +[Core Data Structures](../core/data_structures.md) +[Encoding](../core/encoding.md) +[State](../core/state.md) diff --git a/spec/blockchain/state.md b/spec/blockchain/state.md new file mode 100644 index 0000000000..f4f1d95251 --- /dev/null +++ b/spec/blockchain/state.md @@ -0,0 +1,3 @@ +# State + +Deprecated see [core/state.md](../core/state.md) diff --git a/spec/consensus/bft-time.md b/spec/consensus/bft-time.md new file mode 100644 index 0000000000..cec3b91ab9 --- /dev/null +++ b/spec/consensus/bft-time.md @@ -0,0 +1,55 @@ +--- +order: 2 +--- +# BFT Time + +Tendermint provides a deterministic, Byzantine fault-tolerant, source of time. +Time in Tendermint is defined with the Time field of the block header. + +It satisfies the following properties: + +- Time Monotonicity: Time is monotonically increasing, i.e., given +a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`. 
+- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, a range of +valid values for the Time field of the block header is defined only by +Precommit messages (from the LastCommit field) sent by correct processes, i.e., +a faulty process cannot arbitrarily increase the Time value. + +In the context of Tendermint, time is of type int64 and denotes UNIX time in milliseconds, i.e., +corresponds to the number of milliseconds since January 1, 1970. Before defining rules that need to be enforced by the +Tendermint consensus protocol, so that the properties above hold, we introduce the following definition: + +- median of a Commit is equal to the median of `Vote.Time` fields of the `Vote` messages, +where the value of `Vote.Time` is counted a number of times proportional to the process's voting power. As in Tendermint +the voting power is not uniform (one process one vote), a vote message is actually an aggregator of the same votes whose +number is equal to the voting power of the process that has cast the corresponding vote message. + +Let's consider the following example: + +- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10) +and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and `f` is the maximum voting +power of the faulty processes), so we assume that the faulty processes have at most 23 of voting power. +Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except Time field): + - (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the + `block.LastCommit` message contains votes of processes p2, p3 and p4. Median is then chosen the following way: + the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is counted also 10 times. + So the median value will be the value 98. 
No matter what set of messages with at least `2f+1` voting power we + choose, the median value will always be between the values sent by correct processes. + +We ensure Time Monotonicity and Time Validity properties by the following rules: + +- let rs denote `RoundState` (consensus internal state) of some process. Then +`rs.ProposalBlock.Header.Time == median(rs.LastCommit) && +rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`. + +- Furthermore, when creating the `vote` message, the following rules for determining `vote.Time` field should hold: + + - if `rs.LockedBlock` is defined then + `vote.Time = max(rs.LockedBlock.Timestamp + time.Millisecond, time.Now())`, where `time.Now()` + denotes local Unix time in milliseconds + + - else if `rs.Proposal` is defined then + `vote.Time = max(rs.Proposal.Timestamp + time.Millisecond, time.Now())`, + + - otherwise, `vote.Time = time.Now()`. In this case vote is for `nil` so it is not taken into account for + the timestamp of the next block. diff --git a/spec/consensus/consensus-paper/IEEEtran.bst b/spec/consensus/consensus-paper/IEEEtran.bst new file mode 100644 index 0000000000..53fbc030aa --- /dev/null +++ b/spec/consensus/consensus-paper/IEEEtran.bst @@ -0,0 +1,2417 @@ +%% +%% IEEEtran.bst +%% BibTeX Bibliography Style file for IEEE Journals and Conferences (unsorted) +%% Version 1.12 (2007/01/11) +%% +%% Copyright (c) 2003-2007 Michael Shell +%% +%% Original starting code base and algorithms obtained from the output of +%% Patrick W. Daly's makebst package as well as from prior versions of +%% IEEE BibTeX styles: +%% +%% 1. Howard Trickey and Oren Patashnik's ieeetr.bst (1985/1988) +%% 2. Silvano Balemi and Richard H. Roy's IEEEbib.bst (1993) +%% +%% Support sites: +%% http://www.michaelshell.org/tex/ieeetran/ +%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/ +%% and/or +%% http://www.ieee.org/ +%% +%% For use with BibTeX version 0.99a or later +%% +%% This is a numerical citation style. 
+%% +%%************************************************************************* +%% Legal Notice: +%% This code is offered as-is without any warranty either expressed or +%% implied; without even the implied warranty of MERCHANTABILITY or +%% FITNESS FOR A PARTICULAR PURPOSE! +%% User assumes all risk. +%% In no event shall IEEE or any contributor to this code be liable for +%% any damages or losses, including, but not limited to, incidental, +%% consequential, or any other damages, resulting from the use or misuse +%% of any information contained here. +%% +%% All comments are the opinions of their respective authors and are not +%% necessarily endorsed by the IEEE. +%% +%% This work is distributed under the LaTeX Project Public License (LPPL) +%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used, +%% distributed and modified. A copy of the LPPL, version 1.3, is included +%% in the base LaTeX documentation of all distributions of LaTeX released +%% 2003/12/01 or later. +%% Retain all contribution notices and credits. +%% ** Modified files should be clearly indicated as such, including ** +%% ** renaming them and changing author support contact information. ** +%% +%% File list of work: IEEEabrv.bib, IEEEfull.bib, IEEEexample.bib, +%% IEEEtran.bst, IEEEtranS.bst, IEEEtranSA.bst, +%% IEEEtranN.bst, IEEEtranSN.bst, IEEEtran_bst_HOWTO.pdf +%%************************************************************************* +% +% +% Changelog: +% +% 1.00 (2002/08/13) Initial release +% +% 1.10 (2002/09/27) +% 1. Corrected minor bug for improperly formed warning message when a +% book was not given a title. Thanks to Ming Kin Lai for reporting this. +% 2. Added support for CTLname_format_string and CTLname_latex_cmd fields +% in the BST control entry type. +% +% 1.11 (2003/04/02) +% 1. Fixed bug with URLs containing underscores when using url.sty. Thanks +% to Ming Kin Lai for reporting this. +% +% 1.12 (2007/01/11) +% 1. 
Fixed bug with unwanted comma before "et al." when an entry contained +% more than two author names. Thanks to Pallav Gupta for reporting this. +% 2. Fixed bug with anomalous closing quote in tech reports that have a +% type, but without a number or address. Thanks to Mehrdad Mirreza for +% reporting this. +% 3. Use braces in \providecommand in begin.bib to better support +% latex2html. TeX style length assignments OK with recent versions +% of latex2html - 1.71 (2002/2/1) or later is strongly recommended. +% Use of the language field still causes trouble with latex2html. +% Thanks to Federico Beffa for reporting this. +% 4. Added IEEEtran.bst ID and version comment string to .bbl output. +% 5. Provide a \BIBdecl hook that allows the user to execute commands +% just prior to the first entry. +% 6. Use default urlstyle (is using url.sty) of "same" rather than rm to +% better work with a wider variety of bibliography styles. +% 7. Changed month abbreviations from Sept., July and June to Sep., Jul., +% and Jun., respectively, as IEEE now does. Thanks to Moritz Borgmann +% for reporting this. +% 8. Control entry types should not be considered when calculating longest +% label width. +% 9. Added alias www for electronic/online. +% 10. Added CTLname_url_prefix control entry type. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% DEFAULTS FOR THE CONTROLS OF THE BST STYLE %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% These are the defaults for the user adjustable controls. The values used +% here can be overridden by the user via IEEEtranBSTCTL entry type. 
+ +% NOTE: The recommended LaTeX command to invoke a control entry type is: +% +%\makeatletter +%\def\bstctlcite{\@ifnextchar[{\@bstctlcite}{\@bstctlcite[@auxout]}} +%\def\@bstctlcite[#1]#2{\@bsphack +% \@for\@citeb:=#2\do{% +% \edef\@citeb{\expandafter\@firstofone\@citeb}% +% \if@filesw\immediate\write\csname #1\endcsname{\string\citation{\@citeb}}\fi}% +% \@esphack} +%\makeatother +% +% It is called at the start of the document, before the first \cite, like: +% \bstctlcite{IEEEexample:BSTcontrol} +% +% IEEEtran.cls V1.6 and later does provide this command. + + + +% #0 turns off the display of the number for articles. +% #1 enables +FUNCTION {default.is.use.number.for.article} { #1 } + + +% #0 turns off the display of the paper and type fields in @inproceedings. +% #1 enables +FUNCTION {default.is.use.paper} { #1 } + + +% #0 turns off the forced use of "et al." +% #1 enables +FUNCTION {default.is.forced.et.al} { #0 } + +% The maximum number of names that can be present beyond which an "et al." +% usage is forced. Be sure that num.names.shown.with.forced.et.al (below) +% is not greater than this value! +% Note: There are many instances of references in IEEE journals which have +% a very large number of authors as well as instances in which "et al." is +% used profusely. +FUNCTION {default.max.num.names.before.forced.et.al} { #10 } + +% The number of names that will be shown with a forced "et al.". +% Must be less than or equal to max.num.names.before.forced.et.al +FUNCTION {default.num.names.shown.with.forced.et.al} { #1 } + + +% #0 turns off the alternate interword spacing for entries with URLs. +% #1 enables +FUNCTION {default.is.use.alt.interword.spacing} { #1 } + +% If alternate interword spacing for entries with URLs is enabled, this is +% the interword spacing stretch factor that will be used. For example, the +% default "4" here means that the interword spacing in entries with URLs can +% stretch to four times normal. Does not have to be an integer. 
Note that +% the value specified here can be overridden by the user in their LaTeX +% code via a command such as: +% "\providecommand\BIBentryALTinterwordstretchfactor{1.5}" in addition to +% that via the IEEEtranBSTCTL entry type. +FUNCTION {default.ALTinterwordstretchfactor} { "4" } + + +% #0 turns off the "dashification" of repeated (i.e., identical to those +% of the previous entry) names. IEEE normally does this. +% #1 enables +FUNCTION {default.is.dash.repeated.names} { #1 } + + +% The default name format control string. +FUNCTION {default.name.format.string}{ "{f.~}{vv~}{ll}{, jj}" } + + +% The default LaTeX font command for the names. +FUNCTION {default.name.latex.cmd}{ "" } + + +% The default URL prefix. +FUNCTION {default.name.url.prefix}{ "[Online]. Available:" } + + +% Other controls that cannot be accessed via IEEEtranBSTCTL entry type. + +% #0 turns off the terminal startup banner/completed message so as to +% operate more quietly. +% #1 enables +FUNCTION {is.print.banners.to.terminal} { #1 } + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% FILE VERSION AND BANNER %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION{bst.file.version} { "1.12" } +FUNCTION{bst.file.date} { "2007/01/11" } +FUNCTION{bst.file.website} { "http://www.michaelshell.org/tex/ieeetran/bibtex/" } + +FUNCTION {banner.message} +{ is.print.banners.to.terminal + { "-- IEEEtran.bst version" " " * bst.file.version * + " (" * bst.file.date * ") " * "by Michael Shell." * + top$ + "-- " bst.file.website * + top$ + "-- See the " quote$ * "IEEEtran_bst_HOWTO.pdf" * quote$ * " manual for usage information." * + top$ + } + { skip$ } + if$ +} + +FUNCTION {completed.message} +{ is.print.banners.to.terminal + { "" + top$ + "Done." + top$ + } + { skip$ } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%% +%% STRING CONSTANTS %% +%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {bbl.and}{ "and" } +FUNCTION {bbl.etal}{ "et~al." } +FUNCTION {bbl.editors}{ "eds." } +FUNCTION {bbl.editor}{ "ed." } +FUNCTION {bbl.edition}{ "ed." 
} +FUNCTION {bbl.volume}{ "vol." } +FUNCTION {bbl.of}{ "of" } +FUNCTION {bbl.number}{ "no." } +FUNCTION {bbl.in}{ "in" } +FUNCTION {bbl.pages}{ "pp." } +FUNCTION {bbl.page}{ "p." } +FUNCTION {bbl.chapter}{ "ch." } +FUNCTION {bbl.paper}{ "paper" } +FUNCTION {bbl.part}{ "pt." } +FUNCTION {bbl.patent}{ "Patent" } +FUNCTION {bbl.patentUS}{ "U.S." } +FUNCTION {bbl.revision}{ "Rev." } +FUNCTION {bbl.series}{ "ser." } +FUNCTION {bbl.standard}{ "Std." } +FUNCTION {bbl.techrep}{ "Tech. Rep." } +FUNCTION {bbl.mthesis}{ "Master's thesis" } +FUNCTION {bbl.phdthesis}{ "Ph.D. dissertation" } +FUNCTION {bbl.st}{ "st" } +FUNCTION {bbl.nd}{ "nd" } +FUNCTION {bbl.rd}{ "rd" } +FUNCTION {bbl.th}{ "th" } + + +% This is the LaTeX spacer that is used when a larger than normal space +% is called for (such as just before the address:publisher). +FUNCTION {large.space} { "\hskip 1em plus 0.5em minus 0.4em\relax " } + +% The LaTeX code for dashes that are used to represent repeated names. +% Note: Some older IEEE journals used something like +% "\rule{0.275in}{0.5pt}\," which is fairly thick and runs right along +% the baseline. However, IEEE now uses a thinner, above baseline, +% six dash long sequence. 
+FUNCTION {repeated.name.dashes} { "------" } + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% PREDEFINED STRING MACROS %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +MACRO {jan} {"Jan."} +MACRO {feb} {"Feb."} +MACRO {mar} {"Mar."} +MACRO {apr} {"Apr."} +MACRO {may} {"May"} +MACRO {jun} {"Jun."} +MACRO {jul} {"Jul."} +MACRO {aug} {"Aug."} +MACRO {sep} {"Sep."} +MACRO {oct} {"Oct."} +MACRO {nov} {"Nov."} +MACRO {dec} {"Dec."} + + + +%%%%%%%%%%%%%%%%%% +%% ENTRY FIELDS %% +%%%%%%%%%%%%%%%%%% + +ENTRY + { address + assignee + author + booktitle + chapter + day + dayfiled + edition + editor + howpublished + institution + intype + journal + key + language + month + monthfiled + nationality + note + number + organization + pages + paper + publisher + school + series + revision + title + type + url + volume + year + yearfiled + CTLuse_article_number + CTLuse_paper + CTLuse_forced_etal + CTLmax_names_forced_etal + CTLnames_show_etal + CTLuse_alt_spacing + CTLalt_stretch_factor + CTLdash_repeated_names + CTLname_format_string + CTLname_latex_cmd + CTLname_url_prefix + } + {} + { label } + + + + +%%%%%%%%%%%%%%%%%%%%%%% +%% INTEGER VARIABLES %% +%%%%%%%%%%%%%%%%%%%%%%% + +INTEGERS { prev.status.punct this.status.punct punct.std + punct.no punct.comma punct.period + prev.status.space this.status.space space.std + space.no space.normal space.large + prev.status.quote this.status.quote quote.std + quote.no quote.close + prev.status.nline this.status.nline nline.std + nline.no nline.newblock + status.cap cap.std + cap.no cap.yes} + +INTEGERS { longest.label.width multiresult nameptr namesleft number.label numnames } + +INTEGERS { is.use.number.for.article + is.use.paper + is.forced.et.al + max.num.names.before.forced.et.al + num.names.shown.with.forced.et.al + is.use.alt.interword.spacing + is.dash.repeated.names} + + +%%%%%%%%%%%%%%%%%%%%%% +%% STRING VARIABLES %% +%%%%%%%%%%%%%%%%%%%%%% + +STRINGS { bibinfo + longest.label + oldname + s + t + ALTinterwordstretchfactor + name.format.string + 
name.latex.cmd + name.url.prefix} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%% +%% LOW LEVEL FUNCTIONS %% +%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {initialize.controls} +{ default.is.use.number.for.article 'is.use.number.for.article := + default.is.use.paper 'is.use.paper := + default.is.forced.et.al 'is.forced.et.al := + default.max.num.names.before.forced.et.al 'max.num.names.before.forced.et.al := + default.num.names.shown.with.forced.et.al 'num.names.shown.with.forced.et.al := + default.is.use.alt.interword.spacing 'is.use.alt.interword.spacing := + default.is.dash.repeated.names 'is.dash.repeated.names := + default.ALTinterwordstretchfactor 'ALTinterwordstretchfactor := + default.name.format.string 'name.format.string := + default.name.latex.cmd 'name.latex.cmd := + default.name.url.prefix 'name.url.prefix := +} + + +% This IEEEtran.bst features a very powerful and flexible mechanism for +% controlling the capitalization, punctuation, spacing, quotation, and +% newlines of the formatted entry fields. (Note: IEEEtran.bst does not need +% or use the newline/newblock feature, but it has been implemented for +% possible future use.) The output states of IEEEtran.bst consist of +% multiple independent attributes and, as such, can be thought of as being +% vectors, rather than the simple scalar values ("before.all", +% "mid.sentence", etc.) used in most other .bst files. +% +% The more flexible and complex design used here was motivated in part by +% IEEE's rather unusual bibliography style. For example, IEEE ends the +% previous field item with a period and large space prior to the publisher +% address; the @electronic entry types use periods as inter-item punctuation +% rather than the commas used by the other entry types; and URLs are never +% followed by periods even though they are the last item in the entry. 
+% Although it is possible to accommodate these features with the conventional +% output state system, the seemingly endless exceptions make for convoluted, +% unreliable and difficult to maintain code. +% +% IEEEtran.bst's output state system can be easily understood via a simple +% illustration of two most recently formatted entry fields (on the stack): +% +% CURRENT_ITEM +% "PREVIOUS_ITEM +% +% which, in this example, is to eventually appear in the bibliography as: +% +% "PREVIOUS_ITEM," CURRENT_ITEM +% +% It is the job of the output routine to take the previous item off of the +% stack (while leaving the current item at the top of the stack), apply its +% trailing punctuation (including closing quote marks) and spacing, and then +% to write the result to BibTeX's output buffer: +% +% "PREVIOUS_ITEM," +% +% Punctuation (and spacing) between items is often determined by both of the +% items rather than just the first one. The presence of quotation marks +% further complicates the situation because, in standard English, trailing +% punctuation marks are supposed to be contained within the quotes. +% +% IEEEtran.bst maintains two output state (aka "status") vectors which +% correspond to the previous and current (aka "this") items. Each vector +% consists of several independent attributes which track punctuation, +% spacing, quotation, and newlines. Capitalization status is handled by a +% separate scalar because the format routines, not the output routine, +% handle capitalization and, therefore, there is no need to maintain the +% capitalization attribute for both the "previous" and "this" items. +% +% When a format routine adds a new item, it copies the current output status +% vector to the previous output status vector and (usually) resets the +% current (this) output status vector to a "standard status" vector. 
Using a +% "standard status" vector in this way allows us to redefine what we mean by +% "standard status" at the start of each entry handler and reuse the same +% format routines under the various inter-item separation schemes. For +% example, the standard status vector for the @book entry type may use +% commas for item separators, while the @electronic type may use periods, +% yet both entry handlers exploit many of the exact same format routines. +% +% Because format routines have write access to the output status vector of +% the previous item, they can override the punctuation choices of the +% previous format routine! Therefore, it becomes trivial to implement rules +% such as "Always use a period and a large space before the publisher." By +% pushing the generation of the closing quote mark to the output routine, we +% avoid all the problems caused by having to close a quote before having all +% the information required to determine what the punctuation should be. +% +% The IEEEtran.bst output state system can easily be expanded if needed. +% For instance, it is easy to add a "space.tie" attribute value if the +% bibliography rules mandate that two items have to be joined with an +% unbreakable space. 
+ +FUNCTION {initialize.status.constants} +{ #0 'punct.no := + #1 'punct.comma := + #2 'punct.period := + #0 'space.no := + #1 'space.normal := + #2 'space.large := + #0 'quote.no := + #1 'quote.close := + #0 'cap.no := + #1 'cap.yes := + #0 'nline.no := + #1 'nline.newblock := +} + +FUNCTION {std.status.using.comma} +{ punct.comma 'punct.std := + space.normal 'space.std := + quote.no 'quote.std := + nline.no 'nline.std := + cap.no 'cap.std := +} + +FUNCTION {std.status.using.period} +{ punct.period 'punct.std := + space.normal 'space.std := + quote.no 'quote.std := + nline.no 'nline.std := + cap.yes 'cap.std := +} + +FUNCTION {initialize.prev.this.status} +{ punct.no 'prev.status.punct := + space.no 'prev.status.space := + quote.no 'prev.status.quote := + nline.no 'prev.status.nline := + punct.no 'this.status.punct := + space.no 'this.status.space := + quote.no 'this.status.quote := + nline.no 'this.status.nline := + cap.yes 'status.cap := +} + +FUNCTION {this.status.std} +{ punct.std 'this.status.punct := + space.std 'this.status.space := + quote.std 'this.status.quote := + nline.std 'this.status.nline := +} + +FUNCTION {cap.status.std}{ cap.std 'status.cap := } + +FUNCTION {this.to.prev.status} +{ this.status.punct 'prev.status.punct := + this.status.space 'prev.status.space := + this.status.quote 'prev.status.quote := + this.status.nline 'prev.status.nline := +} + + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ { skip$ } + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + { skip$ } + if$ +} + + +% convert the strings "yes" or "no" to #1 or #0 respectively +FUNCTION {yes.no.to.int} +{ "l" change.case$ duplicate$ + "yes" = + { pop$ #1 } + { duplicate$ "no" = + { pop$ #0 } + { "unknown boolean " quote$ * swap$ * quote$ * + " in " * cite$ * warning$ + #0 + } + if$ + } + if$ +} + + +% pushes true if the single char string on the stack is in the +% range of "0" to "9" +FUNCTION {is.num} +{ chr.to.int$ + duplicate$ "0" chr.to.int$ < not + swap$ 
"9" chr.to.int$ > not and +} + +% multiplies the integer on the stack by a factor of 10 +FUNCTION {bump.int.mag} +{ #0 'multiresult := + { duplicate$ #0 > } + { #1 - + multiresult #10 + + 'multiresult := + } + while$ +pop$ +multiresult +} + +% converts a single character string on the stack to an integer +FUNCTION {char.to.integer} +{ duplicate$ + is.num + { chr.to.int$ "0" chr.to.int$ - } + {"noninteger character " quote$ * swap$ * quote$ * + " in integer field of " * cite$ * warning$ + #0 + } + if$ +} + +% converts a string on the stack to an integer +FUNCTION {string.to.integer} +{ duplicate$ text.length$ 'namesleft := + #1 'nameptr := + #0 'numnames := + { nameptr namesleft > not } + { duplicate$ nameptr #1 substring$ + char.to.integer numnames bump.int.mag + + 'numnames := + nameptr #1 + + 'nameptr := + } + while$ +pop$ +numnames +} + + + + +% The output routines write out the *next* to the top (previous) item on the +% stack, adding punctuation and such as needed. Since IEEEtran.bst maintains +% the output status for the top two items on the stack, these output +% routines have to consider the previous output status (which corresponds to +% the item that is being output). Full independent control of punctuation, +% closing quote marks, spacing, and newblock is provided. +% +% "output.nonnull" does not check for the presence of a previous empty +% item. +% +% "output" does check for the presence of a previous empty item and will +% remove an empty item rather than outputing it. +% +% "output.warn" is like "output", but will issue a warning if it detects +% an empty item. 
+ +FUNCTION {output.nonnull} +{ swap$ + prev.status.punct punct.comma = + { "," * } + { skip$ } + if$ + prev.status.punct punct.period = + { add.period$ } + { skip$ } + if$ + prev.status.quote quote.close = + { "''" * } + { skip$ } + if$ + prev.status.space space.normal = + { " " * } + { skip$ } + if$ + prev.status.space space.large = + { large.space * } + { skip$ } + if$ + write$ + prev.status.nline nline.newblock = + { newline$ "\newblock " write$ } + { skip$ } + if$ +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.warn} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +% "fin.entry" is the output routine that handles the last item of the entry +% (which will be on the top of the stack when "fin.entry" is called). + +FUNCTION {fin.entry} +{ this.status.punct punct.no = + { skip$ } + { add.period$ } + if$ + this.status.quote quote.close = + { "''" * } + { skip$ } + if$ +write$ +newline$ +} + + +FUNCTION {is.last.char.not.punct} +{ duplicate$ + "}" * add.period$ + #-1 #1 substring$ "." = +} + +FUNCTION {is.multiple.pages} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {capitalize}{ "u" change.case$ "t" change.case$ } + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "\emph{" swap$ * "}" * } + if$ +} + +FUNCTION {do.name.latex.cmd} +{ name.latex.cmd + empty$ + { skip$ } + { name.latex.cmd "{" * swap$ * "}" * } + if$ +} + +% IEEEtran.bst uses its own \BIBforeignlanguage command which directly +% invokes the TeX hyphenation patterns without the need of the Babel +% package. Babel does a lot more than switch hyphenation patterns and +% its loading can cause unintended effects in many class files (such as +% IEEEtran.cls). 
+FUNCTION {select.language} +{ duplicate$ empty$ 'pop$ + { language empty$ 'skip$ + { "\BIBforeignlanguage{" language * "}{" * swap$ * "}" * } + if$ + } + if$ +} + +FUNCTION {tie.or.space.prefix} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ +} + +FUNCTION {get.bbl.editor} +{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ } + +FUNCTION {space.word}{ " " swap$ * " " * } + + +% Field Conditioners, Converters, Checkers and External Interfaces + +FUNCTION {empty.field.to.null.string} +{ duplicate$ empty$ + { pop$ "" } + { skip$ } + if$ +} + +FUNCTION {either.or.check} +{ empty$ + { pop$ } + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {empty.entry.warn} +{ author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ url empty$ + and and and and and and + { "all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + + +% The bibinfo system provides a way for the electronic parsing/acquisition +% of a bibliography's contents as is done by ReVTeX. For example, a field +% could be entered into the bibliography as: +% \bibinfo{volume}{2} +% Only the "2" would show up in the document, but the LaTeX \bibinfo command +% could do additional things with the information. IEEEtran.bst does provide +% a \bibinfo command via "\providecommand{\bibinfo}[2]{#2}". However, it is +% currently not used as the bogus bibinfo functions defined here output the +% entry values directly without the \bibinfo wrapper. The bibinfo functions +% themselves (and the calls to them) are retained for possible future use. +% +% bibinfo.check avoids acting on missing fields while bibinfo.warn will +% issue a warning message if a missing field is detected. Prior to calling +% the bibinfo functions, the user should push the field value and then its +% name string, in that order. 
+ +FUNCTION {bibinfo.check} +{ swap$ duplicate$ missing$ + { pop$ pop$ "" } + { duplicate$ empty$ + { swap$ pop$ } + { swap$ pop$ } + if$ + } + if$ +} + +FUNCTION {bibinfo.warn} +{ swap$ duplicate$ missing$ + { swap$ "missing " swap$ * " in " * cite$ * warning$ pop$ "" } + { duplicate$ empty$ + { swap$ "empty " swap$ * " in " * cite$ * warning$ } + { swap$ pop$ } + if$ + } + if$ +} + + +% IEEE separates large numbers with more than 4 digits into groups of +% three. IEEE uses a small space to separate these number groups. +% Typical applications include patent and page numbers. + +% number of consecutive digits required to trigger the group separation. +FUNCTION {large.number.trigger}{ #5 } + +% For numbers longer than the trigger, this is the blocksize of the groups. +% The blocksize must be less than the trigger threshold, and 2 * blocksize +% must be greater than the trigger threshold (can't do more than one +% separation on the initial trigger). +FUNCTION {large.number.blocksize}{ #3 } + +% What is actually inserted between the number groups. +FUNCTION {large.number.separator}{ "\," } + +% So as to save on integer variables by reusing existing ones, numnames +% holds the current number of consecutive digits read and nameptr holds +% the number that will trigger an inserted space. +FUNCTION {large.number.separate} +{ 't := + "" + #0 'numnames := + large.number.trigger 'nameptr := + { t empty$ not } + { t #-1 #1 substring$ is.num + { numnames #1 + 'numnames := } + { #0 'numnames := + large.number.trigger 'nameptr := + } + if$ + t #-1 #1 substring$ swap$ * + t #-2 global.max$ substring$ 't := + numnames nameptr = + { duplicate$ #1 nameptr large.number.blocksize - substring$ swap$ + nameptr large.number.blocksize - #1 + global.max$ substring$ + large.number.separator swap$ * * + nameptr large.number.blocksize - 'numnames := + large.number.blocksize #1 + 'nameptr := + } + { skip$ } + if$ + } + while$ +} + +% Converts all single dashes "-" to double dashes "--". 
+FUNCTION {n.dashify} +{ large.number.separate + 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + + +% This function detects entries with names that are identical to that of +% the previous entry and replaces the repeated names with dashes (if the +% "is.dash.repeated.names" user control is nonzero). +FUNCTION {name.or.dash} +{ 's := + oldname empty$ + { s 'oldname := s } + { s oldname = + { is.dash.repeated.names + { repeated.name.dashes } + { s 'oldname := s } + if$ + } + { s 'oldname := s } + if$ + } + if$ +} + +% Converts the number string on the top of the stack to +% "numerical ordinal form" (e.g., "7" to "7th"). There is +% no artificial limit to the upper bound of the numbers as the +% least significant digit always determines the ordinal form. +FUNCTION {num.to.ordinal} +{ duplicate$ #-1 #1 substring$ "1" = + { bbl.st * } + { duplicate$ #-1 #1 substring$ "2" = + { bbl.nd * } + { duplicate$ #-1 #1 substring$ "3" = + { bbl.rd * } + { bbl.th * } + if$ + } + if$ + } + if$ +} + +% If the string on the top of the stack begins with a number, +% (e.g., 11th) then replace the string with the leading number +% it contains. Otherwise retain the string as-is. s holds the +% extracted number, t holds the part of the string that remains +% to be scanned. +FUNCTION {extract.num} +{ duplicate$ 't := + "" 's := + { t empty$ not } + { t #1 #1 substring$ + t #2 global.max$ substring$ 't := + duplicate$ is.num + { s swap$ * 's := } + { pop$ "" 't := } + if$ + } + while$ + s empty$ + 'skip$ + { pop$ s } + if$ +} + +% Converts the word number string on the top of the stack to +% Arabic string form. Will be successful up to "tenth". 
+FUNCTION {word.to.num} +{ duplicate$ "l" change.case$ 's := + s "first" = + { pop$ "1" } + { skip$ } + if$ + s "second" = + { pop$ "2" } + { skip$ } + if$ + s "third" = + { pop$ "3" } + { skip$ } + if$ + s "fourth" = + { pop$ "4" } + { skip$ } + if$ + s "fifth" = + { pop$ "5" } + { skip$ } + if$ + s "sixth" = + { pop$ "6" } + { skip$ } + if$ + s "seventh" = + { pop$ "7" } + { skip$ } + if$ + s "eighth" = + { pop$ "8" } + { skip$ } + if$ + s "ninth" = + { pop$ "9" } + { skip$ } + if$ + s "tenth" = + { pop$ "10" } + { skip$ } + if$ +} + + +% Converts the string on the top of the stack to numerical +% ordinal (e.g., "11th") form. +FUNCTION {convert.edition} +{ duplicate$ empty$ 'skip$ + { duplicate$ #1 #1 substring$ is.num + { extract.num + num.to.ordinal + } + { word.to.num + duplicate$ #1 #1 substring$ is.num + { num.to.ordinal } + { "edition ordinal word " quote$ * edition * quote$ * + " may be too high (or improper) for conversion" * " in " * cite$ * warning$ + } + if$ + } + if$ + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% LATEX BIBLIOGRAPHY CODE %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {start.entry} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + initialize.prev.this.status +} + +% Here we write out all the LaTeX code that we will need. The most involved +% code sequences are those that control the alternate interword spacing and +% foreign language hyphenation patterns. The heavy use of \providecommand +% gives users a way to override the defaults. Special thanks to Javier Bezos, +% Johannes Braams, Robin Fairbairns, Heiko Oberdiek, Donald Arseneau and all +% the other gurus on comp.text.tex for their help and advice on the topic of +% \selectlanguage, Babel and BibTeX. 
+FUNCTION {begin.bib} +{ "% Generated by IEEEtran.bst, version: " bst.file.version * " (" * bst.file.date * ")" * + write$ newline$ + preamble$ empty$ 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label * "}" * + write$ newline$ + "\providecommand{\url}[1]{#1}" + write$ newline$ + "\csname url@samestyle\endcsname" + write$ newline$ + "\providecommand{\newblock}{\relax}" + write$ newline$ + "\providecommand{\bibinfo}[2]{#2}" + write$ newline$ + "\providecommand{\BIBentrySTDinterwordspacing}{\spaceskip=0pt\relax}" + write$ newline$ + "\providecommand{\BIBentryALTinterwordstretchfactor}{" + ALTinterwordstretchfactor * "}" * + write$ newline$ + "\providecommand{\BIBentryALTinterwordspacing}{\spaceskip=\fontdimen2\font plus " + write$ newline$ + "\BIBentryALTinterwordstretchfactor\fontdimen3\font minus \fontdimen4\font\relax}" + write$ newline$ + "\providecommand{\BIBforeignlanguage}[2]{{%" + write$ newline$ + "\expandafter\ifx\csname l@#1\endcsname\relax" + write$ newline$ + "\typeout{** WARNING: IEEEtran.bst: No hyphenation pattern has been}%" + write$ newline$ + "\typeout{** loaded for the language `#1'. 
Using the pattern for}%" + write$ newline$ + "\typeout{** the default language instead.}%" + write$ newline$ + "\else" + write$ newline$ + "\language=\csname l@#1\endcsname" + write$ newline$ + "\fi" + write$ newline$ + "#2}}" + write$ newline$ + "\providecommand{\BIBdecl}{\relax}" + write$ newline$ + "\BIBdecl" + write$ newline$ +} + +FUNCTION {end.bib} +{ newline$ "\end{thebibliography}" write$ newline$ } + +FUNCTION {if.url.alt.interword.spacing} +{ is.use.alt.interword.spacing + {url empty$ 'skip$ {"\BIBentryALTinterwordspacing" write$ newline$} if$} + { skip$ } + if$ +} + +FUNCTION {if.url.std.interword.spacing} +{ is.use.alt.interword.spacing + {url empty$ 'skip$ {"\BIBentrySTDinterwordspacing" write$ newline$} if$} + { skip$ } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%%%% +%% LONGEST LABEL PASS %% +%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ type$ "ieeetranbstctl" = + { skip$ } + { number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + { skip$ } + if$ + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%% +%% FORMAT HANDLERS %% +%%%%%%%%%%%%%%%%%%%%% + +%% Lower Level Formats (used by higher level formats) + +FUNCTION {format.address.org.or.pub.date} +{ 't := + "" + year empty$ + { "empty year in " cite$ * warning$ } + { skip$ } + if$ + address empty$ t empty$ and + year empty$ and month empty$ and + { skip$ } + { this.to.prev.status + this.status.std + cap.status.std + address "address" bibinfo.check * + t empty$ + { skip$ } + { punct.period 'prev.status.punct := + space.large 'prev.status.space := + address empty$ + { skip$ } + { ": " * } + if$ + t * + } + if$ + year empty$ month empty$ and + { skip$ } + { t empty$ address empty$ and + { skip$ } + { ", " * } + if$ + month empty$ + { year empty$ + { skip$ } + { 
year "year" bibinfo.check * } + if$ + } + { month "month" bibinfo.check * + year empty$ + { skip$ } + { " " * year "year" bibinfo.check * } + if$ + } + if$ + } + if$ + } + if$ +} + + +FUNCTION {format.names} +{ 'bibinfo := + duplicate$ empty$ 'skip$ { + this.to.prev.status + this.status.std + 's := + "" 't := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr + name.format.string + format.name$ + bibinfo bibinfo.check + 't := + nameptr #1 > + { nameptr num.names.shown.with.forced.et.al #1 + = + numnames max.num.names.before.forced.et.al > + is.forced.et.al and and + { "others" 't := + #1 'namesleft := + } + { skip$ } + if$ + namesleft #1 > + { ", " * t do.name.latex.cmd * } + { s nameptr "{ll}" format.name$ duplicate$ "others" = + { 't := } + { pop$ } + if$ + t "others" = + { " " * bbl.etal emphasize * } + { numnames #2 > + { "," * } + { skip$ } + if$ + bbl.and + space.word * t do.name.latex.cmd * + } + if$ + } + if$ + } + { t do.name.latex.cmd } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ + cap.status.std + } if$ +} + + + + +%% Higher Level Formats + +%% addresses/locations + +FUNCTION {format.address} +{ address duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% author/editor names + +FUNCTION {format.authors}{ author "author" format.names } + +FUNCTION {format.editors} +{ editor "editor" format.names duplicate$ empty$ 'skip$ + { ", " * + get.bbl.editor + capitalize + * + } + if$ +} + + + +%% date + +FUNCTION {format.date} +{ + month "month" bibinfo.check duplicate$ empty$ + year "year" bibinfo.check duplicate$ empty$ + { swap$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + "there's a month but no year in " cite$ * warning$ } + if$ + * + } + { this.to.prev.status + this.status.std + cap.status.std + swap$ 'skip$ + { + swap$ + " " * swap$ + } + if$ + * + } + if$ +} + +FUNCTION {format.date.electronic} 
+{ month "month" bibinfo.check duplicate$ empty$ + year "year" bibinfo.check duplicate$ empty$ + { swap$ + { pop$ } + { "there's a month but no year in " cite$ * warning$ + pop$ ")" * "(" swap$ * + this.to.prev.status + punct.no 'this.status.punct := + space.normal 'this.status.space := + quote.no 'this.status.quote := + cap.yes 'status.cap := + } + if$ + } + { swap$ + { swap$ pop$ ")" * "(" swap$ * } + { "(" swap$ * ", " * swap$ * ")" * } + if$ + this.to.prev.status + punct.no 'this.status.punct := + space.normal 'this.status.space := + quote.no 'this.status.quote := + cap.yes 'status.cap := + } + if$ +} + + + +%% edition/title + +% Note: IEEE considers the edition to be closely associated with +% the title of a book. So, in IEEEtran.bst the edition is normally handled +% within the formatting of the title. The format.edition function is +% retained here for possible future use. +FUNCTION {format.edition} +{ edition duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + convert.edition + status.cap + { "t" } + { "l" } + if$ change.case$ + "edition" bibinfo.check + "~" * bbl.edition * + cap.status.std + } + if$ +} + +% This is used to format the booktitle of a conference proceedings. +% Here we use the "intype" field to provide the user a way to +% override the word "in" (e.g., with things like "presented at") +% Use of intype stops the emphasis of the booktitle to indicate that +% we no longer mean the written conference proceedings, but the +% conference itself. +FUNCTION {format.in.booktitle} +{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + select.language + intype missing$ + { emphasize + bbl.in " " * + } + { intype " " * } + if$ + swap$ * + cap.status.std + } + if$ +} + +% This is used to format the booktitle of collection. +% Here the "intype" field is not supported, but "edition" is. 
+FUNCTION {format.in.booktitle.edition} +{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + select.language + emphasize + edition empty$ 'skip$ + { ", " * + edition + convert.edition + "l" change.case$ + * "~" * bbl.edition * + } + if$ + bbl.in " " * swap$ * + cap.status.std + } + if$ +} + +FUNCTION {format.article.title} +{ title duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + "t" change.case$ + } + if$ + "title" bibinfo.check + duplicate$ empty$ 'skip$ + { quote.close 'this.status.quote := + is.last.char.not.punct + { punct.std 'this.status.punct := } + { punct.no 'this.status.punct := } + if$ + select.language + "``" swap$ * + cap.status.std + } + if$ +} + +FUNCTION {format.article.title.electronic} +{ title duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + "t" change.case$ + } + if$ + "title" bibinfo.check + duplicate$ empty$ + { skip$ } + { select.language } + if$ +} + +FUNCTION {format.book.title.edition} +{ title "title" bibinfo.check + duplicate$ empty$ + { "empty title in " cite$ * warning$ } + { this.to.prev.status + this.status.std + select.language + emphasize + edition empty$ 'skip$ + { ", " * + edition + convert.edition + status.cap + { "t" } + { "l" } + if$ + change.case$ + * "~" * bbl.edition * + } + if$ + cap.status.std + } + if$ +} + +FUNCTION {format.book.title} +{ title "title" bibinfo.check + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + select.language + emphasize + } + if$ +} + + + +%% journal + +FUNCTION {format.journal} +{ journal duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + select.language + emphasize + } + if$ +} + + + +%% how published + +FUNCTION {format.howpublished} +{ howpublished duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% institutions/organization/publishers/school + +FUNCTION 
{format.institution} +{ institution duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + +FUNCTION {format.organization} +{ organization duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + +FUNCTION {format.address.publisher.date} +{ publisher "publisher" bibinfo.warn format.address.org.or.pub.date } + +FUNCTION {format.address.publisher.date.nowarn} +{ publisher "publisher" bibinfo.check format.address.org.or.pub.date } + +FUNCTION {format.address.organization.date} +{ organization "organization" bibinfo.check format.address.org.or.pub.date } + +FUNCTION {format.school} +{ school duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% volume/number/series/chapter/pages + +FUNCTION {format.volume} +{ volume empty.field.to.null.string + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + bbl.volume + status.cap + { capitalize } + { skip$ } + if$ + swap$ tie.or.space.prefix + "volume" bibinfo.check + * * + cap.status.std + } + if$ +} + +FUNCTION {format.number} +{ number empty.field.to.null.string + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + status.cap + { bbl.number capitalize } + { bbl.number } + if$ + swap$ tie.or.space.prefix + "number" bibinfo.check + * * + cap.status.std + } + if$ +} + +FUNCTION {format.number.if.use.for.article} +{ is.use.number.for.article + { format.number } + { "" } + if$ +} + +% IEEE does not seem to tie the series so closely with the volume +% and number as is done in other bibliography styles. Instead the +% series is treated somewhat like an extension of the title. 
+FUNCTION {format.series} +{ series empty$ + { "" } + { this.to.prev.status + this.status.std + bbl.series " " * + series "series" bibinfo.check * + cap.status.std + } + if$ +} + + +FUNCTION {format.chapter} +{ chapter empty$ + { "" } + { this.to.prev.status + this.status.std + type empty$ + { bbl.chapter } + { type "l" change.case$ + "type" bibinfo.check + } + if$ + chapter tie.or.space.prefix + "chapter" bibinfo.check + * * + cap.status.std + } + if$ +} + + +% The intended use of format.paper is for paper numbers of inproceedings. +% The paper type can be overridden via the type field. +% We allow the type to be displayed even if the paper number is absent +% for things like "postdeadline paper" +FUNCTION {format.paper} +{ is.use.paper + { paper empty$ + { type empty$ + { "" } + { this.to.prev.status + this.status.std + type "type" bibinfo.check + cap.status.std + } + if$ + } + { this.to.prev.status + this.status.std + type empty$ + { bbl.paper } + { type "type" bibinfo.check } + if$ + " " * paper + "paper" bibinfo.check + * + cap.status.std + } + if$ + } + { "" } + if$ +} + + +FUNCTION {format.pages} +{ pages duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + duplicate$ is.multiple.pages + { + bbl.pages swap$ + n.dashify + } + { + bbl.page swap$ + } + if$ + tie.or.space.prefix + "pages" bibinfo.check + * * + cap.status.std + } + if$ +} + + + +%% technical report number + +FUNCTION {format.tech.report.number} +{ number "number" bibinfo.check + this.to.prev.status + this.status.std + cap.status.std + type duplicate$ empty$ + { pop$ + bbl.techrep + } + { skip$ } + if$ + "type" bibinfo.check + swap$ duplicate$ empty$ + { pop$ } + { tie.or.space.prefix * * } + if$ +} + + + +%% note + +FUNCTION {format.note} +{ note empty$ + { "" } + { this.to.prev.status + this.status.std + punct.period 'this.status.punct := + note #1 #1 substring$ + duplicate$ "{" = + { skip$ } + { status.cap + { "u" } + { "l" } + if$ + change.case$ + } + if$ + note #2 global.max$ 
substring$ * "note" bibinfo.check + cap.yes 'status.cap := + } + if$ +} + + + +%% patent + +FUNCTION {format.patent.date} +{ this.to.prev.status + this.status.std + year empty$ + { monthfiled duplicate$ empty$ + { "monthfiled" bibinfo.check pop$ "" } + { "monthfiled" bibinfo.check } + if$ + dayfiled duplicate$ empty$ + { "dayfiled" bibinfo.check pop$ "" * } + { "dayfiled" bibinfo.check + monthfiled empty$ + { "dayfiled without a monthfiled in " cite$ * warning$ + * + } + { " " swap$ * * } + if$ + } + if$ + yearfiled empty$ + { "no year or yearfiled in " cite$ * warning$ } + { yearfiled "yearfiled" bibinfo.check + swap$ + duplicate$ empty$ + { pop$ } + { ", " * swap$ * } + if$ + } + if$ + } + { month duplicate$ empty$ + { "month" bibinfo.check pop$ "" } + { "month" bibinfo.check } + if$ + day duplicate$ empty$ + { "day" bibinfo.check pop$ "" * } + { "day" bibinfo.check + month empty$ + { "day without a month in " cite$ * warning$ + * + } + { " " swap$ * * } + if$ + } + if$ + year "year" bibinfo.check + swap$ + duplicate$ empty$ + { pop$ } + { ", " * swap$ * } + if$ + } + if$ + cap.status.std +} + +FUNCTION {format.patent.nationality.type.number} +{ this.to.prev.status + this.status.std + nationality duplicate$ empty$ + { "nationality" bibinfo.warn pop$ "" } + { "nationality" bibinfo.check + duplicate$ "l" change.case$ "united states" = + { pop$ bbl.patentUS } + { skip$ } + if$ + " " * + } + if$ + type empty$ + { bbl.patent "type" bibinfo.check } + { type "type" bibinfo.check } + if$ + * + number duplicate$ empty$ + { "number" bibinfo.warn pop$ } + { "number" bibinfo.check + large.number.separate + swap$ " " * swap$ * + } + if$ + cap.status.std +} + + + +%% standard + +FUNCTION {format.organization.institution.standard.type.number} +{ this.to.prev.status + this.status.std + organization duplicate$ empty$ + { pop$ + institution duplicate$ empty$ + { "institution" bibinfo.warn } + { "institution" bibinfo.warn " " * } + if$ + } + { "organization" bibinfo.warn " " * } + 
if$ + type empty$ + { bbl.standard "type" bibinfo.check } + { type "type" bibinfo.check } + if$ + * + number duplicate$ empty$ + { "number" bibinfo.check pop$ } + { "number" bibinfo.check + large.number.separate + swap$ " " * swap$ * + } + if$ + cap.status.std +} + +FUNCTION {format.revision} +{ revision empty$ + { "" } + { this.to.prev.status + this.status.std + bbl.revision + revision tie.or.space.prefix + "revision" bibinfo.check + * * + cap.status.std + } + if$ +} + + +%% thesis + +FUNCTION {format.master.thesis.type} +{ this.to.prev.status + this.status.std + type empty$ + { + bbl.mthesis + } + { + type "type" bibinfo.check + } + if$ +cap.status.std +} + +FUNCTION {format.phd.thesis.type} +{ this.to.prev.status + this.status.std + type empty$ + { + bbl.phdthesis + } + { + type "type" bibinfo.check + } + if$ +cap.status.std +} + + + +%% URL + +FUNCTION {format.url} +{ url empty$ + { "" } + { this.to.prev.status + this.status.std + cap.yes 'status.cap := + name.url.prefix " " * + "\url{" * url * "}" * + punct.no 'this.status.punct := + punct.period 'prev.status.punct := + space.normal 'this.status.space := + space.normal 'prev.status.space := + quote.no 'this.status.quote := + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%% +%% ENTRY HANDLERS %% +%%%%%%%%%%%%%%%%%%%% + + +% Note: In many journals, IEEE (or the authors) tend not to show the number +% for articles, so the display of the number is controlled here by the +% switch "is.use.number.for.article" +FUNCTION {article} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.journal "journal" bibinfo.check "journal" output.warn + format.volume output + format.number.if.use.for.article output + format.pages output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {book} +{ std.status.using.comma + start.entry + 
if.url.alt.interword.spacing + author empty$ + { format.editors "author and editor" output.warn } + { format.authors output.nonnull } + if$ + name.or.dash + format.book.title.edition output + format.series output + author empty$ + { skip$ } + { format.editors output } + if$ + format.address.publisher.date output + format.volume output + format.number output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {booklet} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.article.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {electronic} +{ std.status.using.period + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.date.electronic output + format.article.title.electronic output + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {inbook} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + author empty$ + { format.editors "author and editor" output.warn } + { format.authors output.nonnull } + if$ + name.or.dash + format.book.title.edition output + format.series output + format.address.publisher.date output + format.volume output + format.number output + format.chapter output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {incollection} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" 
output.warn + name.or.dash + format.article.title "title" output.warn + format.in.booktitle.edition "booktitle" output.warn + format.series output + format.editors output + format.address.publisher.date.nowarn output + format.volume output + format.number output + format.chapter output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {inproceedings} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.in.booktitle "booktitle" output.warn + format.series output + format.editors output + format.volume output + format.number output + publisher empty$ + { format.address.organization.date output } + { format.organization "organization" bibinfo.check output + format.address.publisher.date output + } + if$ + format.paper output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {manual} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.book.title.edition "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {mastersthesis} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.master.thesis.type output.nonnull + format.school "school" bibinfo.warn output + format.address "address" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {misc} +{ std.status.using.comma + start.entry + 
if.url.alt.interword.spacing + format.authors output + name.or.dash + format.article.title output + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.pages output + format.date output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {patent} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.article.title output + format.patent.nationality.type.number output + format.patent.date output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {periodical} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.editors output + name.or.dash + format.book.title "title" output.warn + format.series output + format.volume output + format.number output + format.organization "organization" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {phdthesis} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.phd.thesis.type output.nonnull + format.school "school" bibinfo.warn output + format.address "address" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {proceedings} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.editors output + name.or.dash + format.book.title "title" output.warn + format.series output + format.volume output + format.number output + publisher empty$ + { format.address.organization.date output } + { format.organization "organization" bibinfo.check output + 
format.address.publisher.date output + } + if$ + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {standard} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.book.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization.institution.standard.type.number output + format.revision output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {techreport} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.institution "institution" bibinfo.warn output + format.address "address" bibinfo.check output + format.tech.report.number output.nonnull + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {unpublished} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.date output + format.note "note" output.warn + format.url output + fin.entry + if.url.std.interword.spacing +} + + +% The special entry type which provides the user interface to the +% BST controls +FUNCTION {IEEEtranBSTCTL} +{ is.print.banners.to.terminal + { "** IEEEtran BST control entry " quote$ * cite$ * quote$ * " detected." 
* + top$ + } + { skip$ } + if$ + CTLuse_article_number + empty$ + { skip$ } + { CTLuse_article_number + yes.no.to.int + 'is.use.number.for.article := + } + if$ + CTLuse_paper + empty$ + { skip$ } + { CTLuse_paper + yes.no.to.int + 'is.use.paper := + } + if$ + CTLuse_forced_etal + empty$ + { skip$ } + { CTLuse_forced_etal + yes.no.to.int + 'is.forced.et.al := + } + if$ + CTLmax_names_forced_etal + empty$ + { skip$ } + { CTLmax_names_forced_etal + string.to.integer + 'max.num.names.before.forced.et.al := + } + if$ + CTLnames_show_etal + empty$ + { skip$ } + { CTLnames_show_etal + string.to.integer + 'num.names.shown.with.forced.et.al := + } + if$ + CTLuse_alt_spacing + empty$ + { skip$ } + { CTLuse_alt_spacing + yes.no.to.int + 'is.use.alt.interword.spacing := + } + if$ + CTLalt_stretch_factor + empty$ + { skip$ } + { CTLalt_stretch_factor + 'ALTinterwordstretchfactor := + "\renewcommand{\BIBentryALTinterwordstretchfactor}{" + ALTinterwordstretchfactor * "}" * + write$ newline$ + } + if$ + CTLdash_repeated_names + empty$ + { skip$ } + { CTLdash_repeated_names + yes.no.to.int + 'is.dash.repeated.names := + } + if$ + CTLname_format_string + empty$ + { skip$ } + { CTLname_format_string + 'name.format.string := + } + if$ + CTLname_latex_cmd + empty$ + { skip$ } + { CTLname_latex_cmd + 'name.latex.cmd := + } + if$ + CTLname_url_prefix + missing$ + { skip$ } + { CTLname_url_prefix + 'name.url.prefix := + } + if$ + + + num.names.shown.with.forced.et.al max.num.names.before.forced.et.al > + { "CTLnames_show_etal cannot be greater than CTLmax_names_forced_etal in " cite$ * warning$ + max.num.names.before.forced.et.al 'num.names.shown.with.forced.et.al := + } + { skip$ } + if$ +} + + +%%%%%%%%%%%%%%%%%%% +%% ENTRY ALIASES %% +%%%%%%%%%%%%%%%%%%% +FUNCTION {conference}{inproceedings} +FUNCTION {online}{electronic} +FUNCTION {internet}{electronic} +FUNCTION {webpage}{electronic} +FUNCTION {www}{electronic} +FUNCTION {default.type}{misc} + + + +%%%%%%%%%%%%%%%%%% +%% MAIN PROGRAM 
%% +%%%%%%%%%%%%%%%%%% + +READ + +EXECUTE {initialize.controls} +EXECUTE {initialize.status.constants} +EXECUTE {banner.message} + +EXECUTE {initialize.longest.label} +ITERATE {longest.label.pass} + +EXECUTE {begin.bib} +ITERATE {call.type$} +EXECUTE {end.bib} + +EXECUTE{completed.message} + + +%% That's all folks, mds. diff --git a/spec/consensus/consensus-paper/IEEEtran.cls b/spec/consensus/consensus-paper/IEEEtran.cls new file mode 100644 index 0000000000..9c967d555f --- /dev/null +++ b/spec/consensus/consensus-paper/IEEEtran.cls @@ -0,0 +1,4733 @@ +%% +%% IEEEtran.cls 2011/11/03 version V1.8 based on +%% IEEEtran.cls 2007/03/05 version V1.7a +%% The changes in V1.8 are made with a single goal in mind: +%% to change the look of the output using the [conference] option +%% and the default font size (10pt) to match the Word template more closely. +%% These changes may well have undesired side effects when other options +%% are in force! +%% +%% +%% This is the official IEEE LaTeX class for authors of the Institute of +%% Electrical and Electronics Engineers (IEEE) Transactions journals and +%% conferences. +%% +%% Support sites: +%% http://www.michaelshell.org/tex/ieeetran/ +%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/ +%% and +%% http://www.ieee.org/ +%% +%% Based on the original 1993 IEEEtran.cls, but with many bug fixes +%% and enhancements (from both JVH and MDS) over the 1996/7 version. +%% +%% +%% Contributors: +%% Gerry Murray (1993), Silvano Balemi (1993), +%% Jon Dixon (1996), Peter N"uchter (1996), +%% Juergen von Hagen (2000), and Michael Shell (2001-2007) +%% +%% +%% Copyright (c) 1993-2000 by Gerry Murray, Silvano Balemi, +%% Jon Dixon, Peter N"uchter, +%% Juergen von Hagen +%% and +%% Copyright (c) 2001-2007 by Michael Shell +%% +%% Current maintainer (V1.3 to V1.7): Michael Shell +%% See: +%% http://www.michaelshell.org/ +%% for current contact information. 
+%% +%% Special thanks to Peter Wilson (CUA) and Donald Arseneau +%% for allowing the inclusion of the \@ifmtarg command +%% from their ifmtarg LaTeX package. +%% +%%************************************************************************* +%% Legal Notice: +%% This code is offered as-is without any warranty either expressed or +%% implied; without even the implied warranty of MERCHANTABILITY or +%% FITNESS FOR A PARTICULAR PURPOSE! +%% User assumes all risk. +%% In no event shall IEEE or any contributor to this code be liable for +%% any damages or losses, including, but not limited to, incidental, +%% consequential, or any other damages, resulting from the use or misuse +%% of any information contained here. +%% +%% All comments are the opinions of their respective authors and are not +%% necessarily endorsed by the IEEE. +%% +%% This work is distributed under the LaTeX Project Public License (LPPL) +%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used, +%% distributed and modified. A copy of the LPPL, version 1.3, is included +%% in the base LaTeX documentation of all distributions of LaTeX released +%% 2003/12/01 or later. +%% Retain all contribution notices and credits. +%% ** Modified files should be clearly indicated as such, including ** +%% ** renaming them and changing author support contact information. ** +%% +%% File list of work: IEEEtran.cls, IEEEtran_HOWTO.pdf, bare_adv.tex, +%% bare_conf.tex, bare_jrnl.tex, bare_jrnl_compsoc.tex +%% +%% Major changes to the user interface should be indicated by an +%% increase in the version numbers. If a version is a beta, it will +%% be indicated with a BETA suffix, i.e., 1.4 BETA. +%% Small changes can be indicated by appending letters to the version +%% such as "IEEEtran_v14a.cls". +%% In all cases, \Providesclass, any \typeout messages to the user, +%% \IEEEtransversionmajor and \IEEEtransversionminor must reflect the +%% correct version information. 
+%% The changes should also be documented via source comments. +%%************************************************************************* +%% +% +% Available class options +% e.g., \documentclass[10pt,conference]{IEEEtran} +% +% *** choose only one from each category *** +% +% 9pt, 10pt, 11pt, 12pt +% Sets normal font size. The default is 10pt. +% +% conference, journal, technote, peerreview, peerreviewca +% determines format mode - conference papers, journal papers, +% correspondence papers (technotes), or peer review papers. The user +% should also select 9pt when using technote. peerreview is like +% journal mode, but provides for a single-column "cover" title page for +% anonymous peer review. The paper title (without the author names) is +% repeated at the top of the page after the cover page. For peer review +% papers, the \IEEEpeerreviewmaketitle command must be executed (will +% automatically be ignored for non-peerreview modes) at the place the +% cover page is to end, usually just after the abstract (keywords are +% not normally used with peer review papers). peerreviewca is like +% peerreview, but allows the author names to be entered and formatted +% as with conference mode so that author affiliation and contact +% information can be easily seen on the cover page. +% The default is journal. +% +% draft, draftcls, draftclsnofoot, final +% determines if paper is formatted as a widely spaced draft (for +% handwritten editor comments) or as a properly typeset final version. +% draftcls restricts draft mode to the class file while all other LaTeX +% packages (i.e., \usepackage{graphicx}) will behave as final - allows +% for a draft paper with visible figures, etc. draftclsnofoot is like +% draftcls, but does not display the date and the word "DRAFT" at the foot +% of the pages. If using one of the draft modes, the user will probably +% also want to select onecolumn. +% The default is final. 
+%
+% letterpaper, a4paper
+% determines paper size: 8.5in X 11in or 210mm X 297mm. CHANGING THE PAPER
+% SIZE WILL NOT ALTER THE TYPESETTING OF THE DOCUMENT - ONLY THE MARGINS
+% WILL BE AFFECTED. In particular, documents using the a4paper option will
+% have reduced side margins (A4 is narrower than US letter) and a longer
+% bottom margin (A4 is longer than US letter). For both cases, the top
+% margins will be the same and the text will be horizontally centered.
+% For final submission to IEEE, authors should use US letter (8.5 X 11in)
+% paper. Note that authors should ensure that all post-processing
+% (ps, pdf, etc.) uses the same paper specification as the .tex document.
+% Problems here are by far the number one reason for incorrect margins.
+% IEEEtran will automatically set the default paper size under pdflatex
+% (without requiring a change to pdftex.cfg), so this issue is more
+% important to dvips users. Fix config.ps, config.pdf, or ~/.dvipsrc for
+% dvips, or use the dvips -t papersize option instead as needed. See the
+% testflow documentation
+% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/testflow
+% for more details on dvips paper size configuration.
+% The default is letterpaper.
+%
+% oneside, twoside
+% determines if layout follows single sided or two sided (duplex)
+% printing. The only notable change is with the headings at the top of
+% the pages.
+% The default is oneside.
+%
+% onecolumn, twocolumn
+% determines if text is organized into one or two columns per page. One
+% column mode is usually used only with draft papers.
+% The default is twocolumn.
+%
+% compsoc
+% Use the format of the IEEE Computer Society.
+%
+% romanappendices
+% Use the "Appendix I" convention when numbering appendices. IEEEtran.cls
+% now defaults to Alpha "Appendix A" convention - the opposite of what
+% v1.6b and earlier did.
+%
+% captionsoff
+% disables the display of the figure/table captions.
Some IEEE journals +% request that captions be removed and figures/tables be put on pages +% of their own at the end of an initial paper submission. The endfloat +% package can be used with this class option to achieve this format. +% +% nofonttune +% turns off tuning of the font interword spacing. Maybe useful to those +% not using the standard Times fonts or for those who have already "tuned" +% their fonts. +% The default is to enable IEEEtran to tune font parameters. +% +% +%---------- +% Available CLASSINPUTs provided (all are macros unless otherwise noted): +% \CLASSINPUTbaselinestretch +% \CLASSINPUTinnersidemargin +% \CLASSINPUToutersidemargin +% \CLASSINPUTtoptextmargin +% \CLASSINPUTbottomtextmargin +% +% Available CLASSINFOs provided: +% \ifCLASSINFOpdf (TeX if conditional) +% \CLASSINFOpaperwidth (macro) +% \CLASSINFOpaperheight (macro) +% \CLASSINFOnormalsizebaselineskip (length) +% \CLASSINFOnormalsizeunitybaselineskip (length) +% +% Available CLASSOPTIONs provided: +% all class option flags (TeX if conditionals) unless otherwise noted, +% e.g., \ifCLASSOPTIONcaptionsoff +% point size options provided as a single macro: +% \CLASSOPTIONpt +% which will be defined as 9, 10, 11, or 12 depending on the document's +% normalsize point size. +% also, class option peerreviewca implies the use of class option peerreview +% and classoption draft implies the use of class option draftcls + + + + + +\ProvidesClass{IEEEtran}[2012/11/21 V1.8c by Harald Hanche-Olsen and Anders Christensen] +\typeout{-- Based on V1.7a by Michael Shell} +\typeout{-- See the "IEEEtran_HOWTO" manual for usage information.} +\typeout{-- http://www.michaelshell.org/tex/ieeetran/} +\NeedsTeXFormat{LaTeX2e} + +% IEEEtran.cls version numbers, provided as of V1.3 +% These values serve as a way a .tex file can +% determine if the new features are provided. +% The version number of this IEEEtrans.cls can be obtained from +% these values. i.e., V1.4 +% KEEP THESE AS INTEGERS! 
i.e., NO {4a} or anything like that- +% (no need to enumerate "a" minor changes here) +\def\IEEEtransversionmajor{1} +\def\IEEEtransversionminor{7} + +% These do nothing, but provide them like in article.cls +\newif\if@restonecol +\newif\if@titlepage + + +% class option conditionals +\newif\ifCLASSOPTIONonecolumn \CLASSOPTIONonecolumnfalse +\newif\ifCLASSOPTIONtwocolumn \CLASSOPTIONtwocolumntrue + +\newif\ifCLASSOPTIONoneside \CLASSOPTIONonesidetrue +\newif\ifCLASSOPTIONtwoside \CLASSOPTIONtwosidefalse + +\newif\ifCLASSOPTIONfinal \CLASSOPTIONfinaltrue +\newif\ifCLASSOPTIONdraft \CLASSOPTIONdraftfalse +\newif\ifCLASSOPTIONdraftcls \CLASSOPTIONdraftclsfalse +\newif\ifCLASSOPTIONdraftclsnofoot \CLASSOPTIONdraftclsnofootfalse + +\newif\ifCLASSOPTIONpeerreview \CLASSOPTIONpeerreviewfalse +\newif\ifCLASSOPTIONpeerreviewca \CLASSOPTIONpeerreviewcafalse + +\newif\ifCLASSOPTIONjournal \CLASSOPTIONjournaltrue +\newif\ifCLASSOPTIONconference \CLASSOPTIONconferencefalse +\newif\ifCLASSOPTIONtechnote \CLASSOPTIONtechnotefalse + +\newif\ifCLASSOPTIONnofonttune \CLASSOPTIONnofonttunefalse + +\newif\ifCLASSOPTIONcaptionsoff \CLASSOPTIONcaptionsofffalse + +\newif\ifCLASSOPTIONcompsoc \CLASSOPTIONcompsocfalse + +\newif\ifCLASSOPTIONromanappendices \CLASSOPTIONromanappendicesfalse + + +% class info conditionals + +% indicates if pdf (via pdflatex) output +\newif\ifCLASSINFOpdf \CLASSINFOpdffalse + + +% V1.6b internal flag to show if using a4paper +\newif\if@IEEEusingAfourpaper \@IEEEusingAfourpaperfalse + + + +% IEEEtran class scratch pad registers +% dimen +\newdimen\@IEEEtrantmpdimenA +\newdimen\@IEEEtrantmpdimenB +% count +\newcount\@IEEEtrantmpcountA +\newcount\@IEEEtrantmpcountB +% token list +\newtoks\@IEEEtrantmptoksA + +% we use \CLASSOPTIONpt so that we can ID the point size (even for 9pt docs) +% as well as LaTeX's \@ptsize to retain some compatability with some +% external packages +\def\@ptsize{0} +% LaTeX does not support 9pt, so we set \@ptsize to 0 - same as that of 
10pt +\DeclareOption{9pt}{\def\CLASSOPTIONpt{9}\def\@ptsize{0}} +\DeclareOption{10pt}{\def\CLASSOPTIONpt{10}\def\@ptsize{0}} +\DeclareOption{11pt}{\def\CLASSOPTIONpt{11}\def\@ptsize{1}} +\DeclareOption{12pt}{\def\CLASSOPTIONpt{12}\def\@ptsize{2}} + + + +\DeclareOption{letterpaper}{\setlength{\paperheight}{11in}% + \setlength{\paperwidth}{8.5in}% + \@IEEEusingAfourpaperfalse + \def\CLASSOPTIONpaper{letter}% + \def\CLASSINFOpaperwidth{8.5in}% + \def\CLASSINFOpaperheight{11in}} + + +\DeclareOption{a4paper}{\setlength{\paperheight}{297mm}% + \setlength{\paperwidth}{210mm}% + \@IEEEusingAfourpapertrue + \def\CLASSOPTIONpaper{a4}% + \def\CLASSINFOpaperwidth{210mm}% + \def\CLASSINFOpaperheight{297mm}} + +\DeclareOption{oneside}{\@twosidefalse\@mparswitchfalse + \CLASSOPTIONonesidetrue\CLASSOPTIONtwosidefalse} +\DeclareOption{twoside}{\@twosidetrue\@mparswitchtrue + \CLASSOPTIONtwosidetrue\CLASSOPTIONonesidefalse} + +\DeclareOption{onecolumn}{\CLASSOPTIONonecolumntrue\CLASSOPTIONtwocolumnfalse} +\DeclareOption{twocolumn}{\CLASSOPTIONtwocolumntrue\CLASSOPTIONonecolumnfalse} + +% If the user selects draft, then this class AND any packages +% will go into draft mode. +\DeclareOption{draft}{\CLASSOPTIONdrafttrue\CLASSOPTIONdraftclstrue + \CLASSOPTIONdraftclsnofootfalse} +% draftcls is for a draft mode which will not affect any packages +% used by the document. +\DeclareOption{draftcls}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclstrue + \CLASSOPTIONdraftclsnofootfalse} +% draftclsnofoot is like draftcls, but without the footer. 
+\DeclareOption{draftclsnofoot}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclstrue + \CLASSOPTIONdraftclsnofoottrue} +\DeclareOption{final}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclsfalse + \CLASSOPTIONdraftclsnofootfalse} + +\DeclareOption{journal}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse + \CLASSOPTIONjournaltrue\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse} + +\DeclareOption{conference}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse + \CLASSOPTIONjournalfalse\CLASSOPTIONconferencetrue\CLASSOPTIONtechnotefalse} + +\DeclareOption{technote}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse + \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotetrue} + +\DeclareOption{peerreview}{\CLASSOPTIONpeerreviewtrue\CLASSOPTIONpeerreviewcafalse + \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse} + +\DeclareOption{peerreviewca}{\CLASSOPTIONpeerreviewtrue\CLASSOPTIONpeerreviewcatrue + \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse} + +\DeclareOption{nofonttune}{\CLASSOPTIONnofonttunetrue} + +\DeclareOption{captionsoff}{\CLASSOPTIONcaptionsofftrue} + +\DeclareOption{compsoc}{\CLASSOPTIONcompsoctrue} + +\DeclareOption{romanappendices}{\CLASSOPTIONromanappendicestrue} + + +% default to US letter paper, 10pt, twocolumn, one sided, final, journal +\ExecuteOptions{letterpaper,10pt,twocolumn,oneside,final,journal} +% overrride these defaults per user requests +\ProcessOptions + + + +% Computer Society conditional execution command +\long\def\@IEEEcompsoconly#1{\relax\ifCLASSOPTIONcompsoc\relax#1\relax\fi\relax} +% inverse +\long\def\@IEEEnotcompsoconly#1{\relax\ifCLASSOPTIONcompsoc\else\relax#1\relax\fi\relax} +% compsoc conference +\long\def\@IEEEcompsocconfonly#1{\relax\ifCLASSOPTIONcompsoc\ifCLASSOPTIONconference\relax#1\relax\fi\fi\relax} +% compsoc not conference 
+\long\def\@IEEEcompsocnotconfonly#1{\relax\ifCLASSOPTIONcompsoc\ifCLASSOPTIONconference\else\relax#1\relax\fi\fi\relax} + + +% IEEE uses Times Roman font, so we'll default to Times. +% These three commands make up the entire times.sty package. +\renewcommand{\sfdefault}{phv} +\renewcommand{\rmdefault}{ptm} +\renewcommand{\ttdefault}{pcr} + +\@IEEEcompsoconly{\typeout{-- Using IEEE Computer Society mode.}} + +% V1.7 compsoc nonconference papers, use Palatino/Palladio as the main text font, +% not Times Roman. +\@IEEEcompsocnotconfonly{\renewcommand{\rmdefault}{ppl}} + +% enable Times/Palatino main text font +\normalfont\selectfont + + + + + +% V1.7 conference notice message hook +\def\@IEEEconsolenoticeconference{\typeout{}% +\typeout{** Conference Paper **}% +\typeout{Before submitting the final camera ready copy, remember to:}% +\typeout{}% +\typeout{ 1. Manually equalize the lengths of two columns on the last page}% +\typeout{ of your paper;}% +\typeout{}% +\typeout{ 2. Ensure that any PostScript and/or PDF output post-processing}% +\typeout{ uses only Type 1 fonts and that every step in the generation}% +\typeout{ process uses the appropriate paper size.}% +\typeout{}} + + +% we can send console reminder messages to the user here +\AtEndDocument{\ifCLASSOPTIONconference\@IEEEconsolenoticeconference\fi} + + +% warn about the use of single column other than for draft mode +\ifCLASSOPTIONtwocolumn\else% + \ifCLASSOPTIONdraftcls\else% + \typeout{** ATTENTION: Single column mode is not typically used with IEEE publications.}% + \fi% +\fi + + +% V1.7 improved paper size setting code. +% Set pdfpage and dvips paper sizes. Conditional tests are similar to that +% of ifpdf.sty. Retain within {} to ensure tested macros are never altered, +% even if only effect is to set them to \relax. 
+% if \pdfoutput is undefined or equal to relax, output a dvips special +{\@ifundefined{pdfoutput}{\AtBeginDvi{\special{papersize=\CLASSINFOpaperwidth,\CLASSINFOpaperheight}}}{% +% pdfoutput is defined and not equal to \relax +% check for pdfpageheight existence just in case someone sets pdfoutput +% under non-pdflatex. If exists, set them regardless of value of \pdfoutput. +\@ifundefined{pdfpageheight}{\relax}{\global\pdfpagewidth\paperwidth +\global\pdfpageheight\paperheight}% +% if using \pdfoutput=0 under pdflatex, send dvips papersize special +\ifcase\pdfoutput +\AtBeginDvi{\special{papersize=\CLASSINFOpaperwidth,\CLASSINFOpaperheight}}% +\else +% we are using pdf output, set CLASSINFOpdf flag +\global\CLASSINFOpdftrue +\fi}} + +% let the user know the selected papersize +\typeout{-- Using \CLASSINFOpaperwidth\space x \CLASSINFOpaperheight\space +(\CLASSOPTIONpaper)\space paper.} + +\ifCLASSINFOpdf +\typeout{-- Using PDF output.} +\else +\typeout{-- Using DVI output.} +\fi + + +% The idea hinted here is for LaTeX to generate markleft{} and markright{} +% automatically for you after you enter \author{}, \journal{}, +% \journaldate{}, journalvol{}, \journalnum{}, etc. +% However, there may be some backward compatibility issues here as +% well as some special applications for IEEEtran.cls and special issues +% that may require the flexible \markleft{}, \markright{} and/or \markboth{}. +% We'll leave this as an open future suggestion. 
+%\newcommand{\journal}[1]{\def\@journal{#1}} +%\def\@journal{} + + + +% pointsize values +% used with ifx to determine the document's normal size +\def\@IEEEptsizenine{9} +\def\@IEEEptsizeten{10} +\def\@IEEEptsizeeleven{11} +\def\@IEEEptsizetwelve{12} + + + +% FONT DEFINITIONS (No sizexx.clo file needed) +% V1.6 revised font sizes, displayskip values and +% revised normalsize baselineskip to reduce underfull vbox problems +% on the 58pc = 696pt = 9.5in text height we want +% normalsize #lines/column baselineskip (aka leading) +% 9pt 63 11.0476pt (truncated down) +% 10pt 58 12pt (exact) +% 11pt 52 13.3846pt (truncated down) +% 12pt 50 13.92pt (exact) +% + +% we need to store the nominal baselineskip for the given font size +% in case baselinestretch ever changes. +% this is a dimen, so it will not hold stretch or shrink +\newdimen\@IEEEnormalsizeunitybaselineskip +\@IEEEnormalsizeunitybaselineskip\baselineskip + +\ifx\CLASSOPTIONpt\@IEEEptsizenine +\typeout{-- This is a 9 point document.} +\def\normalsize{\@setfontsize{\normalsize}{9}{11.0476pt}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{11.0476pt}% +\normalsize +\abovedisplayskip 1.5ex plus3pt minus1pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus3pt% +\belowdisplayshortskip 1.5ex plus3pt minus1pt +\def\small{\@setfontsize{\small}{8.5}{10pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{8}{9pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{7}{8pt}} +\def\tiny{\@setfontsize{\tiny}{5}{6pt}} +% sublargesize is the same as large - 10pt +\def\sublargesize{\@setfontsize{\sublargesize}{10}{12pt}} +\def\large{\@setfontsize{\large}{10}{12pt}} +\def\Large{\@setfontsize{\Large}{12}{14pt}} +\def\LARGE{\@setfontsize{\LARGE}{14}{17pt}} +\def\huge{\@setfontsize{\huge}{17}{20pt}} +\def\Huge{\@setfontsize{\Huge}{20}{24pt}} +\fi + + +% Check if we have selected 10 points +\ifx\CLASSOPTIONpt\@IEEEptsizeten +\typeout{-- This is a 10 point document.} 
+\def\normalsize{\@setfontsize{\normalsize}{10}{11}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{11pt}% +\normalsize +\abovedisplayskip 1.5ex plus4pt minus2pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus4pt% +\belowdisplayshortskip 1.5ex plus4pt minus2pt +\def\small{\@setfontsize{\small}{9}{10pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{8}{9pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{7}{8pt}} +\def\tiny{\@setfontsize{\tiny}{5}{6pt}} +% sublargesize is a tad smaller than large - 11pt +\def\sublargesize{\@setfontsize{\sublargesize}{11}{13.4pt}} +\def\large{\@setfontsize{\large}{12}{14pt}} +\def\Large{\@setfontsize{\Large}{14}{17pt}} +\def\LARGE{\@setfontsize{\LARGE}{17}{20pt}} +\def\huge{\@setfontsize{\huge}{20}{24pt}} +\def\Huge{\@setfontsize{\Huge}{24}{28pt}} +\fi + + +% Check if we have selected 11 points +\ifx\CLASSOPTIONpt\@IEEEptsizeeleven +\typeout{-- This is an 11 point document.} +\def\normalsize{\@setfontsize{\normalsize}{11}{13.3846pt}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{13.3846pt}% +\normalsize +\abovedisplayskip 1.5ex plus5pt minus3pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus5pt% +\belowdisplayshortskip 1.5ex plus5pt minus3pt +\def\small{\@setfontsize{\small}{10}{12pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{9}{10.5pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{8}{9pt}} +\def\tiny{\@setfontsize{\tiny}{6}{7pt}} +% sublargesize is the same as large - 12pt +\def\sublargesize{\@setfontsize{\sublargesize}{12}{14pt}} +\def\large{\@setfontsize{\large}{12}{14pt}} +\def\Large{\@setfontsize{\Large}{14}{17pt}} +\def\LARGE{\@setfontsize{\LARGE}{17}{20pt}} +\def\huge{\@setfontsize{\huge}{20}{24pt}} +\def\Huge{\@setfontsize{\Huge}{24}{28pt}} +\fi + + +% Check if we have selected 12 points +\ifx\CLASSOPTIONpt\@IEEEptsizetwelve +\typeout{-- This is a 12 point document.} +\def\normalsize{\@setfontsize{\normalsize}{12}{13.92pt}}% 
+\setlength{\@IEEEnormalsizeunitybaselineskip}{13.92pt}% +\normalsize +\abovedisplayskip 1.5ex plus6pt minus4pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus6pt% +\belowdisplayshortskip 1.5ex plus6pt minus4pt +\def\small{\@setfontsize{\small}{10}{12pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{9}{10.5pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{8}{9pt}} +\def\tiny{\@setfontsize{\tiny}{6}{7pt}} +% sublargesize is the same as large - 14pt +\def\sublargesize{\@setfontsize{\sublargesize}{14}{17pt}} +\def\large{\@setfontsize{\large}{14}{17pt}} +\def\Large{\@setfontsize{\Large}{17}{20pt}} +\def\LARGE{\@setfontsize{\LARGE}{20}{24pt}} +\def\huge{\@setfontsize{\huge}{22}{26pt}} +\def\Huge{\@setfontsize{\Huge}{24}{28pt}} +\fi + + +% V1.6 The Computer Modern Fonts will issue a substitution warning for +% 24pt titles (24.88pt is used instead) increase the substitution +% tolerance to turn off this warning +\def\fontsubfuzz{.9pt} +% However, the default (and correct) Times font will scale exactly as needed. + + +% warn the user in case they forget to use the 9pt option with +% technote +\ifCLASSOPTIONtechnote% + \ifx\CLASSOPTIONpt\@IEEEptsizenine\else% + \typeout{** ATTENTION: Technotes are normally 9pt documents.}% + \fi% +\fi + + +% V1.7 +% Improved \textunderscore to provide a much better fake _ when used with +% OT1 encoding. Under OT1, detect use of pcr or cmtt \ttfamily and use +% available true _ glyph for those two typewriter fonts. 
+\def\@IEEEstringptm{ptm} % Times Roman family +\def\@IEEEstringppl{ppl} % Palatino Roman family +\def\@IEEEstringphv{phv} % Helvetica Sans Serif family +\def\@IEEEstringpcr{pcr} % Courier typewriter family +\def\@IEEEstringcmtt{cmtt} % Computer Modern typewriter family +\DeclareTextCommandDefault{\textunderscore}{\leavevmode +\ifx\f@family\@IEEEstringpcr\string_\else +\ifx\f@family\@IEEEstringcmtt\string_\else +\ifx\f@family\@IEEEstringptm\kern 0em\vbox{\hrule\@width 0.5em\@height 0.5pt\kern -0.3ex}\else +\ifx\f@family\@IEEEstringppl\kern 0em\vbox{\hrule\@width 0.5em\@height 0.5pt\kern -0.3ex}\else +\ifx\f@family\@IEEEstringphv\kern -0.03em\vbox{\hrule\@width 0.62em\@height 0.52pt\kern -0.33ex}\kern -0.03em\else +\kern 0.09em\vbox{\hrule\@width 0.6em\@height 0.44pt\kern -0.63pt\kern -0.42ex}\kern 0.09em\fi\fi\fi\fi\fi\relax} + + + + +% set the default \baselinestretch +\def\baselinestretch{1} +\ifCLASSOPTIONdraftcls + \def\baselinestretch{1.5}% default baselinestretch for draft modes +\fi + + +% process CLASSINPUT baselinestretch +\ifx\CLASSINPUTbaselinestretch\@IEEEundefined +\else + \edef\baselinestretch{\CLASSINPUTbaselinestretch} % user CLASSINPUT override + \typeout{** ATTENTION: Overriding \string\baselinestretch\space to + \baselinestretch\space via \string\CLASSINPUT.} +\fi + +\normalsize % make \baselinestretch take affect + + + + +% store the normalsize baselineskip +\newdimen\CLASSINFOnormalsizebaselineskip +\CLASSINFOnormalsizebaselineskip=\baselineskip\relax +% and the normalsize unity (baselinestretch=1) baselineskip +% we could save a register by giving the user access to +% \@IEEEnormalsizeunitybaselineskip. 
However, let's protect +% its read only internal status +\newdimen\CLASSINFOnormalsizeunitybaselineskip +\CLASSINFOnormalsizeunitybaselineskip=\@IEEEnormalsizeunitybaselineskip\relax +% store the nominal value of jot +\newdimen\IEEEnormaljot +\IEEEnormaljot=0.25\baselineskip\relax + +% set \jot +\jot=\IEEEnormaljot\relax + + + + +% V1.6, we are now going to fine tune the interword spacing +% The default interword glue for Times under TeX appears to use a +% nominal interword spacing of 25% (relative to the font size, i.e., 1em) +% a maximum of 40% and a minimum of 19%. +% For example, 10pt text uses an interword glue of: +% +% 2.5pt plus 1.49998pt minus 0.59998pt +% +% However, IEEE allows for a more generous range which reduces the need +% for hyphenation, especially for two column text. Furthermore, IEEE +% tends to use a little bit more nominal space between the words. +% IEEE's interword spacing percentages appear to be: +% 35% nominal +% 23% minimum +% 50% maximum +% (They may even be using a tad more for the largest fonts such as 24pt.) +% +% for bold text, IEEE increases the spacing a little more: +% 37.5% nominal +% 23% minimum +% 55% maximum + +% here are the interword spacing ratios we'll use +% for medium (normal weight) +\def\@IEEEinterspaceratioM{0.35} +\def\@IEEEinterspaceMINratioM{0.23} +\def\@IEEEinterspaceMAXratioM{0.50} + +% for bold +\def\@IEEEinterspaceratioB{0.375} +\def\@IEEEinterspaceMINratioB{0.23} +\def\@IEEEinterspaceMAXratioB{0.55} + + +% command to revise the interword spacing for the current font under TeX: +% \fontdimen2 = nominal interword space +% \fontdimen3 = interword stretch +% \fontdimen4 = interword shrink +% since all changes to the \fontdimen are global, we can enclose these commands +% in braces to confine any font attribute or length changes +\def\@@@IEEEsetfontdimens#1#2#3{{% +\setlength{\@IEEEtrantmpdimenB}{\f@size pt}% grab the font size in pt, could use 1em instead. 
+\setlength{\@IEEEtrantmpdimenA}{#1\@IEEEtrantmpdimenB}% +\fontdimen2\font=\@IEEEtrantmpdimenA\relax +\addtolength{\@IEEEtrantmpdimenA}{-#2\@IEEEtrantmpdimenB}% +\fontdimen3\font=-\@IEEEtrantmpdimenA\relax +\setlength{\@IEEEtrantmpdimenA}{#1\@IEEEtrantmpdimenB}% +\addtolength{\@IEEEtrantmpdimenA}{-#3\@IEEEtrantmpdimenB}% +\fontdimen4\font=\@IEEEtrantmpdimenA\relax}} + +% revise the interword spacing for each font weight +\def\@@IEEEsetfontdimens{{% +\mdseries +\@@@IEEEsetfontdimens{\@IEEEinterspaceratioM}{\@IEEEinterspaceMAXratioM}{\@IEEEinterspaceMINratioM}% +\bfseries +\@@@IEEEsetfontdimens{\@IEEEinterspaceratioB}{\@IEEEinterspaceMAXratioB}{\@IEEEinterspaceMINratioB}% +}} + +% revise the interword spacing for each font shape +% \slshape is not often used for IEEE work and is not altered here. The \scshape caps are +% already a tad too large in the free LaTeX fonts (as compared to what IEEE uses) so we +% won't alter these either. +\def\@IEEEsetfontdimens{{% +\normalfont +\@@IEEEsetfontdimens +\normalfont\itshape +\@@IEEEsetfontdimens +}} + +% command to revise the interword spacing for each font size (and shape +% and weight). Only the \rmfamily is done here as \ttfamily uses a +% fixed spacing and \sffamily is not used as the main text of IEEE papers. 
+\def\@IEEEtunefonts{{\selectfont\rmfamily +\tiny\@IEEEsetfontdimens +\scriptsize\@IEEEsetfontdimens +\footnotesize\@IEEEsetfontdimens +\small\@IEEEsetfontdimens +\normalsize\@IEEEsetfontdimens +\sublargesize\@IEEEsetfontdimens +\large\@IEEEsetfontdimens +\LARGE\@IEEEsetfontdimens +\huge\@IEEEsetfontdimens +\Huge\@IEEEsetfontdimens}} + +% if the nofonttune class option is not given, revise the interword spacing +% now - in case IEEEtran makes any default length measurements, and make +% sure all the default fonts are loaded +\ifCLASSOPTIONnofonttune\else +\@IEEEtunefonts +\fi + +% and again at the start of the document in case the user loaded different fonts +\AtBeginDocument{\ifCLASSOPTIONnofonttune\else\@IEEEtunefonts\fi} + + + +% V1.6 +% LaTeX is a little to quick to use hyphenations +% So, we increase the penalty for their use and raise +% the badness level that triggers an underfull hbox +% warning. The author may still have to tweak things, +% but the appearance will be much better "right out +% of the box" than that under V1.5 and prior. +% TeX default is 50 +\hyphenpenalty=750 +% If we didn't adjust the interword spacing, 2200 might be better. +% The TeX default is 1000 +\hbadness=1350 +% IEEE does not use extra spacing after punctuation +\frenchspacing + +% V1.7 increase this a tad to discourage equation breaks +\binoppenalty=1000 % default 700 +\relpenalty=800 % default 500 + + +% margin note stuff +\marginparsep 10pt +\marginparwidth 20pt +\marginparpush 25pt + + +% if things get too close, go ahead and let them touch +\lineskip 0pt +\normallineskip 0pt +\lineskiplimit 0pt +\normallineskiplimit 0pt + +% The distance from the lower edge of the text body to the +% footline +\footskip 0.4in + +% normally zero, should be relative to font height. 
+% put in a little rubber to help stop some bad breaks (underfull vboxes) +\parskip 0ex plus 0.2ex minus 0.1ex +\ifCLASSOPTIONconference +\parskip 6pt plus 2pt minus 1pt +\fi + +\parindent 1.0em +\ifCLASSOPTIONconference +\parindent 14.45pt +\fi + +\topmargin -49.0pt +\headheight 12pt +\headsep 0.25in + +% use the normal font baselineskip +% so that \topskip is unaffected by changes in \baselinestretch +\topskip=\@IEEEnormalsizeunitybaselineskip +\textheight 58pc % 9.63in, 696pt +% Tweak textheight to a perfect integer number of lines/page. +% The normal baselineskip for each document point size is used +% to determine these values. +\ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=63\@IEEEnormalsizeunitybaselineskip\fi % 63 lines/page +\ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=58\@IEEEnormalsizeunitybaselineskip\fi % 58 lines/page +\ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=52\@IEEEnormalsizeunitybaselineskip\fi % 52 lines/page +\ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=50\@IEEEnormalsizeunitybaselineskip\fi % 50 lines/page + + +\columnsep 1.5pc +\textwidth 184.2mm + + +% the default side margins are equal +\if@IEEEusingAfourpaper +\oddsidemargin 14.32mm +\evensidemargin 14.32mm +\else +\oddsidemargin 0.680in +\evensidemargin 0.680in +\fi +% compensate for LaTeX's 1in offset +\addtolength{\oddsidemargin}{-1in} +\addtolength{\evensidemargin}{-1in} + + + +% adjust margins for conference mode +\ifCLASSOPTIONconference + \topmargin -0.25in + % we retain the reserved, but unused space for headers + \addtolength{\topmargin}{-\headheight} + \addtolength{\topmargin}{-\headsep} + \textheight 9.25in % The standard for conferences (668.4975pt) + % Tweak textheight to a perfect integer number of lines/page. 
+ \ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=61\@IEEEnormalsizeunitybaselineskip\fi % 61 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=62\@IEEEnormalsizeunitybaselineskip\fi % 62 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=50\@IEEEnormalsizeunitybaselineskip\fi % 50 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=48\@IEEEnormalsizeunitybaselineskip\fi % 48 lines/page +\fi + + +% compsoc conference +\ifCLASSOPTIONcompsoc +\ifCLASSOPTIONconference + % compsoc conference use a larger value for columnsep + \columnsep 0.375in + % compsoc conferences want 1in top margin, 1.125in bottom margin + \topmargin 0in + \addtolength{\topmargin}{-6pt}% we tweak this a tad to better comply with top of line stuff + % we retain the reserved, but unused space for headers + \addtolength{\topmargin}{-\headheight} + \addtolength{\topmargin}{-\headsep} + \textheight 8.875in % (641.39625pt) + % Tweak textheight to a perfect integer number of lines/page. + \ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=58\@IEEEnormalsizeunitybaselineskip\fi % 58 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=53\@IEEEnormalsizeunitybaselineskip\fi % 53 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=48\@IEEEnormalsizeunitybaselineskip\fi % 48 lines/page + \ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=46\@IEEEnormalsizeunitybaselineskip\fi % 46 lines/page + \textwidth 6.5in + % the default side margins are equal + \if@IEEEusingAfourpaper + \oddsidemargin 22.45mm + \evensidemargin 22.45mm + \else + \oddsidemargin 1in + \evensidemargin 1in + \fi + % compensate for LaTeX's 1in offset + \addtolength{\oddsidemargin}{-1in} + \addtolength{\evensidemargin}{-1in} +\fi\fi + + + +% draft mode settings override that of all other modes +% provides a nice 1in margin all around the paper and extra +% space between the lines for editor's comments +\ifCLASSOPTIONdraftcls + % want 1in from top of paper to text + \setlength{\topmargin}{-\headsep}% + 
\addtolength{\topmargin}{-\headheight}% + % we want 1in side margins regardless of paper type + \oddsidemargin 0in + \evensidemargin 0in + % set the text width + \setlength{\textwidth}{\paperwidth}% + \addtolength{\textwidth}{-2.0in}% + \setlength{\textheight}{\paperheight}% + \addtolength{\textheight}{-2.0in}% + % digitize textheight to be an integer number of lines. + % this may cause the bottom margin to be off a tad + \addtolength{\textheight}{-1\topskip}% + \divide\textheight by \baselineskip% + \multiply\textheight by \baselineskip% + \addtolength{\textheight}{\topskip}% +\fi + + + +% process CLASSINPUT inner/outer margin +% if inner margin defined, but outer margin not, set outer to inner. +\ifx\CLASSINPUTinnersidemargin\@IEEEundefined +\else + \ifx\CLASSINPUToutersidemargin\@IEEEundefined + \edef\CLASSINPUToutersidemargin{\CLASSINPUTinnersidemargin} + \fi +\fi + +\ifx\CLASSINPUToutersidemargin\@IEEEundefined +\else + % if outer margin defined, but inner margin not, set inner to outer. 
+ \ifx\CLASSINPUTinnersidemargin\@IEEEundefined + \edef\CLASSINPUTinnersidemargin{\CLASSINPUToutersidemargin} + \fi + \setlength{\oddsidemargin}{\CLASSINPUTinnersidemargin} + \ifCLASSOPTIONtwoside + \setlength{\evensidemargin}{\CLASSINPUToutersidemargin} + \else + \setlength{\evensidemargin}{\CLASSINPUTinnersidemargin} + \fi + \addtolength{\oddsidemargin}{-1in} + \addtolength{\evensidemargin}{-1in} + \setlength{\textwidth}{\paperwidth} + \addtolength{\textwidth}{-\CLASSINPUTinnersidemargin} + \addtolength{\textwidth}{-\CLASSINPUToutersidemargin} + \typeout{** ATTENTION: Overriding inner side margin to \CLASSINPUTinnersidemargin\space and + outer side margin to \CLASSINPUToutersidemargin\space via \string\CLASSINPUT.} +\fi + + + +% process CLASSINPUT top/bottom text margin +% if toptext margin defined, but bottomtext margin not, set bottomtext to toptext margin +\ifx\CLASSINPUTtoptextmargin\@IEEEundefined +\else + \ifx\CLASSINPUTbottomtextmargin\@IEEEundefined + \edef\CLASSINPUTbottomtextmargin{\CLASSINPUTtoptextmargin} + \fi +\fi + +\ifx\CLASSINPUTbottomtextmargin\@IEEEundefined +\else + % if bottomtext margin defined, but toptext margin not, set toptext to bottomtext margin + \ifx\CLASSINPUTtoptextmargin\@IEEEundefined + \edef\CLASSINPUTtoptextmargin{\CLASSINPUTbottomtextmargin} + \fi + \setlength{\topmargin}{\CLASSINPUTtoptextmargin} + \addtolength{\topmargin}{-1in} + \addtolength{\topmargin}{-\headheight} + \addtolength{\topmargin}{-\headsep} + \setlength{\textheight}{\paperheight} + \addtolength{\textheight}{-\CLASSINPUTtoptextmargin} + \addtolength{\textheight}{-\CLASSINPUTbottomtextmargin} + % in the default format we use the normal baselineskip as topskip + % we only need 0.7 of this to clear typical top text and we need + % an extra 0.3 spacing at the bottom for descenders. This will + % correct for both. 
+ \addtolength{\topmargin}{-0.3\@IEEEnormalsizeunitybaselineskip} + \typeout{** ATTENTION: Overriding top text margin to \CLASSINPUTtoptextmargin\space and + bottom text margin to \CLASSINPUTbottomtextmargin\space via \string\CLASSINPUT.} +\fi + + + + + + + +% LIST SPACING CONTROLS + +% Controls the amount of EXTRA spacing +% above and below \trivlist +% Both \list and IED lists override this. +% However, \trivlist will use this as will most +% things built from \trivlist like the \center +% environment. +\topsep 0.5\baselineskip + +% Controls the additional spacing around lists preceded +% or followed by blank lines. IEEE does not increase +% spacing before or after paragraphs so it is set to zero. +% \z@ is the same as zero, but faster. +\partopsep \z@ + +% Controls the spacing between paragraphs in lists. +% IEEE does not increase spacing before or after paragraphs +% so this is also zero. +% With IEEEtran.cls, global changes to +% this value DO affect lists (but not IED lists). +\parsep \z@ + +% Controls the extra spacing between list items. +% IEEE does not put extra spacing between items. +% With IEEEtran.cls, global changes to this value DO affect +% lists (but not IED lists). +\itemsep \z@ + +% \itemindent is the amount to indent the FIRST line of a list +% item. It is auto set to zero within the \list environment. To alter +% it, you have to do so when you call the \list. +% However, IEEE uses this for the theorem environment +% There is an alternative value for this near \leftmargini below +\itemindent -1em + +% \leftmargin, the spacing from the left margin of the main text to +% the left of the main body of a list item is set by \list. +% Hence this statement does nothing for lists. +% But, quote and verse do use it for indention. +\leftmargin 2em + +% we retain this stuff from the older IEEEtran.cls so that \list +% will work the same way as before. 
However, itemize, enumerate and +% description (IED) could care less about what these are as they +% all are overridden. +\leftmargini 2em +%\itemindent 2em % Alternative values: sometimes used. +%\leftmargini 0em +\leftmarginii 1em +\leftmarginiii 1.5em +\leftmarginiv 1.5em +\leftmarginv 1.0em +\leftmarginvi 1.0em +\labelsep 0.5em +\labelwidth \z@ + + +% The old IEEEtran.cls behavior of \list is retained. +% However, the new V1.3 IED list environments override all the +% @list stuff (\@listX is called within \list for the +% appropriate level just before the user's list_decl is called). +% \topsep is now 2pt as IEEE puts a little extra space around +% lists - used by those non-IED macros that depend on \list. +% Note that \parsep and \itemsep are not redefined as in +% the sizexx.clo \@listX (which article.cls uses) so global changes +% of these values DO affect \list +% +\def\@listi{\leftmargin\leftmargini \topsep 2pt plus 1pt minus 1pt} +\let\@listI\@listi +\def\@listii{\leftmargin\leftmarginii\labelwidth\leftmarginii% + \advance\labelwidth-\labelsep \topsep 2pt} +\def\@listiii{\leftmargin\leftmarginiii\labelwidth\leftmarginiii% + \advance\labelwidth-\labelsep \topsep 2pt} +\def\@listiv{\leftmargin\leftmarginiv\labelwidth\leftmarginiv% + \advance\labelwidth-\labelsep \topsep 2pt} +\def\@listv{\leftmargin\leftmarginv\labelwidth\leftmarginv% + \advance\labelwidth-\labelsep \topsep 2pt} +\def\@listvi{\leftmargin\leftmarginvi\labelwidth\leftmarginvi% + \advance\labelwidth-\labelsep \topsep 2pt} + + +% IEEE uses 5) not 5. +\def\labelenumi{\theenumi)} \def\theenumi{\arabic{enumi}} + +% IEEE uses a) not (a) +\def\labelenumii{\theenumii)} \def\theenumii{\alph{enumii}} + +% IEEE uses iii) not iii. +\def\labelenumiii{\theenumiii)} \def\theenumiii{\roman{enumiii}} + +% IEEE uses A) not A. 
+\def\labelenumiv{\theenumiv)} \def\theenumiv{\Alph{enumiv}} + +% exactly the same as in article.cls +\def\p@enumii{\theenumi} +\def\p@enumiii{\theenumi(\theenumii)} +\def\p@enumiv{\p@enumiii\theenumiii} + +% itemized list label styles +\def\labelitemi{$\bullet$} +\def\labelitemii{$\circ$} +\def\labelitemiii{\vrule height 0.8ex depth -0.2ex width 0.6ex} +\def\labelitemiv{$\ast$} + + + +% **** V1.3 ENHANCEMENTS **** +% Itemize, Enumerate and Description (IED) List Controls +% *************************** +% +% +% IEEE seems to use at least two different values by +% which ITEMIZED list labels are indented to the right +% For The Journal of Lightwave Technology (JLT) and The Journal +% on Selected Areas in Communications (JSAC), they tend to use +% an indention equal to \parindent. For Transactions on Communications +% they tend to indent ITEMIZED lists a little more--- 1.3\parindent. +% We'll provide both values here for you so that you can choose +% which one you like in your document using a command such as: +% setlength{\IEEEilabelindent}{\IEEEilabelindentB} +\newdimen\IEEEilabelindentA +\IEEEilabelindentA \parindent + +\newdimen\IEEEilabelindentB +\IEEEilabelindentB 1.3\parindent +% However, we'll default to using \parindent +% which makes more sense to me +\newdimen\IEEEilabelindent +\IEEEilabelindent \IEEEilabelindentA + + +% This controls the default amount the enumerated list labels +% are indented to the right. +% Normally, this is the same as the paragraph indention +\newdimen\IEEEelabelindent +\IEEEelabelindent \parindent + +% This controls the default amount the description list labels +% are indented to the right. +% Normally, this is the same as the paragraph indention +\newdimen\IEEEdlabelindent +\IEEEdlabelindent \parindent + +% This is the value actually used within the IED lists. 
+% The IED environments automatically set its value to +% one of the three values above, so global changes do +% not have any effect +\newdimen\IEEElabelindent +\IEEElabelindent \parindent + +% The actual amount labels will be indented is +% \IEEElabelindent multiplied by the factor below +% corresponding to the level of nesting depth +% This provides a means by which the user can +% alter the effective \IEEElabelindent for deeper +% levels +% There may not be such a thing as correct "standard IEEE" +% values. What IEEE actually does may depend on the specific +% circumstances. +% The first list level almost always has full indention. +% The second levels I've seen have only 75% of the normal indentation +% Three level or greater nestings are very rare. I am guessing +% that they don't use any indentation. +\def\IEEElabelindentfactori{1.0} % almost always one +\def\IEEElabelindentfactorii{0.75} % 0.0 or 1.0 may be used in some cases +\def\IEEElabelindentfactoriii{0.0} % 0.75? 0.5? 0.0? +\def\IEEElabelindentfactoriv{0.0} +\def\IEEElabelindentfactorv{0.0} +\def\IEEElabelindentfactorvi{0.0} + +% value actually used within IED lists, it is auto +% set to one of the 6 values above +% global changes here have no effect +\def\IEEElabelindentfactor{1.0} + +% This controls the default spacing between the end of the IED +% list labels and the list text, when normal text is used for +% the labels. +\newdimen\IEEEiednormlabelsep +\IEEEiednormlabelsep \parindent + +% This controls the default spacing between the end of the IED +% list labels and the list text, when math symbols are used for +% the labels (nomenclature lists). IEEE usually increases the +% spacing in these cases +\newdimen\IEEEiedmathlabelsep +\IEEEiedmathlabelsep 1.2em + +% This controls the extra vertical separation put above and +% below each IED list. IEEE usually puts a little extra spacing +% around each list. However, this spacing is barely noticeable. 
+\newskip\IEEEiedtopsep +\IEEEiedtopsep 2pt plus 1pt minus 1pt + + +% This command is executed within each IED list environment +% at the beginning of the list. You can use this to set the +% parameters for some/all your IED list(s) without disturbing +% global parameters that affect things other than lists. +% i.e., renewcommand{\IEEEiedlistdecl}{\setlength{\labelsep}{5em}} +% will alter the \labelsep for the next list(s) until +% \IEEEiedlistdecl is redefined. +\def\IEEEiedlistdecl{\relax} + +% This command provides an easy way to set \leftmargin based +% on the \labelwidth, \labelsep and the argument \IEEElabelindent +% Usage: \IEEEcalcleftmargin{width-to-indent-the-label} +% output is in the \leftmargin variable, i.e., effectively: +% \leftmargin = argument + \labelwidth + \labelsep +% Note controlled spacing here, shield end of lines with % +\def\IEEEcalcleftmargin#1{\setlength{\leftmargin}{#1}% +\addtolength{\leftmargin}{\labelwidth}% +\addtolength{\leftmargin}{\labelsep}} + +% This command provides an easy way to set \labelwidth to the +% width of the given text. It is the same as +% \settowidth{\labelwidth}{label-text} +% and useful as a shorter alternative. +% Typically used to set \labelwidth to be the width +% of the longest label in the list +\def\IEEEsetlabelwidth#1{\settowidth{\labelwidth}{#1}} + +% When this command is executed, IED lists will use the +% IEEEiedmathlabelsep label separation rather than the normal +% spacing. To have an effect, this command must be executed via +% the \IEEEiedlistdecl or within the option of the IED list +% environments. 
+\def\IEEEusemathlabelsep{\setlength{\labelsep}{\IEEEiedmathlabelsep}} + +% A flag which controls whether the IED lists automatically +% calculate \leftmargin from \IEEElabelindent, \labelwidth and \labelsep +% Useful if you want to specify your own \leftmargin +% This flag must be set (\IEEEnocalcleftmargintrue or \IEEEnocalcleftmarginfalse) +% via the \IEEEiedlistdecl or within the option of the IED list +% environments to have an effect. +\newif\ifIEEEnocalcleftmargin +\IEEEnocalcleftmarginfalse + +% A flag which controls whether \IEEElabelindent is multiplied by +% the \IEEElabelindentfactor for each list level. +% This flag must be set via the \IEEEiedlistdecl or within the option +% of the IED list environments to have an effect. +\newif\ifIEEEnolabelindentfactor +\IEEEnolabelindentfactorfalse + + +% internal variable to indicate type of IED label +% justification +% 0 - left; 1 - center; 2 - right +\def\@IEEEiedjustify{0} + + +% commands to allow the user to control IED +% label justifications. Use these commands within +% the IED environment option or in the \IEEEiedlistdecl +% Note that changing the normal list justifications +% is nonstandard and IEEE may not like it if you do so! +% I include these commands as they may be helpful to +% those who are using these enhanced list controls for +% other non-IEEE related LaTeX work. +% itemize and enumerate automatically default to right +% justification, description defaults to left. 
+\def\IEEEiedlabeljustifyl{\def\@IEEEiedjustify{0}}%left +\def\IEEEiedlabeljustifyc{\def\@IEEEiedjustify{1}}%center +\def\IEEEiedlabeljustifyr{\def\@IEEEiedjustify{2}}%right + + + + +% commands to save to and restore from the list parameter copies +% this allows us to set all the list parameters within +% the list_decl and prevent \list (and its \@list) +% from overriding any of our parameters +% V1.6 use \edefs instead of dimen's to conserve dimen registers +% Note controlled spacing here, shield end of lines with % +\def\@IEEEsavelistparams{\edef\@IEEEiedtopsep{\the\topsep}% +\edef\@IEEEiedlabelwidth{\the\labelwidth}% +\edef\@IEEEiedlabelsep{\the\labelsep}% +\edef\@IEEEiedleftmargin{\the\leftmargin}% +\edef\@IEEEiedpartopsep{\the\partopsep}% +\edef\@IEEEiedparsep{\the\parsep}% +\edef\@IEEEieditemsep{\the\itemsep}% +\edef\@IEEEiedrightmargin{\the\rightmargin}% +\edef\@IEEEiedlistparindent{\the\listparindent}% +\edef\@IEEEieditemindent{\the\itemindent}} + +% Note controlled spacing here +\def\@IEEErestorelistparams{\topsep\@IEEEiedtopsep\relax% +\labelwidth\@IEEEiedlabelwidth\relax% +\labelsep\@IEEEiedlabelsep\relax% +\leftmargin\@IEEEiedleftmargin\relax% +\partopsep\@IEEEiedpartopsep\relax% +\parsep\@IEEEiedparsep\relax% +\itemsep\@IEEEieditemsep\relax% +\rightmargin\@IEEEiedrightmargin\relax% +\listparindent\@IEEEiedlistparindent\relax% +\itemindent\@IEEEieditemindent\relax} + + +% v1.6b provide original LaTeX IED list environments +% note that latex.ltx defines \itemize and \enumerate, but not \description +% which must be created by the base classes +% save original LaTeX itemize and enumerate +\let\LaTeXitemize\itemize +\let\endLaTeXitemize\enditemize +\let\LaTeXenumerate\enumerate +\let\endLaTeXenumerate\endenumerate + +% provide original LaTeX description environment from article.cls +\newenvironment{LaTeXdescription} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} 
+\newcommand*\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} + + +% override LaTeX's default IED lists +\def\itemize{\@IEEEitemize} +\def\enditemize{\@endIEEEitemize} +\def\enumerate{\@IEEEenumerate} +\def\endenumerate{\@endIEEEenumerate} +\def\description{\@IEEEdescription} +\def\enddescription{\@endIEEEdescription} + +% provide the user with aliases - may help those using packages that +% override itemize, enumerate, or description +\def\IEEEitemize{\@IEEEitemize} +\def\endIEEEitemize{\@endIEEEitemize} +\def\IEEEenumerate{\@IEEEenumerate} +\def\endIEEEenumerate{\@endIEEEenumerate} +\def\IEEEdescription{\@IEEEdescription} +\def\endIEEEdescription{\@endIEEEdescription} + + +% V1.6 we want to keep the IEEEtran IED list definitions as our own internal +% commands so they are protected against redefinition +\def\@IEEEitemize{\@ifnextchar[{\@@IEEEitemize}{\@@IEEEitemize[\relax]}} +\def\@IEEEenumerate{\@ifnextchar[{\@@IEEEenumerate}{\@@IEEEenumerate[\relax]}} +\def\@IEEEdescription{\@ifnextchar[{\@@IEEEdescription}{\@@IEEEdescription[\relax]}} +\def\@endIEEEitemize{\endlist} +\def\@endIEEEenumerate{\endlist} +\def\@endIEEEdescription{\endlist} + + +% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS +% AS THIS WILL FORCE NEW PARAGRAPHS AFTER THE IED LISTS +% IEEEtran itemized list MDS 1/2001 +% Note controlled spacing here, shield end of lines with % +\def\@@IEEEitemize[#1]{% + \ifnum\@itemdepth>3\relax\@toodeep\else% + \ifnum\@listdepth>5\relax\@toodeep\else% + \advance\@itemdepth\@ne% + \edef\@itemitem{labelitem\romannumeral\the\@itemdepth}% + % get the labelindentfactor for this level + \advance\@listdepth\@ne% we need to know what the level WILL be + \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}% + \advance\@listdepth-\@ne% undo our increment + \def\@IEEEiedjustify{2}% right justified labels are default + % set other defaults + \IEEEnocalcleftmarginfalse% + \IEEEnolabelindentfactorfalse% + 
\topsep\IEEEiedtopsep% + \IEEElabelindent\IEEEilabelindent% + \labelsep\IEEEiednormlabelsep% + \partopsep 0ex% + \parsep 0ex% + \itemsep \parskip% + \rightmargin 0em% + \listparindent 0em% + \itemindent 0em% + % calculate the label width + % the user can override this later if + % they specified a \labelwidth + \settowidth{\labelwidth}{\csname labelitem\romannumeral\the\@itemdepth\endcsname}% + \@IEEEsavelistparams% save our list parameters + \list{\csname\@itemitem\endcsname}{% + \@IEEErestorelistparams% override any list{} changes + % to our globals + \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel + \IEEEiedlistdecl% let user alter parameters + #1\relax% + % If the user has requested not to use the + % labelindent factor, don't revise \labelindent + \ifIEEEnolabelindentfactor\relax% + \else\IEEElabelindent=\IEEElabelindentfactor\labelindent% + \fi% + % Unless the user has requested otherwise, + % calculate our left margin based + % on \IEEElabelindent, \labelwidth and + % \labelsep + \ifIEEEnocalcleftmargin\relax% + \else\IEEEcalcleftmargin{\IEEElabelindent}% + \fi}\fi\fi}% + + +% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS +% AS THIS WILL FORCE NEW PARAGRAPHS AFTER THE IED LISTS +% IEEEtran enumerate list MDS 1/2001 +% Note controlled spacing here, shield end of lines with % +\def\@@IEEEenumerate[#1]{% + \ifnum\@enumdepth>3\relax\@toodeep\else% + \ifnum\@listdepth>5\relax\@toodeep\else% + \advance\@enumdepth\@ne% + \edef\@enumctr{enum\romannumeral\the\@enumdepth}% + % get the labelindentfactor for this level + \advance\@listdepth\@ne% we need to know what the level WILL be + \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}% + \advance\@listdepth-\@ne% undo our increment + \def\@IEEEiedjustify{2}% right justified labels are default + % set other defaults + \IEEEnocalcleftmarginfalse% + \IEEEnolabelindentfactorfalse% + \topsep\IEEEiedtopsep% + \IEEElabelindent\IEEEelabelindent% + 
\labelsep\IEEEiednormlabelsep% + \partopsep 0ex% + \parsep 0ex% + \itemsep 0ex% + \rightmargin 0em% + \listparindent 0em% + \itemindent 0em% + % calculate the label width + % We'll set it to the width suitable for all labels using + % normalfont 1) to 9) + % The user can override this later + \settowidth{\labelwidth}{9)}% + \@IEEEsavelistparams% save our list parameters + \list{\csname label\@enumctr\endcsname}{\usecounter{\@enumctr}% + \@IEEErestorelistparams% override any list{} changes + % to our globals + \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel + \IEEEiedlistdecl% let user alter parameters + #1\relax% + % If the user has requested not to use the + % IEEElabelindent factor, don't revise \IEEElabelindent + \ifIEEEnolabelindentfactor\relax% + \else\IEEElabelindent=\IEEElabelindentfactor\IEEElabelindent% + \fi% + % Unless the user has requested otherwise, + % calculate our left margin based + % on \IEEElabelindent, \labelwidth and + % \labelsep + \ifIEEEnocalcleftmargin\relax% + \else\IEEEcalcleftmargin{\IEEElabelindent}% + \fi}\fi\fi}% + + +% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS +% AS THIS WILL FORCE NEW PARAGRAPHS AFTER THE IED LISTS +% IEEEtran description list MDS 1/2001 +% Note controlled spacing here, shield end of lines with % +\def\@@IEEEdescription[#1]{% + \ifnum\@listdepth>5\relax\@toodeep\else% + % get the labelindentfactor for this level + \advance\@listdepth\@ne% we need to know what the level WILL be + \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}% + \advance\@listdepth-\@ne% undo our increment + \def\@IEEEiedjustify{0}% left justified labels are default + % set other defaults + \IEEEnocalcleftmarginfalse% + \IEEEnolabelindentfactorfalse% + \topsep\IEEEiedtopsep% + \IEEElabelindent\IEEEdlabelindent% + % assume normal labelsep + \labelsep\IEEEiednormlabelsep% + \partopsep 0ex% + \parsep 0ex% + \itemsep 0ex% + \rightmargin 0em% + \listparindent 0em% + \itemindent 
0em% + % Bogus label width in case the user forgets + % to set it. + % TIP: If you want to see what a variable's width is you + % can use the TeX command \showthe\width-variable to + % display it on the screen during compilation + % (This might be helpful to know when you need to find out + % which label is the widest) + \settowidth{\labelwidth}{Hello}% + \@IEEEsavelistparams% save our list parameters + \list{}{\@IEEErestorelistparams% override any list{} changes + % to our globals + \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel + \IEEEiedlistdecl% let user alter parameters + #1\relax% + % If the user has requested not to use the + % labelindent factor, don't revise \IEEElabelindent + \ifIEEEnolabelindentfactor\relax% + \else\IEEElabelindent=\IEEElabelindentfactor\IEEElabelindent% + \fi% + % Unless the user has requested otherwise, + % calculate our left margin based + % on \IEEElabelindent, \labelwidth and + % \labelsep + \ifIEEEnocalcleftmargin\relax% + \else\IEEEcalcleftmargin{\IEEElabelindent}\relax% + \fi}\fi} + +% v1.6b we use one makelabel that does justification as needed. +\def\@IEEEiedmakelabel#1{\relax\if\@IEEEiedjustify 0\relax +\makebox[\labelwidth][l]{\normalfont #1}\else +\if\@IEEEiedjustify 1\relax +\makebox[\labelwidth][c]{\normalfont #1}\else +\makebox[\labelwidth][r]{\normalfont #1}\fi\fi} + + +% VERSE and QUOTE +% V1.7 define environments with newenvironment +\newenvironment{verse}{\let\\=\@centercr + \list{}{\itemsep\z@ \itemindent -1.5em \listparindent \itemindent + \rightmargin\leftmargin\advance\leftmargin 1.5em}\item\relax} + {\endlist} +\newenvironment{quotation}{\list{}{\listparindent 1.5em \itemindent\listparindent + \rightmargin\leftmargin \parsep 0pt plus 1pt}\item\relax} + {\endlist} +\newenvironment{quote}{\list{}{\rightmargin\leftmargin}\item\relax} + {\endlist} + + +% \titlepage +% provided only for backward compatibility. \maketitle is the correct +% way to create the title page. 
+\newif\if@restonecol +\def\titlepage{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \else \newpage \fi \thispagestyle{empty}\c@page\z@} +\def\endtitlepage{\if@restonecol\twocolumn \else \newpage \fi} + +% standard values from article.cls +\arraycolsep 5pt +\arrayrulewidth .4pt +\doublerulesep 2pt + +\tabcolsep 6pt +\tabbingsep 0.5em + + +%% FOOTNOTES +% +%\skip\footins 10pt plus 4pt minus 2pt +% V1.6 respond to changes in font size +% space added above the footnotes (if present) +\skip\footins 0.9\baselineskip plus 0.4\baselineskip minus 0.2\baselineskip + +% V1.6, we need to make \footnotesep responsive to changes +% in \baselineskip or strange spacings will result when in +% draft mode. Here is a little LaTeX secret - \footnotesep +% determines the height of an invisible strut that is placed +% *above* the baseline of footnotes after the first. Since +% LaTeX considers the space for characters to be 0.7/baselineskip +% above the baseline and 0.3/baselineskip below it, we need to +% use 0.7/baselineskip as a \footnotesep to maintain equal spacing +% between all the lines of the footnotes. IEEE often uses a tad +% more, so use 0.8\baselineskip. This slightly larger value also helps +% the text to clear the footnote marks. Note that \thanks in IEEEtran +% uses its own value of \footnotesep which is set in \maketitle. +{\footnotesize +\global\footnotesep 0.8\baselineskip} + +\def\unnumberedfootnote{\gdef\@thefnmark{\quad}\@footnotetext} + +\skip\@mpfootins 0.3\baselineskip +\fboxsep = 3pt +\fboxrule = .4pt +% V1.6 use 1em, then use LaTeX2e's \@makefnmark +% Note that IEEE normally *left* aligns the footnote marks, so we don't need +% box resizing tricks here. 
+%\long\def\@makefnmark{\scriptsize\normalfont\@thefnmark}
+\long\def\@makefntext#1{\parindent 1em\indent\hbox{\@makefnmark}#1}% V1.6 use 1em
+\long\def\@maketablefntext#1{\raggedleft\leavevmode\hbox{\@makefnmark}#1}
+% V1.7 compsoc does not use superscripts for footnote marks
+\ifCLASSOPTIONcompsoc
+\def\@IEEEcompsocmakefnmark{\hbox{\normalfont\@thefnmark.\ }}
+\long\def\@makefntext#1{\parindent 1em\indent\hbox{\@IEEEcompsocmakefnmark}#1}
+\fi
+
+% IEEE does not use footnote rules. Or do they?
+\def\footnoterule{\vskip-2pt \hrule height 0.6pt depth \z@ \vskip1.6pt\relax}
+\toks@\expandafter{\@setminipage\let\footnoterule\relax\footnotesep\z@}
+\edef\@setminipage{\the\toks@}
+
+% V1.7 for compsoc, IEEE uses a footnote rule only for \thanks. We devise a "one-shot"
+% system to implement this.
+\newif\if@IEEEenableoneshotfootnoterule
+\@IEEEenableoneshotfootnoterulefalse
+\ifCLASSOPTIONcompsoc
+\def\footnoterule{\relax\if@IEEEenableoneshotfootnoterule
+\kern-5pt
+\hbox to \columnwidth{\hfill\vrule width 0.5\columnwidth height 0.4pt\hfill}
+\kern4.6pt
+\global\@IEEEenableoneshotfootnoterulefalse
+\else
+\relax
+\fi}
+\fi
+
+% V1.6 do not allow LaTeX to break a footnote across multiple pages
+\interfootnotelinepenalty=10000
+
+% V1.6 discourage breaks within equations
+% Note that amsmath normally sets this to 10000,
+% but LaTeX2e normally uses 100.
+\interdisplaylinepenalty=2500 + +% default allows section depth up to /paragraph +\setcounter{secnumdepth}{4} + +% technotes do not allow /paragraph +\ifCLASSOPTIONtechnote + \setcounter{secnumdepth}{3} +\fi +% neither do compsoc conferences +\@IEEEcompsocconfonly{\setcounter{secnumdepth}{3}} + + +\newcounter{section} +\newcounter{subsection}[section] +\newcounter{subsubsection}[subsection] +\newcounter{paragraph}[subsubsection] + +% used only by IEEEtran's IEEEeqnarray as other packages may +% have their own, different, implementations +\newcounter{IEEEsubequation}[equation] + +% as shown when called by user from \ref, \label and in table of contents +\def\theequation{\arabic{equation}} % 1 +\def\theIEEEsubequation{\theequation\alph{IEEEsubequation}} % 1a (used only by IEEEtran's IEEEeqnarray) +\ifCLASSOPTIONcompsoc +% compsoc is all arabic +\def\thesection{\arabic{section}} +\def\thesubsection{\thesection.\arabic{subsection}} +\def\thesubsubsection{\thesubsection.\arabic{subsubsection}} +\def\theparagraph{\thesubsubsection.\arabic{paragraph}} +\else +\def\thesection{\Roman{section}} % I +% V1.7, \mbox prevents breaks around - +\def\thesubsection{\mbox{\thesection-\Alph{subsection}}} % I-A +% V1.7 use I-A1 format used by IEEE rather than I-A.1 +\def\thesubsubsection{\thesubsection\arabic{subsubsection}} % I-A1 +\def\theparagraph{\thesubsubsection\alph{paragraph}} % I-A1a +\fi + +% From Heiko Oberdiek. Because of the \mbox in \thesubsection, we need to +% tell hyperref to disable the \mbox command when making PDF bookmarks. +% This done already with hyperref.sty version 6.74o and later, but +% it will not hurt to do it here again for users of older versions. 
+\@ifundefined{pdfstringdefPreHook}{\let\pdfstringdefPreHook\@empty}{}% +\g@addto@macro\pdfstringdefPreHook{\let\mbox\relax} + + +% Main text forms (how shown in main text headings) +% V1.6, using \thesection in \thesectiondis allows changes +% in the former to automatically appear in the latter +\ifCLASSOPTIONcompsoc + \ifCLASSOPTIONconference% compsoc conference + \def\thesectiondis{\thesection.} + \def\thesubsectiondis{\thesectiondis\arabic{subsection}.} + \def\thesubsubsectiondis{\thesubsectiondis\arabic{subsubsection}.} + \def\theparagraphdis{\thesubsubsectiondis\arabic{paragraph}.} + \else% compsoc not conferencs + \def\thesectiondis{\thesection} + \def\thesubsectiondis{\thesectiondis.\arabic{subsection}} + \def\thesubsubsectiondis{\thesubsectiondis.\arabic{subsubsection}} + \def\theparagraphdis{\thesubsubsectiondis.\arabic{paragraph}} + \fi +\else% not compsoc + \def\thesectiondis{\thesection.} % I. + \def\thesubsectiondis{\Alph{subsection}.} % B. + \def\thesubsubsectiondis{\arabic{subsubsection})} % 3) + \def\theparagraphdis{\alph{paragraph})} % d) +\fi + +% just like LaTeX2e's \@eqnnum +\def\theequationdis{{\normalfont \normalcolor (\theequation)}}% (1) +% IEEEsubequation used only by IEEEtran's IEEEeqnarray +\def\theIEEEsubequationdis{{\normalfont \normalcolor (\theIEEEsubequation)}}% (1a) +% redirect LaTeX2e's equation number display and all that depend on +% it, through IEEEtran's \theequationdis +\def\@eqnnum{\theequationdis} + + + +% V1.7 provide string macros as article.cls does +\def\contentsname{Contents} +\def\listfigurename{List of Figures} +\def\listtablename{List of Tables} +\def\refname{References} +\def\indexname{Index} +\def\figurename{Fig.} +\def\tablename{TABLE} +\@IEEEcompsocconfonly{\def\figurename{Figure}\def\tablename{Table}} +\def\partname{Part} +\def\appendixname{Appendix} +\def\abstractname{Abstract} +% IEEE specific names +\def\IEEEkeywordsname{Keywords} +\def\IEEEproofname{Proof} + + +% LIST OF FIGURES AND TABLES AND TABLE OF 
CONTENTS +% +\def\@pnumwidth{1.55em} +\def\@tocrmarg{2.55em} +\def\@dotsep{4.5} +\setcounter{tocdepth}{3} + +% adjusted some spacings here so that section numbers will not easily +% collide with the section titles. +% VIII; VIII-A; and VIII-A.1 are usually the worst offenders. +% MDS 1/2001 +\def\tableofcontents{\section*{\contentsname}\@starttoc{toc}} +\def\l@section#1#2{\addpenalty{\@secpenalty}\addvspace{1.0em plus 1pt}% + \@tempdima 2.75em \begingroup \parindent \z@ \rightskip \@pnumwidth% + \parfillskip-\@pnumwidth {\bfseries\leavevmode #1}\hfil\hbox to\@pnumwidth{\hss #2}\par% + \endgroup} +% argument format #1:level, #2:labelindent,#3:labelsep +\def\l@subsection{\@dottedtocline{2}{2.75em}{3.75em}} +\def\l@subsubsection{\@dottedtocline{3}{6.5em}{4.5em}} +% must provide \l@ defs for ALL sublevels EVEN if tocdepth +% is such as they will not appear in the table of contents +% these defs are how TOC knows what level these things are! +\def\l@paragraph{\@dottedtocline{4}{6.5em}{5.5em}} +\def\l@subparagraph{\@dottedtocline{5}{6.5em}{6.5em}} +\def\listoffigures{\section*{\listfigurename}\@starttoc{lof}} +\def\l@figure{\@dottedtocline{1}{0em}{2.75em}} +\def\listoftables{\section*{\listtablename}\@starttoc{lot}} +\let\l@table\l@figure + + +%% Definitions for floats +%% +%% Normal Floats +\floatsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip +\textfloatsep 1.7\baselineskip plus 0.2\baselineskip minus 0.4\baselineskip +\@fptop 0pt plus 1fil +\@fpsep 0.75\baselineskip plus 2fil +\@fpbot 0pt plus 1fil +\def\topfraction{0.9} +\def\bottomfraction{0.4} +\def\floatpagefraction{0.8} +% V1.7, let top floats approach 90% of page +\def\textfraction{0.1} + +%% Double Column Floats +\dblfloatsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip + +\dbltextfloatsep 1.7\baselineskip plus 0.2\baselineskip minus 0.4\baselineskip +% Note that it would be nice if the rubber here actually worked in LaTeX2e. 
+% There is a long standing limitation in LaTeX, first discovered (to the best +% of my knowledge) by Alan Jeffrey in 1992. LaTeX ignores the stretchable +% portion of \dbltextfloatsep, and as a result, double column figures can and +% do result in an non-integer number of lines in the main text columns with +% underfull vbox errors as a consequence. A post to comp.text.tex +% by Donald Arseneau confirms that this had not yet been fixed in 1998. +% IEEEtran V1.6 will fix this problem for you in the titles, but it doesn't +% protect you from other double floats. Happy vspace'ing. + +\@dblfptop 0pt plus 1fil +\@dblfpsep 0.75\baselineskip plus 2fil +\@dblfpbot 0pt plus 1fil +\def\dbltopfraction{0.8} +\def\dblfloatpagefraction{0.8} +\setcounter{dbltopnumber}{4} + +\intextsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip +\setcounter{topnumber}{2} +\setcounter{bottomnumber}{2} +\setcounter{totalnumber}{4} + + + +% article class provides these, we should too. +\newlength\abovecaptionskip +\newlength\belowcaptionskip +% but only \abovecaptionskip is used above figure captions and *below* table +% captions +\setlength\abovecaptionskip{0.65\baselineskip} +\setlength\belowcaptionskip{0.75\baselineskip} +% V1.6 create hooks in case the caption spacing ever needs to be +% overridden by a user +\def\@IEEEfigurecaptionsepspace{\vskip\abovecaptionskip\relax}% +\def\@IEEEtablecaptionsepspace{\vskip\belowcaptionskip\relax}% + + +% 1.6b revise caption system so that \@makecaption uses two arguments +% as with LaTeX2e. Otherwise, there will be problems when using hyperref. 
+\def\@IEEEtablestring{table}
+
+\ifCLASSOPTIONcompsoc
+% V1.7 compsoc \@makecaption
+\ifCLASSOPTIONconference% compsoc conference
+\long\def\@makecaption#1#2{%
+% test if is a for a figure or table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\normalsize\begin{center}{\normalfont\sffamily\normalsize {#1.}~ #2}\end{center}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ }%
+\parbox[t]{\hsize}{\normalfont\sffamily\normalsize \noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, center
+\else%
+\hbox to\hsize{\normalfont\sffamily\normalsize\hfil\box\@tempboxa\hfil}%
+\fi\fi}
+\else% nonconference compsoc
+\long\def\@makecaption#1#2{%
+% test if is a for a figure or table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\normalsize\begin{center}{\normalfont\sffamily\normalsize #1}\\{\normalfont\sffamily\normalsize #2}\end{center}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ }%
+\parbox[t]{\hsize}{\normalfont\sffamily\normalsize \noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, left justify
+\else%
+\hbox to\hsize{\normalfont\sffamily\normalsize\box\@tempboxa\hfil}%
+\fi\fi}
+\fi
+
+\else% traditional noncompsoc \@makecaption
+\long\def\@makecaption#1#2{%
+% test if is a for a figure or table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\footnotesize{\centering\normalfont\footnotesize#1.\qquad\scshape #2\par}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+% 3/2001 use footnotesize, not small; use two nonbreaking spaces, not one
+\setbox\@tempboxa\hbox{\normalfont\footnotesize {#1.}~~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\footnotesize {#1.}~~ }%
+\parbox[t]{\hsize}{\normalfont\footnotesize\noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, center if conference, left justify otherwise
+\else%
+\ifCLASSOPTIONconference \hbox to\hsize{\normalfont\footnotesize\hfil\box\@tempboxa\hfil}%
+\else \hbox to\hsize{\normalfont\footnotesize\box\@tempboxa\hfil}%
+\fi\fi\fi}
+\fi
+
+
+
+% V1.7 disable captions class option, do so in a way that retains operation of \label
+% within \caption
+\ifCLASSOPTIONcaptionsoff
+\long\def\@makecaption#1#2{\vspace*{2em}\footnotesize\begin{center}{\footnotesize #1}\end{center}%
+\let\@IEEEtemporiglabeldefsave\label
+\let\@IEEEtemplabelargsave\relax
+\def\label##1{\gdef\@IEEEtemplabelargsave{##1}}%
+\setbox\@tempboxa\hbox{#2}%
+\let\label\@IEEEtemporiglabeldefsave
+\ifx\@IEEEtemplabelargsave\relax\else\label{\@IEEEtemplabelargsave}\fi}
+\fi
+
+
+% V1.7 define end environments with \def not \let so as to work OK with
+% preview-latex
+\newcounter{figure}
+\def\thefigure{\@arabic\c@figure}
+\def\fps@figure{tbp}
+\def\ftype@figure{1}
+\def\ext@figure{lof}
+\def\fnum@figure{\figurename~\thefigure}
+\def\figure{\@float{figure}}
+\def\endfigure{\end@float}
+\@namedef{figure*}{\@dblfloat{figure}}
+\@namedef{endfigure*}{\end@dblfloat}
+\newcounter{table}
+\ifCLASSOPTIONcompsoc
+\def\thetable{\arabic{table}}
+\else
+\def\thetable{\@Roman\c@table}
+\fi
+\def\fps@table{tbp}
+\def\ftype@table{2}
+\def\ext@table{lot}
+\def\fnum@table{\tablename~\thetable}
+% V1.6 IEEE uses 8pt text for tables
+% to default to footnotesize, we hack into LaTeX2e's \@floatboxreset and pray
+\def\table{\def\@floatboxreset{\reset@font\footnotesize\@setminipage}%
+ \let\@makefntext\@maketablefntext
+ \@float{table}}
+\def\endtable{\end@float}
+% v1.6b double column tables need to default to footnotesize as well.
+\@namedef{table*}{\def\@floatboxreset{\reset@font\footnotesize\@setminipage}\@dblfloat{table}}
+\@namedef{endtable*}{\end@dblfloat}
+
+
+
+
+%%
+%% START OF IEEEeqnarray DEFINITIONS
+%%
+%% Inspired by the concepts, examples, and previous works of LaTeX
+%% coders and developers such as Donald Arseneau, Fred Bartlett,
+%% David Carlisle, Tony Liu, Frank Mittelbach, Piet van Oostrum,
+%% Roland Winkler and Mark Wooding.
+%% I don't make the claim that my work here is even near their calibre. ;)
+
+
+% hook to allow easy changeover to IEEEtran.cls/tools.sty error reporting
+\def\@IEEEclspkgerror{\ClassError{IEEEtran}}
+
+\newif\if@IEEEeqnarraystarform% flag to indicate if the environment was called as the star form
+\@IEEEeqnarraystarformfalse
+
+\newif\if@advanceIEEEeqncolcnt% tracks if the environment should advance the col counter
+% allows a way to make an \IEEEeqnarraybox that can be used within an \IEEEeqnarray
+% used by IEEEeqnarraymulticol so that it can work properly in both
+\@advanceIEEEeqncolcnttrue
+
+\newcount\@IEEEeqnnumcols % tracks how many IEEEeqnarray cols are defined
+\newcount\@IEEEeqncolcnt % tracks how many IEEEeqnarray cols the user actually used
+
+
+% The default math style used by the columns
+\def\IEEEeqnarraymathstyle{\displaystyle}
+% The default text style used by the columns
+% default to using the current font
+\def\IEEEeqnarraytextstyle{\relax}
+
+% like the iedlistdecl but for \IEEEeqnarray
+\def\IEEEeqnarraydecl{\relax}
+\def\IEEEeqnarrayboxdecl{\relax}
+
+% \yesnumber is the opposite of \nonumber
+% a novel concept with the same def as the equationarray package
+% However, we give IEEE versions too since some LaTeX packages such as
+% the MDWtools mathenv.sty redefine \nonumber to something else.
+\providecommand{\yesnumber}{\global\@eqnswtrue} +\def\IEEEyesnumber{\global\@eqnswtrue} +\def\IEEEnonumber{\global\@eqnswfalse} + + +\def\IEEEyessubnumber{\global\@IEEEissubequationtrue\global\@eqnswtrue% +\if@IEEEeqnarrayISinner% only do something inside an IEEEeqnarray +\if@IEEElastlinewassubequation\addtocounter{equation}{-1}\else\setcounter{IEEEsubequation}{1}\fi% +\def\@currentlabel{\p@IEEEsubequation\theIEEEsubequation}\fi} + +% flag to indicate that an equation is a sub equation +\newif\if@IEEEissubequation% +\@IEEEissubequationfalse + +% allows users to "push away" equations that get too close to the equation numbers +\def\IEEEeqnarraynumspace{\hphantom{\if@IEEEissubequation\theIEEEsubequationdis\else\theequationdis\fi}} + +% provides a way to span multiple columns within IEEEeqnarray environments +% will consider \if@advanceIEEEeqncolcnt before globally advancing the +% column counter - so as to work within \IEEEeqnarraybox +% usage: \IEEEeqnarraymulticol{number cols. to span}{col type}{cell text} +\long\def\IEEEeqnarraymulticol#1#2#3{\multispan{#1}% +% check if column is defined +\relax\expandafter\ifx\csname @IEEEeqnarraycolDEF#2\endcsname\@IEEEeqnarraycolisdefined% +\csname @IEEEeqnarraycolPRE#2\endcsname#3\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST#2\endcsname% +\else% if not, error and use default type +\@IEEEclspkgerror{Invalid column type "#2" in \string\IEEEeqnarraymulticol.\MessageBreak +Using a default centering column instead}% +{You must define IEEEeqnarray column types before use.}% +\csname @IEEEeqnarraycolPRE@IEEEdefault\endcsname#3\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST@IEEEdefault\endcsname% +\fi% +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by #1\relax\fi} + +% like \omit, but maintains track of the column counter for \IEEEeqnarray 
+\def\IEEEeqnarrayomit{\omit\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by 1\relax\fi} + + +% provides a way to define a letter referenced column type +% usage: \IEEEeqnarraydefcol{col. type letter/name}{pre insertion text}{post insertion text} +\def\IEEEeqnarraydefcol#1#2#3{\expandafter\def\csname @IEEEeqnarraycolPRE#1\endcsname{#2}% +\expandafter\def\csname @IEEEeqnarraycolPOST#1\endcsname{#3}% +\expandafter\def\csname @IEEEeqnarraycolDEF#1\endcsname{1}} + + +% provides a way to define a numerically referenced inter-column glue types +% usage: \IEEEeqnarraydefcolsep{col. glue number}{glue definition} +\def\IEEEeqnarraydefcolsep#1#2{\expandafter\def\csname @IEEEeqnarraycolSEP\romannumeral #1\endcsname{#2}% +\expandafter\def\csname @IEEEeqnarraycolSEPDEF\romannumeral #1\endcsname{1}} + + +\def\@IEEEeqnarraycolisdefined{1}% just a macro for 1, used for checking undefined column types + + +% expands and appends the given argument to the \@IEEEtrantmptoksA token list +% used to build up the \halign preamble +\def\@IEEEappendtoksA#1{\edef\@@IEEEappendtoksA{\@IEEEtrantmptoksA={\the\@IEEEtrantmptoksA #1}}% +\@@IEEEappendtoksA} + +% also appends to \@IEEEtrantmptoksA, but does not expand the argument +% uses \toks8 as a scratchpad register +\def\@IEEEappendNOEXPANDtoksA#1{\toks8={#1}% +\edef\@@IEEEappendNOEXPANDtoksA{\@IEEEtrantmptoksA={\the\@IEEEtrantmptoksA\the\toks8}}% +\@@IEEEappendNOEXPANDtoksA} + +% define some common column types for the user +% math +\IEEEeqnarraydefcol{l}{$\IEEEeqnarraymathstyle}{$\hfil} +\IEEEeqnarraydefcol{c}{\hfil$\IEEEeqnarraymathstyle}{$\hfil} +\IEEEeqnarraydefcol{r}{\hfil$\IEEEeqnarraymathstyle}{$} +\IEEEeqnarraydefcol{L}{$\IEEEeqnarraymathstyle{}}{{}$\hfil} +\IEEEeqnarraydefcol{C}{\hfil$\IEEEeqnarraymathstyle{}}{{}$\hfil} +\IEEEeqnarraydefcol{R}{\hfil$\IEEEeqnarraymathstyle{}}{{}$} +% text +\IEEEeqnarraydefcol{s}{\IEEEeqnarraytextstyle}{\hfil} +\IEEEeqnarraydefcol{t}{\hfil\IEEEeqnarraytextstyle}{\hfil} 
+\IEEEeqnarraydefcol{u}{\hfil\IEEEeqnarraytextstyle}{}
+
+% vertical rules
+\IEEEeqnarraydefcol{v}{}{\vrule width\arrayrulewidth}
+\IEEEeqnarraydefcol{vv}{\vrule width\arrayrulewidth\hfil}{\hfil\vrule width\arrayrulewidth}
+\IEEEeqnarraydefcol{V}{}{\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth}
+\IEEEeqnarraydefcol{VV}{\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth\hfil}%
+{\hfil\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth}
+
+% horizontal rules
+\IEEEeqnarraydefcol{h}{}{\leaders\hrule height\arrayrulewidth\hfil}
+\IEEEeqnarraydefcol{H}{}{\leaders\vbox{\hrule width\arrayrulewidth\vskip\doublerulesep\hrule width\arrayrulewidth}\hfil}
+
+% plain
+\IEEEeqnarraydefcol{x}{}{}
+\IEEEeqnarraydefcol{X}{$}{$}
+
+% the default column type to use in the event a column type is not defined
+\IEEEeqnarraydefcol{@IEEEdefault}{\hfil$\IEEEeqnarraymathstyle}{$\hfil}
+
+
+% a zero tabskip (used for "-" col types)
+\def\@IEEEeqnarraycolSEPzero{0pt plus 0pt minus 0pt}
+% a centering tabskip (used for "+" col types)
+\def\@IEEEeqnarraycolSEPcenter{1000pt plus 0pt minus 1000pt}
+
+% top level default tabskip glues for the start, end, and inter-column
+% may be reset within environments not always at the top level, e.g., \IEEEeqnarraybox
+\edef\@IEEEeqnarraycolSEPdefaultstart{\@IEEEeqnarraycolSEPcenter}% default start glue
+\edef\@IEEEeqnarraycolSEPdefaultend{\@IEEEeqnarraycolSEPcenter}% default end glue
+\edef\@IEEEeqnarraycolSEPdefaultmid{\@IEEEeqnarraycolSEPzero}% default inter-column glue
+
+
+
+% creates a vertical rule that extends from the bottom to the top of a cell
+% Provided in case other packages redefine \vline some other way.
+% usage: \IEEEeqnarrayvrule[rule thickness]
+% If no argument is provided, \arrayrulewidth will be used for the rule thickness.
+\newcommand\IEEEeqnarrayvrule[1][\arrayrulewidth]{\vrule\@width#1\relax} + +% creates a blank separator row +% usage: \IEEEeqnarrayseprow[separation length][font size commands] +% default is \IEEEeqnarrayseprow[0.25\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \skip5 as a scratch register - calls \@IEEEeqnarraystrutsize which uses more scratch registers +\def\IEEEeqnarrayseprow{\relax\@ifnextchar[{\@IEEEeqnarrayseprow}{\@IEEEeqnarrayseprow[0.25\normalbaselineskip]}} +\def\@IEEEeqnarrayseprow[#1]{\relax\@ifnextchar[{\@@IEEEeqnarrayseprow[#1]}{\@@IEEEeqnarrayseprow[#1][\relax]}} +\def\@@IEEEeqnarrayseprow[#1][#2]{\def\@IEEEeqnarrayseprowARGONE{#1}% +\ifx\@IEEEeqnarrayseprowARGONE\@empty% +% get the skip value, based on the font commands +% use skip5 because \IEEEeqnarraystrutsize uses \skip0, \skip2, \skip3 +% assign within a bogus box to confine the font changes +{\setbox0=\hbox{#2\relax\global\skip5=0.25\normalbaselineskip}}% +\else% +{\setbox0=\hbox{#2\relax\global\skip5=#1}}% +\fi% +\@IEEEeqnarrayhoptolastcolumn\IEEEeqnarraystrutsize{\skip5}{0pt}[\relax]\relax} + +% creates a blank separator row, but omits all the column templates +% usage: \IEEEeqnarrayseprowcut[separation length][font size commands] +% default is \IEEEeqnarrayseprowcut[0.25\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \skip5 as a scratch register - calls \@IEEEeqnarraystrutsize which uses more scratch registers +\def\IEEEeqnarrayseprowcut{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarrayseprowcut}{\@IEEEeqnarrayseprowcut[0.25\normalbaselineskip]}} +\def\@IEEEeqnarrayseprowcut[#1]{\relax\@ifnextchar[{\@@IEEEeqnarrayseprowcut[#1]}{\@@IEEEeqnarrayseprowcut[#1][\relax]}} 
+\def\@@IEEEeqnarrayseprowcut[#1][#2]{\def\@IEEEeqnarrayseprowARGONE{#1}% +\ifx\@IEEEeqnarrayseprowARGONE\@empty% +% get the skip value, based on the font commands +% use skip5 because \IEEEeqnarraystrutsize uses \skip0, \skip2, \skip3 +% assign within a bogus box to confine the font changes +{\setbox0=\hbox{#2\relax\global\skip5=0.25\normalbaselineskip}}% +\else% +{\setbox0=\hbox{#2\relax\global\skip5=#1}}% +\fi% +\IEEEeqnarraystrutsize{\skip5}{0pt}[\relax]\relax} + + + +% draws a single rule across all the columns optional +% argument determines the rule width, \arrayrulewidth is the default +% updates column counter as needed and turns off struts +% usage: \IEEEeqnarrayrulerow[rule line thickness] +\def\IEEEeqnarrayrulerow{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarrayrulerow}{\@IEEEeqnarrayrulerow[\arrayrulewidth]}} +\def\@IEEEeqnarrayrulerow[#1]{\leaders\hrule height#1\hfil\relax% put in our rule +% turn off any struts +\IEEEeqnarraystrutsize{0pt}{0pt}[\relax]\relax} + + +% draws a double rule by using a single rule row, a separator row, and then +% another single rule row +% first optional argument determines the rule thicknesses, \arrayrulewidth is the default +% second optional argument determines the rule spacing, \doublerulesep is the default +% usage: \IEEEeqnarraydblrulerow[rule line thickness][rule spacing] +\def\IEEEeqnarraydblrulerow{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarraydblrulerow}{\@IEEEeqnarraydblrulerow[\arrayrulewidth]}} +\def\@IEEEeqnarraydblrulerow[#1]{\relax\@ifnextchar[{\@@IEEEeqnarraydblrulerow[#1]}% +{\@@IEEEeqnarraydblrulerow[#1][\doublerulesep]}} 
+\def\@@IEEEeqnarraydblrulerow[#1][#2]{\def\@IEEEeqnarraydblrulerowARG{#1}%
+% we allow the user to say \IEEEeqnarraydblrulerow[][]
+\ifx\@IEEEeqnarraydblrulerowARG\@empty%
+\@IEEEeqnarrayrulerow[\arrayrulewidth]%
+\else%
+\@IEEEeqnarrayrulerow[#1]\relax%
+\fi%
+\def\@IEEEeqnarraydblrulerowARG{#2}%
+\ifx\@IEEEeqnarraydblrulerowARG\@empty%
+\\\IEEEeqnarrayseprow[\doublerulesep][\relax]%
+\else%
+\\\IEEEeqnarrayseprow[#2][\relax]%
+\fi%
+\\\multispan{\@IEEEeqnnumcols}%
+% advance column counter only if the IEEEeqnarray environment wants it
+\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi%
+\def\@IEEEeqnarraydblrulerowARG{#1}%
+\ifx\@IEEEeqnarraydblrulerowARG\@empty%
+\@IEEEeqnarrayrulerow[\arrayrulewidth]%
+\else%
+\@IEEEeqnarrayrulerow[#1]%
+\fi%
+}
+
+% draws a double rule by using a single rule row, a separator (cutting) row, and then
+% another single rule row
+% first optional argument determines the rule thicknesses, \arrayrulewidth is the default
+% second optional argument determines the rule spacing, \doublerulesep is the default
+% usage: \IEEEeqnarraydblrulerowcut[rule line thickness][rule spacing]
+\def\IEEEeqnarraydblrulerowcut{\multispan{\@IEEEeqnnumcols}\relax% span all the cols
+% advance column counter only if the IEEEeqnarray environment wants it
+\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi%
+\@ifnextchar[{\@IEEEeqnarraydblrulerowcut}{\@IEEEeqnarraydblrulerowcut[\arrayrulewidth]}}
+\def\@IEEEeqnarraydblrulerowcut[#1]{\relax\@ifnextchar[{\@@IEEEeqnarraydblrulerowcut[#1]}%
+{\@@IEEEeqnarraydblrulerowcut[#1][\doublerulesep]}}
+\def\@@IEEEeqnarraydblrulerowcut[#1][#2]{\def\@IEEEeqnarraydblrulerowARG{#1}%
+% we allow the user to say \IEEEeqnarraydblrulerow[][]
+\ifx\@IEEEeqnarraydblrulerowARG\@empty%
+\\\IEEEeqnarrayseprowcut[\doublerulesep][\relax]% +\else% +\\\IEEEeqnarrayseprowcut[#2][\relax]% +\fi% +\\\multispan{\@IEEEeqnnumcols}% +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\def\@IEEEeqnarraydblrulerowARG{#1}% +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\@IEEEeqnarrayrulerow[\arrayrulewidth]% +\else% +\@IEEEeqnarrayrulerow[#1]% +\fi% +} + + + +% inserts a full row's worth of &'s +% relies on \@IEEEeqnnumcols to provide the correct number of columns +% uses \@IEEEtrantmptoksA, \count0 as scratch registers +\def\@IEEEeqnarrayhoptolastcolumn{\@IEEEtrantmptoksA={}\count0=1\relax% +\loop% add cols if the user did not use them all +\ifnum\count0<\@IEEEeqnnumcols\relax% +\@IEEEappendtoksA{&}% +\advance\count0 by 1\relax% update the col count +\repeat% +\the\@IEEEtrantmptoksA%execute the &'s +} + + + +\newif\if@IEEEeqnarrayISinner % flag to indicate if we are within the lines +\@IEEEeqnarrayISinnerfalse % of an IEEEeqnarray - after the IEEEeqnarraydecl + +\edef\@IEEEeqnarrayTHEstrutheight{0pt} % height and depth of IEEEeqnarray struts +\edef\@IEEEeqnarrayTHEstrutdepth{0pt} + +\edef\@IEEEeqnarrayTHEmasterstrutheight{0pt} % default height and depth of +\edef\@IEEEeqnarrayTHEmasterstrutdepth{0pt} % struts within an IEEEeqnarray + +\edef\@IEEEeqnarrayTHEmasterstrutHSAVE{0pt} % saved master strut height +\edef\@IEEEeqnarrayTHEmasterstrutDSAVE{0pt} % and depth + +\newif\if@IEEEeqnarrayusemasterstrut % flag to indicate that the master strut value +\@IEEEeqnarrayusemasterstruttrue % is to be used + + + +% saves the strut height and depth of the master strut +\def\@IEEEeqnarraymasterstrutsave{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% save values +\edef\@IEEEeqnarrayTHEmasterstrutHSAVE{\the\dimen0}% 
+\edef\@IEEEeqnarrayTHEmasterstrutDSAVE{\the\dimen2}} + +% restores the strut height and depth of the master strut +\def\@IEEEeqnarraymasterstrutrestore{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutHSAVE\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutDSAVE\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% restore values +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}} + + +% globally restores the strut height and depth to the +% master values and sets the master strut flag to true +\def\@IEEEeqnarraystrutreset{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% restore values +\xdef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\xdef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\global\@IEEEeqnarrayusemasterstruttrue} + + +% if the master strut is not to be used, make the current +% values of \@IEEEeqnarrayTHEstrutheight, \@IEEEeqnarrayTHEstrutdepth +% and the use master strut flag, global +% this allows user strut commands issued in the last column to be carried +% into the isolation/strut column +\def\@IEEEeqnarrayglobalizestrutstatus{\relax% +\if@IEEEeqnarrayusemasterstrut\else% +\xdef\@IEEEeqnarrayTHEstrutheight{\@IEEEeqnarrayTHEstrutheight}% +\xdef\@IEEEeqnarrayTHEstrutdepth{\@IEEEeqnarrayTHEstrutdepth}% +\global\@IEEEeqnarrayusemasterstrutfalse% +\fi} + + + +% usage: \IEEEeqnarraystrutsize{height}{depth}[font size commands] +% If called outside the lines of an IEEEeqnarray, sets the height +% and depth of both the master and local struts. If called inside +% an IEEEeqnarray line, sets the height and depth of the local strut +% only and sets the flag to indicate the use of the local strut +% values. 
If the height or depth is left blank, 0.7\normalbaselineskip +% and 0.3\normalbaselineskip will be used, respectively. +% The optional argument can be used to evaluate the lengths under +% a different font size and styles. If none is specified, the current +% font is used. +% uses scratch registers \skip0, \skip2, \skip3, \dimen0, \dimen2 +\def\IEEEeqnarraystrutsize#1#2{\relax\@ifnextchar[{\@IEEEeqnarraystrutsize{#1}{#2}}{\@IEEEeqnarraystrutsize{#1}{#2}[\relax]}} +\def\@IEEEeqnarraystrutsize#1#2[#3]{\def\@IEEEeqnarraystrutsizeARG{#1}% +\ifx\@IEEEeqnarraystrutsizeARG\@empty% +{\setbox0=\hbox{#3\relax\global\skip3=0.7\normalbaselineskip}}% +\skip0=\skip3\relax% +\else% arg one present +{\setbox0=\hbox{#3\relax\global\skip3=#1\relax}}% +\skip0=\skip3\relax% +\fi% if null arg +\def\@IEEEeqnarraystrutsizeARG{#2}% +\ifx\@IEEEeqnarraystrutsizeARG\@empty% +{\setbox0=\hbox{#3\relax\global\skip3=0.3\normalbaselineskip}}% +\skip2=\skip3\relax% +\else% arg two present +{\setbox0=\hbox{#3\relax\global\skip3=#2\relax}}% +\skip2=\skip3\relax% +\fi% if null arg +% remove stretchability, just to be safe +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +\if@IEEEeqnarrayISinner% inner does not touch master strut size +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstrutfalse% do not use master +\else% outer, have to set master strut too +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}% +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstruttrue% use master strut +\fi} + + +% usage: \IEEEeqnarraystrutsizeadd{added height}{added depth}[font size commands] +% If called outside the lines of an IEEEeqnarray, adds the given height +% and depth to both the master and local struts. 
+% If called inside an IEEEeqnarray line, adds the given height and depth +% to the local strut only and sets the flag to indicate the use +% of the local strut values. +% In both cases, if a height or depth is left blank, 0pt is used instead. +% The optional argument can be used to evaluate the lengths under +% a different font size and styles. If none is specified, the current +% font is used. +% uses scratch registers \skip0, \skip2, \skip3, \dimen0, \dimen2 +\def\IEEEeqnarraystrutsizeadd#1#2{\relax\@ifnextchar[{\@IEEEeqnarraystrutsizeadd{#1}{#2}}{\@IEEEeqnarraystrutsizeadd{#1}{#2}[\relax]}} +\def\@IEEEeqnarraystrutsizeadd#1#2[#3]{\def\@IEEEeqnarraystrutsizearg{#1}% +\ifx\@IEEEeqnarraystrutsizearg\@empty% +\skip0=0pt\relax% +\else% arg one present +{\setbox0=\hbox{#3\relax\global\skip3=#1}}% +\skip0=\skip3\relax% +\fi% if null arg +\def\@IEEEeqnarraystrutsizearg{#2}% +\ifx\@IEEEeqnarraystrutsizearg\@empty% +\skip2=0pt\relax% +\else% arg two present +{\setbox0=\hbox{#3\relax\global\skip3=#2}}% +\skip2=\skip3\relax% +\fi% if null arg +% remove stretchability, just to be safe +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +\if@IEEEeqnarrayISinner% inner does not touch master strut size +% get local strut size +\expandafter\skip0=\@IEEEeqnarrayTHEstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEstrutdepth\relax% +% add it to the user supplied values +\advance\dimen0 by \skip0\relax% +\advance\dimen2 by \skip2\relax% +% update the local strut size +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstrutfalse% do not use master +\else% outer, have to set master strut too +% get master strut size +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% add it to the user supplied values +\advance\dimen0 by \skip0\relax% +\advance\dimen2 by \skip2\relax% +% update the local and master strut 
sizes +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}% +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstruttrue% use master strut +\fi} + + +% allow user a way to see the struts +\newif\ifIEEEvisiblestruts +\IEEEvisiblestrutsfalse + +% inserts an invisible strut using the master or local strut values +% uses scratch registers \skip0, \skip2, \dimen0, \dimen2 +\def\@IEEEeqnarrayinsertstrut{\relax% +\if@IEEEeqnarrayusemasterstrut +% get master strut size +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +\else% +% get local strut size +\expandafter\skip0=\@IEEEeqnarrayTHEstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEstrutdepth\relax% +\fi% +% remove stretchability, probably not needed +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +% allow user to see struts if desired +\ifIEEEvisiblestruts% +\vrule width0.2pt height\dimen0 depth\dimen2\relax% +\else% +\vrule width0pt height\dimen0 depth\dimen2\relax\fi} + + +% creates an invisible strut, useable even outside \IEEEeqnarray +% if \IEEEvisiblestrutstrue, the strut will be visible and 0.2pt wide. 
+% usage: \IEEEstrut[height][depth][font size commands] +% default is \IEEEstrut[0.7\normalbaselineskip][0.3\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \dimen0, \dimen2, \skip0, \skip2 +\def\IEEEstrut{\relax\@ifnextchar[{\@IEEEstrut}{\@IEEEstrut[0.7\normalbaselineskip]}} +\def\@IEEEstrut[#1]{\relax\@ifnextchar[{\@@IEEEstrut[#1]}{\@@IEEEstrut[#1][0.3\normalbaselineskip]}} +\def\@@IEEEstrut[#1][#2]{\relax\@ifnextchar[{\@@@IEEEstrut[#1][#2]}{\@@@IEEEstrut[#1][#2][\relax]}} +\def\@@@IEEEstrut[#1][#2][#3]{\mbox{#3\relax% +\def\@IEEEstrutARG{#1}% +\ifx\@IEEEstrutARG\@empty% +\skip0=0.7\normalbaselineskip\relax% +\else% +\skip0=#1\relax% +\fi% +\def\@IEEEstrutARG{#2}% +\ifx\@IEEEstrutARG\@empty% +\skip2=0.3\normalbaselineskip\relax% +\else% +\skip2=#2\relax% +\fi% +% remove stretchability, probably not needed +\dimen0\skip0\relax% +\dimen2\skip2\relax% +\ifIEEEvisiblestruts% +\vrule width0.2pt height\dimen0 depth\dimen2\relax% +\else% +\vrule width0.0pt height\dimen0 depth\dimen2\relax\fi}} + + +% enables strut mode by setting a default strut size and then zeroing the +% \baselineskip, \lineskip, \lineskiplimit and \jot +\def\IEEEeqnarraystrutmode{\IEEEeqnarraystrutsize{0.7\normalbaselineskip}{0.3\normalbaselineskip}[\relax]% +\baselineskip=0pt\lineskip=0pt\lineskiplimit=0pt\jot=0pt} + + + +\def\IEEEeqnarray{\@IEEEeqnarraystarformfalse\@IEEEeqnarray} +\def\endIEEEeqnarray{\end@IEEEeqnarray} + +\@namedef{IEEEeqnarray*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarray} +\@namedef{endIEEEeqnarray*}{\end@IEEEeqnarray} + + +% \IEEEeqnarray is an enhanced \eqnarray. +% The star form defaults to not putting equation numbers at the end of each row. 
+% usage: \IEEEeqnarray[decl]{cols} +\def\@IEEEeqnarray{\relax\@ifnextchar[{\@@IEEEeqnarray}{\@@IEEEeqnarray[\relax]}} +\def\@@IEEEeqnarray[#1]#2{% + % default to showing the equation number or not based on whether or not + % the star form was involked + \if@IEEEeqnarraystarform\global\@eqnswfalse + \else% not the star form + \global\@eqnswtrue + \fi% if star form + \@IEEEissubequationfalse% default to no subequations + \@IEEElastlinewassubequationfalse% assume last line is not a sub equation + \@IEEEeqnarrayISinnerfalse% not yet within the lines of the halign + \@IEEEeqnarraystrutsize{0pt}{0pt}[\relax]% turn off struts by default + \@IEEEeqnarrayusemasterstruttrue% use master strut till user asks otherwise + \IEEEvisiblestrutsfalse% diagnostic mode defaults to off + % no extra space unless the user specifically requests it + \lineskip=0pt\relax + \lineskiplimit=0pt\relax + \baselineskip=\normalbaselineskip\relax% + \jot=\IEEEnormaljot\relax% + \mathsurround\z@\relax% no extra spacing around math + \@advanceIEEEeqncolcnttrue% advance the col counter for each col the user uses, + % used in \IEEEeqnarraymulticol and in the preamble build + \stepcounter{equation}% advance equation counter before first line + \setcounter{IEEEsubequation}{0}% no subequation yet + \def\@currentlabel{\p@equation\theequation}% redefine the ref label + \IEEEeqnarraydecl\relax% allow a way for the user to make global overrides + #1\relax% allow user to override defaults + \let\\\@IEEEeqnarraycr% replace newline with one that can put in eqn. numbers + \global\@IEEEeqncolcnt\z@% col. 
count = 0 for first line + \@IEEEbuildpreamble #2\end\relax% build the preamble and put it into \@IEEEtrantmptoksA + % put in the column for the equation number + \ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi% col separator for those after the first + \toks0={##}% + % advance the \@IEEEeqncolcnt for the isolation col, this helps with error checking + \@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}% + % add the isolation column + \@IEEEappendtoksA{\tabskip\z@skip\bgroup\the\toks0\egroup}% + % advance the \@IEEEeqncolcnt for the equation number col, this helps with error checking + \@IEEEappendtoksA{&\global\advance\@IEEEeqncolcnt by 1\relax}% + % add the equation number col to the preamble + \@IEEEappendtoksA{\tabskip\z@skip\hb@xt@\z@\bgroup\hss\the\toks0\egroup}% + % note \@IEEEeqnnumcols does not count the equation col or isolation col + % set the starting tabskip glue as determined by the preamble build + \tabskip=\@IEEEBPstartglue\relax + % begin the display alignment + \@IEEEeqnarrayISinnertrue% commands are now within the lines + $$\everycr{}\halign to\displaywidth\bgroup + % "exspand" the preamble + \span\the\@IEEEtrantmptoksA\cr} + +% enter isolation/strut column (or the next column if the user did not use +% every column), record the strut status, complete the columns, do the strut if needed, +% restore counters to correct values and exit +\def\end@IEEEeqnarray{\@IEEEeqnarrayglobalizestrutstatus&\@@IEEEeqnarraycr\egroup% +\if@IEEElastlinewassubequation\global\advance\c@IEEEsubequation\m@ne\fi% +\global\advance\c@equation\m@ne% +$$\@ignoretrue} + +% need a way to remember if last line is a subequation +\newif\if@IEEElastlinewassubequation% +\@IEEElastlinewassubequationfalse + +% IEEEeqnarray uses a modifed \\ instead of the plain \cr to +% end rows. 
This allows for things like \\*[vskip amount] +% This "cr" macros are modified versions those for LaTeX2e's eqnarray +% the {\ifnum0=`} braces must be kept away from the last column to avoid +% altering spacing of its math, so we use & to advance to the next column +% as there is an isolation/strut column after the user's columns +\def\@IEEEeqnarraycr{\@IEEEeqnarrayglobalizestrutstatus&% save strut status and advance to next column + {\ifnum0=`}\fi + \@ifstar{% + \global\@eqpen\@M\@IEEEeqnarrayYCR + }{% + \global\@eqpen\interdisplaylinepenalty \@IEEEeqnarrayYCR + }% +} + +\def\@IEEEeqnarrayYCR{\@testopt\@IEEEeqnarrayXCR\z@skip} + +\def\@IEEEeqnarrayXCR[#1]{% + \ifnum0=`{\fi}% + \@@IEEEeqnarraycr + \noalign{\penalty\@eqpen\vskip\jot\vskip #1\relax}}% + +\def\@@IEEEeqnarraycr{\@IEEEtrantmptoksA={}% clear token register + \advance\@IEEEeqncolcnt by -1\relax% adjust col count because of the isolation column + \ifnum\@IEEEeqncolcnt>\@IEEEeqnnumcols\relax + \@IEEEclspkgerror{Too many columns within the IEEEeqnarray\MessageBreak + environment}% + {Use fewer \string &'s or put more columns in the IEEEeqnarry column\MessageBreak + specifications.}\relax% + \else + \loop% add cols if the user did not use them all + \ifnum\@IEEEeqncolcnt<\@IEEEeqnnumcols\relax + \@IEEEappendtoksA{&}% + \advance\@IEEEeqncolcnt by 1\relax% update the col count + \repeat + % this number of &'s will take us the the isolation column + \fi + % execute the &'s + \the\@IEEEtrantmptoksA% + % handle the strut/isolation column + \@IEEEeqnarrayinsertstrut% do the strut if needed + \@IEEEeqnarraystrutreset% reset the strut system for next line or IEEEeqnarray + &% and enter the equation number column + % is this line needs an equation number, display it and advance the + % (sub)equation counters, record what type this line was + \if@eqnsw% + \if@IEEEissubequation\theIEEEsubequationdis\addtocounter{equation}{1}\stepcounter{IEEEsubequation}% + \global\@IEEElastlinewassubequationtrue% + \else% display a 
standard equation number, initialize the IEEEsubequation counter + \theequationdis\stepcounter{equation}\setcounter{IEEEsubequation}{0}% + \global\@IEEElastlinewassubequationfalse\fi% + \fi% + % reset the eqnsw flag to indicate default preference of the display of equation numbers + \if@IEEEeqnarraystarform\global\@eqnswfalse\else\global\@eqnswtrue\fi + \global\@IEEEissubequationfalse% reset the subequation flag + % reset the number of columns the user actually used + \global\@IEEEeqncolcnt\z@\relax + % the real end of the line + \cr} + + + + + +% \IEEEeqnarraybox is like \IEEEeqnarray except the box form puts everything +% inside a vtop, vbox, or vcenter box depending on the letter in the second +% optional argument (t,b,c). Vbox is the default. Unlike \IEEEeqnarray, +% equation numbers are not displayed and \IEEEeqnarraybox can be nested. +% \IEEEeqnarrayboxm is for math mode (like \array) and does not put the vbox +% within an hbox. +% \IEEEeqnarrayboxt is for text mode (like \tabular) and puts the vbox within +% a \hbox{$ $} construct. +% \IEEEeqnarraybox will auto detect whether to use \IEEEeqnarrayboxm or +% \IEEEeqnarrayboxt depending on the math mode. +% The third optional argument specifies the width this box is to be set to - +% natural width is the default. 
+% The * forms do not add \jot line spacing +% usage: \IEEEeqnarraybox[decl][pos][width]{cols} +\def\IEEEeqnarrayboxm{\@IEEEeqnarraystarformfalse\@IEEEeqnarrayboxHBOXSWfalse\@IEEEeqnarraybox} +\def\endIEEEeqnarrayboxm{\end@IEEEeqnarraybox} +\@namedef{IEEEeqnarrayboxm*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarrayboxHBOXSWfalse\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarrayboxm*}{\end@IEEEeqnarraybox} + +\def\IEEEeqnarrayboxt{\@IEEEeqnarraystarformfalse\@IEEEeqnarrayboxHBOXSWtrue\@IEEEeqnarraybox} +\def\endIEEEeqnarrayboxt{\end@IEEEeqnarraybox} +\@namedef{IEEEeqnarrayboxt*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarrayboxHBOXSWtrue\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarrayboxt*}{\end@IEEEeqnarraybox} + +\def\IEEEeqnarraybox{\@IEEEeqnarraystarformfalse\ifmmode\@IEEEeqnarrayboxHBOXSWfalse\else\@IEEEeqnarrayboxHBOXSWtrue\fi% +\@IEEEeqnarraybox} +\def\endIEEEeqnarraybox{\end@IEEEeqnarraybox} + +\@namedef{IEEEeqnarraybox*}{\@IEEEeqnarraystarformtrue\ifmmode\@IEEEeqnarrayboxHBOXSWfalse\else\@IEEEeqnarrayboxHBOXSWtrue\fi% +\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarraybox*}{\end@IEEEeqnarraybox} + +% flag to indicate if the \IEEEeqnarraybox needs to put things into an hbox{$ $} +% for \vcenter in non-math mode +\newif\if@IEEEeqnarrayboxHBOXSW% +\@IEEEeqnarrayboxHBOXSWfalse + +\def\@IEEEeqnarraybox{\relax\@ifnextchar[{\@@IEEEeqnarraybox}{\@@IEEEeqnarraybox[\relax]}} +\def\@@IEEEeqnarraybox[#1]{\relax\@ifnextchar[{\@@@IEEEeqnarraybox[#1]}{\@@@IEEEeqnarraybox[#1][b]}} +\def\@@@IEEEeqnarraybox[#1][#2]{\relax\@ifnextchar[{\@@@@IEEEeqnarraybox[#1][#2]}{\@@@@IEEEeqnarraybox[#1][#2][\relax]}} + +% #1 = decl; #2 = t,b,c; #3 = width, #4 = col specs +\def\@@@@IEEEeqnarraybox[#1][#2][#3]#4{\@IEEEeqnarrayISinnerfalse % not yet within the lines of the halign + \@IEEEeqnarraymasterstrutsave% save current master strut values + \@IEEEeqnarraystrutsize{0pt}{0pt}[\relax]% turn off struts by default + \@IEEEeqnarrayusemasterstruttrue% use master strut till user asks otherwise + 
\IEEEvisiblestrutsfalse% diagnostic mode defaults to off + % no extra space unless the user specifically requests it + \lineskip=0pt\relax% + \lineskiplimit=0pt\relax% + \baselineskip=\normalbaselineskip\relax% + \jot=\IEEEnormaljot\relax% + \mathsurround\z@\relax% no extra spacing around math + % the default end glues are zero for an \IEEEeqnarraybox + \edef\@IEEEeqnarraycolSEPdefaultstart{\@IEEEeqnarraycolSEPzero}% default start glue + \edef\@IEEEeqnarraycolSEPdefaultend{\@IEEEeqnarraycolSEPzero}% default end glue + \edef\@IEEEeqnarraycolSEPdefaultmid{\@IEEEeqnarraycolSEPzero}% default inter-column glue + \@advanceIEEEeqncolcntfalse% do not advance the col counter for each col the user uses, + % used in \IEEEeqnarraymulticol and in the preamble build + \IEEEeqnarrayboxdecl\relax% allow a way for the user to make global overrides + #1\relax% allow user to override defaults + \let\\\@IEEEeqnarrayboxcr% replace newline with one that allows optional spacing + \@IEEEbuildpreamble #4\end\relax% build the preamble and put it into \@IEEEtrantmptoksA + % add an isolation column to the preamble to stop \\'s {} from getting into the last col + \ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi% col separator for those after the first + \toks0={##}% + % add the isolation column to the preamble + \@IEEEappendtoksA{\tabskip\z@skip\bgroup\the\toks0\egroup}% + % set the starting tabskip glue as determined by the preamble build + \tabskip=\@IEEEBPstartglue\relax + % begin the alignment + \everycr{}% + % use only the very first token to determine the positioning + % this stops some problems when the user uses more than one letter, + % but is probably not worth the effort + % \noindent is used as a delimiter + \def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% + \@IEEEgrabfirstoken#2\relax\relax\noindent + % \@IEEEgrabbedfirstoken has the first token, the rest are discarded + % if we need to put things into and hbox and go into math mode, do so now + 
\if@IEEEeqnarrayboxHBOXSW \leavevmode \hbox \bgroup $\fi% + % use the appropriate vbox type + \if\@IEEEgrabbedfirstoken t\relax\vtop\else\if\@IEEEgrabbedfirstoken c\relax% + \vcenter\else\vbox\fi\fi\bgroup% + \@IEEEeqnarrayISinnertrue% commands are now within the lines + \ifx#3\relax\halign\else\halign to #3\relax\fi% + \bgroup + % "exspand" the preamble + \span\the\@IEEEtrantmptoksA\cr} + +% carry strut status and enter the isolation/strut column, +% exit from math mode if needed, and exit +\def\end@IEEEeqnarraybox{\@IEEEeqnarrayglobalizestrutstatus% carry strut status +&% enter isolation/strut column +\@IEEEeqnarrayinsertstrut% do strut if needed +\@IEEEeqnarraymasterstrutrestore% restore the previous master strut values +% reset the strut system for next IEEEeqnarray +% (sets local strut values back to previous master strut values) +\@IEEEeqnarraystrutreset% +% ensure last line, exit from halign, close vbox +\crcr\egroup\egroup% +% exit from math mode and close hbox if needed +\if@IEEEeqnarrayboxHBOXSW $\egroup\fi} + + + +% IEEEeqnarraybox uses a modifed \\ instead of the plain \cr to +% end rows. 
This allows for things like \\[vskip amount] +% This "cr" macros are modified versions those for LaTeX2e's eqnarray +% For IEEEeqnarraybox, \\* is the same as \\ +% the {\ifnum0=`} braces must be kept away from the last column to avoid +% altering spacing of its math, so we use & to advance to the isolation/strut column +% carry strut status into isolation/strut column +\def\@IEEEeqnarrayboxcr{\@IEEEeqnarrayglobalizestrutstatus% carry strut status +&% enter isolation/strut column +\@IEEEeqnarrayinsertstrut% do strut if needed +% reset the strut system for next line or IEEEeqnarray +\@IEEEeqnarraystrutreset% +{\ifnum0=`}\fi% +\@ifstar{\@IEEEeqnarrayboxYCR}{\@IEEEeqnarrayboxYCR}} + +% test and setup the optional argument to \\[] +\def\@IEEEeqnarrayboxYCR{\@testopt\@IEEEeqnarrayboxXCR\z@skip} + +% IEEEeqnarraybox does not automatically increase line spacing by \jot +\def\@IEEEeqnarrayboxXCR[#1]{\ifnum0=`{\fi}% +\cr\noalign{\if@IEEEeqnarraystarform\else\vskip\jot\fi\vskip#1\relax}} + + + +% starts the halign preamble build +\def\@IEEEbuildpreamble{\@IEEEtrantmptoksA={}% clear token register +\let\@IEEEBPcurtype=u%current column type is not yet known +\let\@IEEEBPprevtype=s%the previous column type was the start +\let\@IEEEBPnexttype=u%next column type is not yet known +% ensure these are valid +\def\@IEEEBPcurglue={0pt plus 0pt minus 0pt}% +\def\@IEEEBPcurcolname{@IEEEdefault}% name of current column definition +% currently acquired numerically referenced glue +% use a name that is easier to remember +\let\@IEEEBPcurnum=\@IEEEtrantmpcountA% +\@IEEEBPcurnum=0% +% tracks number of columns in the preamble +\@IEEEeqnnumcols=0% +% record the default end glues +\edef\@IEEEBPstartglue{\@IEEEeqnarraycolSEPdefaultstart}% +\edef\@IEEEBPendglue{\@IEEEeqnarraycolSEPdefaultend}% +% now parse the user's column specifications +\@@IEEEbuildpreamble} + + +% parses and builds the halign preamble +\def\@@IEEEbuildpreamble#1#2{\let\@@nextIEEEbuildpreamble=\@@IEEEbuildpreamble% +% use only 
the very first token to check the end +% \noindent is used as a delimiter as \end can be present here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +\ifx\@IEEEgrabbedfirstoken\end\let\@@nextIEEEbuildpreamble=\@@IEEEfinishpreamble\else% +% identify current and next token type +\@IEEEgetcoltype{#1}{\@IEEEBPcurtype}{1}% current, error on invalid +\@IEEEgetcoltype{#2}{\@IEEEBPnexttype}{0}% next, no error on invalid next +% if curtype is a glue, get the glue def +\if\@IEEEBPcurtype g\@IEEEgetcurglue{#1}{\@IEEEBPcurglue}\fi% +% if curtype is a column, get the column def and set the current column name +\if\@IEEEBPcurtype c\@IEEEgetcurcol{#1}\fi% +% if curtype is a numeral, acquire the user defined glue +\if\@IEEEBPcurtype n\@IEEEprocessNcol{#1}\fi% +% process the acquired glue +\if\@IEEEBPcurtype g\@IEEEprocessGcol\fi% +% process the acquired col +\if\@IEEEBPcurtype c\@IEEEprocessCcol\fi% +% ready prevtype for next col spec. 
+\let\@IEEEBPprevtype=\@IEEEBPcurtype% +% be sure and put back the future token(s) as a group +\fi\@@nextIEEEbuildpreamble{#2}} + + +% executed just after preamble build is completed +% warn about zero cols, and if prevtype type = u, put in end tabskip glue +\def\@@IEEEfinishpreamble#1{\ifnum\@IEEEeqnnumcols<1\relax +\@IEEEclspkgerror{No column specifiers declared for IEEEeqnarray}% +{At least one column type must be declared for each IEEEeqnarray.}% +\fi%num cols less than 1 +%if last type undefined, set default end tabskip glue +\if\@IEEEBPprevtype u\@IEEEappendtoksA{\tabskip=\@IEEEBPendglue}\fi} + + +% Identify and return the column specifier's type code +\def\@IEEEgetcoltype#1#2#3{% +% use only the very first token to determine the type +% \noindent is used as a delimiter as \end can be present here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +% \@IEEEgrabfirstoken has the first token, the rest are discarded +% n = number +% g = glue (any other char in catagory 12) +% c = letter +% e = \end +% u = undefined +% third argument: 0 = no error message, 1 = error on invalid char +\let#2=u\relax% assume invalid until know otherwise +\ifx\@IEEEgrabbedfirstoken\end\let#2=e\else +\ifcat\@IEEEgrabbedfirstoken\relax\else% screen out control sequences +\if0\@IEEEgrabbedfirstoken\let#2=n\else +\if1\@IEEEgrabbedfirstoken\let#2=n\else +\if2\@IEEEgrabbedfirstoken\let#2=n\else +\if3\@IEEEgrabbedfirstoken\let#2=n\else +\if4\@IEEEgrabbedfirstoken\let#2=n\else +\if5\@IEEEgrabbedfirstoken\let#2=n\else +\if6\@IEEEgrabbedfirstoken\let#2=n\else +\if7\@IEEEgrabbedfirstoken\let#2=n\else +\if8\@IEEEgrabbedfirstoken\let#2=n\else +\if9\@IEEEgrabbedfirstoken\let#2=n\else +\ifcat,\@IEEEgrabbedfirstoken\let#2=g\relax +\else\ifcat a\@IEEEgrabbedfirstoken\let#2=c\relax\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi +\if#2u\relax +\if0\noexpand#3\relax\else\@IEEEclspkgerror{Invalid character in column specifications}% +{Only 
letters, numerals and certain other symbols are allowed \MessageBreak +as IEEEeqnarray column specifiers.}\fi\fi} + + +% identify the current letter referenced column +% if invalid, use a default column +\def\@IEEEgetcurcol#1{\expandafter\ifx\csname @IEEEeqnarraycolDEF#1\endcsname\@IEEEeqnarraycolisdefined% +\def\@IEEEBPcurcolname{#1}\else% invalid column name +\@IEEEclspkgerror{Invalid column type "#1" in column specifications.\MessageBreak +Using a default centering column instead}% +{You must define IEEEeqnarray column types before use.}% +\def\@IEEEBPcurcolname{@IEEEdefault}\fi} + + +% identify and return the predefined (punctuation) glue value +\def\@IEEEgetcurglue#1#2{% +% ! = \! (neg small) -0.16667em (-3/18 em) +% , = \, (small) 0.16667em ( 3/18 em) +% : = \: (med) 0.22222em ( 4/18 em) +% ; = \; (large) 0.27778em ( 5/18 em) +% ' = \quad 1em +% " = \qquad 2em +% . = 0.5\arraycolsep +% / = \arraycolsep +% ? = 2\arraycolsep +% * = 1fil +% + = \@IEEEeqnarraycolSEPcenter +% - = \@IEEEeqnarraycolSEPzero +% Note that all em values are referenced to the math font (textfont2) fontdimen6 +% value for 1em. +% +% use only the very first token to determine the type +% this prevents errant tokens from getting in the main text +% \noindent is used as a delimiter here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +% get the math font 1em value +% LaTeX2e's NFSS2 does not preload the fonts, but \IEEEeqnarray needs +% to gain access to the math (\textfont2) font's spacing parameters. +% So we create a bogus box here that uses the math font to ensure +% that \textfont2 is loaded and ready. If this is not done, +% the \textfont2 stuff here may not work. +% Thanks to Bernd Raichle for his 1997 post on this topic. +{\setbox0=\hbox{$\displaystyle\relax$}}% +% fontdimen6 has the width of 1em (a quad). 
+\@IEEEtrantmpdimenA=\fontdimen6\textfont2\relax% +% identify the glue value based on the first token +% we discard anything after the first +\if!\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=-0.16667\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if,\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.16667\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if:\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.22222\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if;\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.27778\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if'\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=1\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if"\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=2\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if.\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.5\arraycolsep\edef#2{\the\@IEEEtrantmpdimenA}\else +\if/\@IEEEgrabbedfirstoken\edef#2{\the\arraycolsep}\else +\if?\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=2\arraycolsep\edef#2{\the\@IEEEtrantmpdimenA}\else +\if *\@IEEEgrabbedfirstoken\edef#2{0pt plus 1fil minus 0pt}\else +\if+\@IEEEgrabbedfirstoken\edef#2{\@IEEEeqnarraycolSEPcenter}\else +\if-\@IEEEgrabbedfirstoken\edef#2{\@IEEEeqnarraycolSEPzero}\else +\edef#2{\@IEEEeqnarraycolSEPzero}% +\@IEEEclspkgerror{Invalid predefined inter-column glue type "#1" in\MessageBreak +column specifications. Using a default value of\MessageBreak +0pt instead}% +{Only !,:;'"./?*+ and - are valid predefined glue types in the\MessageBreak +IEEEeqnarray column specifications.}\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi} + + + +% process a numerical digit from the column specification +% and look up the corresponding user defined glue value +% can transform current type from n to g or a as the user defined glue is acquired +\def\@IEEEprocessNcol#1{\if\@IEEEBPprevtype g% +\@IEEEclspkgerror{Back-to-back inter-column glue specifiers in column\MessageBreak +specifications. 
Ignoring consecutive glue specifiers\MessageBreak +after the first}% +{You cannot have two or more glue types next to each other\MessageBreak +in the IEEEeqnarray column specifications.}% +\let\@IEEEBPcurtype=a% abort this glue, future digits will be discarded +\@IEEEBPcurnum=0\relax% +\else% if we previously aborted a glue +\if\@IEEEBPprevtype a\@IEEEBPcurnum=0\let\@IEEEBPcurtype=a%maintain digit abortion +\else%acquire this number +% save the previous type before the numerical digits started +\if\@IEEEBPprevtype n\else\let\@IEEEBPprevsavedtype=\@IEEEBPprevtype\fi% +\multiply\@IEEEBPcurnum by 10\relax% +\advance\@IEEEBPcurnum by #1\relax% add in number, \relax is needed to stop TeX's number scan +\if\@IEEEBPnexttype n\else%close acquisition +\expandafter\ifx\csname @IEEEeqnarraycolSEPDEF\expandafter\romannumeral\number\@IEEEBPcurnum\endcsname\@IEEEeqnarraycolisdefined% +\edef\@IEEEBPcurglue{\csname @IEEEeqnarraycolSEP\expandafter\romannumeral\number\@IEEEBPcurnum\endcsname}% +\else%user glue not defined +\@IEEEclspkgerror{Invalid user defined inter-column glue type "\number\@IEEEBPcurnum" in\MessageBreak +column specifications. 
Using a default value of\MessageBreak +0pt instead}% +{You must define all IEEEeqnarray numerical inter-column glue types via\MessageBreak +\string\IEEEeqnarraydefcolsep \space before they are used in column specifications.}% +\edef\@IEEEBPcurglue{\@IEEEeqnarraycolSEPzero}% +\fi% glue defined or not +\let\@IEEEBPcurtype=g% change the type to reflect the acquired glue +\let\@IEEEBPprevtype=\@IEEEBPprevsavedtype% restore the prev type before this number glue +\@IEEEBPcurnum=0\relax%ready for next acquisition +\fi%close acquisition, get glue +\fi%discard or acquire number +\fi%prevtype glue or not +} + + +% process an acquired glue +% add any acquired column/glue pair to the preamble +\def\@IEEEprocessGcol{\if\@IEEEBPprevtype a\let\@IEEEBPcurtype=a%maintain previous glue abortions +\else +% if this is the start glue, save it, but do nothing else +% as this is not used in the preamble, but before +\if\@IEEEBPprevtype s\edef\@IEEEBPstartglue{\@IEEEBPcurglue}% +\else%not the start glue +\if\@IEEEBPprevtype g%ignore if back to back glues +\@IEEEclspkgerror{Back-to-back inter-column glue specifiers in column\MessageBreak +specifications. 
Ignoring consecutive glue specifiers\MessageBreak +after the first}% +{You cannot have two or more glue types next to each other\MessageBreak +in the IEEEeqnarray column specifications.}% +\let\@IEEEBPcurtype=a% abort this glue +\else% not a back to back glue +\if\@IEEEBPprevtype c\relax% if the previoustype was a col, add column/glue pair to preamble +\ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi +\toks0={##}% +% make preamble advance col counter if this environment needs this +\if@advanceIEEEeqncolcnt\@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}\fi +% insert the column defintion into the preamble, being careful not to expand +% the column definition +\@IEEEappendtoksA{\tabskip=\@IEEEBPcurglue}% +\@IEEEappendNOEXPANDtoksA{\begingroup\csname @IEEEeqnarraycolPRE}% +\@IEEEappendtoksA{\@IEEEBPcurcolname}% +\@IEEEappendNOEXPANDtoksA{\endcsname}% +\@IEEEappendtoksA{\the\toks0}% +\@IEEEappendNOEXPANDtoksA{\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST}% +\@IEEEappendtoksA{\@IEEEBPcurcolname}% +\@IEEEappendNOEXPANDtoksA{\endcsname\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\endgroup}% +\advance\@IEEEeqnnumcols by 1\relax%one more column in the preamble +\else% error: non-start glue with no pending column +\@IEEEclspkgerror{Inter-column glue specifier without a prior column\MessageBreak +type in the column specifications. 
Ignoring this glue\MessageBreak +specifier}% +{Except for the first and last positions, glue can be placed only\MessageBreak +between column types.}% +\let\@IEEEBPcurtype=a% abort this glue +\fi% previous was a column +\fi% back-to-back glues +\fi% is start column glue +\fi% prev type not a +} + + +% process an acquired letter referenced column and, if necessary, add it to the preamble +\def\@IEEEprocessCcol{\if\@IEEEBPnexttype g\else +\if\@IEEEBPnexttype n\else +% we have a column followed by something other than a glue (or numeral glue) +% so we must add this column to the preamble now +\ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi%col separator for those after the first +\if\@IEEEBPnexttype e\@IEEEappendtoksA{\tabskip=\@IEEEBPendglue\relax}\else%put in end glue +\@IEEEappendtoksA{\tabskip=\@IEEEeqnarraycolSEPdefaultmid\relax}\fi% or default mid glue +\toks0={##}% +% make preamble advance col counter if this environment needs this +\if@advanceIEEEeqncolcnt\@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}\fi +% insert the column definition into the preamble, being careful not to expand +% the column definition +\@IEEEappendNOEXPANDtoksA{\begingroup\csname @IEEEeqnarraycolPRE}% +\@IEEEappendtoksA{\@IEEEBPcurcolname}% +\@IEEEappendNOEXPANDtoksA{\endcsname}% +\@IEEEappendtoksA{\the\toks0}% +\@IEEEappendNOEXPANDtoksA{\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST}% +\@IEEEappendtoksA{\@IEEEBPcurcolname}% +\@IEEEappendNOEXPANDtoksA{\endcsname\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\endgroup}% +\advance\@IEEEeqnnumcols by 1\relax%one more column in the preamble +\fi%next type not numeral +\fi%next type not glue +} + + +%% +%% END OF IEEEeqnarry DEFINITIONS +%% + + + + +% set up the running headings, this complex because of all the different +% modes IEEEtran supports +\if@twoside + \ifCLASSOPTIONtechnote + \def\ps@headings{% + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage} 
+ \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}} + \ifCLASSOPTIONdraftcls + \ifCLASSOPTIONdraftclsnofoot + \def\@oddfoot{}\def\@evenfoot{}% + \else + \def\@oddfoot{\scriptsize\@date\hfil DRAFT} + \def\@evenfoot{\scriptsize DRAFT\hfil\@date} + \fi + \else + \def\@oddfoot{}\def\@evenfoot{} + \fi} + \else % not a technote + \def\ps@headings{% + \ifCLASSOPTIONconference + \def\@oddhead{} + \def\@evenhead{} + \else + \def\@oddhead{\hbox{}\scriptsize\rightmark \hfil \thepage} + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}} + \fi + \ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\rightmark \hfil \thepage} + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}} + \ifCLASSOPTIONdraftclsnofoot + \def\@oddfoot{}\def\@evenfoot{}% + \else + \def\@oddfoot{\scriptsize\@date\hfil DRAFT} + \def\@evenfoot{\scriptsize DRAFT\hfil\@date} + \fi + \else + \def\@oddfoot{}\def\@evenfoot{}% + \fi} + \fi +\else % single side +\def\ps@headings{% + \ifCLASSOPTIONconference + \def\@oddhead{} + \def\@evenhead{} + \else + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage} + \def\@evenhead{} + \fi + \ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage} + \def\@evenhead{} + \ifCLASSOPTIONdraftclsnofoot + \def\@oddfoot{} + \else + \def\@oddfoot{\scriptsize \@date \hfil DRAFT} + \fi + \else + \def\@oddfoot{} + \fi + \def\@evenfoot{}} +\fi + + +% title page style +\def\ps@IEEEtitlepagestyle{\def\@oddfoot{}\def\@evenfoot{}% +\ifCLASSOPTIONconference + \def\@oddhead{}% + \def\@evenhead{}% +\else + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage}% + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}}% +\fi +\ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage}% + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}}% + \ifCLASSOPTIONdraftclsnofoot\else + \def\@oddfoot{\scriptsize \@date\hfil DRAFT}% + \def\@evenfoot{\scriptsize DRAFT\hfil \@date}% + \fi +\else + % all non-draft 
mode footers + \if@IEEEusingpubid + % for title pages that are using a pubid + % do not repeat pubid if using peer review option + \ifCLASSOPTIONpeerreview + \else + \footskip 0pt% + \ifCLASSOPTIONcompsoc + \def\@oddfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \else + \def\@oddfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \fi + \fi + \fi +\fi} + + +% peer review cover page style +\def\ps@IEEEpeerreviewcoverpagestyle{% +\def\@oddhead{}\def\@evenhead{}% +\def\@oddfoot{}\def\@evenfoot{}% +\ifCLASSOPTIONdraftcls + \ifCLASSOPTIONdraftclsnofoot\else + \def\@oddfoot{\scriptsize \@date\hfil DRAFT}% + \def\@evenfoot{\scriptsize DRAFT\hfil \@date}% + \fi +\else + % non-draft mode footers + \if@IEEEusingpubid + \footskip 0pt% + \ifCLASSOPTIONcompsoc + \def\@oddfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \else + \def\@oddfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \fi + \fi +\fi} + + +% start with empty headings +\def\rightmark{}\def\leftmark{} + + +%% Defines the command for putting the header. \footernote{TEXT} is the same +%% as \markboth{TEXT}{TEXT}. +%% Note that all the text is forced into uppercase, if you have some text +%% that needs to be in lower case, for instance et. al., then either manually +%% set \leftmark and \rightmark or use \MakeLowercase{et. al.} within the +%% arguments to \markboth. 
+\def\markboth#1#2{\def\leftmark{\@IEEEcompsoconly{\sffamily}\MakeUppercase{#1}}% +\def\rightmark{\@IEEEcompsoconly{\sffamily}\MakeUppercase{#2}}} +\def\footernote#1{\markboth{#1}{#1}} + +\def\today{\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\day, \number\year} + + + + +%% CITATION AND BIBLIOGRAPHY COMMANDS +%% +%% V1.6 no longer supports the older, nonstandard \shortcite and \citename setup stuff +% +% +% Modify Latex2e \@citex to separate citations with "], [" +\def\@citex[#1]#2{% + \let\@citea\@empty + \@cite{\@for\@citeb:=#2\do + {\@citea\def\@citea{], [}% + \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}% + \if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi + \@ifundefined{b@\@citeb}{\mbox{\reset@font\bfseries ?}% + \G@refundefinedtrue + \@latex@warning + {Citation `\@citeb' on page \thepage \space undefined}}% + {\hbox{\csname b@\@citeb\endcsname}}}}{#1}} + +% V1.6 we create hooks for the optional use of Donald Arseneau's +% cite.sty package. cite.sty is "smart" and will notice that the +% following format controls are already defined and will not +% redefine them. The result will be the proper sorting of the +% citation numbers and auto detection of 3 or more entry "ranges" - +% all in IEEE style: [1], [2], [5]--[7], [12] +% This also allows for an optional note, i.e., \cite[mynote]{..}. +% If the \cite with note has more than one reference, the note will +% be applied to the last of the listed references. It is generally +% desired that if a note is given, only one reference is listed in +% that \cite. +% Thanks to Mr. Arseneau for providing the required format arguments +% to produce the IEEE style. 
+\def\citepunct{], [} +\def\citedash{]--[} + +% V1.7 default to using same font for urls made by url.sty +\AtBeginDocument{\csname url@samestyle\endcsname} + +% V1.6 class files should always provide these +\def\newblock{\hskip .11em\@plus.33em\@minus.07em} +\let\@openbib@code\@empty + + +% Provide support for the control entries of IEEEtran.bst V1.00 and later. +% V1.7 optional argument allows for a different aux file to be specified in +% order to handle multiple bibliographies. For example, with multibib.sty: +% \newcites{sec}{Secondary Literature} +% \bstctlcite[@auxoutsec]{BSTcontrolhak} +\def\bstctlcite{\@ifnextchar[{\@bstctlcite}{\@bstctlcite[@auxout]}} +\def\@bstctlcite[#1]#2{\@bsphack + \@for\@citeb:=#2\do{% + \edef\@citeb{\expandafter\@firstofone\@citeb}% + \if@filesw\immediate\write\csname #1\endcsname{\string\citation{\@citeb}}\fi}% + \@esphack} + +% V1.6 provide a way for a user to execute a command just before +% a given reference number - used to insert a \newpage to balance +% the columns on the last page +\edef\@IEEEtriggerrefnum{0} % the default of zero means that + % the command is not executed +\def\@IEEEtriggercmd{\newpage} + +% allow the user to alter the triggered command +\long\def\IEEEtriggercmd#1{\long\def\@IEEEtriggercmd{#1}} + +% allow user a way to specify the reference number just before the +% command is executed +\def\IEEEtriggeratref#1{\@IEEEtrantmpcountA=#1% +\edef\@IEEEtriggerrefnum{\the\@IEEEtrantmpcountA}}% + +% trigger command at the given reference +\def\@IEEEbibitemprefix{\@IEEEtrantmpcountA=\@IEEEtriggerrefnum\relax% +\advance\@IEEEtrantmpcountA by -1\relax% +\ifnum\c@enumiv=\@IEEEtrantmpcountA\relax\@IEEEtriggercmd\relax\fi} + + +\def\@biblabel#1{[#1]} + +% compsoc journals left align the reference numbers +\@IEEEcompsocnotconfonly{\def\@biblabel#1{[#1]\hfill}} + +% controls bib item spacing +\def\IEEEbibitemsep{2.5pt plus .5pt} + +\@IEEEcompsocconfonly{\def\IEEEbibitemsep{1\baselineskip plus 0.25\baselineskip minus 
0.25\baselineskip}} + + +\def\thebibliography#1{\section*{\refname}% + \addcontentsline{toc}{section}{\refname}% + % V1.6 add some rubber space here and provide a command trigger + \footnotesize\@IEEEcompsocconfonly{\small}\vskip 0.3\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip% + \list{\@biblabel{\@arabic\c@enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \labelsep 1em + \advance\leftmargin\labelsep\relax + \itemsep \IEEEbibitemsep\relax + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{\@arabic\c@enumiv}}% + \let\@IEEElatexbibitem\bibitem% + \def\bibitem{\@IEEEbibitemprefix\@IEEElatexbibitem}% +\def\newblock{\hskip .11em plus .33em minus .07em}% +% originally: +% \sloppy\clubpenalty4000\widowpenalty4000% +% by adding the \interlinepenalty here, we make it more +% difficult, but not impossible, for LaTeX to break within a reference. +% IEEE almost never breaks a reference (but they do it more often with +% technotes). You may get an underfull vbox warning around the bibliography, +% but the final result will be much more like what IEEE will publish. +% MDS 11/2000 +\ifCLASSOPTIONtechnote\sloppy\clubpenalty4000\widowpenalty4000\interlinepenalty100% +\else\sloppy\clubpenalty4000\widowpenalty4000\interlinepenalty500\fi% + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist + + + + +% TITLE PAGE COMMANDS +% +% +% \IEEEmembership is used to produce the sublargesize italic font used to indicate author +% IEEE membership. compsoc uses a large size sans slant font +\def\IEEEmembership#1{{\@IEEEnotcompsoconly{\sublargesize}\normalfont\@IEEEcompsoconly{\sffamily}\textit{#1}}} + + +% \IEEEauthorrefmark{} produces a footnote type symbol to indicate author affiliation. +% When given an argument of 1 to 9, \IEEEauthorrefmark{} follows the standard LaTeX footnote +% symbol sequence convention. 
However, for arguments 10 and above, \IEEEauthorrefmark{} +% reverts to using lower case roman numerals, so it cannot overflow. Do note that you +% cannot use \footnotemark[] in place of \IEEEauthorrefmark{} within \author as the footnote +% symbols will have been turned off to prevent \thanks from creating footnote marks. +% \IEEEauthorrefmark{} produces a symbol that appears to LaTeX as having zero vertical +% height - this allows for a more compact line packing, but the user must ensure that +% the interline spacing is large enough to prevent \IEEEauthorrefmark{} from colliding +% with the text above. +% V1.7 make this a robust command +\DeclareRobustCommand*{\IEEEauthorrefmark}[1]{\raisebox{0pt}[0pt][0pt]{\textsuperscript{\footnotesize\ensuremath{\ifcase#1\or *\or \dagger\or \ddagger\or% + \mathsection\or \mathparagraph\or \|\or **\or \dagger\dagger% + \or \ddagger\ddagger \else\textsuperscript{\expandafter\romannumeral#1}\fi}}}} + + +% FONT CONTROLS AND SPACINGS FOR CONFERENCE MODE AUTHOR NAME AND AFFILIATION BLOCKS +% +% The default font styles for the author name and affiliation blocks (confmode) +\def\@IEEEauthorblockNstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\sublargesize\@IEEEcompsocconfonly{\large}} +\def\@IEEEauthorblockAstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\@IEEEcompsocconfonly{\itshape}\normalsize\@IEEEcompsocconfonly{\large}} +% The default if the user does not use an author block +\def\@IEEEauthordefaulttextstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\sublargesize} + +% spacing from title (or special paper notice) to author name blocks (confmode) +% can be negative +\def\@IEEEauthorblockconfadjspace{-0.25em} +% compsoc conferences need more space here +\@IEEEcompsocconfonly{\def\@IEEEauthorblockconfadjspace{0.75\@IEEEnormalsizeunitybaselineskip}} +\ifCLASSOPTIONconference\def\@IEEEauthorblockconfadjspace{20pt}\fi + +% spacing between name and affiliation blocks (confmode) +% This can be negative. 
+% IEEE doesn't want any added spacing here, but I will leave these +% controls in place in case they ever change their mind. +% Personally, I like 0.75ex. +%\def\@IEEEauthorblockNtopspace{0.75ex} +%\def\@IEEEauthorblockAtopspace{0.75ex} +\def\@IEEEauthorblockNtopspace{0.0ex} +\def\@IEEEauthorblockAtopspace{0.0ex} +% baseline spacing within name and affiliation blocks (confmode) +% must be positive, spacings below certain values will make +% the position of line of text sensitive to the contents of the +% line above it i.e., whether or not the prior line has descenders, +% subscripts, etc. For this reason it is a good idea to keep +% these above 2.6ex +\def\@IEEEauthorblockNinterlinespace{2.6ex} +\def\@IEEEauthorblockAinterlinespace{2.75ex} + +% This tracks the required strut size. +% See the \@IEEEauthorhalign command for the actual default value used. +\def\@IEEEauthorblockXinterlinespace{2.7ex} + +% variables to retain font size and style across groups +% values given here have no effect as they will be overwritten later +\gdef\@IEEESAVESTATEfontsize{10} +\gdef\@IEEESAVESTATEfontbaselineskip{12} +\gdef\@IEEESAVESTATEfontencoding{OT1} +\gdef\@IEEESAVESTATEfontfamily{ptm} +\gdef\@IEEESAVESTATEfontseries{m} +\gdef\@IEEESAVESTATEfontshape{n} + +% saves the current font attributes +\def\@IEEEcurfontSAVE{\global\let\@IEEESAVESTATEfontsize\f@size% +\global\let\@IEEESAVESTATEfontbaselineskip\f@baselineskip% +\global\let\@IEEESAVESTATEfontencoding\f@encoding% +\global\let\@IEEESAVESTATEfontfamily\f@family% +\global\let\@IEEESAVESTATEfontseries\f@series% +\global\let\@IEEESAVESTATEfontshape\f@shape} + +% restores the saved font attributes +\def\@IEEEcurfontRESTORE{\fontsize{\@IEEESAVESTATEfontsize}{\@IEEESAVESTATEfontbaselineskip}% +\fontencoding{\@IEEESAVESTATEfontencoding}% +\fontfamily{\@IEEESAVESTATEfontfamily}% +\fontseries{\@IEEESAVESTATEfontseries}% +\fontshape{\@IEEESAVESTATEfontshape}% +\selectfont} + + +% variable to indicate if the current block is the first 
block in the column +\newif\if@IEEEprevauthorblockincol \@IEEEprevauthorblockincolfalse + + +% the command places a strut with height and depth = \@IEEEauthorblockXinterlinespace +% we use this technique to have complete manual control over the spacing of the lines +% within the halign environment. +% We set the below baseline portion at 30%, the above +% baseline portion at 70% of the total length. +% Responds to changes in the document's \baselinestretch +\def\@IEEEauthorstrutrule{\@IEEEtrantmpdimenA\@IEEEauthorblockXinterlinespace% +\@IEEEtrantmpdimenA=\baselinestretch\@IEEEtrantmpdimenA% +\rule[-0.3\@IEEEtrantmpdimenA]{0pt}{\@IEEEtrantmpdimenA}} + + +% blocks to hold the authors' names and affilations. +% Makes formatting easy for conferences +% +% use real definitions in conference mode +% name block +\def\IEEEauthorblockN#1{\relax\@IEEEauthorblockNstyle% set the default text style +\gdef\@IEEEauthorblockXinterlinespace{0pt}% disable strut for spacer row +% the \expandafter hides the \cr in conditional tex, see the array.sty docs +% for details, probably not needed here as the \cr is in a macro +% do a spacer row if needed +\if@IEEEprevauthorblockincol\expandafter\@IEEEauthorblockNtopspaceline\fi +\global\@IEEEprevauthorblockincoltrue% we now have a block in this column +%restore the correct strut value +\gdef\@IEEEauthorblockXinterlinespace{\@IEEEauthorblockNinterlinespace}% +% input the author names +#1% +% end the row if the user did not already +\crcr} +% spacer row for names +\def\@IEEEauthorblockNtopspaceline{\cr\noalign{\vskip\@IEEEauthorblockNtopspace}} +% +% affiliation block +\def\IEEEauthorblockA#1{\relax\@IEEEauthorblockAstyle% set the default text style +\gdef\@IEEEauthorblockXinterlinespace{0pt}%disable strut for spacer row +% the \expandafter hides the \cr in conditional tex, see the array.sty docs +% for details, probably not needed here as the \cr is in a macro +% do a spacer row if needed 
+\if@IEEEprevauthorblockincol\expandafter\@IEEEauthorblockAtopspaceline\fi +\global\@IEEEprevauthorblockincoltrue% we now have a block in this column +%restore the correct strut value +\gdef\@IEEEauthorblockXinterlinespace{\@IEEEauthorblockAinterlinespace}% +% input the author affiliations +#1% +% end the row if the user did not already +\crcr} +% spacer row for affiliations +\def\@IEEEauthorblockAtopspaceline{\cr\noalign{\vskip\@IEEEauthorblockAtopspace}} + + +% allow papers to compile even if author blocks are used in modes other +% than conference or peerreviewca. For such cases, we provide dummy blocks. +\ifCLASSOPTIONconference +\else + \ifCLASSOPTIONpeerreviewca\else + % not conference or peerreviewca mode + \def\IEEEauthorblockN#1{#1}% + \def\IEEEauthorblockA#1{#1}% + \fi +\fi + + + +% we provide our own halign so as not to have to depend on tabular +\def\@IEEEauthorhalign{\@IEEEauthordefaulttextstyle% default text style + \lineskip=0pt\relax% disable line spacing + \lineskiplimit=0pt\relax% + \baselineskip=0pt\relax% + \@IEEEcurfontSAVE% save the current font + \mathsurround\z@\relax% no extra spacing around math + \let\\\@IEEEauthorhaligncr% replace newline with halign friendly one + \tabskip=0pt\relax% no column spacing + \everycr{}% ensure no problems here + \@IEEEprevauthorblockincolfalse% no author blocks yet + \def\@IEEEauthorblockXinterlinespace{2.7ex}% default interline space + \vtop\bgroup%vtop box + \halign\bgroup&\relax\hfil\@IEEEcurfontRESTORE\relax ##\relax + \hfil\@IEEEcurfontSAVE\@IEEEauthorstrutrule\cr} + +% ensure last line, exit from halign, close vbox +\def\end@IEEEauthorhalign{\crcr\egroup\egroup} + +% handle bogus star form +\def\@IEEEauthorhaligncr{{\ifnum0=`}\fi\@ifstar{\@@IEEEauthorhaligncr}{\@@IEEEauthorhaligncr}} + +% test and setup the optional argument to \\[] +\def\@@IEEEauthorhaligncr{\@testopt\@@@IEEEauthorhaligncr\z@skip} + +% end the line and do the optional spacer 
+\def\@@@IEEEauthorhaligncr[#1]{\ifnum0=`{\fi}\cr\noalign{\vskip#1\relax}} + + + +% flag to prevent multiple \and warning messages +\newif\if@IEEEWARNand +\@IEEEWARNandtrue + +% if in conference or peerreviewca modes, we support the use of \and as \author is a +% tabular environment, otherwise we warn the user that \and is invalid +% outside of conference or peerreviewca modes. +\def\and{\relax} % provide a bogus \and that we will then override + +\renewcommand{\and}[1][\relax]{\if@IEEEWARNand\typeout{** WARNING: \noexpand\and is valid only + when in conference or peerreviewca}\typeout{modes (line \the\inputlineno).}\fi\global\@IEEEWARNandfalse} + +\ifCLASSOPTIONconference% +\renewcommand{\and}[1][\hfill]{\end{@IEEEauthorhalign}#1\begin{@IEEEauthorhalign}}% +\fi +\ifCLASSOPTIONpeerreviewca +\renewcommand{\and}[1][\hfill]{\end{@IEEEauthorhalign}#1\begin{@IEEEauthorhalign}}% +\fi + + +% page clearing command +% based on LaTeX2e's \cleardoublepage, but allows different page styles +% for the inserted blank pages +\def\@IEEEcleardoublepage#1{\clearpage\if@twoside\ifodd\c@page\else +\hbox{}\thispagestyle{#1}\newpage\if@twocolumn\hbox{}\thispagestyle{#1}\newpage\fi\fi\fi} + + +% user command to invoke the title page +\def\maketitle{\par% + \begingroup% + \normalfont% + \def\thefootnote{}% the \thanks{} mark type is empty + \def\footnotemark{}% and kill space from \thanks within author + \let\@makefnmark\relax% V1.7, must *really* kill footnotemark to remove all \textsuperscript spacing as well. 
+ \footnotesize% equal spacing between thanks lines + \footnotesep 0.7\baselineskip%see global setting of \footnotesep for more info + % V1.7 disable \thanks note indention for compsoc + \@IEEEcompsoconly{\long\def\@makefntext##1{\parindent 1em\noindent\hbox{\@makefnmark}##1}}% + \normalsize% + \ifCLASSOPTIONpeerreview + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \thispagestyle{IEEEpeerreviewcoverpagestyle}\@thanks% + \else + \if@twocolumn% + \ifCLASSOPTIONtechnote% + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \else + \twocolumn[\@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext]% + \fi + \else + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \fi + \thispagestyle{IEEEtitlepagestyle}\@thanks% + \fi + % pullup page for pubid if used. + \if@IEEEusingpubid + \enlargethispage{-\@IEEEpubidpullup}% + \fi + \endgroup + \setcounter{footnote}{0}\let\maketitle\relax\let\@maketitle\relax + \gdef\@thanks{}% + % v1.6b do not clear these as we will need the title again for peer review papers + % \gdef\@author{}\gdef\@title{}% + \let\thanks\relax} + + + +% V1.7 parbox to format \@IEEEcompsoctitleabstractindextext +\long\def\@IEEEcompsoctitleabstractindextextbox#1{\parbox{0.915\textwidth}{#1}} + +% formats the Title, authors names, affiliations and special paper notice +% THIS IS A CONTROLLED SPACING COMMAND! 
Do not allow blank lines or unintentional +% spaces to enter the definition - use % at the end of each line +\def\@maketitle{\newpage +\begingroup\centering +\ifCLASSOPTIONtechnote% technotes + {\bfseries\large\@IEEEcompsoconly{\sffamily}\@title\par}\vskip 1.3em{\lineskip .5em\@IEEEcompsoconly{\sffamily}\@author + \@IEEEspecialpapernotice\par{\@IEEEcompsoconly{\vskip 1.5em\relax + \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par + \hfill\@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax +\else% not a technote + \vskip0.2em{\Huge\@IEEEcompsoconly{\sffamily}\@IEEEcompsocconfonly{\normalfont\normalsize\vskip 2\@IEEEnormalsizeunitybaselineskip + \bfseries\Large}\@title\par}\vskip1.0em\par% + % V1.6 handle \author differently if in conference mode + \ifCLASSOPTIONconference% + {\@IEEEspecialpapernotice\mbox{}\vskip\@IEEEauthorblockconfadjspace% + \mbox{}\hfill\begin{@IEEEauthorhalign}\@author\end{@IEEEauthorhalign}\hfill\mbox{}\par}\relax + \else% peerreviewca, peerreview or journal + \ifCLASSOPTIONpeerreviewca + % peerreviewca handles author names just like conference mode + {\@IEEEcompsoconly{\sffamily}\@IEEEspecialpapernotice\mbox{}\vskip\@IEEEauthorblockconfadjspace% + \mbox{}\hfill\begin{@IEEEauthorhalign}\@author\end{@IEEEauthorhalign}\hfill\mbox{}\par + {\@IEEEcompsoconly{\vskip 1.5em\relax + \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par\hfill + \@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax + \else% journal or peerreview + {\lineskip.5em\@IEEEcompsoconly{\sffamily}\sublargesize\@author\@IEEEspecialpapernotice\par + {\@IEEEcompsoconly{\vskip 1.5em\relax + \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par\hfill + \@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax + \fi + \fi +\fi\par\endgroup} + + + +% V1.7 Computer Society "diamond line" which follows index terms for nonconference papers +\def\@IEEEcompsocdiamondline{\vrule depth 0pt height 0.5pt width 
4cm\hspace{7.5pt}% +\raisebox{-3.5pt}{\fontfamily{pzd}\fontencoding{U}\fontseries{m}\fontshape{n}\fontsize{11}{12}\selectfont\char70}% +\hspace{7.5pt}\vrule depth 0pt height 0.5pt width 4cm\relax} + +% V1.7 standard LateX2e \thanks, but with \itshape under compsoc. Also make it a \long\def +% We also need to trigger the one-shot footnote rule +\def\@IEEEtriggeroneshotfootnoterule{\global\@IEEEenableoneshotfootnoteruletrue} + + +\long\def\thanks#1{\footnotemark + \protected@xdef\@thanks{\@thanks + \protect\footnotetext[\the\c@footnote]{\@IEEEcompsoconly{\itshape + \protect\@IEEEtriggeroneshotfootnoterule\relax}\ignorespaces#1}}} +\let\@thanks\@empty + +% V1.7 allow \author to contain \par's. This is needed to allow \thanks to contain \par. +\long\def\author#1{\gdef\@author{#1}} + + +% in addition to setting up IEEEitemize, we need to remove a baselineskip space above and +% below it because \list's \pars introduce blank lines because of the footnote struts. +\def\@IEEEsetupcompsocitemizelist{\def\labelitemi{$\bullet$}% +\setlength{\IEEElabelindent}{0pt}\setlength{\parskip}{0pt}% +\setlength{\partopsep}{0pt}\setlength{\topsep}{0.5\baselineskip}\vspace{-1\baselineskip}\relax} + + +% flag for fake non-compsoc \IEEEcompsocthanksitem - prevents line break on very first item +\newif\if@IEEEbreakcompsocthanksitem \@IEEEbreakcompsocthanksitemfalse + +\ifCLASSOPTIONcompsoc +% V1.7 compsoc bullet item \thanks +% also, we need to redefine this to destroy the argument in \@IEEEdynamictitlevspace +\long\def\IEEEcompsocitemizethanks#1{\relax\@IEEEbreakcompsocthanksitemfalse\footnotemark + \protected@xdef\@thanks{\@thanks + \protect\footnotetext[\the\c@footnote]{\itshape\protect\@IEEEtriggeroneshotfootnoterule + {\let\IEEEiedlistdecl\relax\protect\begin{IEEEitemize}[\protect\@IEEEsetupcompsocitemizelist]\ignorespaces#1\relax + \protect\end{IEEEitemize}}\protect\vspace{-1\baselineskip}}}} +\DeclareRobustCommand*{\IEEEcompsocthanksitem}{\item} +\else +% non-compsoc, allow for dual 
compilation via rerouting to normal \thanks +\long\def\IEEEcompsocitemizethanks#1{\thanks{#1}} +% redirect to "pseudo-par" \hfil\break\indent after swallowing [] from \IEEEcompsocthanksitem[] +\DeclareRobustCommand{\IEEEcompsocthanksitem}{\@ifnextchar [{\@IEEEthanksswallowoptionalarg}% +{\@IEEEthanksswallowoptionalarg[\relax]}} +% be sure and break only after first item, be sure and ignore spaces after optional argument +\def\@IEEEthanksswallowoptionalarg[#1]{\relax\if@IEEEbreakcompsocthanksitem\hfil\break +\indent\fi\@IEEEbreakcompsocthanksitemtrue\ignorespaces} +\fi + + +% V1.6b define the \IEEEpeerreviewmaketitle as needed +\ifCLASSOPTIONpeerreview +\def\IEEEpeerreviewmaketitle{\@IEEEcleardoublepage{empty}% +\ifCLASSOPTIONtwocolumn +\twocolumn[\@IEEEpeerreviewmaketitle\@IEEEdynamictitlevspace] +\else +\newpage\@IEEEpeerreviewmaketitle\@IEEEstatictitlevskip +\fi +\thispagestyle{IEEEtitlepagestyle}} +\else +% \IEEEpeerreviewmaketitle does nothing if peer review option has not been selected +\def\IEEEpeerreviewmaketitle{\relax} +\fi + +% peerreview formats the repeated title like the title in journal papers. +\def\@IEEEpeerreviewmaketitle{\begin{center}\@IEEEcompsoconly{\sffamily}% +\normalfont\normalsize\vskip0.2em{\Huge\@title\par}\vskip1.0em\par +\end{center}} + + + +% V1.6 +% this is a static rubber spacer between the title/authors and the main text +% used for single column text, or when the title appears in the first column +% of two column text (technotes). 
+\def\@IEEEstatictitlevskip{{\normalfont\normalsize +% adjust spacing to next text +% v1.6b handle peer review papers +\ifCLASSOPTIONpeerreview +% for peer review papers, the same value is used for both title pages +% regardless of the other paper modes + \vskip 1\baselineskip plus 0.375\baselineskip minus 0.1875\baselineskip +\else + \ifCLASSOPTIONconference% conference + \vskip 0.6\baselineskip + \else% + \ifCLASSOPTIONtechnote% technote + \vskip 1\baselineskip plus 0.375\baselineskip minus 0.1875\baselineskip% + \else% journal uses more space + \vskip 2.5\baselineskip plus 0.75\baselineskip minus 0.375\baselineskip% + \fi + \fi +\fi}} + + +% V1.6 +% This is a dynamically determined rigid spacer between the title/authors +% and the main text. This is used only for single column titles over two +% column text (most common) +% This is bit tricky because we have to ensure that the textheight of the +% main text is an integer multiple of \baselineskip +% otherwise underfull vbox problems may develop in the second column of the +% text on the titlepage +% The possible use of \IEEEpubid must also be taken into account. 
+\def\@IEEEdynamictitlevspace{{% + % we run within a group so that all the macros can be forgotten when we are done + \long\def\thanks##1{\relax}%don't allow \thanks to run when we evaluate the vbox height + \long\def\IEEEcompsocitemizethanks##1{\relax}%don't allow \IEEEcompsocitemizethanks to run when we evaluate the vbox height + \normalfont\normalsize% we declare more descriptive variable names + \let\@IEEEmaintextheight=\@IEEEtrantmpdimenA%height of the main text columns + \let\@IEEEINTmaintextheight=\@IEEEtrantmpdimenB%height of the main text columns with integer # lines + % set the nominal and minimum values for the title spacer + % the dynamic algorithm will not allow the spacer size to + % become less than \@IEEEMINtitlevspace - instead it will be + % lengthened + % default to journal values + \def\@IEEENORMtitlevspace{2.5\baselineskip}% + \def\@IEEEMINtitlevspace{2\baselineskip}% + % conferences and technotes need tighter spacing + \ifCLASSOPTIONconference%conference + \def\@IEEENORMtitlevspace{1\baselineskip}% + \def\@IEEEMINtitlevspace{0.75\baselineskip}% + \fi + \ifCLASSOPTIONtechnote%technote + \def\@IEEENORMtitlevspace{1\baselineskip}% + \def\@IEEEMINtitlevspace{0.75\baselineskip}% + \fi% + % get the height that the title will take up + \ifCLASSOPTIONpeerreview + \settoheight{\@IEEEmaintextheight}{\vbox{\hsize\textwidth \@IEEEpeerreviewmaketitle}}% + \else + \settoheight{\@IEEEmaintextheight}{\vbox{\hsize\textwidth \@maketitle}}% + \fi + \@IEEEmaintextheight=-\@IEEEmaintextheight% title takes away from maintext, so reverse sign + % add the height of the page textheight + \advance\@IEEEmaintextheight by \textheight% + % correct for title pages using pubid + \ifCLASSOPTIONpeerreview\else + % peerreview papers use the pubid on the cover page only. + % And the cover page uses a static spacer. 
+ \if@IEEEusingpubid\advance\@IEEEmaintextheight by -\@IEEEpubidpullup\fi + \fi% + % subtract off the nominal value of the title bottom spacer + \advance\@IEEEmaintextheight by -\@IEEENORMtitlevspace% + % \topskip takes away some too + \advance\@IEEEmaintextheight by -\topskip% + % calculate the column height of the main text for lines + % now we calculate the main text height as if holding + % an integer number of \normalsize lines after the first + % and discard any excess fractional remainder + % we subtracted the first line, because the first line + % is placed \topskip into the maintext, not \baselineskip like the + % rest of the lines. + \@IEEEINTmaintextheight=\@IEEEmaintextheight% + \divide\@IEEEINTmaintextheight by \baselineskip% + \multiply\@IEEEINTmaintextheight by \baselineskip% + % now we calculate how much the title spacer height will + % have to be reduced from nominal (\@IEEEREDUCEmaintextheight is always + % a positive value) so that the maintext area will contain an integer + % number of normal size lines + % we change variable names here (to avoid confusion) as we no longer + % need \@IEEEINTmaintextheight and can reuse its dimen register + \let\@IEEEREDUCEmaintextheight=\@IEEEINTmaintextheight% + \advance\@IEEEREDUCEmaintextheight by -\@IEEEmaintextheight% + \advance\@IEEEREDUCEmaintextheight by \baselineskip% + % this is the calculated height of the spacer + % we change variable names here (to avoid confusion) as we no longer + % need \@IEEEmaintextheight and can reuse its dimen register + \let\@IEEECOMPENSATElen=\@IEEEmaintextheight% + \@IEEECOMPENSATElen=\@IEEENORMtitlevspace% set the nominal value + % we go with the reduced length if it is smaller than an increase + \ifdim\@IEEEREDUCEmaintextheight < 0.5\baselineskip\relax% + \advance\@IEEECOMPENSATElen by -\@IEEEREDUCEmaintextheight% + % if the resulting spacer is too small back out and go with an increase instead + \ifdim\@IEEECOMPENSATElen<\@IEEEMINtitlevspace\relax% + 
\advance\@IEEECOMPENSATElen by \baselineskip% + \fi% + \else% + % go with an increase because it is closer to the nominal than a decrease + \advance\@IEEECOMPENSATElen by -\@IEEEREDUCEmaintextheight% + \advance\@IEEECOMPENSATElen by \baselineskip% + \fi% + % set the calculated rigid spacer + \vspace{\@IEEECOMPENSATElen}}} + + + +% V1.6 +% we allow the user access to the last part of the title area +% useful in emergencies such as when a different spacing is needed +% This text is NOT compensated for in the dynamic sizer. +\let\@IEEEaftertitletext=\relax +\long\def\IEEEaftertitletext#1{\def\@IEEEaftertitletext{#1}} + +% V1.7 provide a way for users to enter abstract and keywords +% into the onecolumn title are. This text is compensated for +% in the dynamic sizer. +\let\@IEEEcompsoctitleabstractindextext=\relax +\long\def\IEEEcompsoctitleabstractindextext#1{\def\@IEEEcompsoctitleabstractindextext{#1}} +% V1.7 provide a way for users to get the \@IEEEcompsoctitleabstractindextext if +% not in compsoc journal mode - this way abstract and keywords can be placed +% in their conventional position if not in compsoc mode. +\def\IEEEdisplaynotcompsoctitleabstractindextext{% +\ifCLASSOPTIONcompsoc% display if compsoc conf +\ifCLASSOPTIONconference\@IEEEcompsoctitleabstractindextext\fi +\else% or if not compsoc +\@IEEEcompsoctitleabstractindextext\fi} + + +% command to allow alteration of baselinestretch, but only if the current +% baselineskip is unity. Used to tweak the compsoc abstract and keywords line spacing. 
+\def\@IEEEtweakunitybaselinestretch#1{{\def\baselinestretch{1}\selectfont +\global\@tempskipa\baselineskip}\ifnum\@tempskipa=\baselineskip% +\def\baselinestretch{#1}\selectfont\fi\relax} + + +% abstract and keywords are in \small, except +% for 9pt docs in which they are in \footnotesize +% Because 9pt docs use an 8pt footnotesize, \small +% becomes a rather awkward 8.5pt +\def\@IEEEabskeysecsize{\small} +\ifx\CLASSOPTIONpt\@IEEEptsizenine + \def\@IEEEabskeysecsize{\footnotesize} +\fi + +% compsoc journals use \footnotesize, compsoc conferences use normalsize +\@IEEEcompsoconly{\def\@IEEEabskeysecsize{\footnotesize}} +\@IEEEcompsocconfonly{\def\@IEEEabskeysecsize{\normalsize}} + + + + +% V1.6 have abstract and keywords strip leading spaces, pars and newlines +% so that spacing is more tightly controlled. +\def\abstract{\normalfont + \if@twocolumn + \par\@IEEEabskeysecsize\bfseries\leavevmode\kern-1pt\textit{\abstractname}---\relax + \else + \begin{center}\vspace{-1.78ex}\@IEEEabskeysecsize\textbf{\abstractname}\end{center}\quotation\@IEEEabskeysecsize + \fi\@IEEEgobbleleadPARNLSP} +% V1.6 IEEE wants only 1 pica from end of abstract to introduction heading when in +% conference mode (the heading already has this much above it) +\def\endabstract{\relax\ifCLASSOPTIONconference\vspace{0ex}\else\vspace{1.34ex}\fi\par\if@twocolumn\else\endquotation\fi + \normalfont\normalsize} + +\def\IEEEkeywords{\normalfont + \if@twocolumn + \@IEEEabskeysecsize\bfseries\leavevmode\kern-1pt\textit{\IEEEkeywordsname}---\relax + \else + \begin{center}\@IEEEabskeysecsize\textbf{\IEEEkeywordsname}\end{center}\quotation\@IEEEabskeysecsize + \fi\itshape\@IEEEgobbleleadPARNLSP} +\def\endIEEEkeywords{\relax\ifCLASSOPTIONtechnote\vspace{1.34ex}\else\vspace{0.5ex}\fi + \par\if@twocolumn\else\endquotation\fi% + \normalfont\normalsize} + +% V1.7 compsoc keywords index terms +\ifCLASSOPTIONcompsoc + \ifCLASSOPTIONconference% compsoc conference +\def\abstract{\normalfont + 
\begin{center}\@IEEEabskeysecsize\textbf{\large\abstractname}\end{center}\vskip 0.5\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip + \if@twocolumn\else\quotation\fi\itshape\@IEEEabskeysecsize% + \par\@IEEEgobbleleadPARNLSP} +\def\IEEEkeywords{\normalfont\vskip 1.5\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip + \begin{center}\@IEEEabskeysecsize\textbf{\large\IEEEkeywordsname}\end{center}\vskip 0.5\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip + \if@twocolumn\else\quotation\fi\itshape\@IEEEabskeysecsize% + \par\@IEEEgobbleleadPARNLSP} + \else% compsoc not conference +\def\abstract{\normalfont\@IEEEtweakunitybaselinestretch{1.15}\sffamily + \if@twocolumn + \@IEEEabskeysecsize\noindent\textbf{\abstractname}---\relax + \else + \begin{center}\vspace{-1.78ex}\@IEEEabskeysecsize\textbf{\abstractname}\end{center}\quotation\@IEEEabskeysecsize% + \fi\@IEEEgobbleleadPARNLSP} +\def\IEEEkeywords{\normalfont\@IEEEtweakunitybaselinestretch{1.15}\sffamily + \if@twocolumn + \@IEEEabskeysecsize\vskip 0.5\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip\noindent + \textbf{\IEEEkeywordsname}---\relax + \else + \begin{center}\@IEEEabskeysecsize\textbf{\IEEEkeywordsname}\end{center}\quotation\@IEEEabskeysecsize% + \fi\@IEEEgobbleleadPARNLSP} + \fi +\fi + + + +% gobbles all leading \, \\ and \par, upon finding first token that +% is not a \ , \\ or a \par, it ceases and returns that token +% +% used to strip leading \, \\ and \par from the input +% so that such things in the beginning of an environment will not +% affect the formatting of the text +\long\def\@IEEEgobbleleadPARNLSP#1{\let\@IEEEswallowthistoken=0% +\let\@IEEEgobbleleadPARNLSPtoken#1% +\let\@IEEEgobbleleadPARtoken=\par% +\let\@IEEEgobbleleadNLtoken=\\% +\let\@IEEEgobbleleadSPtoken=\ % +\def\@IEEEgobbleleadSPMACRO{\ }% +\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadPARtoken% +\let\@IEEEswallowthistoken=1% +\fi% +\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadNLtoken% 
+\let\@IEEEswallowthistoken=1% +\fi% +\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadSPtoken% +\let\@IEEEswallowthistoken=1% +\fi% +% a control space will come in as a macro +% when it is the last one on a line +\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadSPMACRO% +\let\@IEEEswallowthistoken=1% +\fi% +% if we have to swallow this token, do so and taste the next one +% else spit it out and stop gobbling +\ifx\@IEEEswallowthistoken 1\let\@IEEEnextgobbleleadPARNLSP=\@IEEEgobbleleadPARNLSP\else% +\let\@IEEEnextgobbleleadPARNLSP=#1\fi% +\@IEEEnextgobbleleadPARNLSP}% + + + + +% TITLING OF SECTIONS +\def\@IEEEsectpunct{:\ \,} % Punctuation after run-in section heading (headings which are + % part of the paragraphs), need little bit more than a single space + % spacing from section number to title +% compsoc conferences use regular period/space punctuation +\ifCLASSOPTIONcompsoc +\ifCLASSOPTIONconference +\def\@IEEEsectpunct{.\ } +\fi\fi + +\def\@seccntformat#1{\hb@xt@ 1.4em{\csname the#1dis\endcsname\hss\relax}} +\def\@seccntformatinl#1{\hb@xt@ 1.1em{\csname the#1dis\endcsname\hss\relax}} +\def\@seccntformatch#1{\csname the#1dis\endcsname\hskip 1em\relax} + +\ifCLASSOPTIONcompsoc +% compsoc journals need extra spacing +\ifCLASSOPTIONconference\else +\def\@seccntformat#1{\csname the#1dis\endcsname\hskip 1em\relax} +\fi\fi + +%v1.7 put {} after #6 to allow for some types of user font control +%and use \@@par rather than \par +\def\@sect#1#2#3#4#5#6[#7]#8{% + \ifnum #2>\c@secnumdepth + \let\@svsec\@empty + \else + \refstepcounter{#1}% + % load section label and spacer into \@svsec + \ifnum #2=1 + \protected@edef\@svsec{\@seccntformatch{#1}\relax}% + \else + \ifnum #2>2 + \protected@edef\@svsec{\@seccntformatinl{#1}\relax}% + \else + \protected@edef\@svsec{\@seccntformat{#1}\relax}% + \fi + \fi + \fi% + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@% tempskipa determines whether is treated as a high + \begingroup #6{\relax% or low level heading + \noindent % subsections 
are NOT indented + % print top level headings. \@svsec is label, #8 is heading title + % IEEE does not block indent the section title text, it flows like normal + {\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\@@par}}% + \endgroup + \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth\relax\else + \protect\numberline{\csname the#1\endcsname}\fi#7}% + \else % printout low level headings + % svsechd seems to swallow the trailing space, protect it with \mbox{} + % got rid of sectionmark stuff + \def\@svsechd{#6{\hskip #3\relax\@svsec #8\@IEEEsectpunct\mbox{}}% + \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth\relax\else + \protect\numberline{\csname the#1\endcsname}\fi#7}}% + \fi%skip down + \@xsect{#5}} + + +% section* handler +%v1.7 put {} after #4 to allow for some types of user font control +%and use \@@par rather than \par +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + %\begingroup #4\@hangfrom{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + % IEEE does not block indent the section title text, it flows like normal + \begingroup \noindent #4{\relax{\hskip #1}{\interlinepenalty \@M #5\@@par}}\endgroup + % svsechd swallows the trailing space, protect it with \mbox{} + \else \def\@svsechd{#4{\hskip #1\relax #5\@IEEEsectpunct\mbox{}}}\fi + \@xsect{#3}} + + +%% SECTION heading spacing and font +%% +% arguments are: #1 - sectiontype name +% (for \@sect) #2 - section level +% #3 - section heading indent +% #4 - top separation (absolute value used, neg indicates not to indent main text) +% If negative, make stretch parts negative too! +% #5 - (absolute value used) positive: bottom separation after heading, +% negative: amount to indent main text after heading +% Both #4 and #5 negative means to indent main text and use negative top separation +% #6 - font control +% You've got to have \normalfont\normalsize in the font specs below to prevent +% trouble when you do something like: +% \section{Note}{\ttfamily TT-TEXT} is known to ... 
+% IEEE sometimes REALLY stretches the area before a section +% heading by up to about 0.5in. However, it may not be a good +% idea to let LaTeX have quite this much rubber. +\ifCLASSOPTIONconference% +% IEEE wants section heading spacing to decrease for conference mode +\def\section{\@startsection{section}{1}{\z@}{1.5ex plus 1.5ex minus 0.5ex}% +{1sp}{\normalfont\normalsize\centering\scshape}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{1.5ex plus 1.5ex minus 0.5ex}% +{1sp}{\normalfont\normalsize\itshape}}% +\else % for journals +\def\section{\@startsection{section}{1}{\z@}{3.0ex plus 1.5ex minus 1.5ex}% V1.6 3.0ex from 3.5ex +{0.7ex plus 1ex minus 0ex}{\normalfont\normalsize\centering\scshape}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{3.5ex plus 1.5ex minus 1.5ex}% +{0.7ex plus .5ex minus 0ex}{\normalfont\normalsize\itshape}}% +\fi + +% for both journals and conferences +% decided to put in a little rubber above the section, might help somebody +\def\subsubsection{\@startsection{subsubsection}{3}{\parindent}{0ex plus 0.1ex minus 0.1ex}% +{0ex}{\normalfont\normalsize\itshape}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{0ex plus 0.1ex minus 0.1ex}% +{0ex}{\normalfont\normalsize\itshape}}% + + +% compsoc +\ifCLASSOPTIONcompsoc +\ifCLASSOPTIONconference +% compsoc conference +\def\section{\@startsection{section}{1}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}{\normalfont\large\bfseries}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}{\normalfont\sublargesize\bfseries}}% +\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{0ex}{\normalfont\normalsize\bfseries}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{0ex plus 0.1ex minus 
0.1ex}% +{0ex}{\normalfont\normalsize}}% +\else% compsoc journals +% use negative top separation as compsoc journals do not indent paragraphs after section titles +\def\section{\@startsection{section}{1}{\z@}{-3ex plus -2ex minus -1.5ex}% +{0.7ex plus 1ex minus 0ex}{\normalfont\large\sffamily\bfseries\scshape}}% +% Note that subsection and smaller may not be correct for the Computer Society, +% I have to look up an example. +\def\subsection{\@startsection{subsection}{2}{\z@}{-3.5ex plus -1.5ex minus -1.5ex}% +{0.7ex plus .5ex minus 0ex}{\normalfont\normalsize\sffamily\bfseries}}% +\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{-2.5ex plus -1ex minus -1ex}% +{0.5ex plus 0.5ex minus 0ex}{\normalfont\normalsize\sffamily\itshape}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{-0ex plus -0.1ex minus -0.1ex}% +{0ex}{\normalfont\normalsize}}% +\fi\fi + + + + +%% ENVIRONMENTS +% "box" symbols at end of proofs +\def\IEEEQEDclosed{\mbox{\rule[0pt]{1.3ex}{1.3ex}}} % for a filled box +% V1.6 some journals use an open box instead that will just fit around a closed one +\def\IEEEQEDopen{{\setlength{\fboxsep}{0pt}\setlength{\fboxrule}{0.2pt}\fbox{\rule[0pt]{0pt}{1.3ex}\rule[0pt]{1.3ex}{0pt}}}} +\ifCLASSOPTIONcompsoc +\def\IEEEQED{\IEEEQEDopen} % default to open for compsoc +\else +\def\IEEEQED{\IEEEQEDclosed} % otherwise default to closed +\fi + +% v1.7 name change to avoid namespace collision with amsthm. Also add support +% for an optional argument. 
+\def\IEEEproof{\@ifnextchar[{\@IEEEproof}{\@IEEEproof[\IEEEproofname]}} +\def\@IEEEproof[#1]{\par\noindent\hspace{2em}{\itshape #1: }} +\def\endIEEEproof{\hspace*{\fill}~\IEEEQED\par} + + +%\itemindent is set to \z@ by list, so define new temporary variable +\newdimen\@IEEEtmpitemindent +\def\@begintheorem#1#2{\@IEEEtmpitemindent\itemindent\topsep 0pt\rmfamily\trivlist% + \item[\hskip \labelsep{\indent\itshape #1\ #2:}]\itemindent\@IEEEtmpitemindent} +\def\@opargbegintheorem#1#2#3{\@IEEEtmpitemindent\itemindent\topsep 0pt\rmfamily \trivlist% +% V1.6 IEEE is back to using () around theorem names which are also in italics +% Thanks to Christian Peel for reporting this. + \item[\hskip\labelsep{\indent\itshape #1\ #2\ (#3):}]\itemindent\@IEEEtmpitemindent} +% V1.7 remove bogus \unskip that caused equations in theorems to collide with +% lines below. +\def\@endtheorem{\endtrivlist} + +% V1.6 +% display command for the section the theorem is in - so that \thesection +% is not used as this will be in Roman numerals when we want arabic. +% LaTeX2e uses \def\@thmcounter#1{\noexpand\arabic{#1}} for the theorem number +% (second part) display and \def\@thmcountersep{.} as a separator. +% V1.7 intercept calls to the section counter and reroute to \@IEEEthmcounterinsection +% to allow \appendix(ices} to override as needed. 
+% +% special handler for sections, allows appendix(ices) to override +\gdef\@IEEEthmcounterinsection#1{\arabic{#1}} +% string macro +\edef\@IEEEstringsection{section} + +% redefine the #1#2[#3] form of newtheorem to use a hook to \@IEEEthmcounterinsection +% if section in_counter is used +\def\@xnthm#1#2[#3]{% + \expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}\@newctr{#1}[#3]% + \edef\@IEEEstringtmp{#3} + \ifx\@IEEEstringtmp\@IEEEstringsection + \expandafter\xdef\csname the#1\endcsname{% + \noexpand\@IEEEthmcounterinsection{#3}\@thmcountersep + \@thmcounter{#1}}% + \else + \expandafter\xdef\csname the#1\endcsname{% + \expandafter\noexpand\csname the#3\endcsname \@thmcountersep + \@thmcounter{#1}}% + \fi + \global\@namedef{#1}{\@thm{#1}{#2}}% + \global\@namedef{end#1}{\@endtheorem}}} + + + +%% SET UP THE DEFAULT PAGESTYLE +\ps@headings +\pagenumbering{arabic} + +% normally the page counter starts at 1 +\setcounter{page}{1} +% however, for peerreview the cover sheet is page 0 or page -1 +% (for duplex printing) +\ifCLASSOPTIONpeerreview + \if@twoside + \setcounter{page}{-1} + \else + \setcounter{page}{0} + \fi +\fi + +% standard book class behavior - let bottom line float up and down as +% needed when single sided +\ifCLASSOPTIONtwoside\else\raggedbottom\fi +% if two column - turn on twocolumn, allow word spacings to stretch more and +% enforce a rigid position for the last lines +\ifCLASSOPTIONtwocolumn +% the peer review option delays invoking twocolumn + \ifCLASSOPTIONpeerreview\else + \twocolumn + \fi +\sloppy +\flushbottom +\fi + + + + +% \APPENDIX and \APPENDICES definitions + +% This is the \@ifmtarg command from the LaTeX ifmtarg package +% by Peter Wilson (CUA) and Donald Arseneau +% \@ifmtarg is used to determine if an argument to a command +% is present or not. +% For instance: +% \@ifmtarg{#1}{\typeout{empty}}{\typeout{has something}} +% \@ifmtarg is used with our redefined \section command if +% \appendices is invoked. 
+% The command \section will behave slightly differently depending +% on whether the user specifies a title: +% \section{My appendix title} +% or not: +% \section{} +% This way, we can eliminate the blank lines where the title +% would be, and the unneeded : after Appendix in the table of +% contents +\begingroup +\catcode`\Q=3 +\long\gdef\@ifmtarg#1{\@xifmtarg#1QQ\@secondoftwo\@firstoftwo\@nil} +\long\gdef\@xifmtarg#1#2Q#3#4#5\@nil{#4} +\endgroup +% end of \@ifmtarg defs + + +% V1.7 +% command that allows the one time saving of the original definition +% of section to \@IEEEappendixsavesection for \appendix or \appendices +% we don't save \section here as it may be redefined later by other +% packages (hyperref.sty, etc.) +\def\@IEEEsaveoriginalsectiononce{\let\@IEEEappendixsavesection\section +\let\@IEEEsaveoriginalsectiononce\relax} + +% neat trick to grab and process the argument from \section{argument} +% we process differently if the user invoked \section{} with no +% argument (title) +% note we reroute the call to the old \section* +\def\@IEEEprocessthesectionargument#1{% +\@ifmtarg{#1}{% +\@IEEEappendixsavesection*{\appendixname~\thesectiondis}% +\addcontentsline{toc}{section}{\appendixname~\thesection}}{% +\@IEEEappendixsavesection*{\appendixname~\thesectiondis \\* #1}% +\addcontentsline{toc}{section}{\appendixname~\thesection: #1}}} + +% we use this if the user calls \section{} after +% \appendix-- which has no meaning. So, we ignore the +% command and its argument. Then, warn the user. +\def\@IEEEdestroythesectionargument#1{\typeout{** WARNING: Ignoring useless +\protect\section\space in Appendix (line \the\inputlineno).}} + + +% remember \thesection forms will be displayed in \ref calls +% and in the Table of Contents. +% The \sectiondis form is used in the actual heading itself + +% appendix command for one single appendix +% normally has no heading. 
However, if you want a +% heading, you can do so via the optional argument: +% \appendix[Optional Heading] +\def\appendix{\relax} +\renewcommand{\appendix}[1][]{\@IEEEsaveoriginalsectiononce\par + % v1.6 keep hyperref's identifiers unique + \gdef\theHsection{Appendix.A}% + % v1.6 adjust hyperref's string name for the section + \xdef\Hy@chapapp{appendix}% + \setcounter{section}{0}% + \setcounter{subsection}{0}% + \setcounter{subsubsection}{0}% + \setcounter{paragraph}{0}% + \gdef\thesection{A}% + \gdef\thesectiondis{}% + \gdef\thesubsection{\Alph{subsection}}% + \gdef\@IEEEthmcounterinsection##1{A} + \refstepcounter{section}% update the \ref counter + \@ifmtarg{#1}{\@IEEEappendixsavesection*{\appendixname}% + \addcontentsline{toc}{section}{\appendixname}}{% + \@IEEEappendixsavesection*{\appendixname~\\* #1}% + \addcontentsline{toc}{section}{\appendixname: #1}}% + % redefine \section command for appendix + % leave \section* as is + \def\section{\@ifstar{\@IEEEappendixsavesection*}{% + \@IEEEdestroythesectionargument}}% throw out the argument + % of the normal form +} + + + +% appendices command for multiple appendices +% user then calls \section with an argument (possibly empty) to +% declare the individual appendices +\def\appendices{\@IEEEsaveoriginalsectiononce\par + % v1.6 keep hyperref's identifiers unique + \gdef\theHsection{Appendix.\Alph{section}}% + % v1.6 adjust hyperref's string name for the section + \xdef\Hy@chapapp{appendix}% + \setcounter{section}{-1}% we want \refstepcounter to use section 0 + \setcounter{subsection}{0}% + \setcounter{subsubsection}{0}% + \setcounter{paragraph}{0}% + \ifCLASSOPTIONromanappendices% + \gdef\thesection{\Roman{section}}% + \gdef\thesectiondis{\Roman{section}}% + \@IEEEcompsocconfonly{\gdef\thesectiondis{\Roman{section}.}}% + \gdef\@IEEEthmcounterinsection##1{A\arabic{##1}} + \else% + \gdef\thesection{\Alph{section}}% + \gdef\thesectiondis{\Alph{section}}% + \@IEEEcompsocconfonly{\gdef\thesectiondis{\Alph{section}.}}% + 
\gdef\@IEEEthmcounterinsection##1{\Alph{##1}} + \fi% + \refstepcounter{section}% update the \ref counter + \setcounter{section}{0}% NEXT \section will be the FIRST appendix + % redefine \section command for appendices + % leave \section* as is + \def\section{\@ifstar{\@IEEEappendixsavesection*}{% process the *-form + \refstepcounter{section}% or is a new section so, + \@IEEEprocessthesectionargument}}% process the argument + % of the normal form +} + + + +% \IEEEPARstart +% Definition for the big two line drop cap letter at the beginning of the +% first paragraph of journal papers. The first argument is the first letter +% of the first word, the second argument is the remaining letters of the +% first word which will be rendered in upper case. +% In V1.6 this has been completely rewritten to: +% +% 1. no longer have problems when the user begins an environment +% within the paragraph that uses \IEEEPARstart. +% 2. auto-detect and use the current font family +% 3. revise handling of the space at the end of the first word so that +% interword glue will now work as normal. +% 4. produce correctly aligned edges for the (two) indented lines. +% +% We generalize things via control macros - playing with these is fun too. +% +% V1.7 added more control macros to make it easy for IEEEtrantools.sty users +% to change the font style. +% +% the number of lines that are indented to clear it +% may need to increase if using decenders +\def\@IEEEPARstartDROPLINES{2} +% minimum number of lines left on a page to allow a \@IEEEPARstart +% Does not take into consideration rubber shrink, so it tends to +% be overly cautious +\def\@IEEEPARstartMINPAGELINES{2} +% V1.7 the height of the drop cap is adjusted to match the height of this text +% in the current font (when \IEEEPARstart is called). 
+\def\@IEEEPARstartHEIGHTTEXT{T} +% the depth the letter is lowered below the baseline +% the height (and size) of the letter is determined by the sum +% of this value and the height of the \@IEEEPARstartHEIGHTTEXT in the current +% font. It is a good idea to set this value in terms of the baselineskip +% so that it can respond to changes therein. +\def\@IEEEPARstartDROPDEPTH{1.1\baselineskip} +% V1.7 the font the drop cap will be rendered in, +% can take zero or one argument. +\def\@IEEEPARstartFONTSTYLE{\bfseries} +% V1.7 any additional, non-font related commands needed to modify +% the drop cap letter, can take zero or one argument. +\def\@IEEEPARstartCAPSTYLE{\MakeUppercase} +% V1.7 the font that will be used to render the rest of the word, +% can take zero or one argument. +\def\@IEEEPARstartWORDFONTSTYLE{\relax} +% V1.7 any additional, non-font related commands needed to modify +% the rest of the word, can take zero or one argument. +\def\@IEEEPARstartWORDCAPSTYLE{\MakeUppercase} +% This is the horizontal separation distance from the drop letter to the main text. +% Lengths that depend on the font (e.g., ex, em, etc.) will be referenced +% to the font that is active when \IEEEPARstart is called. +\def\@IEEEPARstartSEP{0.15em} +% V1.7 horizontal offset applied to the left of the drop cap. +\def\@IEEEPARstartHOFFSET{0em} +% V1.7 Italic correction command applied at the end of the drop cap. 
+\def\@IEEEPARstartITLCORRECT{\/} + +% V1.7 compoc uses nonbold drop cap and small caps word style +\ifCLASSOPTIONcompsoc +\def\@IEEEPARstartFONTSTYLE{\mdseries} +\def\@IEEEPARstartWORDFONTSTYLE{\scshape} +\def\@IEEEPARstartWORDCAPSTYLE{\relax} +\fi + +% definition of \IEEEPARstart +% THIS IS A CONTROLLED SPACING AREA, DO NOT ALLOW SPACES WITHIN THESE LINES +% +% The token \@IEEEPARstartfont will be globally defined after the first use +% of \IEEEPARstart and will be a font command which creates the big letter +% The first argument is the first letter of the first word and the second +% argument is the rest of the first word(s). +\def\IEEEPARstart#1#2{\par{% +% if this page does not have enough space, break it and lets start +% on a new one +\@IEEEtranneedspace{\@IEEEPARstartMINPAGELINES\baselineskip}{\relax}% +% V1.7 move this up here in case user uses \textbf for \@IEEEPARstartFONTSTYLE +% which uses command \leavevmode which causes an unwanted \indent to be issued +\noindent +% calculate the desired height of the big letter +% it extends from the top of \@IEEEPARstartHEIGHTTEXT in the current font +% down to \@IEEEPARstartDROPDEPTH below the current baseline +\settoheight{\@IEEEtrantmpdimenA}{\@IEEEPARstartHEIGHTTEXT}% +\addtolength{\@IEEEtrantmpdimenA}{\@IEEEPARstartDROPDEPTH}% +% extract the name of the current font in bold +% and place it in \@IEEEPARstartFONTNAME +\def\@IEEEPARstartGETFIRSTWORD##1 ##2\relax{##1}% +{\@IEEEPARstartFONTSTYLE{\selectfont\edef\@IEEEPARstartFONTNAMESPACE{\fontname\font\space}% +\xdef\@IEEEPARstartFONTNAME{\expandafter\@IEEEPARstartGETFIRSTWORD\@IEEEPARstartFONTNAMESPACE\relax}}}% +% define a font based on this name with a point size equal to the desired +% height of the drop letter +\font\@IEEEPARstartsubfont\@IEEEPARstartFONTNAME\space at \@IEEEtrantmpdimenA\relax% +% save this value as a counter (integer) value (sp points) +\@IEEEtrantmpcountA=\@IEEEtrantmpdimenA% +% now get the height of the actual letter produced by this font 
size +\settoheight{\@IEEEtrantmpdimenB}{\@IEEEPARstartsubfont\@IEEEPARstartCAPSTYLE{#1}}% +% If something bogus happens like the first argument is empty or the +% current font is strange, do not allow a zero height. +\ifdim\@IEEEtrantmpdimenB=0pt\relax% +\typeout{** WARNING: IEEEPARstart drop letter has zero height! (line \the\inputlineno)}% +\typeout{ Forcing the drop letter font size to 10pt.}% +\@IEEEtrantmpdimenB=10pt% +\fi% +% and store it as a counter +\@IEEEtrantmpcountB=\@IEEEtrantmpdimenB% +% Since a font size doesn't exactly correspond to the height of the capital +% letters in that font, the actual height of the letter, \@IEEEtrantmpcountB, +% will be less than that desired, \@IEEEtrantmpcountA +% we need to raise the font size, \@IEEEtrantmpdimenA +% by \@IEEEtrantmpcountA / \@IEEEtrantmpcountB +% But, TeX doesn't have floating point division, so we have to use integer +% division. Hence the use of the counters. +% We need to reduce the denominator so that the loss of the remainder will +% have minimal affect on the accuracy of the result +\divide\@IEEEtrantmpcountB by 200% +\divide\@IEEEtrantmpcountA by \@IEEEtrantmpcountB% +% Then reequalize things when we use TeX's ability to multiply by +% floating point values +\@IEEEtrantmpdimenB=0.005\@IEEEtrantmpdimenA% +\multiply\@IEEEtrantmpdimenB by \@IEEEtrantmpcountA% +% \@IEEEPARstartfont is globaly set to the calculated font of the big letter +% We need to carry this out of the local calculation area to to create the +% big letter. 
+\global\font\@IEEEPARstartfont\@IEEEPARstartFONTNAME\space at \@IEEEtrantmpdimenB% +% Now set \@IEEEtrantmpdimenA to the width of the big letter +% We need to carry this out of the local calculation area to set the +% hanging indent +\settowidth{\global\@IEEEtrantmpdimenA}{\@IEEEPARstartfont +\@IEEEPARstartCAPSTYLE{#1\@IEEEPARstartITLCORRECT}}}% +% end of the isolated calculation environment +% add in the extra clearance we want +\advance\@IEEEtrantmpdimenA by \@IEEEPARstartSEP\relax% +% add in the optional offset +\advance\@IEEEtrantmpdimenA by \@IEEEPARstartHOFFSET\relax% +% V1.7 don't allow negative offsets to produce negative hanging indents +\@IEEEtrantmpdimenB\@IEEEtrantmpdimenA +\ifnum\@IEEEtrantmpdimenB < 0 \@IEEEtrantmpdimenB 0pt\fi +% \@IEEEtrantmpdimenA has the width of the big letter plus the +% separation space and \@IEEEPARstartfont is the font we need to use +% Now, we make the letter and issue the hanging indent command +% The letter is placed in a box of zero width and height so that other +% text won't be displaced by it. 
+\hangindent\@IEEEtrantmpdimenB\hangafter=-\@IEEEPARstartDROPLINES% +\makebox[0pt][l]{\hspace{-\@IEEEtrantmpdimenA}% +\raisebox{-\@IEEEPARstartDROPDEPTH}[0pt][0pt]{\hspace{\@IEEEPARstartHOFFSET}% +\@IEEEPARstartfont\@IEEEPARstartCAPSTYLE{#1\@IEEEPARstartITLCORRECT}% +\hspace{\@IEEEPARstartSEP}}}% +{\@IEEEPARstartWORDFONTSTYLE{\@IEEEPARstartWORDCAPSTYLE{\selectfont#2}}}} + + + + + + +% determines if the space remaining on a given page is equal to or greater +% than the specified space of argument one +% if not, execute argument two (only if the remaining space is greater than zero) +% and issue a \newpage +% +% example: \@IEEEtranneedspace{2in}{\vfill} +% +% Does not take into consideration rubber shrinkage, so it tends to +% be overly cautious +% Based on an example posted by Donald Arseneau +% Note this macro uses \@IEEEtrantmpdimenB internally for calculations, +% so DO NOT PASS \@IEEEtrantmpdimenB to this routine +% if you need a dimen register, import with \@IEEEtrantmpdimenA instead +\def\@IEEEtranneedspace#1#2{\penalty-100\begingroup%shield temp variable +\@IEEEtrantmpdimenB\pagegoal\advance\@IEEEtrantmpdimenB-\pagetotal% space left +\ifdim #1>\@IEEEtrantmpdimenB\relax% not enough space left +\ifdim\@IEEEtrantmpdimenB>\z@\relax #2\fi% +\newpage% +\fi\endgroup} + + + +% IEEEbiography ENVIRONMENT +% Allows user to enter biography leaving place for picture (adapts to font size) +% As of V1.5, a new optional argument allows you to have a real graphic! +% V1.5 and later also fixes the "colliding biographies" which could happen when a +% biography's text was shorter than the space for the photo. 
+% MDS 7/2001 +% V1.6 prevent multiple biographies from making multiple TOC entries +\newif\if@IEEEbiographyTOCentrynotmade +\global\@IEEEbiographyTOCentrynotmadetrue + +% biography counter so hyperref can jump directly to the biographies +% and not just the previous section +\newcounter{IEEEbiography} +\setcounter{IEEEbiography}{0} + +% photo area size +\def\@IEEEBIOphotowidth{1.0in} % width of the biography photo area +\def\@IEEEBIOphotodepth{1.25in} % depth (height) of the biography photo area +% area cleared for photo +\def\@IEEEBIOhangwidth{1.14in} % width cleared for the biography photo area +\def\@IEEEBIOhangdepth{1.25in} % depth cleared for the biography photo area + % actual depth will be a multiple of + % \baselineskip, rounded up +\def\@IEEEBIOskipN{4\baselineskip}% nominal value of the vskip above the biography + +\newenvironment{IEEEbiography}[2][]{\normalfont\@IEEEcompsoconly{\sffamily}\footnotesize% +\unitlength 1in\parskip=0pt\par\parindent 1em\interlinepenalty500% +% we need enough space to support the hanging indent +% the nominal value of the spacer +% and one extra line for good measure +\@IEEEtrantmpdimenA=\@IEEEBIOhangdepth% +\advance\@IEEEtrantmpdimenA by \@IEEEBIOskipN% +\advance\@IEEEtrantmpdimenA by 1\baselineskip% +% if this page does not have enough space, break it and lets start +% with a new one +\@IEEEtranneedspace{\@IEEEtrantmpdimenA}{\relax}% +% nominal spacer can strech, not shrink use 1fil so user can out stretch with \vfill +\vskip \@IEEEBIOskipN plus 1fil minus 0\baselineskip% +% the default box for where the photo goes +\def\@IEEEtempbiographybox{{\setlength{\fboxsep}{0pt}\framebox{% +\begin{minipage}[b][\@IEEEBIOphotodepth][c]{\@IEEEBIOphotowidth}\centering PLACE\\ PHOTO\\ HERE \end{minipage}}}}% +% +% detect if the optional argument was supplied, this requires the +% \@ifmtarg command as defined in the appendix section above +% and if so, override the default box with what they want 
+\@ifmtarg{#1}{\relax}{\def\@IEEEtempbiographybox{\mbox{\begin{minipage}[b][\@IEEEBIOphotodepth][c]{\@IEEEBIOphotowidth}% +\centering% +#1% +\end{minipage}}}}% end if optional argument supplied +% Make an entry into the table of contents only if we have not done so before +\if@IEEEbiographyTOCentrynotmade% +% link labels to the biography counter so hyperref will jump +% to the biography, not the previous section +\setcounter{IEEEbiography}{-1}% +\refstepcounter{IEEEbiography}% +\addcontentsline{toc}{section}{Biographies}% +\global\@IEEEbiographyTOCentrynotmadefalse% +\fi% +% one more biography +\refstepcounter{IEEEbiography}% +% Make an entry for this name into the table of contents +\addcontentsline{toc}{subsection}{#2}% +% V1.6 properly handle if a new paragraph should occur while the +% hanging indent is still active. Do this by redefining \par so +% that it will not start a new paragraph. (But it will appear to the +% user as if it did.) Also, strip any leading pars, newlines, or spaces. 
+\let\@IEEEBIOORGparCMD=\par% save the original \par command +\edef\par{\hfil\break\indent}% the new \par will not be a "real" \par +\settoheight{\@IEEEtrantmpdimenA}{\@IEEEtempbiographybox}% get height of biography box +\@IEEEtrantmpdimenB=\@IEEEBIOhangdepth% +\@IEEEtrantmpcountA=\@IEEEtrantmpdimenB% countA has the hang depth +\divide\@IEEEtrantmpcountA by \baselineskip% calculates lines needed to produce the hang depth +\advance\@IEEEtrantmpcountA by 1% ensure we overestimate +% set the hanging indent +\hangindent\@IEEEBIOhangwidth% +\hangafter-\@IEEEtrantmpcountA% +% reference the top of the photo area to the top of a capital T +\settoheight{\@IEEEtrantmpdimenB}{\mbox{T}}% +% set the photo box, give it zero width and height so as not to disturb anything +\noindent\makebox[0pt][l]{\hspace{-\@IEEEBIOhangwidth}\raisebox{\@IEEEtrantmpdimenB}[0pt][0pt]{% +\raisebox{-\@IEEEBIOphotodepth}[0pt][0pt]{\@IEEEtempbiographybox}}}% +% now place the author name and begin the bio text +\noindent\textbf{#2\ }\@IEEEgobbleleadPARNLSP}{\relax\let\par=\@IEEEBIOORGparCMD\par% +% 7/2001 V1.5 detect when the biography text is shorter than the photo area +% and pad the unused area - preventing a collision from the next biography entry +% MDS +\ifnum \prevgraf <\@IEEEtrantmpcountA\relax% detect when the biography text is shorter than the photo + \advance\@IEEEtrantmpcountA by -\prevgraf% calculate how many lines we need to pad + \advance\@IEEEtrantmpcountA by -1\relax% we compensate for the fact that we indented an extra line + \@IEEEtrantmpdimenA=\baselineskip% calculate the length of the padding + \multiply\@IEEEtrantmpdimenA by \@IEEEtrantmpcountA% + \noindent\rule{0pt}{\@IEEEtrantmpdimenA}% insert an invisible support strut +\fi% +\par\normalfont} + + + +% V1.6 +% added biography without a photo environment +\newenvironment{IEEEbiographynophoto}[1]{% +% Make an entry into the table of contents only if we have not done so before +\if@IEEEbiographyTOCentrynotmade% +% link labels to the 
biography counter so hyperref will jump +% to the biography, not the previous section +\setcounter{IEEEbiography}{-1}% +\refstepcounter{IEEEbiography}% +\addcontentsline{toc}{section}{Biographies}% +\global\@IEEEbiographyTOCentrynotmadefalse% +\fi% +% one more biography +\refstepcounter{IEEEbiography}% +% Make an entry for this name into the table of contents +\addcontentsline{toc}{subsection}{#1}% +\normalfont\@IEEEcompsoconly{\sffamily}\footnotesize\interlinepenalty500% +\vskip 4\baselineskip plus 1fil minus 0\baselineskip% +\parskip=0pt\par% +\noindent\textbf{#1\ }\@IEEEgobbleleadPARNLSP}{\relax\par\normalfont} + + +% provide the user with some old font commands +% got this from article.cls +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} + + +% SPECIAL PAPER NOTICE COMMANDS +% +% holds the special notice text +\def\@IEEEspecialpapernotice{\relax} + +% for special papers, like invited papers, the user can do: +% \IEEEspecialpapernotice{(Invited Paper)} before \maketitle +\def\IEEEspecialpapernotice#1{\ifCLASSOPTIONconference% +\def\@IEEEspecialpapernotice{{\Large#1\vspace*{1em}}}% +\else% +\def\@IEEEspecialpapernotice{{\\*[1.5ex]\sublargesize\textit{#1}}\vspace*{-2ex}}% +\fi} + + + + +% PUBLISHER ID COMMANDS +% to insert a publisher's ID footer +% V1.6 \IEEEpubid has been changed so that the change in page size and style +% occurs in \maketitle. 
\IEEEpubid must now be issued prior to \maketitle +% use \IEEEpubidadjcol as before - in the second column of the title page +% These changes allow \maketitle to take the reduced page height into +% consideration when dynamically setting the space between the author +% names and the maintext. +% +% the amount the main text is pulled up to make room for the +% publisher's ID footer +% IEEE uses about 1.3\baselineskip for journals, +% dynamic title spacing will clean up the fraction +\def\@IEEEpubidpullup{1.3\baselineskip} +\ifCLASSOPTIONtechnote +% for technotes it must be an integer of baselineskip as there can be no +% dynamic title spacing for two column mode technotes (the title is in the +% in first column) and we should maintain an integer number of lines in the +% second column +% There are some examples (such as older issues of "Transactions on +% Information Theory") in which IEEE really pulls the text off the ID for +% technotes - about 0.55in (or 4\baselineskip). We'll use 2\baselineskip +% and call it even. +\def\@IEEEpubidpullup{2\baselineskip} +\fi + +% V1.7 compsoc does not use a pullup +\ifCLASSOPTIONcompsoc +\def\@IEEEpubidpullup{0pt} +\fi + +% holds the ID text +\def\@IEEEpubid{\relax} + +% flag so \maketitle can tell if \IEEEpubid was called +\newif\if@IEEEusingpubid +\global\@IEEEusingpubidfalse +% issue this command in the page to have the ID at the bottom +% V1.6 use before \maketitle +\def\IEEEpubid#1{\def\@IEEEpubid{#1}\global\@IEEEusingpubidtrue} + + +% command which will pull up (shorten) the column it is executed in +% to make room for the publisher ID. Place in the second column of +% the title page when using \IEEEpubid +% Is smart enough not to do anything when in single column text or +% if the user hasn't called \IEEEpubid +% currently needed in for the second column of a page with the +% publisher ID. 
If not needed in future releases, please provide this +% command and define it as \relax for backward compatibility +% v1.6b do not allow command to operate if the peer review option has been +% selected because \IEEEpubidadjcol will not be on the cover page. +% V1.7 do nothing if compsoc +\def\IEEEpubidadjcol{\ifCLASSOPTIONcompsoc\else\ifCLASSOPTIONpeerreview\else +\if@twocolumn\if@IEEEusingpubid\enlargethispage{-\@IEEEpubidpullup}\fi\fi\fi\fi} + +% Special thanks to Peter Wilson, Daniel Luecking, and the other +% gurus at comp.text.tex, for helping me to understand how best to +% implement the IEEEpubid command in LaTeX. + + + +%% Lockout some commands under various conditions + +% general purpose bit bucket +\newsavebox{\@IEEEtranrubishbin} + +% flags to prevent multiple warning messages +\newif\if@IEEEWARNthanks +\newif\if@IEEEWARNIEEEPARstart +\newif\if@IEEEWARNIEEEbiography +\newif\if@IEEEWARNIEEEbiographynophoto +\newif\if@IEEEWARNIEEEpubid +\newif\if@IEEEWARNIEEEpubidadjcol +\newif\if@IEEEWARNIEEEmembership +\newif\if@IEEEWARNIEEEaftertitletext +\@IEEEWARNthankstrue +\@IEEEWARNIEEEPARstarttrue +\@IEEEWARNIEEEbiographytrue +\@IEEEWARNIEEEbiographynophototrue +\@IEEEWARNIEEEpubidtrue +\@IEEEWARNIEEEpubidadjcoltrue +\@IEEEWARNIEEEmembershiptrue +\@IEEEWARNIEEEaftertitletexttrue + + +%% Lockout some commands when in various modes, but allow them to be restored if needed +%% +% save commands which might be locked out +% so that the user can later restore them if needed +\let\@IEEESAVECMDthanks\thanks +\let\@IEEESAVECMDIEEEPARstart\IEEEPARstart +\let\@IEEESAVECMDIEEEbiography\IEEEbiography +\let\@IEEESAVECMDendIEEEbiography\endIEEEbiography +\let\@IEEESAVECMDIEEEbiographynophoto\IEEEbiographynophoto +\let\@IEEESAVECMDendIEEEbiographynophoto\endIEEEbiographynophoto +\let\@IEEESAVECMDIEEEpubid\IEEEpubid +\let\@IEEESAVECMDIEEEpubidadjcol\IEEEpubidadjcol +\let\@IEEESAVECMDIEEEmembership\IEEEmembership +\let\@IEEESAVECMDIEEEaftertitletext\IEEEaftertitletext + + +% 
disable \IEEEPARstart when in draft mode +% This may have originally been done because the pre-V1.6 drop letter +% algorithm had problems with a non-unity baselinestretch +% At any rate, it seems too formal to have a drop letter in a draft +% paper. +\ifCLASSOPTIONdraftcls +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** ATTENTION: \noexpand\IEEEPARstart + is disabled in draft mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} +\fi +% and for technotes +\ifCLASSOPTIONtechnote +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** WARNING: \noexpand\IEEEPARstart + is locked out for technotes (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} +\fi + + +% lockout unneeded commands when in conference mode +\ifCLASSOPTIONconference +% when locked out, \thanks, \IEEEbiography, \IEEEbiographynophoto, \IEEEpubid, +% \IEEEmembership and \IEEEaftertitletext will all swallow their given text. +% \IEEEPARstart will output a normal character instead +% warn the user about these commands only once to prevent the console screen +% from filling up with redundant messages +\def\thanks#1{\if@IEEEWARNthanks\typeout{** WARNING: \noexpand\thanks + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNthanksfalse} +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** WARNING: \noexpand\IEEEPARstart + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} + + +% LaTeX treats environments and commands with optional arguments differently. +% the actual ("internal") command is stored as \\commandname +% (accessed via \csname\string\commandname\endcsname ) +% the "external" command \commandname is a macro with code to determine +% whether or not the optional argument is presented and to provide the +% default if it is absent. So, in order to save and restore such a command +% we would have to save and restore \\commandname as well. 
But, if LaTeX +% ever changes the way it names the internal names, the trick would break. +% Instead let us just define a new environment so that the internal +% name can be left undisturbed. +\newenvironment{@IEEEbogusbiography}[2][]{\if@IEEEWARNIEEEbiography\typeout{** WARNING: \noexpand\IEEEbiography + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEbiographyfalse% +\setbox\@IEEEtranrubishbin\vbox\bgroup}{\egroup\relax} +% and make biography point to our bogus biography +\let\IEEEbiography=\@IEEEbogusbiography +\let\endIEEEbiography=\end@IEEEbogusbiography + +\renewenvironment{IEEEbiographynophoto}[1]{\if@IEEEWARNIEEEbiographynophoto\typeout{** WARNING: \noexpand\IEEEbiographynophoto + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEbiographynophotofalse% +\setbox\@IEEEtranrubishbin\vbox\bgroup}{\egroup\relax} + +\def\IEEEpubid#1{\if@IEEEWARNIEEEpubid\typeout{** WARNING: \noexpand\IEEEpubid + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEpubidfalse} +\def\IEEEpubidadjcol{\if@IEEEWARNIEEEpubidadjcol\typeout{** WARNING: \noexpand\IEEEpubidadjcol + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEpubidadjcolfalse} +\def\IEEEmembership#1{\if@IEEEWARNIEEEmembership\typeout{** WARNING: \noexpand\IEEEmembership + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEmembershipfalse} +\def\IEEEaftertitletext#1{\if@IEEEWARNIEEEaftertitletext\typeout{** WARNING: \noexpand\IEEEaftertitletext + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEaftertitletextfalse} +\fi + + +% provide a way to restore the commands that are locked out +\def\IEEEoverridecommandlockouts{% +\typeout{** ATTENTION: Overriding command lockouts (line \the\inputlineno).}% +\let\thanks\@IEEESAVECMDthanks% +\let\IEEEPARstart\@IEEESAVECMDIEEEPARstart% 
+\let\IEEEbiography\@IEEESAVECMDIEEEbiography% +\let\endIEEEbiography\@IEEESAVECMDendIEEEbiography% +\let\IEEEbiographynophoto\@IEEESAVECMDIEEEbiographynophoto% +\let\endIEEEbiographynophoto\@IEEESAVECMDendIEEEbiographynophoto% +\let\IEEEpubid\@IEEESAVECMDIEEEpubid% +\let\IEEEpubidadjcol\@IEEESAVECMDIEEEpubidadjcol% +\let\IEEEmembership\@IEEESAVECMDIEEEmembership% +\let\IEEEaftertitletext\@IEEESAVECMDIEEEaftertitletext} + + + +% need a backslash character for typeout output +{\catcode`\|=0 \catcode`\\=12 +|xdef|@IEEEbackslash{\}} + + +% hook to allow easy disabling of all legacy warnings +\def\@IEEElegacywarn#1#2{\typeout{** ATTENTION: \@IEEEbackslash #1 is deprecated (line \the\inputlineno). +Use \@IEEEbackslash #2 instead.}} + + +% provide for legacy commands +\def\authorblockA{\@IEEElegacywarn{authorblockA}{IEEEauthorblockA}\IEEEauthorblockA} +\def\authorblockN{\@IEEElegacywarn{authorblockN}{IEEEauthorblockN}\IEEEauthorblockN} +\def\authorrefmark{\@IEEElegacywarn{authorrefmark}{IEEEauthorrefmark}\IEEEauthorrefmark} +\def\PARstart{\@IEEElegacywarn{PARstart}{IEEEPARstart}\IEEEPARstart} +\def\pubid{\@IEEElegacywarn{pubid}{IEEEpubid}\IEEEpubid} +\def\pubidadjcol{\@IEEElegacywarn{pubidadjcol}{IEEEpubidadjcol}\IEEEpubidadjcol} +\def\QED{\@IEEElegacywarn{QED}{IEEEQED}\IEEEQED} +\def\QEDclosed{\@IEEElegacywarn{QEDclosed}{IEEEQEDclosed}\IEEEQEDclosed} +\def\QEDopen{\@IEEElegacywarn{QEDopen}{IEEEQEDopen}\IEEEQEDopen} +\def\specialpapernotice{\@IEEElegacywarn{specialpapernotice}{IEEEspecialpapernotice}\IEEEspecialpapernotice} + + + +% provide for legacy environments +\def\biography{\@IEEElegacywarn{biography}{IEEEbiography}\IEEEbiography} +\def\biographynophoto{\@IEEElegacywarn{biographynophoto}{IEEEbiographynophoto}\IEEEbiographynophoto} +\def\keywords{\@IEEElegacywarn{keywords}{IEEEkeywords}\IEEEkeywords} +\def\endbiography{\endIEEEbiography} +\def\endbiographynophoto{\endIEEEbiographynophoto} +\def\endkeywords{\endIEEEkeywords} + + +% provide for legacy IED 
commands/lengths when possible
+\let\labelindent\IEEElabelindent
+\def\calcleftmargin{\@IEEElegacywarn{calcleftmargin}{IEEEcalcleftmargin}\IEEEcalcleftmargin}
+\def\setlabelwidth{\@IEEElegacywarn{setlabelwidth}{IEEEsetlabelwidth}\IEEEsetlabelwidth}
+\def\usemathlabelsep{\@IEEElegacywarn{usemathlabelsep}{IEEEusemathlabelsep}\IEEEusemathlabelsep}
+\def\iedlabeljustifyc{\@IEEElegacywarn{iedlabeljustifyc}{IEEEiedlabeljustifyc}\IEEEiedlabeljustifyc}
+\def\iedlabeljustifyl{\@IEEElegacywarn{iedlabeljustifyl}{IEEEiedlabeljustifyl}\IEEEiedlabeljustifyl}
+\def\iedlabeljustifyr{\@IEEElegacywarn{iedlabeljustifyr}{IEEEiedlabeljustifyr}\IEEEiedlabeljustifyr}
+
+
+
+% let \proof use the IEEEtran version even after amsthm is loaded
+% \proof is now deprecated in favor of \IEEEproof
+\AtBeginDocument{\def\proof{\@IEEElegacywarn{proof}{IEEEproof}\IEEEproof}\def\endproof{\endIEEEproof}}
+
+% V1.7 \overrideIEEEmargins is no longer supported.
+\def\overrideIEEEmargins{%
+\typeout{** WARNING: \string\overrideIEEEmargins \space no longer supported (line \the\inputlineno).}%
+\typeout{** Use the \string\CLASSINPUTinnersidemargin, \string\CLASSINPUToutersidemargin \space controls instead.}}
+
+
+\endinput
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of IEEEtran.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% That's all folks!
+
diff --git a/spec/consensus/consensus-paper/README.md b/spec/consensus/consensus-paper/README.md
new file mode 100644
index 0000000000..33e3958061
--- /dev/null
+++ b/spec/consensus/consensus-paper/README.md
@@ -0,0 +1,24 @@
+# Tendermint-spec
+
+The repository contains the specification (and the proofs) of the Tendermint
+consensus protocol.
+
+## How to install LaTeX on Mac OS
+
+MacTeX is a LaTeX distribution for Mac OS. You can download it [here](http://www.tug.org/mactex/mactex-download.html).
+
+A popular IDE for LaTeX-based projects is TeXstudio. It can be downloaded
+[here](https://www.texstudio.org/).
+
+## How to build the project
+
+In order to compile the LaTeX files (and generate the bibliography), execute
+
+`$ pdflatex paper`
+`$ bibtex paper`
+`$ pdflatex paper`
+`$ pdflatex paper`
+ +The generated file is paper.pdf. You can open it with + +`$ open paper.pdf` diff --git a/spec/consensus/consensus-paper/algorithmicplus.sty b/spec/consensus/consensus-paper/algorithmicplus.sty new file mode 100644 index 0000000000..de7ca01ea2 --- /dev/null +++ b/spec/consensus/consensus-paper/algorithmicplus.sty @@ -0,0 +1,195 @@ +% ALGORITHMICPLUS STYLE +% for LaTeX version 2e +% Original ``algorithmic.sty'' by -- 1994 Peter Williams +% Bug fix (13 July 2004) by Arnaud Giersch +% Includes ideas from 'algorithmicext' by Martin Biely +% and 'distribalgo' by Xavier Defago +% Modifications: Martin Hutle +% +% This style file is free software; you can redistribute it and/or +% modify it under the terms of the GNU Lesser General Public +% License as published by the Free Software Foundation; either +% version 2 of the License, or (at your option) any later version. +% +% This style file is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% Lesser General Public License for more details. +% +% You should have received a copy of the GNU Lesser General Public +% License along with this style file; if not, write to the +% Free Software Foundation, Inc., 59 Temple Place - Suite 330, +% Boston, MA 02111-1307, USA. 
+% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{algorithmicplus} +\typeout{Document Style `algorithmicplus' - environment, replaces `algorithmic'} +% +\RequirePackage{ifthen} +\RequirePackage{calc} +\newboolean{ALC@noend} +\setboolean{ALC@noend}{false} +\newcounter{ALC@line} +\newcounter{ALC@rem} +\newcounter{ALC@depth} +\newcounter{ALCPLUS@lastline} +\newlength{\ALC@tlm} +% +\DeclareOption{noend}{\setboolean{ALC@noend}{true}} +% +\ProcessOptions +% +% ALGORITHMIC +\newcommand{\algorithmiclnosize}{\small} +\newcommand{\algorithmiclnofont}{\tt} +\newcommand{\algorithmiclnodelimiter}{:} +% +\newcommand{\algorithmicrequire}{\textbf{Require:}} +\newcommand{\algorithmicensure}{\textbf{Ensure:}} +\newcommand{\algorithmiccomment}[1]{\{#1\}} +\newcommand{\algorithmicend}{\textbf{end}} +\newcommand{\algorithmicif}{\textbf{if}} +\newcommand{\algorithmicthen}{\textbf{then}} +\newcommand{\algorithmicelse}{\textbf{else}} +\newcommand{\algorithmicelsif}{\algorithmicelse\ \algorithmicif} +\newcommand{\algorithmicendif}{\algorithmicend\ \algorithmicif} +\newcommand{\algorithmicfor}{\textbf{for}} +\newcommand{\algorithmicforall}{\textbf{for all}} +\newcommand{\algorithmicdo}{\textbf{do}} +\newcommand{\algorithmicendfor}{\algorithmicend\ \algorithmicfor} +\newcommand{\algorithmicwhile}{\textbf{while}} +\newcommand{\algorithmicendwhile}{\algorithmicend\ \algorithmicwhile} +\newcommand{\algorithmicloop}{\textbf{loop}} +\newcommand{\algorithmicendloop}{\algorithmicend\ \algorithmicloop} +\newcommand{\algorithmicrepeat}{\textbf{repeat}} +\newcommand{\algorithmicuntil}{\textbf{until}} +\def\ALC@item[#1]{% +\if@noparitem \@donoparitem + \else \if@inlabel \indent \par \fi + \ifhmode \unskip\unskip \par \fi + \if@newlist \if@nobreak \@nbitem \else + \addpenalty\@beginparpenalty + \addvspace\@topsep \addvspace{-\parskip}\fi + \else \addpenalty\@itempenalty \addvspace\itemsep + \fi + \global\@inlabeltrue +\fi +\everypar{\global\@minipagefalse\global\@newlistfalse + 
\if@inlabel\global\@inlabelfalse \hskip -\parindent \box\@labels + \penalty\z@ \fi + \everypar{}}\global\@nobreakfalse +\if@noitemarg \@noitemargfalse \if@nmbrlist \refstepcounter{\@listctr}\fi \fi +\sbox\@tempboxa{\makelabel{#1}}% +\global\setbox\@labels + \hbox{\unhbox\@labels \hskip \itemindent + \hskip -\labelwidth \hskip -\ALC@tlm + \ifdim \wd\@tempboxa >\labelwidth + \box\@tempboxa + \else \hbox to\labelwidth {\unhbox\@tempboxa}\fi + \hskip \ALC@tlm}\ignorespaces} +% +\newenvironment{algorithmic}[1][0]{ +\setcounter{ALC@depth}{\@listdepth}% +\let\@listdepth\c@ALC@depth% +\let\@item\ALC@item + \newcommand{\ALC@lno}{% +\ifthenelse{\equal{\arabic{ALC@rem}}{0}} +{{\algorithmiclnosize\algorithmiclnofont \arabic{ALC@line}\algorithmiclnodelimiter}}{}% +} +\let\@listii\@listi +\let\@listiii\@listi +\let\@listiv\@listi +\let\@listv\@listi +\let\@listvi\@listi +\let\@listvii\@listi + \newenvironment{ALC@g}{ + \begin{list}{\ALC@lno}{ \itemsep\z@ \itemindent\z@ + \listparindent\z@ \rightmargin\z@ + \topsep\z@ \partopsep\z@ \parskip\z@\parsep\z@ + \leftmargin 1em + \addtolength{\ALC@tlm}{\leftmargin} + } + } + {\end{list}} + \newcommand{\ALC@it}{\refstepcounter{ALC@line}\addtocounter{ALC@rem}{1}\ifthenelse{\equal{\arabic{ALC@rem}}{#1}}{\setcounter{ALC@rem}{0}}{}\item} + \newcommand{\ALC@com}[1]{\ifthenelse{\equal{##1}{default}}% +{}{\ \algorithmiccomment{##1}}} + \newcommand{\REQUIRE}{\item[\algorithmicrequire]} + \newcommand{\ENSURE}{\item[\algorithmicensure]} + \newcommand{\STATE}{\ALC@it} + \newcommand{\COMMENT}[1]{\algorithmiccomment{##1}} + \newenvironment{ALC@if}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@for}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@whl}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@loop}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@rpt}{\begin{ALC@g}}{\end{ALC@g}} + \renewcommand{\\}{\@centercr} + \newcommand{\IF}[2][default]{\ALC@it\algorithmicif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + 
\newcommand{\ELSE}[1][default]{\end{ALC@if}\ALC@it\algorithmicelse% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\ELSIF}[2][default]% +{\end{ALC@if}\ALC@it\algorithmicelsif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\FOR}[2][default]{\ALC@it\algorithmicfor\ ##2\ \algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\FORALL}[2][default]{\ALC@it\algorithmicforall\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\WHILE}[2][default]{\ALC@it\algorithmicwhile\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@whl}} + \newcommand{\LOOP}[1][default]{\ALC@it\algorithmicloop% +\ALC@com{##1}\begin{ALC@loop}} + \newcommand{\REPEAT}[1][default]{\ALC@it\algorithmicrepeat% +\ALC@com{##1}\begin{ALC@rpt}} + \newcommand{\UNTIL}[1]{\end{ALC@rpt}\ALC@it\algorithmicuntil\ ##1} + \ifthenelse{\boolean{ALC@noend}}{ + \newcommand{\ENDIF}{\end{ALC@if}} + \newcommand{\ENDFOR}{\end{ALC@for}} + \newcommand{\ENDWHILE}{\end{ALC@whl}} + \newcommand{\ENDLOOP}{\end{ALC@loop}} + }{ + \newcommand{\ENDIF}{\end{ALC@if}\ALC@it\algorithmicendif} + \newcommand{\ENDFOR}{\end{ALC@for}\ALC@it\algorithmicendfor} + \newcommand{\ENDWHILE}{\end{ALC@whl}\ALC@it\algorithmicendwhile} + \newcommand{\ENDLOOP}{\end{ALC@loop}\ALC@it\algorithmicendloop} + } + \renewcommand{\@toodeep}{} + \begin{list}{\ALC@lno}{\setcounter{ALC@line}{0}\setcounter{ALC@rem}{0}% + \itemsep\z@ \itemindent\z@ \listparindent\z@% + \partopsep\z@ \parskip\z@ \parsep\z@% + \labelsep 0.5em \topsep 0.2em% +\ifthenelse{\equal{#1}{0}} + {\labelwidth 0.5em } + {\labelwidth 1.2em } +\leftmargin\labelwidth \addtolength{\leftmargin}{\labelsep} + \ALC@tlm\labelsep + } +} +{% +\setcounter{ALCPLUS@lastline}{\value{ALC@line}}% +\end{list}} + +\newcommand{\continuecounting}{\setcounter{ALC@line}{\value{ALCPLUS@lastline}}} +\newcommand{\startcounting}[1]{\setcounter{ALC@line}{#1}\addtocounter{ALC@line}{-1}} + +\newcommand{\EMPTY}{\item[]} +\newcommand{\SPACE}{\vspace{3mm}} 
+\newcommand{\SHORTSPACE}{\vspace{1mm}} +\newcommand{\newlinetag}[3]{\newcommand{#1}[#2]{\item[#3]}} +\newcommand{\newconstruct}[5]{% + \newenvironment{ALC@\string#1}{\begin{ALC@g}}{\end{ALC@g}} + \newcommand{#1}[2][default]{\ALC@it#2\ ##2\ #3% + \ALC@com{##1}\begin{ALC@\string#1}} + \ifthenelse{\boolean{ALC@noend}}{ + \newcommand{#4}{\end{ALC@\string#1}} + }{ + \newcommand{#4}{\end{ALC@\string#1}\ALC@it#5} + } +} + +\newconstruct{\INDENT}{}{}{\ENDINDENT}{} + +\newcommand{\setlinenosize}[1]{\renewcommand{\algorithmiclnosize}{#1}} +\newcommand{\setlinenofont}[1]{\renewcommand{\algorithmiclnofont}{#1}} diff --git a/spec/consensus/consensus-paper/conclusion.tex b/spec/consensus/consensus-paper/conclusion.tex new file mode 100644 index 0000000000..dd17ccf44d --- /dev/null +++ b/spec/consensus/consensus-paper/conclusion.tex @@ -0,0 +1,16 @@ +\section{Conclusion} \label{sec:conclusion} + +We have proposed a new Byzantine-fault tolerant consensus algorithm that is the +core of the Tendermint BFT SMR platform. The algorithm is designed for the wide +area network with high number of mutually distrusted nodes that communicate +over gossip based peer-to-peer network. It has only a single mode of execution +and the communication pattern is very similar to the "normal" case of the +state-of-the art PBFT algorithm. The algorithm ensures termination with a novel +mechanism that takes advantage of the gossip based communication between nodes. +The proposed algorithm and the proofs are simple and elegant, and we believe +that this makes it easier to understand and implement correctly. + +\section*{Acknowledgment} + +We would like to thank Anton Kaliaev, Ismail Khoffi and Dahlia Malkhi for comments on an earlier version of the paper. We also want to thank Marko Vukolic, Ming Chuan Lin, Maria Potop-Butucaru, Sara Tucci, Antonella Del Pozzo and Yackolley Amoussou-Guenou for pointing out the liveness issues +in the previous version of the algorithm. 
Finally, we want to thank the Tendermint team members and all project contributors for making Tendermint such a great platform. diff --git a/spec/consensus/consensus-paper/consensus.tex b/spec/consensus/consensus-paper/consensus.tex new file mode 100644 index 0000000000..3265b61c75 --- /dev/null +++ b/spec/consensus/consensus-paper/consensus.tex @@ -0,0 +1,397 @@ + +\section{Tendermint consensus algorithm} \label{sec:tendermint} + +\newcommand\Disseminate{\textbf{Disseminate}} + +\newcommand\Proposal{\mathsf{PROPOSAL}} +\newcommand\ProposalPart{\mathsf{PROPOSAL\mbox{-}PART}} +\newcommand\PrePrepare{\mathsf{INIT}} \newcommand\Prevote{\mathsf{PREVOTE}} +\newcommand\Precommit{\mathsf{PRECOMMIT}} +\newcommand\Decision{\mathsf{DECISION}} + +\newcommand\ViewChange{\mathsf{VC}} +\newcommand\ViewChangeAck{\mathsf{VC\mbox{-}ACK}} +\newcommand\NewPrePrepare{\mathsf{VC\mbox{-}INIT}} +\newcommand\coord{\mathsf{proposer}} + +\newcommand\newHeight{newHeight} \newcommand\newRound{newRound} +\newcommand\nil{nil} \newcommand\id{id} \newcommand{\propose}{propose} +\newcommand\prevote{prevote} \newcommand\prevoteWait{prevoteWait} +\newcommand\precommit{precommit} \newcommand\precommitWait{precommitWait} +\newcommand\commit{commit} + +\newcommand\timeoutPropose{timeoutPropose} +\newcommand\timeoutPrevote{timeoutPrevote} +\newcommand\timeoutPrecommit{timeoutPrecommit} +\newcommand\proofOfLocking{proof\mbox{-}of\mbox{-}locking} + +\begin{algorithm}[htb!] 
\def\baselinestretch{1} \scriptsize\raggedright + \begin{algorithmic}[1] + \SHORTSPACE + \INIT{} + \STATE $h_p := 0$ + \COMMENT{current height, or consensus instance we are currently executing} + \STATE $round_p := 0$ \COMMENT{current round number} + \STATE $step_p \in \set{\propose, \prevote, \precommit}$ + \STATE $decision_p[] := nil$ + \STATE $lockedValue_p := nil$ + \STATE $lockedRound_p := -1$ + \STATE $validValue_p := nil$ + \STATE $validRound_p := -1$ + \ENDINIT + \SHORTSPACE + \STATE \textbf{upon} start \textbf{do} $StartRound(0)$ + \SHORTSPACE + \FUNCTION{$StartRound(round)$} \label{line:tab:startRound} + \STATE $round_p \assign round$ + \STATE $step_p \assign \propose$ + \IF{$\coord(h_p, round_p) = p$} + \IF{$validValue_p \neq \nil$} \label{line:tab:isThereLockedValue} + \STATE $proposal \assign validValue_p$ \ELSE \STATE $proposal \assign + getValue()$ + \label{line:tab:getValidValue} + \ENDIF + \STATE \Broadcast\ $\li{\Proposal,h_p, round_p, proposal, validRound_p}$ + \label{line:tab:send-proposal} + \ELSE + \STATE \textbf{schedule} $OnTimeoutPropose(h_p, + round_p)$ to be executed \textbf{after} $\timeoutPropose(round_p)$ + \ENDIF + \ENDFUNCTION + + \SPACE + \UPON{$\li{\Proposal,h_p,round_p, v, -1}$ \From\ $\coord(h_p,round_p)$ + \With\ $step_p = \propose$} \label{line:tab:recvProposal} + \IF{$valid(v) \wedge (lockedRound_p = -1 \vee lockedValue_p = v$)} + \label{line:tab:accept-proposal-2} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,id(v)}$ + \label{line:tab:prevote-proposal} + \ELSE + \label{line:tab:acceptProposal1} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,\nil}$ + \label{line:tab:prevote-nil} + \ENDIF + \STATE $step_p \assign \prevote$ \label{line:tab:setStateToPrevote1} + \ENDUPON + + \SPACE + \UPON{$\li{\Proposal,h_p,round_p, v, vr}$ \From\ $\coord(h_p,round_p)$ + \textbf{AND} $2f+1$ $\li{\Prevote,h_p, vr,id(v)}$ \With\ $step_p = \propose \wedge (vr \ge 0 \wedge vr < round_p)$} + \label{line:tab:acceptProposal} + \IF{$valid(v) \wedge 
(lockedRound_p \le vr + \vee lockedValue_p = v)$} \label{line:tab:cond-prevote-higher-proposal} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,id(v)}$ + \label{line:tab:prevote-higher-proposal} + \ELSE + \label{line:tab:acceptProposal2} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,\nil}$ + \label{line:tab:prevote-nil2} + \ENDIF + \STATE $step_p \assign \prevote$ \label{line:tab:setStateToPrevote3} + \ENDUPON + + \SPACE + \UPON{$2f+1$ $\li{\Prevote,h_p, round_p,*}$ \With\ $step_p = \prevote$ for the first time} + \label{line:tab:recvAny2/3Prevote} + \STATE \textbf{schedule} $OnTimeoutPrevote(h_p, round_p)$ to be executed \textbf{after} $\timeoutPrevote(round_p)$ \label{line:tab:timeoutPrevote} + \ENDUPON + + \SPACE + \UPON{$\li{\Proposal,h_p,round_p, v, *}$ \From\ $\coord(h_p,round_p)$ + \textbf{AND} $2f+1$ $\li{\Prevote,h_p, round_p,id(v)}$ \With\ $valid(v) \wedge step_p \ge \prevote$ for the first time} + \label{line:tab:recvPrevote} + \IF{$step_p = \prevote$} + \STATE $lockedValue_p \assign v$ \label{line:tab:setLockedValue} + \STATE $lockedRound_p \assign round_p$ \label{line:tab:setLockedRound} + \STATE \Broadcast \ $\li{\Precommit,h_p,round_p,id(v))}$ + \label{line:tab:precommit-v} + \STATE $step_p \assign \precommit$ \label{line:tab:setStateToCommit} + \ENDIF + \STATE $validValue_p \assign v$ \label{line:tab:setValidRound} + \STATE $validRound_p \assign round_p$ \label{line:tab:setValidValue} + \ENDUPON + + \SHORTSPACE + \UPON{$2f+1$ $\li{\Prevote,h_p,round_p, \nil}$ + \With\ $step_p = \prevote$} + \STATE \Broadcast \ $\li{\Precommit,h_p,round_p, \nil}$ + \label{line:tab:precommit-v-1} + \STATE $step_p \assign \precommit$ + \ENDUPON + + \SPACE + \UPON{$2f+1$ $\li{\Precommit,h_p,round_p,*}$ for the first time} + \label{line:tab:startTimeoutPrecommit} + \STATE \textbf{schedule} $OnTimeoutPrecommit(h_p, round_p)$ to be executed \textbf{after} $\timeoutPrecommit(round_p)$ + + \ENDUPON + + \SPACE + \UPON{$\li{\Proposal,h_p,r, v, *}$ \From\ $\coord(h_p,r)$ 
\textbf{AND} + $2f+1$ $\li{\Precommit,h_p,r,id(v)}$ \With\ $decision_p[h_p] = \nil$} + \label{line:tab:onDecideRule} + \IF{$valid(v)$} \label{line:tab:validDecisionValue} + \STATE $decision_p[h_p] = v$ \label{line:tab:decide} + \STATE$h_p \assign h_p + 1$ \label{line:tab:increaseHeight} + \STATE reset $lockedRound_p$, $lockedValue_p$, $validRound_p$ and $validValue_p$ to initial values + and empty message log + \STATE $StartRound(0)$ + \ENDIF + \ENDUPON + + \SHORTSPACE + \UPON{$f+1$ $\li{*,h_p,round, *, *}$ \textbf{with} $round > round_p$} + \label{line:tab:skipRounds} + \STATE $StartRound(round)$ \label{line:tab:nextRound2} + \ENDUPON + + \SHORTSPACE + \FUNCTION{$OnTimeoutPropose(height,round)$} \label{line:tab:onTimeoutPropose} + \IF{$height = h_p \wedge round = round_p \wedge step_p = \propose$} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p, \nil}$ + \label{line:tab:prevote-nil-on-timeout} + \STATE $step_p \assign \prevote$ + \ENDIF + \ENDFUNCTION + + \SHORTSPACE + \FUNCTION{$OnTimeoutPrevote(height,round)$} \label{line:tab:onTimeoutPrevote} + \IF{$height = h_p \wedge round = round_p \wedge step_p = \prevote$} + \STATE \Broadcast \ $\li{\Precommit,h_p,round_p,\nil}$ + \label{line:tab:precommit-nil-onTimeout} + \STATE $step_p \assign \precommit$ + \ENDIF + \ENDFUNCTION + + \SHORTSPACE + \FUNCTION{$OnTimeoutPrecommit(height,round)$} \label{line:tab:onTimeoutPrecommit} + \IF{$height = h_p \wedge round = round_p$} + \STATE $StartRound(round_p + 1)$ \label{line:tab:nextRound} + \ENDIF + \ENDFUNCTION + \end{algorithmic} \caption{Tendermint consensus algorithm} + \label{alg:tendermint} +\end{algorithm} + +In this section we present the Tendermint Byzantine fault-tolerant consensus +algorithm. The algorithm is specified by the pseudo-code shown in +Algorithm~\ref{alg:tendermint}. 
We present the algorithm as a set of \emph{upon
+rules} that are executed atomically\footnote{In case several rules are active
+at the same time, the first rule to be executed is picked randomly. The
+correctness of the algorithm does not depend on the order in which rules are
+executed.}. We assume that processes exchange protocol messages using a gossip
+protocol and that both sent and received messages are stored in a local message
+log for every process. An upon rule is triggered once the message log contains
+messages such that the corresponding condition evaluates to $\tt{true}$. The
+condition that assumes reception of $X$ messages of a particular type and
+content denotes reception of messages whose senders have aggregate voting power at
+least equal to $X$. For example, the condition $2f+1$ $\li{\Precommit,h_p,r,id(v)}$,
+evaluates to true upon reception of $\Precommit$ messages for height $h_p$,
+a round $r$ and with value equal to $id(v)$ whose senders have aggregate voting
+power at least equal to $2f+1$. Some of the rules end with the "for the first time" constraint
+to denote that they are triggered only the first time a corresponding condition evaluates
+to $\tt{true}$. This is because those rules do not always change the state of the algorithm
+variables, so without this constraint, the algorithm could keep
+executing those rules forever. The variables with index $p$ are process local state
+variables, while variables without index $p$ are value placeholders. The sign
+$*$ denotes any value.
+
+We denote with $n$ the total voting power of processes in the system, and we
+assume that the total voting power of faulty processes in the system is bounded
+by a system parameter $f$. The algorithm assumes that $n > 3f$, i.e., it
+requires that the total voting power of faulty processes is smaller than one
+third of the total voting power. For simplicity we present the algorithm for
+the case $n = 3f + 1$. 
+ +The algorithm proceeds in rounds, where each round has a dedicated +\emph{proposer}. The mapping of rounds to proposers is known to all processes +and is given as a function $\coord(h, round)$, returning the proposer for +the round $round$ in the consensus instance $h$. We +assume that the proposer selection function is weighted round-robin, where +processes are rotated proportional to their voting power\footnote{A validator +with more voting power is selected more frequently, proportional to its power. +More precisely, during a sequence of rounds of size $n$, every process is +proposer in a number of rounds equal to its voting power.}. +The internal protocol state transitions are triggered by message reception and +by expiration of timeouts. There are three timeouts in Algorithm \ref{alg:tendermint}: +$\timeoutPropose$, $\timeoutPrevote$ and $\timeoutPrecommit$. +The timeouts prevent the algorithm from blocking and +waiting forever for some condition to be true, ensure that processes continuously +transition between rounds, and guarantee that eventually (after GST) communication +between correct processes is timely and reliable so they can decide. +The last role is achieved by increasing the timeouts with every new round $r$, +i.e, $timeoutX(r) = initTimeoutX + r*timeoutDelta$; +they are reset for every new height (consensus +instance). + +Processes exchange the following messages in Tendermint: $\Proposal$, +$\Prevote$ and $\Precommit$. The $\Proposal$ message is used by the proposer of +the current round to suggest a potential decision value, while $\Prevote$ and +$\Precommit$ are votes for a proposed value. According to the classification of +consensus algorithms from \cite{RMS10:dsn}, Tendermint, like PBFT +\cite{CL02:tcs} and DLS \cite{DLS88:jacm}, belongs to class 3, so it requires +two voting steps (three communication exchanges in total) to decide a value. 
+The Tendermint consensus algorithm is designed for the blockchain context where +the value to decide is a block of transactions (ie. it is potentially quite +large, consisting of many transactions). Therefore, in the Algorithm +\ref{alg:tendermint} (similar as in \cite{CL02:tcs}) we are explicit about +sending a value (block of transactions) and a small, constant size value id (a +unique value identifier, normally a hash of the value, i.e., if $\id(v) = +\id(v')$, then $v=v'$). The $\Proposal$ message is the only one carrying the +value; $\Prevote$ and $\Precommit$ messages carry the value id. A correct +process decides on a value $v$ in Tendermint upon receiving the $\Proposal$ for +$v$ and $2f+1$ voting-power equivalent $\Precommit$ messages for $\id(v)$ in +some round $r$. In order to send $\Precommit$ message for $v$ in a round $r$, a +correct process waits to receive the $\Proposal$ and $2f+1$ of the +corresponding $\Prevote$ messages in the round $r$. Otherwise, +it sends $\Precommit$ message with a special $\nil$ value. +This ensures that correct processes can $\Precommit$ only a +single value (or $\nil$) in a round. As +proposers may be faulty, the proposed value is treated by correct processes as +a suggestion (it is not blindly accepted), and a correct process tells others +if it accepted the $\Proposal$ for value $v$ by sending $\Prevote$ message for +$\id(v)$; otherwise it sends $\Prevote$ message with the special $\nil$ value. + +Every process maintains the following variables in the Algorithm +\ref{alg:tendermint}: $step$, $lockedValue$, $lockedRound$, $validValue$ and +$validRound$. The $step$ denotes the current state of the internal Tendermint +state machine, i.e., it reflects the stage of the algorithm execution in the +current round. The $lockedValue$ stores the most recent value (with respect to +a round number) for which a $\Precommit$ message has been sent. 
The
+$lockedRound$ is the last round in which the process sent a $\Precommit$
+message that is not $\nil$. We also say that a correct process locks a value
+$v$ in a round $r$ by setting $lockedValue = v$ and $lockedRound = r$ before
+sending $\Precommit$ message for $\id(v)$. As a correct process can decide a
+value $v$ only if $2f+1$ $\Precommit$ messages for $\id(v)$ are received, this
+implies that a possible decision value is a value that is locked by at least
+$f+1$ voting power equivalent of correct processes. Therefore, any value $v$
+for which $\Proposal$ and $2f+1$ of the corresponding $\Prevote$ messages are
+received in some round $r$ is a \emph{possible decision} value. The role of the
+$validValue$ variable is to store the most recent possible decision value; the
+$validRound$ is the last round in which $validValue$ is updated. Apart from
+those variables, a process also stores the current consensus instance ($h_p$,
+called \emph{height} in Tendermint), and the current round number ($round_p$)
+and attaches them to every message. Finally, a process also stores an array of
+decisions, $decision_p$ (Tendermint assumes a sequence of consensus instances,
+one for each height).
+
+Every round starts by a proposer suggesting a value with the $\Proposal$
+message (see line \ref{line:tab:send-proposal}). In the initial round of each
+height, the proposer is free to choose the value to suggest. In the
+Algorithm~\ref{alg:tendermint}, a correct process obtains a value to propose
+using an external function $getValue()$ that returns a valid value to
+propose. In the following rounds, a correct proposer will suggest a new value
+only if $validValue = \nil$; otherwise $validValue$ is proposed (see
+lines~\ref{line:tab:isThereLockedValue}-\ref{line:tab:getValidValue}). 
+In addition to the value proposed, the $\Proposal$ message also +contains the $validRound$ so other processes are informed about the last round +in which the proposer observed $validValue$ as a possible decision value. +Note that if a correct proposer $p$ sends $validValue$ with the $validRound$ in the +$\Proposal$, this implies that the process $p$ received $\Proposal$ and the +corresponding $2f+1$ $\Prevote$ messages for $validValue$ in the round +$validRound$. +If a correct process sends $\Proposal$ message with $validValue$ ($validRound > -1$) +at time $t > GST$, by the \emph{Gossip communication} property, the +corresponding $\Proposal$ and the $\Prevote$ messages will be received by all +correct processes before time $t+\Delta$. Therefore, all correct processes will +be able to verify the correctness of the suggested value as it is supported by +the $\Proposal$ and the corresponding $2f+1$ voting power equivalent $\Prevote$ +messages. + +A correct process $p$ accepts the proposal for a value $v$ (send $\Prevote$ +for $id(v)$) if an external \emph{valid} function returns $true$ for the value +$v$, and if $p$ hasn't locked any value ($lockedRound = -1$) or $p$ has locked +the value $v$ ($lockedValue = v$); see the line +\ref{line:tab:accept-proposal-2}. In case the proposed pair is $(v,vr \ge 0)$ and a +correct process $p$ has locked some value, it will accept +$v$ if it is a more recent possible decision value\footnote{As +explained above, the possible decision value in a round $r$ is the one for +which $\Proposal$ and the corresponding $2f+1$ $\Prevote$ messages are received +for the round $r$.}, $vr > lockedRound_p$, or if $lockedValue = v$ +(see line~\ref{line:tab:cond-prevote-higher-proposal}). Otherwise, a correct +process will reject the proposal by sending $\Prevote$ message with $\nil$ +value. 
A correct process will send $\Prevote$ message with $\nil$ value also in
+case $\timeoutPropose$ expired (it is triggered when a correct process starts a
+new round) and a process has not sent $\Prevote$ message in the current round
+yet (see the line \ref{line:tab:onTimeoutPropose}).
+
+If a correct process receives $\Proposal$ message for some value $v$ and $2f+1$
+$\Prevote$ messages for $\id(v)$, then it sends $\Precommit$ message with
+$\id(v)$. Otherwise, it sends $\Precommit$ $\nil$. A correct process will send
+$\Precommit$ message with $\nil$ value also in case $\timeoutPrevote$ expired
+(it is started when a correct process sent $\Prevote$ message and received any
+$2f+1$ $\Prevote$ messages) and a process has not sent $\Precommit$ message in
+the current round yet (see the line \ref{line:tab:onTimeoutPrevote}). A
+correct process decides on some value $v$ if it receives in some round $r$
+$\Proposal$ message for $v$ and $2f+1$ $\Precommit$ messages with $\id(v)$ (see
+the line \ref{line:tab:decide}). To prevent the algorithm from blocking and
+waiting forever for this condition to be true, the Algorithm
+\ref{alg:tendermint} relies on $\timeoutPrecommit$. It is triggered after a
+process receives any set of $2f+1$ $\Precommit$ messages for the current round.
+If the $\timeoutPrecommit$ expires and a process has not decided yet, the
+process starts the next round (see the line \ref{line:tab:onTimeoutPrecommit}).
+When a correct process $p$ decides, it starts the next consensus instance
+(for the next height). The \emph{Gossip communication} property ensures
+that $\Proposal$ and $2f+1$ $\Prevote$ messages that led $p$ to decide
+are eventually received by all correct processes, so they will also decide.
+
+\subsection{Termination mechanism}
+
+Tendermint ensures termination by a novel mechanism that benefits from the
+gossip based nature of communication (see \emph{Gossip communication}
+property). 
It requires managing two additional variables, $validValue$ and +$validRound$ that are then used by the proposer during the propose step as +explained above. The $validValue$ and $validRound$ are updated to $v$ and $r$ +by a correct process in a round $r$ when the process receives valid $\Proposal$ +message for the value $v$ and the corresponding $2f+1$ $\Prevote$ messages for +$id(v)$ in the round $r$ (see the rule at line~\ref{line:tab:recvPrevote}). + +We now give briefly the intuition how managing and proposing $validValue$ +and $validRound$ ensures termination. Formal treatment is left for +Section~\ref{sec:proof}. + +The first thing to note is that during good period, because of the +\emph{Gossip communication} property, if a correct process $p$ locks a value +$v$ in some round $r$, all correct processes will update $validValue$ to $v$ +and $validRound$ to $r$ before the end of the round $r$ (we prove this formally +in the Section~\ref{sec:proof}). The intuition is that messages that led to $p$ +locking a value $v$ in the round $r$ will be gossiped to all correct processes +before the end of the round $r$, so it will update $validValue$ and +$validRound$ (the line~\ref{line:tab:recvPrevote}). Therefore, if a correct +process locks some value during good period, $validValue$ and $validRound$ are +updated by all correct processes so that the value proposed in the following +rounds will be acceptable by all correct processes. Note +that it could happen that during good period, no correct process locks a value, +but some correct process $q$ updates $validValue$ and $validRound$ during some +round. 
As no correct process locks a value in this case, $validValue_q$ and
+$validRound_q$ will also be acceptable by all correct processes as
+$validRound_q > lockedRound_c$ for every correct process $c$ and as the
+\emph{Gossip communication} property ensures that the corresponding $\Prevote$
+messages that $q$ received in the round $validRound_q$ are received by all
+correct processes $\Delta$ time later.
+
+Finally, it could happen that after GST, there is a long sequence of rounds in which
+no correct process locks a value or updates $validValue$ and $validRound$.
+In this case, during this sequence of rounds, the proposed value suggested by correct
+processes was not accepted by all correct processes. Note that this sequence of rounds
+is always finite as at the beginning of every
+round there is at least a single correct process $c$ such that $validValue_c$
+and $validRound_c$ are acceptable by every correct process. This is true as
+there exists a correct process $c$ such that for every other correct process
+$p$, $validRound_c > lockedRound_p$ or $validValue_c = lockedValue_p$. This is
+true as $c$ is the process that has locked a value in the most recent round
+among all correct processes (or no correct process locked any value). Therefore,
+eventually $c$ will be the proposer in some round and the proposed value will be accepted
+by all correct processes, therefore terminating this sequence of
+rounds.
+
+Therefore, updating $validValue$ and $validRound$ variables, and the
+\emph{Gossip communication} property, together ensure that eventually, during
+the good period, there exists a round with a correct proposer whose proposed
+value will be accepted by all correct processes, and all correct processes will
+terminate in that round. 
Note that this mechanism, contrary to the common +termination mechanism illustrated in the +Figure~\ref{ch3:fig:coordinator-change}, does not require exchanging any +additional information in addition to messages already sent as part of what is +normally being called "normal" case. + diff --git a/spec/consensus/consensus-paper/definitions.tex b/spec/consensus/consensus-paper/definitions.tex new file mode 100644 index 0000000000..454dd445df --- /dev/null +++ b/spec/consensus/consensus-paper/definitions.tex @@ -0,0 +1,126 @@ +\section{Definitions} \label{sec:definitions} + +\subsection{Model} + +We consider a system of processes that communicate by exchanging messages. +Processes can be correct or faulty, where a faulty process can behave in an +arbitrary way, i.e., we consider Byzantine faults. We assume that each process +has some amount of voting power (voting power of a process can be $0$). +Processes in our model are not part of a single administrative domain; +therefore we cannot enforce a direct network connectivity between all +processes. Instead, we assume that each process is connected to a subset of +processes called peers, such that there is an indirect communication channel +between all correct processes. Communication between processes is established +using a gossip protocol \cite{Dem1987:gossip}. 
+ +Formally, we model the network communication using a variant of the \emph{partially +synchronous system model}~\cite{DLS88:jacm}: in all executions of the system +there is a bound $\Delta$ and an instant GST (Global Stabilization Time) such +that all communication among correct processes after GST is reliable and +$\Delta$-timely, i.e., if a correct process $p$ sends message $m$ at time $t +\ge GST$ to a correct process $q$, then $q$ will receive $m$ before $t + +\Delta$\footnote{Note that as we do not assume direct communication channels + among all correct processes, this implies that before the message $m$ + reaches $q$, it might pass through a number of correct processes that will +forward the message $m$ using gossip protocol towards $q$.}. +In addition to the standard \emph{partially + synchronous system model}~\cite{DLS88:jacm}, we assume an auxiliary property +that captures gossip-based nature of communication\footnote{The details of the Tendermint gossip protocol will be discussed in a separate + technical report. }: + + +\begin{itemize} \item \emph{Gossip communication:} If a correct process $p$ + sends some message $m$ at time $t$, all correct processes will receive + $m$ before $max\{t, GST\} + \Delta$. Furthermore, if a correct process $p$ + receives some message $m$ at time $t$, all correct processes will receive + $m$ before $max\{t, GST\} + \Delta$. \end{itemize} + + +The bound $\Delta$ and GST are system +parameters whose values are not required to be known for the safety of our +algorithm. Termination of the algorithm is guaranteed within a bounded duration +after GST. 
In practice, the algorithm will work correctly in the slightly +weaker variant of the model where the system alternates between (long enough) +good periods (corresponds to the \emph{after} GST period where system is +reliable and $\Delta$-timely) and bad periods (corresponds to the period +\emph{before} GST during which the system is asynchronous and messages can be +lost), but consideration of the GST model simplifies the discussion. + +We assume that process steps (which might include sending and receiving +messages) take zero time. Processes are equipped with clocks so they can +measure local timeouts. +Spoofing/impersonation attacks are assumed to be impossible at all times due to +the use of public-key cryptography, i.e., we assume that all protocol messages contains a digital signature. +Therefore, when a correct +process $q$ receives a signed message $m$ from its peer, the process $q$ can +verify who was the original sender of the message $m$ and if the message signature is valid. +We do not explicitly state a signature verification step in the pseudo-code of the algorithm to improve readability; +we assume that only messages with the valid signature are considered at that level (and messages with invalid signatures +are dropped). + + + +%Messages that are being gossiped are created by the consensus layer. We can + %think about consensus protocol as a content creator, which %defines what + %messages should be disseminated using the gossip protocol. A correct + %process creates the message for dissemination either i) %explicitly, by + %invoking \emph{send} function as part of the consensus protocol or ii) + %implicitly, by receiving a message from some other %process. 
Note that in + %the case ii) gossiping of messages is implicit, i.e., it happens without + %explicit send clause in the consensus algorithm %whenever a correct + %process receives some messages in the consensus algorithm\footnote{If a + %message is received by a correct process at %the consensus level then it + %is considered valid from the protocol point of view, i.e., it has a + %correct signature, a proper message structure %and a valid height and + %round number.}. + +%\item Processes keep resending messages (in case of failures or message loss) + %until all its peers get them. This ensures that every message %sent or + %received by a correct process is eventually received by all correct + %processes. + +\subsection{State Machine Replication} + +State machine replication (SMR) is a general approach for replicating services +modeled as a deterministic state machine~\cite{Lam78:cacm,Sch90:survey}. The +key idea of this approach is to guarantee that all replicas start in the same +state and then apply requests from clients in the same order, thereby +guaranteeing that the replicas' states will not diverge. Following +Schneider~\cite{Sch90:survey}, we note that the following is key for +implementing a replicated state machine tolerant to (Byzantine) faults: + +\begin{itemize} \item \emph{Replica Coordination.} All [non-faulty] replicas + receive and process the same sequence of requests. \end{itemize} + +Moreover, as Schneider also notes, this property can be decomposed into two +parts, \emph{Agreement} and \emph{Order}: Agreement requires all (non-faulty) +replicas to receive all requests, and Order requires that the order of received +requests is the same at all replicas. + +There is an additional requirement that needs to be ensured by Byzantine +tolerant state machine replication: only requests (called transactions in the +Tendermint terminology) proposed by clients are executed. 
In Tendermint, +transaction verification is the responsibility of the service that is being +replicated; upon receiving a transaction from the client, the Tendermint +process will ask the service if the request is valid, and only valid requests +will be processed. + + \subsection{Consensus} \label{sec:consensus} + +Tendermint solves state machine replication by sequentially executing consensus +instances to agree on each block of transactions that are +then executed by the service being replicated. We consider a variant of the +Byzantine consensus problem called Validity Predicate-based Byzantine consensus +that is motivated by blockchain systems~\cite{GLR17:red-belly-bc}. The problem +is defined by an agreement, a termination, and a validity property. + + \begin{itemize} \item \emph{Agreement:} No two correct processes decide on + different values. \item \emph{Termination:} All correct processes + eventually decide on a value. \item \emph{Validity:} A decided value + is valid, i.e., it satisfies the predefined predicate denoted + \emph{valid()}. \end{itemize} + + This variant of the Byzantine consensus problem has an application-specific + \emph{valid()} predicate to indicate whether a value is valid. In the context + of blockchain systems, for example, a value is not valid if it does not + contain an appropriate hash of the last value (block) added to the blockchain. 
diff --git a/spec/consensus/consensus-paper/homodel.sty b/spec/consensus/consensus-paper/homodel.sty new file mode 100644 index 0000000000..19f83e926e --- /dev/null +++ b/spec/consensus/consensus-paper/homodel.sty @@ -0,0 +1,32 @@ +\newcommand{\NC}{\mbox{\it NC}} +\newcommand{\HO}{\mbox{\it HO}} +\newcommand{\AS}{\mbox{\it AS}} +\newcommand{\SK}{\mbox{\it SK}} +\newcommand{\SHO}{\mbox{\it SHO}} +\newcommand{\AHO}{\mbox{\it AHO}} +\newcommand{\CONS}{\mbox{\it CONS}} +\newcommand{\K}{\mbox{\it K}} + +\newcommand{\Alg}{\mathcal{A}} +\newcommand{\Pred}{\mathcal{P}} +\newcommand{\Spr}{S_p^r} +\newcommand{\Tpr}{T_p^r} +\newcommand{\mupr}{\vec{\mu}_p^{\,r}} + +\newcommand{\MSpr}{S_p^{\rho}} +\newcommand{\MTpr}{T_p^{\rho}} + + + +\newconstruct{\SEND}{$\Spr$:}{}{\ENDSEND}{} +\newconstruct{\TRAN}{$\Tpr$:}{}{\ENDTRAN}{} +\newconstruct{\ROUND}{\textbf{Round}}{\!\textbf{:}}{\ENDROUND}{} +\newconstruct{\VARIABLES}{\textbf{Variables:}}{}{\ENDVARIABLES}{} +\newconstruct{\INIT}{\textbf{Initialization:}}{}{\ENDINIT}{} + +\newconstruct{\MSEND}{$\MSpr$:}{}{\ENDMSEND}{} +\newconstruct{\MTRAN}{$\MTpr$:}{}{\ENDMTRAN}{} + +\newconstruct{\SROUND}{\textbf{Selection Round}}{\!\textbf{:}}{\ENDSROUND}{} +\newconstruct{\VROUND}{\textbf{Validation Round}}{\!\textbf{:}}{\ENDVROUND}{} +\newconstruct{\DROUND}{\textbf{Decision Round}}{\!\textbf{:}}{\ENDDROUND}{} diff --git a/spec/consensus/consensus-paper/intro.tex b/spec/consensus/consensus-paper/intro.tex new file mode 100644 index 0000000000..493b509e91 --- /dev/null +++ b/spec/consensus/consensus-paper/intro.tex @@ -0,0 +1,138 @@ +\section{Introduction} \label{sec:tendermint} + +Consensus is a fundamental problem in distributed computing. It +is important because of its role in State Machine Replication (SMR), a generic +approach for replicating services that can be modeled as a deterministic state +machine~\cite{Lam78:cacm, Sch90:survey}. 
The key idea of this approach is that +service replicas start in the same initial state, and then execute requests +(also called transactions) in the same order; thereby guaranteeing that +replicas stay in sync with each other. The role of consensus in the SMR +approach is ensuring that all replicas receive transactions in the same order. +Traditionally, deployments of SMR based systems are in data-center settings +(local area network), have a small number of replicas (three to seven) and are +typically part of a single administration domain (e.g., Chubby +\cite{Bur:osdi06}); therefore they handle benign (crash) failures only, as more +general forms of failure (in particular, malicious or Byzantine faults) are +considered to occur with only negligible probability. + +The success of cryptocurrencies and blockchain systems in recent years (e.g., +\cite{Nak2012:bitcoin, But2014:ethereum}) poses a whole new set of challenges on +the design and deployment of SMR based systems: reaching agreement over a wide +area network, among a large number of nodes (hundreds or thousands) that are not +part of the same administrative domain, and where a subset of nodes can behave +maliciously (Byzantine faults). Furthermore, contrary to the previous +data-center deployments where nodes are fully connected to each other, in +blockchain systems, a node is only connected to a subset of other nodes, so +communication is achieved by gossip-based peer-to-peer protocols. +The new requirements demand designs and algorithms that are not necessarily +present in the classical academic literature on Byzantine fault tolerant +consensus (or SMR) systems (e.g., \cite{DLS88:jacm, CL02:tcs}) as the primary +focus was a different setup. + +In this paper we describe a novel Byzantine-fault tolerant consensus algorithm +that is the core of the BFT SMR platform called Tendermint\footnote{The + Tendermint platform is available open source at + https://github.com/tendermint/tendermint.}. 
The Tendermint platform consists of +a high-performance BFT SMR implementation written in Go, a flexible interface +for +building arbitrary deterministic applications above the consensus, and a suite +of tools for deployment and management. + +The Tendermint consensus algorithm is inspired by the PBFT SMR +algorithm~\cite{CL99:osdi} and the DLS algorithm for authenticated faults (the +Algorithm 2 from \cite{DLS88:jacm}). Similar to the DLS algorithm, Tendermint +proceeds in +rounds\footnote{Tendermint is not presented in the basic round model of + \cite{DLS88:jacm}. Furthermore, we use the term round differently than in + \cite{DLS88:jacm}; in Tendermint a round denotes a sequence of communication + steps instead of a single communication step in \cite{DLS88:jacm}.}, where each +round has a dedicated proposer (also called coordinator or +leader) and a process proceeds to a new round as part of normal +processing (not only in case the proposer is faulty or suspected as being faulty +by enough processes as in PBFT). +The communication pattern of each round is very similar to the "normal" case +of PBFT. Therefore, in preferable conditions (correct proposer, timely and +reliable communication between correct processes), Tendermint decides in three +communication steps (the same as PBFT). + +The major novelty and contribution of the Tendermint consensus algorithm is a +new termination mechanism. As explained in \cite{MHS09:opodis, RMS10:dsn}, the +existing BFT consensus (and SMR) algorithms for the partially synchronous +system model (for example PBFT~\cite{CL99:osdi}, \cite{DLS88:jacm}, +\cite{MA06:tdsc}) typically rely on the communication pattern illustrated in +Figure~\ref{ch3:fig:coordinator-change} for termination. 
The +Figure~\ref{ch3:fig:coordinator-change} illustrates messages exchanged during +the proposer change when processes start a new round\footnote{There is no + consistent terminology in the distributed computing literature on naming a + sequence of communication steps that corresponds to a logical unit. It is + sometimes called a round, phase or a view.}. It guarantees that eventually (i.e., +after some Global Stabilization Time, GST), there exists a round with a correct +proposer that will bring the system into a univalent configuration. +Intuitively, in a round in which the proposed value is accepted +by all correct processes, and communication between correct processes is +timely and reliable, all correct processes decide. + + +\begin{figure}[tbh!] \def\rdstretch{5} \def\ystretch{3} \centering + \begin{rounddiag}{4}{2} \round{1}{~} \rdmessage{1}{1}{$v_1$} + \rdmessage{2}{1}{$v_2$} \rdmessage{3}{1}{$v_3$} \rdmessage{4}{1}{$v_4$} + \round{2}{~} \rdmessage{1}{1}{$x, [v_{1..4}]$} + \rdmessage{1}{2}{$~~~~~~x, [v_{1..4}]$} \rdmessage{1}{3}{$~~~~~~~~x, + [v_{1..4}]$} \rdmessage{1}{4}{$~~~~~~~x, [v_{1..4}]$} \end{rounddiag} + \vspace{-5mm} \caption{\boldmath Proposer (coordinator) change: $p_1$ is the + new proposer.} \label{ch3:fig:coordinator-change} \end{figure} + +To ensure that a proposed value is accepted by all correct +processes\footnote{The proposed value is not blindly accepted by correct + processes in BFT algorithms. A correct process always verifies if the proposed + value is safe to be accepted so that safety properties of consensus are not + violated.} +a proposer will 1) build the global state by receiving messages from other +processes, 2) select the safe value to propose and 3) send the selected value +together with the signed messages +received in the first step to support it. 
The +value $v_i$ that a correct process sends to the next proposer normally +corresponds to a value the process considers as acceptable for a decision: + +\begin{itemize} \item in PBFT~\cite{CL99:osdi} and DLS~\cite{DLS88:jacm} it is + not the value itself but a set of $2f+1$ signed messages with the same + value id, \item in Fast Byzantine Paxos~\cite{MA06:tdsc} the value + itself is being sent. \end{itemize} + +In both cases, using this mechanism in our system model (i.e., a high +number of nodes over a gossip-based network) would have high communication +complexity that increases with the number of processes: in the first case as +the message sent depends on the total number of processes, and in the second +case as the value (block of transactions) is sent by each process. The set of +messages received in the first step is normally piggybacked on the proposal +message (in the Figure~\ref{ch3:fig:coordinator-change} denoted with +$[v_{1..4}]$) to justify the choice of the selected value $x$. Note that +sending this message also does not scale with the number of processes in the +system. + +We designed a novel termination mechanism for Tendermint that better suits the +system model we consider. It does not require additional communication (neither +sending new messages nor piggybacking information on the existing messages) and +it is fully based on the communication pattern that is very similar to the +normal case in PBFT \cite{CL99:osdi}. Therefore, there is only a single mode of +execution in Tendermint, i.e., there is no separation between the normal and +the recovery mode, which is the case in other PBFT-like protocols (e.g., +\cite{CL99:osdi}, \cite{Ver09:spinning} or \cite{Cle09:aardvark}). We believe +this makes Tendermint simpler to understand and implement correctly. 
+ +Note that the orthogonal approach for reducing message complexity in order to +improve +scalability and decentralization (number of processes) of BFT consensus +algorithms is using advanced cryptography (for example Boneh-Lynn-Shacham (BLS) +signatures \cite{BLS2001:crypto}) as done for example in SBFT +\cite{Gue2018:sbft}. + +The remainder of the paper is as follows: Section~\ref{sec:definitions} defines +the system model and gives the problem definitions. Tendermint +consensus algorithm is presented in Section~\ref{sec:tendermint} and the +proofs are given in Section~\ref{sec:proof}. We conclude in +Section~\ref{sec:conclusion}. + + + + diff --git a/spec/consensus/consensus-paper/latex8.bst b/spec/consensus/consensus-paper/latex8.bst new file mode 100644 index 0000000000..2c7af56479 --- /dev/null +++ b/spec/consensus/consensus-paper/latex8.bst @@ -0,0 +1,1124 @@ + +% --------------------------------------------------------------- +% +% $Id: latex8.bst,v 1.1 1995/09/15 15:13:49 ienne Exp $ +% +% by Paolo.Ienne@di.epfl.ch +% + +% --------------------------------------------------------------- +% +% no guarantee is given that the format corresponds perfectly to +% IEEE 8.5" x 11" Proceedings, but most features should be ok. +% +% --------------------------------------------------------------- +% +% `latex8' from BibTeX standard bibliography style `abbrv' +% version 0.99a for BibTeX versions 0.99a or later, LaTeX version 2.09. +% Copyright (C) 1985, all rights reserved. +% Copying of this file is authorized only if either +% (1) you make absolutely no changes to your copy, including name, or +% (2) if you do make changes, you name it something other than +% btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst. +% This restriction helps ensure that all standard styles are identical. +% The file btxbst.doc has the documentation for this style. 
+ +ENTRY + { address + author + booktitle + chapter + edition + editor + howpublished + institution + journal + key + month + note + number + organization + pages + publisher + school + series + title + type + volume + year + } + {} + { label } + +INTEGERS { output.state before.all mid.sentence after.sentence after.block } + +FUNCTION {init.state.consts} +{ #0 'before.all := + #1 'mid.sentence := + #2 'after.sentence := + #3 'after.block := +} + +STRINGS { s t } + +FUNCTION {output.nonnull} +{ 's := + output.state mid.sentence = + { ", " * write$ } + { output.state after.block = + { add.period$ write$ + newline$ + "\newblock " write$ + } + { output.state before.all = + 'write$ + { add.period$ " " * write$ } + if$ + } + if$ + mid.sentence 'output.state := + } + if$ + s +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.check} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +FUNCTION {output.bibitem} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + before.all 'output.state := +} + +FUNCTION {fin.entry} +{ add.period$ + write$ + newline$ +} + +FUNCTION {new.block} +{ output.state before.all = + 'skip$ + { after.block 'output.state := } + if$ +} + +FUNCTION {new.sentence} +{ output.state after.block = + 'skip$ + { output.state before.all = + 'skip$ + { after.sentence 'output.state := } + if$ + } + if$ +} + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ 'skip$ + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + 'skip$ + if$ +} + +FUNCTION {new.block.checka} +{ empty$ + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.block.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.sentence.checka} +{ empty$ + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {new.sentence.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {field.or.null} +{ duplicate$ empty$ 
+ { pop$ "" } + 'skip$ + if$ +} + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "{\em " swap$ * "}" * } + if$ +} + +INTEGERS { nameptr namesleft numnames } + +FUNCTION {format.names} +{ 's := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't := + nameptr #1 > + { namesleft #1 > + { ", " * t * } + { numnames #2 > + { "," * } + 'skip$ + if$ + t "others" = + { " et~al." * } + { " and " * t * } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {format.authors} +{ author empty$ + { "" } + { author format.names } + if$ +} + +FUNCTION {format.editors} +{ editor empty$ + { "" } + { editor format.names + editor num.names$ #1 > + { ", editors" * } + { ", editor" * } + if$ + } + if$ +} + +FUNCTION {format.title} +{ title empty$ + { "" } + { title "t" change.case$ } + if$ +} + +FUNCTION {n.dashify} +{ 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + +FUNCTION {format.date} +{ year empty$ + { month empty$ + { "" } + { "there's a month but no year in " cite$ * warning$ + month + } + if$ + } + { month empty$ + 'year + { month " " * year * } + if$ + } + if$ +} + +FUNCTION {format.btitle} +{ title emphasize +} + +FUNCTION {tie.or.space.connect} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ * * +} + +FUNCTION {either.or.check} +{ empty$ + 'pop$ + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {format.bvolume} +{ volume empty$ + { "" } + { "volume" volume tie.or.space.connect + series empty$ + 'skip$ + { " of " * series emphasize * } + if$ + "volume and number" number 
either.or.check + } + if$ +} + +FUNCTION {format.number.series} +{ volume empty$ + { number empty$ + { series field.or.null } + { output.state mid.sentence = + { "number" } + { "Number" } + if$ + number tie.or.space.connect + series empty$ + { "there's a number but no series in " cite$ * warning$ } + { " in " * series * } + if$ + } + if$ + } + { "" } + if$ +} + +FUNCTION {format.edition} +{ edition empty$ + { "" } + { output.state mid.sentence = + { edition "l" change.case$ " edition" * } + { edition "t" change.case$ " edition" * } + if$ + } + if$ +} + +INTEGERS { multiresult } + +FUNCTION {multi.page.check} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {format.pages} +{ pages empty$ + { "" } + { pages multi.page.check + { "pages" pages n.dashify tie.or.space.connect } + { "page" pages tie.or.space.connect } + if$ + } + if$ +} + +FUNCTION {format.vol.num.pages} +{ volume field.or.null + number empty$ + 'skip$ + { "(" number * ")" * * + volume empty$ + { "there's a number but no volume in " cite$ * warning$ } + 'skip$ + if$ + } + if$ + pages empty$ + 'skip$ + { duplicate$ empty$ + { pop$ format.pages } + { ":" * pages n.dashify * } + if$ + } + if$ +} + +FUNCTION {format.chapter.pages} +{ chapter empty$ + 'format.pages + { type empty$ + { "chapter" } + { type "l" change.case$ } + if$ + chapter tie.or.space.connect + pages empty$ + 'skip$ + { ", " * format.pages * } + if$ + } + if$ +} + +FUNCTION {format.in.ed.booktitle} +{ booktitle empty$ + { "" } + { editor empty$ + { "In " booktitle emphasize * } + { "In " format.editors * ", " * booktitle emphasize * } + if$ + } + if$ +} + +FUNCTION {empty.misc.check} + +{ author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ + and and and and and + key empty$ not and + { 
"all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + +FUNCTION {format.thesis.type} +{ type empty$ + 'skip$ + { pop$ + type "t" change.case$ + } + if$ +} + +FUNCTION {format.tr.number} +{ type empty$ + { "Technical Report" } + 'type + if$ + number empty$ + { "t" change.case$ } + { number tie.or.space.connect } + if$ +} + +FUNCTION {format.article.crossref} +{ key empty$ + { journal empty$ + { "need key or journal for " cite$ * " to crossref " * crossref * + warning$ + "" + } + { "In {\em " journal * "\/}" * } + if$ + } + { "In " key * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.crossref.editor} +{ editor #1 "{vv~}{ll}" format.name$ + editor num.names$ duplicate$ + #2 > + { pop$ " et~al." * } + { #2 < + 'skip$ + { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = + { " et~al." * } + { " and " * editor #2 "{vv~}{ll}" format.name$ * } + if$ + } + if$ + } + if$ +} + +FUNCTION {format.book.crossref} +{ volume empty$ + { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ + "In " + } + { "Volume" volume tie.or.space.connect + " of " * + } + if$ + editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { series empty$ + { "need editor, key, or series for " cite$ * " to crossref " * + crossref * warning$ + "" * + } + { "{\em " * series * "\/}" * } + if$ + } + { key * } + if$ + } + { format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.incoll.inproc.crossref} +{ editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { booktitle empty$ + { "need editor, key, or booktitle for " cite$ * " to crossref " * + crossref * warning$ + "" + } + { "In {\em " booktitle * "\/}" * } + if$ + } + { "In " key * } + if$ + } + { "In " format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {article} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref 
missing$ + { journal emphasize "journal" output.check + format.vol.num.pages output + format.date "year" output.check + } + { format.article.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {book} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {booklet} +{ output.bibitem + format.authors output + new.block + format.title "title" output.check + howpublished address new.block.checkb + howpublished output + address output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {inbook} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + format.chapter.pages "chapter and pages" output.check + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { format.chapter.pages "chapter and pages" output.check + new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {incollection} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { 
format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.chapter.pages output + new.sentence + publisher "publisher" output.check + address output + format.edition output + format.date "year" output.check + } + { format.incoll.inproc.crossref output.nonnull + format.chapter.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {inproceedings} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.pages output + address empty$ + { organization publisher new.sentence.checkb + organization output + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + organization output + publisher output + } + if$ + } + { format.incoll.inproc.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {conference} { inproceedings } + +FUNCTION {manual} +{ output.bibitem + author empty$ + { organization empty$ + 'skip$ + { organization output.nonnull + address output + } + if$ + } + { format.authors output.nonnull } + if$ + new.block + format.btitle "title" output.check + author empty$ + { organization empty$ + { address new.block.checka + address output + } + 'skip$ + if$ + } + { organization address new.block.checkb + organization output + address output + } + if$ + format.edition output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {mastersthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + "Master's thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {misc} +{ 
output.bibitem + format.authors output + title howpublished new.block.checkb + format.title output + howpublished new.block.checka + howpublished output + format.date output + new.block + note output + fin.entry + empty.misc.check +} + +FUNCTION {phdthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.btitle "title" output.check + new.block + "PhD thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {proceedings} +{ output.bibitem + editor empty$ + { organization output } + { format.editors output.nonnull } + + if$ + new.block + format.btitle "title" output.check + format.bvolume output + format.number.series output + address empty$ + { editor empty$ + { publisher new.sentence.checka } + { organization publisher new.sentence.checkb + organization output + } + if$ + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + editor empty$ + 'skip$ + { organization output } + if$ + publisher output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {techreport} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + format.tr.number output.nonnull + institution "institution" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {unpublished} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + note "note" output.check + format.date output + fin.entry +} + +FUNCTION {default.type} { misc } + +MACRO {jan} {"Jan."} + +MACRO {feb} {"Feb."} + +MACRO {mar} {"Mar."} + +MACRO {apr} {"Apr."} + +MACRO {may} {"May"} + +MACRO {jun} {"June"} + +MACRO {jul} {"July"} + +MACRO {aug} {"Aug."} + +MACRO {sep} {"Sept."} + +MACRO {oct} {"Oct."} + +MACRO {nov} 
{"Nov."} + +MACRO {dec} {"Dec."} + +MACRO {acmcs} {"ACM Comput. Surv."} + +MACRO {acta} {"Acta Inf."} + +MACRO {cacm} {"Commun. ACM"} + +MACRO {ibmjrd} {"IBM J. Res. Dev."} + +MACRO {ibmsj} {"IBM Syst.~J."} + +MACRO {ieeese} {"IEEE Trans. Softw. Eng."} + +MACRO {ieeetc} {"IEEE Trans. Comput."} + +MACRO {ieeetcad} + {"IEEE Trans. Comput.-Aided Design Integrated Circuits"} + +MACRO {ipl} {"Inf. Process. Lett."} + +MACRO {jacm} {"J.~ACM"} + +MACRO {jcss} {"J.~Comput. Syst. Sci."} + +MACRO {scp} {"Sci. Comput. Programming"} + +MACRO {sicomp} {"SIAM J. Comput."} + +MACRO {tocs} {"ACM Trans. Comput. Syst."} + +MACRO {tods} {"ACM Trans. Database Syst."} + +MACRO {tog} {"ACM Trans. Gr."} + +MACRO {toms} {"ACM Trans. Math. Softw."} + +MACRO {toois} {"ACM Trans. Office Inf. Syst."} + +MACRO {toplas} {"ACM Trans. Prog. Lang. Syst."} + +MACRO {tcs} {"Theoretical Comput. Sci."} + +READ + +FUNCTION {sortify} +{ purify$ + "l" change.case$ +} + +INTEGERS { len } + +FUNCTION {chop.word} +{ 's := + 'len := + s #1 len substring$ = + { s len #1 + global.max$ substring$ } + 's + if$ +} + +FUNCTION {sort.format.names} +{ 's := + #1 'nameptr := + "" + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { nameptr #1 > + { " " * } + 'skip$ + if$ + s nameptr "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" format.name$ 't := + nameptr numnames = t "others" = and + { "et al" * } + { t sortify * } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {sort.format.title} +{ 't := + "A " #2 + "An " #3 + "The " #4 t chop.word + chop.word + chop.word + sortify + #1 global.max$ substring$ +} + +FUNCTION {author.sort} +{ author empty$ + { key empty$ + { "to sort, need author or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.editor.sort} +{ author empty$ + { editor empty$ + { key empty$ + { "to sort, need author, editor, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ 
+ } + { editor sort.format.names } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.organization.sort} +{ author empty$ + + { organization empty$ + { key empty$ + { "to sort, need author, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {editor.organization.sort} +{ editor empty$ + { organization empty$ + { key empty$ + { "to sort, need editor, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { editor sort.format.names } + if$ +} + +FUNCTION {presort} +{ type$ "book" = + type$ "inbook" = + or + 'author.editor.sort + { type$ "proceedings" = + 'editor.organization.sort + { type$ "manual" = + 'author.organization.sort + 'author.sort + if$ + } + if$ + } + if$ + " " + * + year field.or.null sortify + * + " " + * + title field.or.null + sort.format.title + * + #1 entry.max$ substring$ + 'sort.key$ := +} + +ITERATE {presort} + +SORT + +STRINGS { longest.label } + +INTEGERS { number.label longest.label.width } + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + 'skip$ + if$ +} + +EXECUTE {initialize.longest.label} + +ITERATE {longest.label.pass} + +FUNCTION {begin.bib} +{ preamble$ empty$ + 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label * + "}\setlength{\itemsep}{-1ex}\small" * write$ newline$ +} + +EXECUTE {begin.bib} + +EXECUTE {init.state.consts} + +ITERATE {call.type$} + +FUNCTION {end.bib} +{ newline$ + "\end{thebibliography}" write$ newline$ +} + +EXECUTE {end.bib} + +% end of file latex8.bst +% 
--------------------------------------------------------------- + + + diff --git a/spec/consensus/consensus-paper/latex8.sty b/spec/consensus/consensus-paper/latex8.sty new file mode 100644 index 0000000000..1e6b0dc7e6 --- /dev/null +++ b/spec/consensus/consensus-paper/latex8.sty @@ -0,0 +1,168 @@ +% --------------------------------------------------------------- +% +% $Id: latex8.sty,v 1.2 1995/09/15 15:31:13 ienne Exp $ +% +% by Paolo.Ienne@di.epfl.ch +% +% --------------------------------------------------------------- +% +% no guarantee is given that the format corresponds perfectly to +% IEEE 8.5" x 11" Proceedings, but most features should be ok. +% +% --------------------------------------------------------------- +% with LaTeX2e: +% ============= +% +% use as +% \documentclass[times,10pt,twocolumn]{article} +% \usepackage{latex8} +% \usepackage{times} +% +% --------------------------------------------------------------- + +% with LaTeX 2.09: +% ================ +% +% use as +% \documentstyle[times,art10,twocolumn,latex8]{article} +% +% --------------------------------------------------------------- +% with both versions: +% =================== +% +% specify \pagestyle{empty} to omit page numbers in the final +% version +% +% specify references as +% \bibliographystyle{latex8} +% \bibliography{...your files...} +% +% use Section{} and SubSection{} instead of standard section{} +% and subsection{} to obtain headings in the form +% "1.3. 
My heading" +% +% --------------------------------------------------------------- + +\typeout{IEEE 8.5 x 11-Inch Proceedings Style `latex8.sty'.} + +% ten point helvetica bold required for captions +% in some sites the name of the helvetica bold font may differ, +% change the name here: +\font\tenhv = phvb at 10pt +%\font\tenhv = phvb7t at 10pt + +% eleven point times bold required for second-order headings +% \font\elvbf = cmbx10 scaled 1100 +\font\elvbf = ptmb scaled 1100 + +% set dimensions of columns, gap between columns, and paragraph indent +\setlength{\textheight}{8.875in} +\setlength{\textwidth}{6.875in} +\setlength{\columnsep}{0.3125in} +\setlength{\topmargin}{0in} +\setlength{\headheight}{0in} +\setlength{\headsep}{0in} +\setlength{\parindent}{1pc} +\setlength{\oddsidemargin}{-.304in} +\setlength{\evensidemargin}{-.304in} + +% memento from size10.clo +% \normalsize{\@setfontsize\normalsize\@xpt\@xiipt} +% \small{\@setfontsize\small\@ixpt{11}} +% \footnotesize{\@setfontsize\footnotesize\@viiipt{9.5}} +% \scriptsize{\@setfontsize\scriptsize\@viipt\@viiipt} +% \tiny{\@setfontsize\tiny\@vpt\@vipt} +% \large{\@setfontsize\large\@xiipt{14}} +% \Large{\@setfontsize\Large\@xivpt{18}} +% \LARGE{\@setfontsize\LARGE\@xviipt{22}} +% \huge{\@setfontsize\huge\@xxpt{25}} +% \Huge{\@setfontsize\Huge\@xxvpt{30}} + +\def\@maketitle + { + \newpage + \null + \vskip .375in + \begin{center} + {\Large \bf \@title \par} + % additional two empty lines at the end of the title + \vspace*{24pt} + { + \large + \lineskip .5em + \begin{tabular}[t]{c} + \@author + \end{tabular} + \par + } + % additional small space at the end of the author name + \vskip .5em + { + \large + \begin{tabular}[t]{c} + \@affiliation + \end{tabular} + \par + \ifx \@empty \@email + \else + \begin{tabular}{r@{~}l} + E-mail: & {\tt \@email} + \end{tabular} + \par + \fi + } + % additional empty line at the end of the title block + \vspace*{12pt} + \end{center} + } + +\def\abstract + {% + \centerline{\large\bf 
Abstract}% + \vspace*{12pt}% + \it% + } + +\def\endabstract + { + % additional empty line at the end of the abstract + \vspace*{12pt} + } + +\def\affiliation#1{\gdef\@affiliation{#1}} \gdef\@affiliation{} + +\def\email#1{\gdef\@email{#1}} +\gdef\@email{} + +\newlength{\@ctmp} +\newlength{\@figindent} +\setlength{\@figindent}{1pc} + +\long\def\@makecaption#1#2{ + \vskip 10pt + \setbox\@tempboxa\hbox{\tenhv\noindent #1.~#2} + \setlength{\@ctmp}{\hsize} + \addtolength{\@ctmp}{-\@figindent}\addtolength{\@ctmp}{-\@figindent} + % IF longer than one indented paragraph line + \ifdim \wd\@tempboxa >\@ctmp + % THEN set as an indented paragraph + \begin{list}{}{\leftmargin\@figindent \rightmargin\leftmargin} + \item[]\tenhv #1.~#2\par + \end{list} + \else + % ELSE center + \hbox to\hsize{\hfil\box\@tempboxa\hfil} + \fi} + +% correct heading spacing and type +\def\section{\@startsection {section}{1}{\z@} + {14pt plus 2pt minus 2pt}{14pt plus 2pt minus 2pt} {\large\bf}} +\def\subsection{\@startsection {subsection}{2}{\z@} + {13pt plus 2pt minus 2pt}{13pt plus 2pt minus 2pt} {\elvbf}} + +% add the period after section numbers +\newcommand{\Section}[1]{\section{\hskip -1em.~#1}} +\newcommand{\SubSection}[1]{\subsection{\hskip -1em.~#1}} + +% end of file latex8.sty +% --------------------------------------------------------------- diff --git a/spec/consensus/consensus-paper/lit.bib b/spec/consensus/consensus-paper/lit.bib new file mode 100644 index 0000000000..4abc83e70c --- /dev/null +++ b/spec/consensus/consensus-paper/lit.bib @@ -0,0 +1,1659 @@ +%--- conferences -------------------------------------------------- +@STRING{WDAG96 = "Proceedings of the 10th International Workshop + on Distributed Algorithms (WDAG'96)"} +@STRING{WDAG97 = "Proceedings of the 11th International Workshop + on Distributed Algorithms (WDAG'97)"} +@STRING{DISC98 = "Proceedings of the 12th International Conference + on Distributed Computing ({DISC}'98)"} +@STRING{DISC99 = "Proceedings of the 13th 
International Conference + on Distributed Computing ({DISC}'99)"} +@STRING{DISC98 = "Proceedings of the 13th International Conference + on Distributed Computing ({DISC}'98)"} +@STRING{DISC99 = "Proceedings of the 13th International Conference + on Distributed Computing ({DISC}'99)"} +@STRING{DISC00 = "Proceedings of the 14th International Conference + on Distributed Computing ({DISC}'00)"} +@STRING{DISC01 = "Proceedings of the 15th International Conference + on Distributed Computing ({DISC}'01)"} +@STRING{DISC02 = "Proceedings of the 16th International Conference + on Distributed Computing ({DISC}'02)"} +@STRING{DISC03 = "Proceedings of the 17th International Conference + on Distributed Computing ({DISC}'03)"} +@STRING{DISC04 = "Proceedings of the 18th International Conference + on Distributed Computing ({DISC}'04)"} +@STRING{DISC05 = "Proceedings of the 19th International Conference + on Distributed Computing ({DISC}'05)"} +@STRING{PODC83 = "Proceeding of the 1st Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'83)"} +@STRING{PODC91 = "Proceeding of the 9th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'91)"} +@STRING{PODC94 = "Proceeding of the 12th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'94)"} +@STRING{PODC95 = "Proceeding of the 13th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'95)"} +@STRING{PODC96 = "Proceeding of the 14th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'96)"} +@STRING{PODC97 = "Proceeding of the 15th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'97)"} +@STRING{PODC98 = "Proceeding of the 16th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'98)"} +@STRING{PODC99 = "Proceeding of the 17th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'99)"} +@STRING{PODC00 = "Proceeding of the 18th Annual {ACM} Symposium on + Principles of Distributed 
Computing ({PODC}'00)"} +@STRING{PODC01 = "Proceeding of the 19th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'01)"} +@STRING{PODC02 = "Proceeding of the 20th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'02)"} +@STRING{PODC03 = "Proceeding of the 21st Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'03)"} +@STRING{PODC03 = "Proceeding of the 22nd Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'03)"} +@STRING{PODC04 = "Proceeding of the 23rd Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'04)"} +@STRING{PODC05 = "Proceeding of the 24th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'05)"} +@STRING{PODC06 = "Proceedings of the 25th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'06)"} +@STRING{PODC07 = "Proceedings of the 26th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'07)"} +@STRING{STOC91 = "Proceedings of the 23rd Annual {ACM} Symposium on + Theory of Computing ({STOC}'91)"} +@STRING{WSS01 = "Proceedings of the 5th International Workshop on + Self-Stabilizing Systems ({WSS} '01)"} +@STRING{SSS06 = "Proceedings of the 8th International Symposium on + Stabilization, Safety, and Security of Distributed + Systems ({SSS} '06)"} +@STRING{DSN00 = "Dependable Systems and Networks ({DSN} 2000)"} +@STRING{DSN05 = "Dependable Systems and Networks ({DSN} 2005)"} +@STRING{DSN06 = "Dependable Systems and Networks ({DSN} 2006)"} +@STRING{DSN07 = "Dependable Systems and Networks ({DSN} 2007)"} + +%--- journals ----------------------------------------------------- +@STRING{PPL = "Parallel Processing Letters"} +@STRING{IPL = "Information Processing Letters"} +@STRING{DC = "Distributed Computing"} +@STRING{JACM = "Journal of the ACM"} +@STRING{IC = "Information and Control"} +@STRING{TCS = "Theoretical Computer Science"} +@STRING{ACMTCS = "ACM Transactions on Computer Systems"} 
+@STRING{TDSC = "Transactions on Dependable and Secure Computing"} +@STRING{TPLS = "ACM Trans. Program. Lang. Syst."} + +%--- publisher ---------------------------------------------------- +@STRING{ACM = "ACM Press"} +@STRING{IEEE = "IEEE"} +@STRING{SPR = "Springer-Verlag"} + +%--- institution -------------------------------------------------- +@STRING{TUAuto = {Technische Universit\"at Wien, Department of + Automation}} +@STRING{TUECS = {Technische Universit\"at Wien, Embedded Computing + Systems Group}} + + +%------------------------------------------------------------------ +@article{ABND+90:jacm, + author = {Hagit Attiya and Amotz Bar-Noy and Danny Dolev and + David Peleg and R{\"u}diger Reischuk}, + title = {Renaming in an asynchronous environment}, + journal = JACM, + volume = {37}, + number = {3}, + year = {1990}, + pages = {524--548}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{ABND95:jacm, + author = {Hagit Attiya and Amotz Bar-Noy and Danny Dolev}, + title = {Sharing memory robustly in message-passing systems}, + journal = JACM, + volume = {42}, + number = {1}, + year = {1995}, + pages = {124--142}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@inproceedings{ACKM04:podc, + author = {Ittai Abraham and Gregory Chockler and Idit Keidar + and Dahlia Malkhi}, + title = {Byzantine disk paxos: optimal resilience with + byzantine shared memory.}, + booktitle = PODC04, + year = {2004}, + pages = {226-235} +} + +@article{ACKM05:dc, + author = {Ittai Abraham and Gregory Chockler and Idit Keidar + and Dahlia Malkhi}, + title = {Byzantine disk paxos: optimal resilience with + byzantine shared memory.}, + journal = DC, + volume = {18}, + number = {5}, + year = {2006}, + pages = {387-408} +} + +@article{ACT00:dc, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Failure Detection and Consensus in the + Crash-Recovery Model", + journal = DC, + year = 2000, + month = apr, + volume = 13, + number = 2, + pages = 
"99--125", + url = + "http://www.cs.cornell.edu/home/sam/FDpapers/crash-recovery-finaldcversion.ps" +} + +@article{ACT00:siam, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "On quiescent reliable communication", + journal = "SIAM Journal of Computing", + year = 2000, + volume = 29, + number = 6, + pages = "2040--2073", + month = apr +} + +@inproceedings{ACT97:wdag, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Heartbeat: A Timeout-Free Failure Detector for + Quiescent Reliable Communication", + booktitle = WDAG97, + year = 1997, + pages = "126--140", + url = + "http://simon.cs.cornell.edu/Info/People/weichen/research/mypapers/wdag97final.ps" +} + +@article{ACT98:disc, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Failure Detection and Consensus in the + Crash-Recovery Model", + journal = DISC98, + year = 1998, + pages = "231--245", + publisher = SPR +} + +@article{ACT99:tcs, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Using the Heartbeat Failure Detector for Quiescent + Reliable Communication and Consensus in + Partitionable Networks", + journal = "Theoretical Computer Science", + year = 1999, + month = jun, + volume = 220, + number = 1, + pages = "3--30", + url = + "http://www.cs.cornell.edu/home/sam/FDpapers/TCS98final.ps" +} + +@inproceedings{ADGF+04:ispdc, + author = {Anceaume, Emmanuelle and Delporte-Gallet, Carole and + Fauconnier, Hugues and Hurfin, Michel and Le Lann, + G{\'e}rard }, + title = {Designing Modular Services in the Scattered + Byzantine Failure Model.}, + booktitle = {ISPDC/HeteroPar}, + year = {2004}, + pages = {262-269} +} + +@inproceedings{ADGF+06:dsn, + author = {Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg}, + title = {Consensus with Byzantine Failures and Little System + Synchrony.}, + booktitle = DSN06, + year = {2006}, + pages = {147-155} +} + +@inproceedings{ADGFT01:disc, + 
author = "Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg", + title = "Stable Leader Election", + booktitle = DISC01, + year = 2001, + pages = "108--122", + publisher = SPR +} + +@inproceedings{ADGFT03:podc, + author = "Marcos K. Aguilera and Carole Delporte-Gallet and + Hugues Fauconnier and Sam Toueg", + title = "On implementing {O}mega with weak reliability and + synchrony assumptions", + booktitle = PODC03, + year = 2003, + publisher = ACM +} + +@inproceedings{ADGFT04:podc, + author = {Marcos K. Aguilera and Carole Delporte-Gallet and + Hugues Fauconnier and Sam Toueg}, + title = {Communication-efficient leader election and + consensus with limited link synchrony}, + booktitle = PODC04, + year = 2004, + pages = {328--337}, + address = {St. John's, Newfoundland, Canada}, + publisher = ACM +} + +@inproceedings{ADGFT06:dsn, + author = {Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg}, + title = {Consensus with Byzantine Failures and Little System + Synchrony.}, + booktitle = DSN06, + year = 2006, + pages = {147-155}, + ee = + {http://doi.ieeecomputersociety.org/10.1109/DSN.2006.22}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{ADLS91:stoc, + author = "Hagit Attiya and Cynthia Dwork and Nancy A. Lynch + and Larry J. 
Stockmeyer", + title = "Bounds on the Time to Reach Agreement in the + Presence of Timing Uncertainty", + booktitle = STOC91, + year = 1991, + pages = "359--369", +} + +@article{AT99:ipl, + author = "Marcos Kawazoe Aguilera and Sam Toueg", + title = "A Simple Bivalency Proof that t -Resilient Consensus + Requires t + 1 Rounds", + journal = IPL, + volume = "71", + number = "3-4", + pages = "155--158", + year = "1999" +} + +@Book{AW04:book, + author = {Attiya, Hagit and Welch, Jennifer}, + title = {Distributed Computing}, + publisher = {John Wiley {\&} Sons}, + edition = {2nd}, + year = {2004} +} + +@Book{AW98:book, + author = {Hagit Attiya and Jennifer Welch}, + title = {Distributed Computing}, + publisher = {McGraw-Hill Publishing Company}, + year = {1998} +} + +@InBook{AW98:book:chap12, + author = {Hagit Attiya and Jennifer Welch}, + title = {Distributed Computing}, + publisher = {McGraw-Hill Publishing Company}, + year = {1998}, + chapter = {12, "Improving the fault-tolerance of algorithms"} +} + +@inproceedings{ABHMS11:disc, + author = {Hagit Attiya and + Fatemeh Borran and + Martin Hutle and + Zarko Milosevic and + Andr{\'e} Schiper}, + title = {Structured Derivation of Semi-Synchronous Algorithms}, + booktitle = {DISC}, + year = {2011}, + pages = {374-388} +} + +@inproceedings{BCBG+07:podc, + author = {Martin Biely and Bernadette Charron-Bost and Antoine + Gaillard and Martin Hutle and Andr{\'e} Schiper and + Josef Widder}, + title = {Tolerating Corrupted Communication}, + publisher = ACM, + booktitle = PODC07, + year = {2007} +} + +@InProceedings{BCBT96:wdag, + author = {Anindya Basu and Bernadette Charron-Bost and Sam + Toueg}, + title = {Simulating Reliable Links with Unreliable Links in + the Presence of Process Crashes}, + pages = {105--122}, + booktitle = {WDAG 1996}, + editor = {Babao{\u g}lu, {\"O}zalp}, + year = {1996}, + month = {Oct}, + volume = {1151}, + ISBN = {3-540-61769-8}, + publisher = {Springer}, + series = {Lecture Notes in Computer 
Science}, +} + +@article{BDFG03:sigact, + author = "R. Boichat and P. Dutta and S. Frolund and + R. Guerraoui", + title = "Reconstructing {P}axos", + journal = "ACM SIGACT News", + year = "2003", + volume = "34", + number = "1", + pages = "47-67" +} + +@unpublished{BHR+06:note, + author = "Martin Biely and Martin Hutle and Sergio Rajsbaum + and Ulrich Schmid and Corentin Travers and Josef + Widder", + title = "Discussion note on moving timely links", + note = "Unpublished", + month = apr, + year = 2006 +} + +@article{BHRT03:jda, + author = {Roberto Baldoni and Jean-Michel H{\'e}lary and + Michel Raynal and L{\'e}naick Tanguy}, + title = {Consensus in Byzantine asynchronous systems.}, + journal = {J. Discrete Algorithms}, + volume = {1}, + number = {2}, + year = {2003}, + pages = {185-210}, + ee = {http://dx.doi.org/10.1016/S1570-8667(03)00025-X}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@unpublished{BHSS08:tdsc, + author = {Fatemeh Borran and Martin Hutle and Nuno Santos and + Andr{\'e} Schiper}, + title = {Solving Consensus with Communication Predicates: + A~Quantitative Approach}, + note = {Under submission}, + year = {2008} +} + +@inproceedings{Ben83:podc, + author = {Michael Ben-Or}, + title = {Another Advantage of Free Choice: Completely + Asynchronous Agreement Protocols}, + booktitle = {PODC}, + year = {1983}, +} + +@inproceedings{Bra04:podc, + author = {Bracha, Gabriel}, + title = {An asynchronous [(n - 1)/3]-resilient consensus protocol}, + booktitle = {PODC '84: Proceedings of the third annual ACM symposium on Principles of distributed computing}, + year = {1984}, + isbn = {0-89791-143-1}, + pages = {154--162}, + location = {Vancouver, British Columbia, Canada}, + doi = {http://doi.acm.org/10.1145/800222.806743}, + publisher = {ACM}, + address = {New York, NY, USA}, + } + + +@inproceedings{CBGS00:dsn, + author = "Bernadette Charron-Bost and Rachid Guerraoui and + Andr{\'{e}} Schiper", + title = "Synchronous System and Perfect Failure Detector: 
+ {S}olvability and efficiency issues", + booktitle = DSN00, + publisher = "{IEEE} Computer Society", + address = "New York, {USA}", + pages = "523--532", + year = "2000" +} + +@inproceedings{CBS06:prdc, + author = {Bernadette Charron-Bost and Andr{\'e} Schiper}, + title = {Improving Fast Paxos: being optimistic with no + overhead}, + booktitle = {Pacific Rim Dependable Computing, Proceedings}, + year = {2006} +} + +@article{CBS09, + author = {B. Charron-Bost and A. Schiper}, + title = {The {H}eard-{O}f model: computing in distributed systems with benign failures}, + journal ={Distributed Computing}, + number = {1}, + volume = {22}, + pages = {49-71}, + year ={2009} + } + + +@article{CBS07:sigact, + author = {Bernadette Charron-Bost and Andr\'{e} Schiper}, + title = {Harmful dogmas in fault tolerant distributed + computing}, + journal = {SIGACT News}, + volume = {38}, + number = {1}, + year = {2007}, + pages = {53--61}, +} + +@techreport{CBS07:tr, + author = {Charron-Bost, Bernadette and Schiper, Andr{\'{e}}}, + title = {The Heard-Of Model: Unifying all Benign Failures}, + institution = {EPFL}, + year = 2007, + OPTnumber = {LSR-REPORT-2006-004} +} + +@article{CELT00:jacm, + author = {Soma Chaudhuri and Maurice Herlihy and Nancy A. Lynch + and Mark R. 
Tuttle}, + title = {Tight bounds for k-set agreement}, + journal = JACM, + volume = {47}, + number = {5}, + year = {2000}, + pages = {912--943}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{CF99:tpds, + author = "Flaviu Cristian and Christof Fetzer", + title = "The Timed Asynchronous Distributed System Model", + journal = "IEEE Transactions on Parallel and Distributed + Systems", + volume = "10", + number = "6", + pages = "642--657", + year = "1999" +} + +@article{CHT96:jacm, + author = "Tushar Deepak Chandra and Vassos Hadzilacos and Sam + Toueg", + title = "The Weakest Failure Detector for Solving Consensus", + journal = {JACM}, + year = {1996}, +} + +@article{CL02:tcs, + author = {Miguel Castro and Barbara Liskov}, + title = {Practical byzantine fault tolerance and proactive + recovery}, + journal = {ACMTCS}, + year = {2002}, +} + +@inproceedings{CL99:osdi, + author = {Miguel Castro and Barbara Liskov}, + title = {Practical byzantine fault tolerance and proactive + recovery}, + booktitle = {Proceedings of the 3rd Symposium on Operating + Systems Design and Implementation}, + year = {1999}, + month = feb +} + +@inproceedings{CT91:podc, + author = {Tushar Deepak Chandra and Sam Toueg}, + title = {Unreliable Failure Detectors for Asynchronous + Systems (Preliminary Version)}, + booktitle = PODC91, + year = {1991}, + pages = {325-340} +} + +@article{CT96:jacm1, + author = "Tushar Deepak Chandra and Sam Toueg", + title = "Unreliable Failure Detectors for Reliable + Distributed Systems", + journal = {JACM}, + year = {1996}, +} + +@inproceedings{CTA00:dsn, + author = "Wei Chen and Sam Toueg and Marcos Kawazoe Aguilera", + title = "On the Quality of Service of Failure Detectors", + booktitle = "Proceedings IEEE International Conference on + Dependable Systems and Networks (DSN / FTCS'30)", + address = "New York City, USA", + year = 2000 +} + +@TechReport{DFKM96:tr, + author = {Danny Dolev and Roy Friedman and Idit Keidar and + Dahlia Malkhi}, + 
title = {Failure detectors in omission failure environments}, + institution = {Department of Computer Science, Cornell University}, + year = {1996}, + type = {Technical Report}, + number = {96-1608} +} + +@inproceedings{DG02:podc, + author = {Partha Dutta and Rachid Guerraoui}, + title = {The inherent price of indulgence}, + booktitle = PODC02, + year = 2002, + pages = {88--97}, + location = {Monterey, California}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@inproceedings{DGFG+04:podc, + author = {Carole Delporte-Gallet and Hugues Fauconnier and + Rachid Guerraoui and Vassos Hadzilacos and Petr + Kouznetsov and Sam Toueg}, + title = {The weakest failure detectors to solve certain + fundamental problems in distributed computing}, + booktitle = PODC04, + year = 2004, + pages = {338--346}, + location = {St. John's, Newfoundland, Canada}, + publisher = ACM, + address = {New York, NY, USA} +} + +@inproceedings{DGL05:dsn, + author = {Partha Dutta and Rachid Guerraoui and Leslie + Lamport}, + title = {How Fast Can Eventual Synchrony Lead to Consensus?}, + booktitle = {Proceedings of the 2005 International Conference on + Dependable Systems and Networks (DSN'05)}, + pages = {22--27}, + year = {2005}, + address = {Los Alamitos, CA, USA} +} + +@article{DLS88:jacm, + author = "Cynthia Dwork and Nancy Lynch and Larry Stockmeyer", + title = "Consensus in the Presence of Partial Synchrony", + journal = {JACM}, + year = {1988}, +} + +@article{DPLL00:tcs, + author = "De Prisco, Roberto and Butler Lampson and Nancy + Lynch", + title = "Revisiting the {PAXOS} algorithm", + journal = TCS, + volume = "243", + number = "1--2", + pages = "35--91", + year = "2000" +} + +@techreport{DS97:tr, + author = {A. Doudou and A. Schiper}, + title = {Muteness Failure Detectors for Consensus with + {B}yzantine Processes}, + institution = {EPFL, Dept d'Informatique}, + year = {1997}, + type = {TR}, + month = {October}, + number = {97/230}, +} + +@inproceedings{DS98:podc, + author = {A. 
Doudou and A. Schiper}, + title = {Muteness Detectors for Consensus with {B}yzantine + Processes ({B}rief {A}nnouncement)}, + booktitle = {PODC}, + month = jul, + year = {1998} +} + +@article{DSU04:survey, + author = {D{\'e}fago, Xavier and Schiper, Andr{\'e} and Urb\'{a}n, P{\'e}ter}, + title = {Total order broadcast and multicast algorithms: Taxonomy and survey}, + journal = {ACM Comput. Surv.}, + issue_date = {December 2004}, + volume = {36}, + number = {4}, + month = dec, + year = {2004}, + issn = {0360-0300}, + pages = {372--421}, + numpages = {50}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {Distributed systems, agreement problems, atomic broadcast, atomic multicast, classification, distributed algorithms, fault-tolerance, global ordering, group communication, message passing, survey, taxonomy, total ordering}, +} + +@article{DeCandia07:dynamo, + author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner}, + title = {Dynamo: amazon's highly available key-value store}, + journal = {SIGOPS Oper. Syst. Rev.}, + issue_date = {December 2007}, + volume = {41}, + number = {6}, + month = oct, + year = {2007}, + issn = {0163-5980}, + pages = {205--220}, + numpages = {16}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {performance, reliability, scalability}, +} + + +@book{Dol00:book, + author = {Shlomi Dolev}, + title = {Self-Stabilization}, + publisher = {The MIT Press}, + year = {2000} +} + +@inproceedings{FC95:podc, + author = "Christof Fetzer and Flaviu Cristian", + title = "Lower Bounds for Convergence Function Based Clock + Synchronization", + booktitle = PODC95, + year = 1995, + pages = "137--143" +} + +@article{FLP85:jacm, + author = "Michael J. Fischer and Nancy A. Lynch and + M. S. 
Paterson", + title = "Impossibility of Distributed Consensus with one + Faulty Process", + journal = {JACM}, + year = {1985}, +} + +@article{FMR05:tdsc, + author = {Roy Friedman and Achour Most{\'e}faoui and Michel + Raynal}, + title = {Simple and Efficient Oracle-Based Consensus + Protocols for Asynchronous Byzantine Systems.}, + journal = TDSC, + volume = {2}, + number = {1}, + year = {2005}, + pages = {46-56}, + ee = {http://dx.doi.org/10.1109/TDSC.2005.13}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{FS04:podc, + author = "Christof Fetzer and Ulrich Schmid", + title = "Brief announcement: on the possibility of consensus + in asynchronous systems with finite average response + times.", + booktitle = PODC04, + year = 2004, + pages = 402 +} + +@InProceedings{GL00:disc, + author = {Eli Gafni and Leslie Lamport}, + title = {Disk Paxos}, + booktitle = DISC00, + pages = {330--344}, + year = {2000}, +} + +@Article{GL03:dc, + author = {Eli Gafni and Leslie Lamport}, + title = {Disk Paxos}, + journal = DC, + year = 2003, + volume = {16}, + number = {1}, + pages = {1--20} +} + +@inproceedings{GP01:wss, + author = "Felix C. G{\"a}rtner and Stefan Pleisch", + title = "({I}m)Possibilities of Predicate Detection in + Crash-Affected Systems", + booktitle = WSS01, + year = 2001, + pages = "98--113" +} + +@inproceedings{GP02:disc, + author = "Felix C. 
G{\"a}rtner and Stefan Pleisch", + title = "Failure Detection Sequencers: Necessary and + Sufficient Information about Failures to Solve + Predicate Detection", + booktitle = DISC02, + year = 2002, + pages = "280--294" +} + +@inproceedings{GS96:wdag, + author = {Rachid Guerraoui and Andr{\'e} Schiper}, + title = {{``Gamma-Accurate''} Failure Detectors}, + booktitle = WDAG96, + year = {1996}, + pages = {269--286}, + publisher = SPR, + address = {London, UK} +} + +@inproceedings{Gaf98:podc, + author = {Eli Gafni}, + title = {Round-by-round fault detectors (extended abstract): + unifying synchrony and asynchrony}, + booktitle = PODC98, + year = {1998}, + pages = {143--152}, + address = {Puerto Vallarta, Mexico}, + publisher = ACM +} + +@incollection{Gra78:book, + author = {Jim N. Gray}, + title = {Notes on data base operating systems}, + booktitle = {Operating Systems: An Advanced Course}, + chapter = {3.F}, + publisher = {Springer}, + year = {1978}, + editor = {R. Bayer, R.M. Graham, G. Seegm\"uller}, + volume = {60}, + series = {Lecture Notes in Computer Science}, + address = {New York}, + pages = {465}, +} + +@InProceedings{HMR98:srds, + author = {Hurfin, M. and Mostefaoui, A. and Raynal, M.}, + title = {Consensus in asynchronous systems where processes + can crash and recover}, + booktitle = {Seventeenth IEEE Symposium on Reliable Distributed + Systems, Proceedings. 
}, + pages = { 280--286}, + year = {1998}, + address = {West Lafayette, IN}, + month = oct, + organization = {IEEE} +} + +@inproceedings{HMSZ06:sss, + author = "Martin Hutle and Dahlia Malkhi and Ulrich Schmid and + Lidong Zhou", + title = "Brief Announcement: Chasing the Weakest System Model + for Implementing {$\Omega$} and Consensus", + booktitle = SSS06, + year = 2006 +} + +@incollection{HT93:ds, + author = {Hadzilacos, Vassos and Toueg, Sam}, + title = {Fault-tolerant broadcasts and related problems}, + booktitle = {Distributed systems (2nd Ed.)}, + editor = {Mullender, Sape}, + year = {1993}, + isbn = {0-201-62427-3}, + pages = {97--145}, + numpages = {49} +} + + +@inproceedings{HS06:opodis, + author = {Heinrich Moser and Ulrich Schmid}, + title = {Optimal Clock Synchronization Revisited: Upper and + Lower Bounds in Real-Time Systems}, + booktitle = { Principles of Distributed Systems}, + pages = {94--109}, + year = {2006}, + volume = {4305}, + series = {Lecture Notes in Computer Science}, + publisher = SPR +} + +@techreport{HS06:tr, + author = {Martin Hutle and Andr{\'e} Schiper}, + title = { Communication predicates: A high-level abstraction + for coping with transient and dynamic faults}, + institution = {EPFL}, + number = { LSR-REPORT-2006-006 }, + year = {2006} +} + +@inproceedings{HS07:dsn, + author = {Martin Hutle and Andr{\'e} Schiper}, + title = { Communication predicates: A high-level abstraction + for coping with transient and dynamic faults}, + year = 2007, + booktitle = DSN07, + publisher = IEEE, + location = {Edinburgh,UK}, + pages = {92--10}, + month = jun +} + +@article{Her91:tpls, + author = {Maurice Herlihy}, + title = {Wait-free synchronization}, + journal = TPLS, + volume = {13}, + number = {1}, + year = {1991}, + pages = {124--149}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{Kot09:zyzzyva, + author = {Kotla, Ramakrishna and Alvisi, Lorenzo and Dahlin, Mike and Clement, Allen and Wong, Edmund}, + title = {Zyzzyva: 
Speculative Byzantine fault tolerance}, + journal = {ACM Trans. Comput. Syst.}, + issue_date = {December 2009}, + volume = {27}, + number = {4}, + month = jan, + year = {2010}, + issn = {0734-2071}, + pages = {7:1--7:39}, + articleno = {7}, + numpages = {39}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {Byzantine fault tolerance, output commit, replication, speculative execution}, +} + + +@inproceedings{KMMS97:opodis, + author = "Kim Potter Kihlstrom and Louise E. Moser and + P. M. Melliar-Smith", + title = "Solving Consensus in a Byzantine Environment Using + an Unreliable Fault Detector", + booktitle = "Proceedings of the International Conference on + Principles of Distributed Systems (OPODIS)", + year = 1997, + month = dec, + address = "Chantilly, France", + pages = "61--75" +} + +@inproceedings{KS06:podc, + author = {Idit Keidar and Alexander Shraer}, + title = {Timeliness, failure-detectors, and consensus + performance}, + booktitle = PODC06, + year = {2006}, + pages = {169--178}, + location = {Denver, Colorado, USA}, + publisher = {ACM Press}, + address = {New York, NY, USA}, +} + +@InProceedings{LFA99:disc, + author = {Mikel Larrea and Antonio Fern\'andez and Sergio + Ar\'evalo}, + title = {Efficient algorithms to implement unreliable failure + detectors in partially synchronous systems}, + year = 1999, + month = sep, + pages = {34-48}, + series = "LNCS 1693", + booktitle = DISC99, + publisher = SPR, + address = {Bratislava, Slovaquia}, +} + +@article{LL84:ic, + author = "Jennifer Lundelius and Nancy A. 
Lynch", + title = "An Upper and Lower Bound for Clock Synchronization", + journal = IC, + volume = 62, + number = {2/3}, + year = 1984, + pages = {190--204} +} + +@techreport{LLS03:tr, + title = {How to Implement a Timer-free Perfect Failure + Detector in Partially Synchronous Systems}, + author = {Le Lann, G\'erard and Schmid, Ulrich}, + institution = TUAuto, + number = "183/1-127", + month = jan, + year = 2003 +} + +@article{LSP82:tpls, + author = {Leslie Lamport and Robert Shostak and Marshall + Pease}, + title = {The {B}yzantine Generals Problem}, + journal = {ACM Trans. Program. Lang. Syst.}, + year = {1982}, +} + +@inproceedings{Lam01:podc, + author = {Butler Lampson}, + title = {The ABCD's of Paxos}, + booktitle = {PODC}, + year = {2001}, + +} + +@inproceedings{Lam03:fddc, + author = {Leslie Lamport}, + title = {Lower Bounds for Asynchronous Consensus}, + booktitle = {Future Directions in Distributed Computing}, + pages = {22--23}, + year = {2003}, + editor = {Andr{\'e} Schiper and Alex A. Shvartsman and Hakim + Weatherspoon and Ben Y. 
Zhao}, + number = {2584}, + series = {Lecture Notes in Computer Science}, + publisher = SPR +} + +@techreport{Lam04:tr, + author = {Leslie Lamport}, + title = {Lower Bounds for Asynchronous Consensus}, + institution = {Microsoft Research}, + year = {2004}, + number = {MSR-TR-2004-72} +} + +@techreport{Lam05:tr, + author = {Leslie Lamport}, + title = {Fast Paxos}, + institution = {Microsoft Research}, + year = {2005}, + number = {MSR-TR-2005-12} +} + +@techreport{Lam05:tr-33, + author = {Leslie Lamport}, + title = {Generalized Consensus and Paxos}, + institution = {Microsoft Research}, + year = {2005}, + number = {MSR-TR-2005-33} +} + +@Misc{Lam06:slides, + author = {Leslie Lamport}, + title = {Byzantine Paxos}, + howpublished = {Unpublished slides}, + year = {2006} +} + +@Article{Lam86:dc, + author = {Leslie Lamport}, + title = {On Interprocess Communication--Part I: Basic + Formalism, Part II: Algorithms}, + journal = DC, + year = 1986, + volume = 1, + number = 2, + pages = {77--101} +} + +@Article {Lam98:tcs, + author = {Leslie Lamport}, + title = {The part-time parliament}, + journal = ACMTCS, + year = 1998, + volume = 16, + number = 2, + month = may, + pages = {133-169}, +} + +@book{Lyn96:book, + author = {Nancy Lynch}, + title = {Distributed Algorithms}, + publisher = {Morgan Kaufmann}, + year = {1996}, +} + +@inproceedings{MA05:dsn, + author = {Martin, J.-P. and Alvisi, L. }, + title = {Fast Byzantine consensus}, + booktitle = DSN05, + pages = {402--411}, + year = {2005}, + month = jun, + organization = {IEEE}, +} + +@article{MA06:tdsc, + author = {Martin, J.-P. and Alvisi, L. 
}, + title = {Fast {B}yzantine Consensus}, + journal = {TDSC}, + year = {2006}, +} + +@InProceedings{MOZ05:dsn, + author = {Dahlia Malkhi and Florin Oprea and Lidong Zhou}, + title = {{$\Omega$} Meets Paxos: Leader Election and + Stability without Eventual Timely Links}, + booktitle = DSN05, + year = {2005} +} + +@inproceedings{MR00:podc, + author = "Achour Most{\'e}faoui and Michel Raynal", + title = "k-set agreement with limited accuracy failure + detectors", + booktitle = PODC00, + year = 2000, + pages = {143--152}, + location = {Portland, Oregon, United States}, + publisher = ACM +} + +@article{MR01:ppl, + author = "Achour Most{\'e}faoui and Michel Raynal", + title = "Leader-Based Consensus", + journal = PPL, + volume = 11, + number = 1, + year = 2001, + pages = {95--107} +} + +@techreport{OGS97:tr, + author = "Rui Oliveira and Rachid Guerraoui and {Andr\'e} + Schiper", + title = "Consensus in the crash-recover model", + number = "TR-97/239", + year = "1997" +} + +@article{PSL80:jacm, + author = {M. Pease and R. Shostak and L. Lamport}, + title = {Reaching Agreement in the Presence of Faults}, + journal = JACM, + volume = {27}, + number = {2}, + year = {1980}, + pages = {228--234}, + publisher = ACM, + address = ACMADDR, +} + +@article{ST87:jacm, + author = "T. K. Srikanth and Sam Toueg", + title = "Optimal clock synchronization", + journal = JACM, + volume = 34, + number = 3, + year = 1987, + pages = "626--645" +} + +@article{ST87:dc, + author = {T. K. 
Srikanth and Sam Toueg}, + title = {Simulating authenticated broadcasts to derive simple fault-tolerant algorithms}, + journal = DC, + volume = {2}, + number = {2}, + year = {1987}, + pages = {80-94} +} + + +@inproceedings{SW89:stacs, + author = {Santoro, Nicola and Widmayer, Peter}, + title = {Time is not a healer}, + booktitle = {Proc.\ 6th Annual Symposium on Theor.\ Aspects of + Computer Science (STACS'89)}, + publisher = "Springer-Verlag", + series = {LNCS}, + volume = "349", + address = "Paderborn, Germany", + pages = "304-313", + year = "1989", + month = feb, +} + +@inproceedings{SW90:sigal, + author = {Nicola Santoro and Peter Widmayer}, + title = {Distributed Function Evaluation in the Presence of + Transmission Faults.}, + booktitle = {SIGAL International Symposium on Algorithms}, + year = {1990}, + pages = {358-367} +} + +@inproceedings{SWR02:icdcs, + author = {Ulrich Schmid and Bettina Weiss and John Rushby}, + title = {Formally Verified Byzantine Agreement in Presence of + Link Faults}, + booktitle = "22nd International Conference on Distributed + Computing Systems (ICDCS'02)", + year = 2002, + month = jul # " 2-5, ", + pages = "608--616", + address = "Vienna, Austria", +} + +@incollection{Sch93a:mullender, + Author = {F. B. Schneider}, + Title = {What Good are Models and What Models are Good}, + BookTitle = {Distributed Systems}, + Year = {1993}, + Editor = {Sape Mullender}, + Publisher = {ACM Press}, + Pages = {169-197}, +} + +@article{VL96:ic, + author = {George Varghese and Nancy A. Lynch}, + title = {A Tradeoff Between Safety and Liveness for + Randomized Coordinated Attack.}, + journal = {Inf. 
Comput.}, + volume = {128}, + number = {1}, + year = 1996, + pages = {57--71} +} + +@inproceedings{WGWB07:dsn, + title = {Synchronous Consensus with Mortal Byzantines}, + author = {Josef Widder and Günther Gridling and Bettina Weiss + and Jean-Paul Blanquart}, + year = {2007}, + booktitle = DSN07, + publisher = IEEE +} + +@inproceedings{Wid03:disc, + author = {Josef Widder}, + title = {Booting clock Synchronization in Partially + Synchronous Systems}, + booktitle = DISC03, + year = {2003}, + pages = {121--135} +} + +@techreport{Zie04:tr, + author = {Piotr Zieli{\'n}ski}, + title = {Paxos at War}, + institution = {University of Cambridge}, + year = {2004}, + number = {UCAM-CL-TR-593}, +} + +@article{Lam78:cacm, + author = {Leslie Lamport}, + title = {Time, clocks, and the ordering of events in a + distributed system}, + journal = {Commun. ACM}, + year = {1978}, +} + +@Article{Gue06:cj, + author = {Guerraoui, R. and Raynal, M.}, + journal = {The {C}omputer {J}ournal}, + title = {The {A}lpha of {I}ndulgent {C}onsensus}, + year = {2006} +} + +@Article{Gue03:toc, + affiliation = {EPFL}, + author = {Guerraoui, Rachid and Raynal, Michel}, + journal = {{IEEE} {T}rans. on {C}omputers}, + title = {The {I}nformation {S}tructure of {I}ndulgent {C}onsensus}, + year = {2004}, +} + +@techreport{Cas00, + author = {Castro, Miguel}, + title = {Practical {B}yzantine Fault-Tolerance. {PhD} thesis}, + institution = {MIT}, + year = 2000, +} + +@inproceedings{SongRSD08:icdcn, + author = {Yee Jiun Song and + Robbert van Renesse and + Fred B. 
Schneider and + Danny Dolev}, + title = {The Building Blocks of Consensus}, + booktitle = {ICDCN}, + year = {2008}, +} + + +@inproceedings{BS09:icdcn, + author = {Borran, Fatemeh and Schiper, Andr{\'e}}, + + title = {A {L}eader-free {B}yzantine {C}onsensus {A}lgorithm}, + note = {To appear in ICDCN, 2010}, +} + + +@inproceedings{MHS09:opodis, + author = {Zarko Milosevic and Martin Hutle and Andr{\'e} + Schiper}, + title = {Unifying {B}yzantine Consensus Algorithms with {W}eak + {I}nteractive {C}onsistency}, + note = {To appear in OPODIS 2009}, +} + +@inproceedings{MRR:dsn02, + author = {Most\'{e}faoui, Achour and Rajsbaum, Sergio and Raynal, Michel}, + title = {A Versatile and Modular Consensus Protocol}, + booktitle = {DSN}, + year = {2002}, + } + +@article{MR98:dc, + author = {Dahlia Malkhi and + Michael K. Reiter}, + title = {Byzantine Quorum Systems}, + journal = {Distributed Computing}, + year = {1998}, +} + +@inproceedings{Rei:ccs94, + author = {Reiter, Michael K.}, + title = {Secure agreement protocols: reliable and atomic group multicast in rampart}, + booktitle = {CCS}, + year = {1994}, + pages = {68--80}, + numpages = {13} +} + + +@techreport{RMS09-tr, + author = {Olivier R\"utti and Zarko Milosevic and Andr\'e Schiper}, + title = {{G}eneric construction of consensus algorithm for benign and {B}yzantine faults}, + institution = {EPFL-IC}, + number = {LSR-REPORT-2009-005}, + year = 2009, +} + +@inproceedings{Li:srds07, + author = {Li, Harry C. and Clement, Allen and Aiyer, Amitanand S. and Alvisi, Lorenzo}, + title = {The Paxos Register}, + booktitle = {SRDS}, + year = {2007}, + } + + @article{Amir11:prime, + author = {Amir, Yair and Coan, Brian and Kirsch, Jonathan and Lane, John}, + title = {Prime: Byzantine Replication under Attack}, + journal = {IEEE Trans. Dependable Secur. 
Comput.}, + issue_date = {July 2011}, + volume = {8}, + number = {4}, + month = jul, + year = {2011}, + issn = {1545-5971}, + pages = {564--577}, + numpages = {14}, + publisher = {IEEE Computer Society Press}, + address = {Los Alamitos, CA, USA}, + keywords = {Performance under attack, Byzantine fault tolerance, replicated state machines, distributed systems.}, +} + +@inproceedings{Mao08:mencius, + author = {Mao, Yanhua and Junqueira, Flavio P. and Marzullo, Keith}, + title = {Mencius: building efficient replicated state machines for WANs}, + booktitle = {OSDI}, + year = {2008}, + pages = {369--384}, + numpages = {16} +} + +@article{Sch90:survey, + author = {Schneider, Fred B.}, + title = {Implementing fault-tolerant services using the state machine approach: a tutorial}, + journal = {ACM Comput. Surv.}, + volume = {22}, + number = {4}, + month = dec, + year = {1990} +} + + +@techreport{HT94:TR, + author = {Hadzilacos, Vassos and Toueg, Sam}, + title = {A Modular Approach to Fault-Tolerant Broadcasts and Related Problems}, + year = {1994}, + source = {http://www.ncstrl.org:8900/ncstrl/servlet/search?formname=detail\&id=oai%3Ancstrlh%3Acornellcs%3ACORNELLCS%3ATR94-1425}, + publisher = {Cornell University}, + address = {Ithaca, NY, USA}, +} + +@inproceedings{Ver09:spinning, + author = {Veronese, Giuliana Santos and Correia, Miguel and Bessani, Alysson Neves and Lung, Lau Cheuk}, + title = {Spin One's Wheels? Byzantine Fault Tolerance with a Spinning Primary}, + booktitle = {SRDS}, + year = {2009}, + numpages = {10} +} + +@inproceedings{Cle09:aardvark, + author = {Clement, Allen and Wong, Edmund and Alvisi, Lorenzo and Dahlin, Mike and Marchetti, Mirco}, + title = {Making Byzantine fault tolerant systems tolerate Byzantine faults}, + booktitle = {NSDI}, + year = {2009}, + pages = {153--168}, + numpages = {16} +} + +@inproceedings{Aiyer05:barB, + author = {Aiyer, Amitanand S. 
and Alvisi, Lorenzo and Clement, Allen and Dahlin, Mike and Martin, Jean-Philippe and Porth, Carl}, + title = {BAR fault tolerance for cooperative services}, + booktitle = {SOSP}, + year = {2005}, + pages = {45--58}, + numpages = {14} +} + +@inproceedings{Cach01:crypto, + author = {Cachin, Christian and Kursawe, Klaus and Petzold, Frank and Shoup, Victor}, + title = {Secure and Efficient Asynchronous Broadcast Protocols}, + booktitle = {CRYPTO}, + year = {2001}, + pages = {524--541}, + numpages = {18} +} + +@article{Moniz11:ritas, + author = {Moniz, Henrique and Neves, Nuno Ferreira and Correia, Miguel and Verissimo, Paulo}, + title = {RITAS: Services for Randomized Intrusion Tolerance}, + journal = {IEEE Trans. Dependable Secur. Comput.}, + volume = {8}, + number = {1}, + month = jan, + year = {2011}, + pages = {122--136}, + numpages = {15} +} + +@inproceedings{MHS11:jabc, + author = {Milosevic, Zarko and Hutle, Martin and Schiper, Andre}, + title = {On the Reduction of Atomic Broadcast to Consensus with Byzantine Faults}, + booktitle = {SRDS}, + year = {2011}, + pages = {235--244}, + numpages = {10} +} + +@incollection{DHSZ03, + author={Driscoll, Kevin and Hall, Brendan and Sivencrona, Håkan and Zumsteg, Phil}, + title={Byzantine Fault Tolerance, from Theory to Reality}, + year={2003}, + booktitle={Computer Safety, Reliability, and Security}, + volume={2788}, + pages={235--248} +} + +@inproceedings{RMES:dsn07, + author = {Olivier R{\"u}tti and + Sergio Mena and + Richard Ekwall and + Andr{\'e} Schiper}, + title = {On the Cost of Modularity in Atomic Broadcast}, + booktitle = {DSN}, + year = {2007}, + pages = {635-644} +} + +@article{Ben:jc92, + author = {Charles H. Bennett and + Fran\c{c}ois Bessette and + Gilles Brassard and + Louis Salvail and + John A. Smolin}, + title = {Experimental Quantum Cryptography}, + journal = {J. 
Cryptology}, + volume = {5}, + number = {1}, + year = {1992}, + pages = {3-28} +} + +@inproceedings{Aiyer:disc08, + author = {Aiyer, Amitanand S. and Alvisi, Lorenzo and Bazzi, Rida A. and Clement, Allen}, + title = {Matrix Signatures: From MACs to Digital Signatures in Distributed Systems}, + booktitle = {DISC}, + year = {2008}, + pages = {16--31}, + numpages = {16} +} + +@inproceedings{Biel13:dsn, + author = {Biely, Martin and Delgado, Pamela and Milosevic, Zarko and Schiper, Andr{\'e}}, + title = {Distal: A Framework for Implementing Fault-tolerant Distributed Algorithms}, + note = {To appear in DSN, 2013}, + year = 2013 +} + +@inproceedings{BS10:icdcn, + author = {Borran, Fatemeh and Schiper, Andr{\'e}}, + title = {A leader-free Byzantine consensus algorithm}, + booktitle = {ICDCN}, + year = {2010}, + pages = {67--78}, + numpages = {12} +} + +@article{Cor06:cj, + author = {Correia, Miguel and Neves, Nuno Ferreira and Ver\'{\i}ssimo, Paulo}, + title = {From Consensus to Atomic Broadcast: Time-Free Byzantine-Resistant Protocols without Signatures}, + journal = {Comput. J.}, + volume = {49}, + number = {1}, + year = {2006}, + pages = {82--96}, + numpages = {15} +} + +@inproceedings{RMS10:dsn, + author = {Olivier R{\"u}tti and + Zarko Milosevic and + Andr{\'e} Schiper}, + title = {Generic construction of consensus algorithms for benign + and Byzantine faults}, + booktitle = {DSN}, + year = {2010}, + pages = {343-352} +} + + + +@inproceedings{HKJR:usenix10, + author = {Hunt, Patrick and Konar, Mahadev and Junqueira, Flavio P. 
and Reed, Benjamin}, + title = {ZooKeeper: wait-free coordination for internet-scale systems}, + OPTbooktitle = {Proceedings of the 2010 USENIX conference on USENIX annual technical conference}, + booktitle = {USENIXATC}, + year = {2010}, + OPTlocation = {Boston, MA}, + pages = {11}, + numpages = {1}, + OPTurl = {http://dl.acm.org/citation.cfm?id=1855840.1855851}, + acmid = {1855851}, + OPTpublisher = {USENIX Association}, + OPTaddress = {Berkeley, CA, USA}, +} + +@inproceedings{Bur:osdi06, + author = {Burrows, Mike}, + title = {The Chubby lock service for loosely-coupled distributed systems}, + booktitle = {OSDI}, + year = {2006}, + pages = {335--350}, + numpages = {16}, +} + +@INPROCEEDINGS{Mao09:hotdep, + author = {Yanhua Mao and Flavio P. Junqueira and Keith Marzullo}, + title = {Towards low latency state machine replication for uncivil wide-area networks}, + booktitle = {HotDep}, + year = {2009} +} + +@inproceedings{Chun07:a2m, + author = {Chun, Byung-Gon and Maniatis, Petros and Shenker, Scott and Kubiatowicz, John}, + title = {Attested append-only memory: making adversaries stick to their word}, + booktitle = {SOSP}, + year = {2007}, + pages = {189--204}, + numpages = {16} +} + +@TECHREPORT{MBS:epfltr, + author = {Zarko Milosevic and Martin Biely and Andr\'e Schiper}, + title = {Bounded {D}elay in {B}yzantine {T}olerant {S}tate {M}achine {R}eplication}, + year = 2013, + month = apr, + institution = {EPFL}, + number = {185962}, +} + +@book{BH09:datacenter, + author = {Barroso, Luiz Andre and Hoelzle, Urs}, + title = {The Datacenter as a Computer: An Introduction to the Design of Warehouse-Scale Machines}, + year = {2009}, + isbn = {159829556X, 9781598295566}, + edition = {1st}, + publisher = {Morgan and Claypool Publishers}, +} + +@inproceedings{Kir11:csiirw, + author = {Kirsch, Jonathan and Goose, Stuart and Amir, Yair and Skare, Paul}, + title = {Toward survivable SCADA}, + booktitle = {CSIIRW}, + year = {2011}, + pages = {21:1--21:1}, + articleno = {21}, 
+ numpages = {1} +} + +@inproceedings{Ongaro14:raft, + author = {Ongaro, Diego and Ousterhout, John}, + title = {In Search of an Understandable Consensus Algorithm}, + booktitle = {Proceedings of the 2014 USENIX Conference on USENIX Annual Technical Conference}, + series = {USENIX ATC'14}, + year = {2014}, + isbn = {978-1-931971-10-2}, + location = {Philadelphia, PA}, + pages = {305--320}, + numpages = {16}, + url = {http://dl.acm.org/citation.cfm?id=2643634.2643666}, + acmid = {2643666}, + publisher = {USENIX Association}, + address = {Berkeley, CA, USA}, +} + +@article{GLR17:red-belly-bc, + author = {Tyler Crain and + Vincent Gramoli and + Mikel Larrea and + Michel Raynal}, + title = {Leader/Randomization/Signature-free Byzantine Consensus for Consortium + Blockchains}, + journal = {CoRR}, + volume = {abs/1702.03068}, + year = {2017}, + url = {http://arxiv.org/abs/1702.03068}, + archivePrefix = {arXiv}, + eprint = {1702.03068}, + timestamp = {Wed, 07 Jun 2017 14:41:08 +0200}, + biburl = {http://dblp.org/rec/bib/journals/corr/CrainGLR17}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + + +@misc{Nak2012:bitcoin, + added-at = {2014-04-17T08:33:06.000+0200}, + author = {Nakamoto, Satoshi}, + biburl = {https://www.bibsonomy.org/bibtex/23db66df0fc9fa2b5033f096a901f1c36/ngnn}, + interhash = {423c2cdff70ba0cd0bca55ebb164d770}, + intrahash = {3db66df0fc9fa2b5033f096a901f1c36}, + keywords = {imported}, + timestamp = {2014-04-17T08:33:06.000+0200}, + title = {Bitcoin: A peer-to-peer electronic cash system}, + url = {http://www.bitcoin.org/bitcoin.pdf}, + year = 2009 +} + +@misc{But2014:ethereum, + author = {Vitalik Buterin}, + title = {Ethereum: A next-generation smart contract and decentralized application platform}, + year = {2014}, + howpublished = {\url{https://github.com/ethereum/wiki/wiki/White-Paper}}, + note = {Accessed: 2018-07-11}, + url = {https://github.com/ethereum/wiki/wiki/White-Paper}, +} + +@inproceedings{Dem1987:gossip, + author = 
{Demers, Alan and Greene, Dan and Hauser, Carl and Irish, Wes and Larson, John and Shenker, Scott and Sturgis, Howard and Swinehart, Dan and Terry, Doug}, + title = {Epidemic Algorithms for Replicated Database Maintenance}, + booktitle = {Proceedings of the Sixth Annual ACM Symposium on Principles of Distributed Computing}, + series = {PODC '87}, + year = {1987}, + isbn = {0-89791-239-X}, + location = {Vancouver, British Columbia, Canada}, + pages = {1--12}, + numpages = {12}, + url = {http://doi.acm.org/10.1145/41840.41841}, + doi = {10.1145/41840.41841}, + acmid = {41841}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + +@article{Gue2018:sbft, + author = {Guy Golan{-}Gueta and + Ittai Abraham and + Shelly Grossman and + Dahlia Malkhi and + Benny Pinkas and + Michael K. Reiter and + Dragos{-}Adrian Seredinschi and + Orr Tamir and + Alin Tomescu}, + title = {{SBFT:} a Scalable Decentralized Trust Infrastructure for Blockchains}, + journal = {CoRR}, + volume = {abs/1804.01626}, + year = {2018}, + url = {http://arxiv.org/abs/1804.01626}, + archivePrefix = {arXiv}, + eprint = {1804.01626}, + timestamp = {Tue, 01 May 2018 19:46:29 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/abs-1804-01626}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{BLS2001:crypto, + author = {Boneh, Dan and Lynn, Ben and Shacham, Hovav}, + title = {Short Signatures from the Weil Pairing}, + booktitle = {Proceedings of the 7th International Conference on the Theory and Application of Cryptology and Information Security: Advances in Cryptology}, + series = {ASIACRYPT '01}, + year = {2001}, + isbn = {3-540-42987-5}, + pages = {514--532}, + numpages = {19}, + url = {http://dl.acm.org/citation.cfm?id=647097.717005}, + acmid = {717005}, + publisher = {Springer-Verlag}, + address = {Berlin, Heidelberg}, +} + + diff --git a/spec/consensus/consensus-paper/paper.tex b/spec/consensus/consensus-paper/paper.tex new file mode 100644 index 
0000000000..22f8b405fc --- /dev/null +++ b/spec/consensus/consensus-paper/paper.tex @@ -0,0 +1,153 @@ +%\documentclass[conference]{IEEEtran} +\documentclass[conference,onecolumn,draft,a4paper]{IEEEtran} +% Add the compsoc option for Computer Society conferences. +% +% If IEEEtran.cls has not been installed into the LaTeX system files, +% manually specify the path to it like: +% \documentclass[conference]{../sty/IEEEtran} + + + +% *** GRAPHICS RELATED PACKAGES *** +% +\ifCLASSINFOpdf +\else +\fi + +% correct bad hyphenation here +\hyphenation{op-tical net-works semi-conduc-tor} + +%\usepackage[caption=false,font=footnotesize]{subfig} +\usepackage{tikz} +\usetikzlibrary{decorations,shapes,backgrounds,calc} +\tikzstyle{msg}=[->,black,>=latex] +\tikzstyle{rubber}=[|<->|] +\tikzstyle{announce}=[draw=blue,fill=blue,shape=diamond,right,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt] +\tikzstyle{decide}=[draw=red,fill=red,shape=isosceles triangle,right,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt,shape border rotate=90] +\tikzstyle{cast}=[draw=green!50!black,fill=green!50!black,shape=circle,left,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt] + + +\usepackage{multirow} +\usepackage{graphicx} +\usepackage{epstopdf} +\usepackage{amssymb} +\usepackage{rounddiag} +\graphicspath{{../}} + +\usepackage{technote} +\usepackage{homodel} +\usepackage{enumerate} +%%\usepackage{ulem}\normalem + +% to center caption +\usepackage{caption} + +\newcommand{\textstretch}{1.4} +\newcommand{\algostretch}{1} +\newcommand{\eqnstretch}{0.5} + +\newconstruct{\FOREACH}{\textbf{for each}}{\textbf{do}}{\ENDFOREACH}{} + +%\newconstruct{\ON}{\textbf{on}}{\textbf{do}}{\ENDON}{\textbf{end on}} +\newcommand\With{\textbf{while}} +\newcommand\From{\textbf{from}} +\newcommand\Broadcast{\textbf{broadcast}} +\newcommand\PBroadcast{send} +\newcommand\UpCall{\textbf{UpCall}} +\newcommand\DownCall{\textbf{DownCall}} +\newcommand \Call{\textbf{Call}} +\newident{noop} 
+\newconstruct{\UPON}{\textbf{upon}}{\textbf{do}}{\ENDUPON}{} + + + +\newcommand{\abcast}{\mathsf{to\mbox{\sf-}broadcast}} +\newcommand{\adeliver}{\mathsf{to\mbox{\sf-}deliver}} + +\newcommand{\ABCAgreement}{\emph{TO-Agreement}} +\newcommand{\ABCIntegrity}{\emph{TO-Integrity}} +\newcommand{\ABCValidity}{\emph{TO-Validity}} +\newcommand{\ABCTotalOrder}{\emph{TO-Order}} +\newcommand{\ABCBoundedDelivery}{\emph{TO-Bounded Delivery}} + + +\newcommand{\tabc}{\mathit{atab\mbox{\sf-}cast}} +\newcommand{\anno}{\mathit{atab\mbox{\sf-}announce}} +\newcommand{\abort}{\mathit{atab\mbox{\sf-}abort}} +\newcommand{\tadel}{\mathit{atab\mbox{\sf-}deliver}} + +\newcommand{\ATABAgreement}{\emph{ATAB-Agreement}} +\newcommand{\ATABAbort}{\emph{ATAB-Abort}} +\newcommand{\ATABIntegrity}{\emph{ATAB-Integrity}} +\newcommand{\ATABValidity}{\emph{ATAB-Validity}} +\newcommand{\ATABAnnounce}{\emph{ATAB-Announcement}} +\newcommand{\ATABTermination}{\emph{ATAB-Termination}} +%\newcommand{\ATABFastAnnounce}{\emph{ATAB-Fast-Announcement}} + +%% Command for observations. 
+\newtheorem{observation}{Observation} + + +%% HO ALGORITHM DEFINITIONS +\newconstruct{\FUNCTION}{\textbf{Function}}{\textbf{:}}{\ENDFUNCTION}{} + +%% Uncomment the following four lines to remove remarks and visible traces of +%% modifications in the document +%%\renewcommand{\sout}[1]{\relaxx} +%%\renewcommand{\uline}[1]{#1} +%% \renewcommand{\uwave}[1]{#1} + \renewcommand{\note}[2][default]{\relax} + + +%% The following commands can be used to generate TR or Conference version of the paper +\newcommand{\tr}[1]{} +\renewcommand{\tr}[1]{#1} +\newcommand{\onlypaper}[1]{#1} +%\renewcommand{\onlypaper}[1]{} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%\pagestyle{plain} +%\pagestyle{empty} + +%% IEEE tweaks +%\setlength{\IEEEilabelindent}{.5\parindent} +%\setlength{\IEEEiednormlabelsep}{.5\parindent} + +\begin{document} +% +% paper title +% can use linebreaks \\ within to get better formatting as desired +\title{The latest gossip on BFT consensus\vspace{-0.7\baselineskip}} + + + +\author{\IEEEauthorblockN{\large Ethan Buchman, Jae Kwon and Zarko Milosevic\\} + \IEEEauthorblockN{\large Tendermint}\\ + %\\\vspace{-0.5\baselineskip} + \IEEEauthorblockN{September 24, 2018} +} + +% make the title area +\maketitle +\vspace*{0.5em} + +\begin{abstract} +This paper presents Tendermint, a new protocol for ordering events in a distributed network under adversarial conditions. More commonly known as Byzantine Fault Tolerant (BFT) consensus or atomic broadcast, the problem has attracted significant attention in recent years due to the widespread success of blockchain-based digital currencies, such as Bitcoin and Ethereum, which successfully solved the problem in a public setting without a central authority. Tendermint modernizes classic academic work on the subject and simplifies the design of the BFT algorithm by relying on a peer-to-peer gossip protocol among nodes. 
+\end{abstract} + +%\noindent \textbf{Keywords:} Blockchain, Byzantine Fault Tolerance, State Machine %Replication + +\input{intro} +\input{definitions} +\input{consensus} +\input{proof} +\input{conclusion} + +\bibliographystyle{IEEEtran} +\bibliography{lit} + +%\appendix + +\end{document} diff --git a/spec/consensus/consensus-paper/proof.tex b/spec/consensus/consensus-paper/proof.tex new file mode 100644 index 0000000000..1c84d9b11e --- /dev/null +++ b/spec/consensus/consensus-paper/proof.tex @@ -0,0 +1,280 @@ +\section{Proof of Tendermint consensus algorithm} \label{sec:proof} + +\begin{lemma} \label{lemma:majority-intersection} For all $f\geq 0$, any two +sets of processes with voting power at least equal to $2f+1$ have at least one +correct process in common. \end{lemma} + +\begin{proof} As the total voting power is equal to $n=3f+1$, we have $2(2f+1) + = n+f+1$. This means that the intersection of two sets with the voting + power equal to $2f+1$ contains at least $f+1$ voting power in common, \ie, + at least one correct process (as the total voting power of faulty processes + is $f$). The result follows directly from this. \end{proof} + +\begin{lemma} \label{lemma:locked-decision_value-prevote-v} If $f+1$ correct +processes lock value $v$ in round $r_0$ ($lockedValue = v$ and $lockedRound = +r_0$), then in all rounds $r > r_0$, they send $\Prevote$ for $id(v)$ or +$\nil$. \end{lemma} + +\begin{proof} We prove the result by induction on $r$. + +\emph{Base step $r = r_0 + 1:$} Let's denote with $C$ the set of correct +processes with voting power equal to $f+1$. By the rules at +line~\ref{line:tab:recvProposal} and line~\ref{line:tab:acceptProposal}, the +processes from the set $C$ can't accept $\Proposal$ for any value different +from $v$ in round $r$, and therefore can't send a $\li{\Prevote,height_p, +r,id(v')}$ message, if $v' \neq v$. Therefore, the Lemma holds for the base +step. 
+ +\emph{Induction step from $r_1$ to $r_1+1$:} We assume that no process from the +set $C$ has sent $\Prevote$ for values different than $id(v)$ or $\nil$ until +round $r_1 + 1$. We now prove that the Lemma also holds for round $r_1 + 1$. As +processes from the set $C$ send $\Prevote$ for $id(v)$ or $\nil$ in rounds $r_0 +\le r \le r_1$, by Lemma~\ref{lemma:majority-intersection} there is no value +$v' \neq v$ for which it is possible to receive $2f+1$ $\Prevote$ messages in +those rounds (i). Therefore, we have for all processes from the set $C$, +$lockedValue = v$ and $lockedRound \ge r_0$. Let's assume by a contradiction +that a process $q$ from the set $C$ sends $\Prevote$ in round $r_1 + 1$ for +value $id(v')$, where $v' \neq v$. This is possible only by +line~\ref{line:tab:prevote-higher-proposal}. Note that this implies that $q$ +received $2f+1$ $\li{\Prevote,h_q, r,id(v')}$ messages, where $r > r_0$ and $r +< r_1 +1$ (see line~\ref{line:tab:cond-prevote-higher-proposal}). A +contradiction with (i) and Lemma~\ref{lemma:majority-intersection}. +\end{proof} + +\begin{lemma} \label{lemma:agreement} Algorithm~\ref{alg:tendermint} satisfies +Agreement. \end{lemma} + +\begin{proof} Let round $r_0$ be the first round of height $h$ such that some + correct process $p$ decides $v$. We now prove that if some correct process + $q$ decides $v'$ in some round $r \ge r_0$, then $v = v'$. + +In case $r = r_0$, $q$ has received at least $2f+1$ +$\li{\Precommit,h_p,r_0,id(v')}$ messages at line~\ref{line:tab:onDecideRule}, +while $p$ has received at least $2f+1$ $\li{\Precommit,h_p,r_0,id(v)}$ +messages. By Lemma~\ref{lemma:majority-intersection} two sets of messages of +voting power $2f+1$ intersect in at least one correct process. As a correct +process sends a single $\Precommit$ message in a round, then $v=v'$. + +We prove the case $r > r_0$ by contradiction. 
By the +rule~\ref{line:tab:onDecideRule}, $p$ has received at least $2f+1$ voting-power +equivalent of $\li{\Precommit,h_p,r_0,id(v)}$ messages, i.e., at least $f+1$ +voting-power equivalent correct processes have locked value $v$ in round $r_0$ and have +sent those messages (i). Let denote this set of messages with $C$. On the +other side, $q$ has received at least $2f+1$ voting power equivalent of +$\li{\Precommit,h_q, r,id(v')}$ messages. As the voting power of all faulty +processes is at most $f$, some correct process $c$ has sent one of those +messages. By the rule at line~\ref{line:tab:recvPrevote}, $c$ has locked value +$v'$ in round $r$ before sending $\li{\Precommit,h_q, r,id(v')}$. Therefore $c$ +has received $2f+1$ $\Prevote$ messages for $id(v')$ in round $r > r_0$ (see +line~\ref{line:tab:recvPrevote}). By Lemma~\ref{lemma:majority-intersection}, a +process from the set $C$ has sent $\Prevote$ message for $id(v')$ in round $r$. +A contradiction with (i) and Lemma~\ref{lemma:locked-decision_value-prevote-v}. +\end{proof} + +\begin{lemma} \label{lemma:agreement} Algorithm~\ref{alg:tendermint} satisfies +Validity. \end{lemma} + +\begin{proof} Trivially follows from the rule at line +\ref{line:tab:validDecisionValue} which ensures that only valid values can be +decided. \end{proof} + +\begin{lemma} \label{lemma:round-synchronisation} If we assume that: +\begin{enumerate} + \item a correct process $p$ is the first correct process to + enter a round $r>0$ at time $t > GST$ (for every correct process + $c$, $round_c \le r$ at time $t$) + \item the proposer of round $r$ is + a correct process $q$ + \item for every correct process $c$, + $lockedRound_c \le validRound_q$ at time $t$ + \item $\timeoutPropose(r) + > 2\Delta + \timeoutPrecommit(r-1)$, $\timeoutPrevote(r) > 2\Delta$ and + $\timeoutPrecommit(r) > 2\Delta$, +\end{enumerate} +then all correct processes decide in round $r$ before $t + 4\Delta + + \timeoutPrecommit(r-1)$. 
+\end{lemma} + +\begin{proof} As $p$ is the first correct process to enter round $r$, it + executed the line~\ref{line:tab:nextRound} after $\timeoutPrecommit(r-1)$ + expired. Therefore, $p$ received $2f+1$ $\Precommit$ messages in the round + $r-1$ before time $t$. By the \emph{Gossip communication} property, all + correct processes will receive those messages the latest at time $t + + \Delta$. Correct processes that are in rounds $< r-1$ at time $t$ will + enter round $r-1$ (see the rule at line~\ref{line:tab:nextRound2}) and + trigger $\timeoutPrecommit(r-1)$ (see rule~\ref{line:tab:startTimeoutPrecommit}) + by time $t+\Delta$. Therefore, all correct processes will start round $r$ + by time $t+\Delta+\timeoutPrecommit(r-1)$ (i). + +In the worst case, the process $q$ is the last correct process to enter round +$r$, so $q$ starts round $r$ and sends $\Proposal$ message for some value $v$ +at time $t + \Delta + \timeoutPrecommit(r-1)$. Therefore, all correct processes +receive the $\Proposal$ message from $q$ the latest by time $t + 2\Delta + +\timeoutPrecommit(r-1)$. Therefore, if $\timeoutPropose(r) > 2\Delta + +\timeoutPrecommit(r-1)$, all correct processes will receive $\Proposal$ message +before $\timeoutPropose(r)$ expires. + +By (3) and the rules at line~\ref{line:tab:recvProposal} and +\ref{line:tab:acceptProposal}, all correct processes will accept the +$\Proposal$ message for value $v$ and will send a $\Prevote$ message for +$id(v)$ by time $t + 2\Delta + \timeoutPrecommit(r-1)$. Note that by the +\emph{Gossip communication} property, the $\Prevote$ messages needed to trigger +the rule at line~\ref{line:tab:acceptProposal} are received before time $t + +\Delta$. + +By time $t + 3\Delta + \timeoutPrecommit(r-1)$, all correct processes will receive +$\Proposal$ for $v$ and $2f+1$ corresponding $\Prevote$ messages for $id(v)$. 
+By the rule at line~\ref{line:tab:recvPrevote}, all correct processes will send
+a $\Precommit$ message (see line~\ref{line:tab:precommit-v}) for $id(v)$ by
+time $t + 3\Delta + \timeoutPrecommit(r-1)$. Therefore, by time $t + 4\Delta +
+\timeoutPrecommit(r-1)$, all correct processes will have received the $\Proposal$
+for $v$ and $2f+1$ $\Precommit$ messages for $id(v)$, so they decide at
+line~\ref{line:tab:decide} on $v$.
+
+This scenario holds if every correct process $q$ sends a $\Precommit$ message
+before $\timeoutPrevote(r)$ expires, and if $\timeoutPrecommit(r)$ does not expire
+before $t + 4\Delta + \timeoutPrecommit(r-1)$. Let's assume that a correct process
+$c_1$ is the first correct process to trigger $\timeoutPrevote(r)$ (see the rule
+at line~\ref{line:tab:recvAny2/3Prevote}) at time $t_1 > t$. This implies that
+before time $t_1$, $c_1$ received a $\Proposal$ ($step_{c_1}$ must be
+$\prevote$ by the rule at line~\ref{line:tab:recvAny2/3Prevote}) and a set of
+$2f+1$ $\Prevote$ messages. By time $t_1 + \Delta$, all correct processes will
+receive those messages. Note that even if some correct process was in a
+smaller round before time $t_1$, at time $t_1 + \Delta$ it will start round $r$
+after receiving those messages (see the rule at
+line~\ref{line:tab:skipRounds}). Therefore, all correct processes will send
+their $\Prevote$ message for $id(v)$ by time $t_1 + \Delta$, and all correct
+processes will receive those messages by time $t_1 + 2\Delta$. Therefore,
+as $\timeoutPrevote(r) > 2\Delta$, this ensures that all correct processes receive
+$\Prevote$ messages from all correct processes before their respective local
+$\timeoutPrevote(r)$ expire.
+
+On the other hand, $\timeoutPrecommit(r)$ is triggered in a correct process $c_2$
+after it receives any set of $2f+1$ $\Precommit$ messages for the first time.
+Let's denote with $t_2 > t$ the earliest point in time $\timeoutPrecommit(r)$ is
+triggered in some correct process $c_2$. 
This implies that $c_2$ has received +at least $f+1$ $\Precommit$ messages for $id(v)$ from correct processes, i.e., +those processes have received $\Proposal$ for $v$ and $2f+1$ $\Prevote$ +messages for $id(v)$ before time $t_2$. By the \emph{Gossip communication} +property, all correct processes will receive those messages by time $t_2 + +\Delta$, and will send $\Precommit$ messages for $id(v)$. Note that even if +some correct processes were at time $t_2$ in a round smaller than $r$, by the +rule at line~\ref{line:tab:skipRounds} they will enter round $r$ by time $t_2 + +\Delta$. Therefore, by time $t_2 + 2\Delta$, all correct processes will +receive $\Proposal$ for $v$ and $2f+1$ $\Precommit$ messages for $id(v)$. So if +$\timeoutPrecommit(r) > 2\Delta$, all correct processes will decide before the +timeout expires. \end{proof} + + +\begin{lemma} \label{lemma:validValue} If a correct process $p$ locks a value + $v$ at time $t_0 > GST$ in some round $r$ ($lockedValue = v$ and + $lockedRound = r$) and $\timeoutPrecommit(r) > 2\Delta$, then all correct + processes set $validValue$ to $v$ and $validRound$ to $r$ before starting + round $r+1$. \end{lemma} + +\begin{proof} In order to prove this Lemma, we need to prove that if the + process $p$ locks a value $v$ at time $t_0$, then no correct process will + leave round $r$ before time $t_0 + \Delta$ (unless it has already set + $validValue$ to $v$ and $validRound$ to $r$). It is sufficient to prove + this, since by the \emph{Gossip communication} property the messages that + $p$ received at time $t_0$ and that triggered rule at + line~\ref{line:tab:recvPrevote} will be received by time $t_0 + \Delta$ by + all correct processes, so all correct processes that are still in round $r$ + will set $validValue$ to $v$ and $validRound$ to $r$ (by the rule at + line~\ref{line:tab:recvPrevote}). 
To prove this, we need to compute the + earliest point in time a correct process could leave round $r$ without + updating $validValue$ to $v$ and $validRound$ to $r$ (we denote this time + with $t_1$). The Lemma is correct if $t_0 + \Delta < t_1$. + +If the process $p$ locks a value $v$ at time $t_0$, this implies that $p$ +received the valid $\Proposal$ message for $v$ and $2f+1$ +$\li{\Prevote,h,r,id(v)}$ at time $t_0$. At least $f+1$ of those messages are +sent by correct processes. Let's denote this set of correct processes as $C$. By +Lemma~\ref{lemma:majority-intersection} any set of $2f+1$ $\Prevote$ messages +in round $r$ contains at least a single message from the set $C$. + +Let's denote as time $t$ the earliest point in time a correct process, $c_1$, triggered +$\timeoutPrevote(r)$. This implies that $c_1$ received $2f+1$ $\Prevote$ messages +(see the rule at line \ref{line:tab:recvAny2/3Prevote}), where at least one of +those messages was sent by a process $c_2$ from the set $C$. Therefore, process +$c_2$ had received $\Proposal$ message before time $t$. By the \emph{Gossip +communication} property, all correct processes will receive $\Proposal$ and +$2f+1$ $\Prevote$ messages for round $r$ by time $t+\Delta$. The latest point +in time $p$ will trigger $\timeoutPrevote(r)$ is $t+\Delta$\footnote{Note that +even if $p$ was in smaller round at time $t$ it will start round $r$ by time +$t+\Delta$.}. So the latest point in time $p$ can lock the value $v$ in +round $r$ is $t_0 = t+\Delta+\timeoutPrevote(r)$ (as at this point +$\timeoutPrevote(r)$ expires, so a process sends $\Precommit$ $\nil$ and updates +$step$ to $\precommit$, see line \ref{line:tab:onTimeoutPrevote}). + +Note that according to the Algorithm \ref{alg:tendermint}, a correct process +can not send a $\Precommit$ message before receiving $2f+1$ $\Prevote$ +messages. Therefore, no correct process can send a $\Precommit$ message in +round $r$ before time $t$. 
If a correct process sends a $\Precommit$ message
+for $\nil$, it implies that it has waited for the full duration of
+$\timeoutPrevote(r)$ (see line
+\ref{line:tab:precommit-nil-onTimeout})\footnote{The other case in which a
+correct process $\Precommit$ for $\nil$ is after receiving $2f+1$ $\Prevote$ for
+$\nil$ messages, see the line \ref{line:tab:precommit-v-1}. By
+Lemma~\ref{lemma:majority-intersection}, this is not possible in round $r$.}.
+Therefore, no correct process can send $\Precommit$ for $\nil$ before time $t +
+\timeoutPrevote(r)$ (*).
+
+A correct process $q$ that enters round $r+1$ must either (i) wait
+$\timeoutPrecommit(r)$ (see line \ref{line:tab:nextRound}) or (ii) receive $f+1$
+messages from the round $r+1$ (see the line \ref{line:tab:skipRounds}). In the former case, $q$
+receives $2f+1$ $\Precommit$ messages before starting $\timeoutPrecommit(r)$. If
+at least a single $\Precommit$ message from a correct process (at least $f+1$
+voting power equivalent of those messages is sent by correct processes) is for
+$\nil$, then $q$ cannot start round $r+1$ before time $t_1 = t +
+\timeoutPrevote(r) + \timeoutPrecommit(r)$ (see (*)). Therefore in this case we have:
+$t_0 + \Delta < t_1$, i.e., $t+2\Delta+\timeoutPrevote(r) < t + \timeoutPrevote(r) +
+\timeoutPrecommit(r)$, and this is true whenever $\timeoutPrecommit(r) > 2\Delta$, so
+the Lemma holds in this case.
+
+If in the set of $2f+1$ $\Precommit$ messages $q$ receives, there is at least a
+single $\Precommit$ for $id(v)$ message from a correct process $c$, then $q$
+can start the round $r+1$ the earliest at time $t_1 = t+\timeoutPrecommit(r)$. In
+this case, by the \emph{Gossip communication} property, all correct processes
+will receive $\Proposal$ and $2f+1$ $\Prevote$ messages (that $c$ received
+before time $t$) the latest at time $t+\Delta$. Therefore, $q$ will set
+$validValue$ to $v$ and $validRound$ to $r$ the latest at time $t+\Delta$. 
As
+$t+\Delta < t+\timeoutPrecommit(r)$, whenever $\timeoutPrecommit(r) > \Delta$, the
+Lemma holds also in this case.
+
+In case (ii), $q$ received at least a single message from a correct process $c$
+from the round $r+1$. The earliest point in time $c$ could have started round
+$r+1$ is $t+\timeoutPrecommit(r)$ in case it received a $\Precommit$ message for
+$v$ from some correct process in the set of $2f+1$ $\Precommit$ messages it
+received. The same reasoning as above holds also in this case, so $q$ sets
+$validValue$ to $v$ and $validRound$ to $r$ the latest by time $t+\Delta$. As
+$t+\Delta < t+\timeoutPrecommit(r)$, whenever $\timeoutPrecommit(r) > \Delta$, the
+Lemma holds also in this case. \end{proof}
+
+\begin{lemma} \label{lemma:termination} Algorithm~\ref{alg:tendermint} satisfies
+Termination. \end{lemma}
+
+\begin{proof} Lemma~\ref{lemma:round-synchronisation} defines a scenario in
+ which all correct processes decide. We now prove that within a bounded
+ duration after GST such a scenario will unfold. Let's assume that at time
+ $GST$ the highest round started by a correct process is $r_0$, and that
+ there exists a correct process $p$ such that the following holds: for every
+ correct process $c$, $lockedRound_c \le validRound_p$. Furthermore, we
+ assume that $p$ will be the proposer in some round $r_1 > r_0$ (this is
+ ensured by the $\coord$ function).
+
+We have two cases to consider. In the first case, for all rounds $r \ge r_0$
+and $r < r_1$, no correct process locks a value (set $lockedRound$ to $r$). So
+in round $r_1$ we have the scenario from the
+Lemma~\ref{lemma:round-synchronisation}, so all correct processes decide in
+round $r_1$.
+
+In the second case, a correct process locks a value $v$ in round $r_2$, where
+$r_2 \ge r_0$ and $r_2 < r_1$. Let's assume that $r_2$ is the highest round
+before $r_1$ in which some correct process $q$ locks a value. 
By Lemma +\ref{lemma:validValue} at the end of round $r_2$ the following holds for all +correct processes $c$: $validValue_c = lockedValue_q$ and $validRound_c = r_2$. +Then in round $r_1$, the conditions for the +Lemma~\ref{lemma:round-synchronisation} holds, so all correct processes decide. +\end{proof} + diff --git a/spec/consensus/consensus-paper/rounddiag.sty b/spec/consensus/consensus-paper/rounddiag.sty new file mode 100644 index 0000000000..a6ca5d8835 --- /dev/null +++ b/spec/consensus/consensus-paper/rounddiag.sty @@ -0,0 +1,62 @@ +% ROUNDDIAG STYLE +% for LaTeX version 2e +% by -- 2008 Martin Hutle +% +% This style file is free software; you can redistribute it and/or +% modify it under the terms of the GNU Lesser General Public +% License as published by the Free Software Foundation; either +% version 2 of the License, or (at your option) any later version. +% +% This style file is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% Lesser General Public License for more details. +% +% You should have received a copy of the GNU Lesser General Public +% License along with this style file; if not, write to the +% Free Software Foundation, Inc., 59 Temple Place - Suite 330, +% Boston, MA 02111-1307, USA. 
+% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{rounddiag} +\typeout{Document Style `rounddiag' - provides simple round diagrams} +% +\RequirePackage{ifthen} +\RequirePackage{calc} +\RequirePackage{tikz} + +\def\rdstretch{3} + +\tikzstyle{msg}=[->,thick,>=latex] +\tikzstyle{rndline}=[dotted] +\tikzstyle{procline}=[dotted] + +\newenvironment{rounddiag}[2]{ +\begin{center} +\begin{tikzpicture} +\foreach \i in {1,...,#1}{ + \draw[procline] (0,#1-\i) node[xshift=-1em]{$p_{\i}$} -- (#2*\rdstretch+1,#1-\i); +} +\foreach \i in {0,...,#2}{ + \draw[rndline] (\i*\rdstretch+0.5,0) -- (\i*\rdstretch+0.5,#1-1); +} +\newcommand{\rdat}[2]{ + (##2*\rdstretch+0.5,#1-##1) +}% +\newcommand{\round}[2]{% + \def\rdround{##1} + \ifthenelse{\equal{##2}{}}{}{ + \node[yshift=-1em] at ({##1*\rdstretch+0.5-0.5*\rdstretch},0) {##2}; + } +}% +\newcommand{\rdmessage}[3]{\draw[msg] + (\rdround*\rdstretch-\rdstretch+0.5,#1-##1) -- node[yshift=1.2ex]{##3} + (\rdround*\rdstretch+0.5,#1-##2);}% +\newcommand{\rdalltoall}{% + \foreach \i in {1,...,#1}{ + \foreach \j in {1,...,#1}{ + { \rdmessage{\i}{\j}{}}}}}% +}{% +\end{tikzpicture} +\end{center} +} diff --git a/spec/consensus/consensus-paper/technote.sty b/spec/consensus/consensus-paper/technote.sty new file mode 100644 index 0000000000..5353f13cd3 --- /dev/null +++ b/spec/consensus/consensus-paper/technote.sty @@ -0,0 +1,118 @@ +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{technote}[2007/11/09] +\typeout{Template for quick notes with some useful definitions} + +\RequirePackage{ifthen} +\RequirePackage{calc} +\RequirePackage{amsmath,amssymb,amsthm} +\RequirePackage{epsfig} +\RequirePackage{algorithm} +\RequirePackage[noend]{algorithmicplus} + +\newboolean{technote@noedit} +\setboolean{technote@noedit}{false} +\DeclareOption{noedit}{\setboolean{technote@noedit}{true}} + +\newcounter{technote@lang} +\setcounter{technote@lang}{0} +\DeclareOption{german}{\setcounter{technote@lang}{1}} +\DeclareOption{french}{\setcounter{technote@lang}{2}} + 
+\DeclareOption{fullpage}{ +\oddsidemargin -10mm % Margin on odd side pages (default=0mm) +\evensidemargin -10mm % Margin on even side pages (default=0mm) +\topmargin -10mm % Top margin space (default=16mm) +\headheight \baselineskip % Height of headers (default=0mm) +\headsep \baselineskip % Separation spc btw header and text (d=0mm) +\footskip 30pt % Separation spc btw text and footer (d=30pt) +\textheight 230mm % Total text height (default=200mm) +\textwidth 180mm % Total text width (default=160mm) +} + +\renewcommand{\algorithmiccomment}[1]{\hfill/* #1 */} +\renewcommand{\algorithmiclnosize}{\scriptsize} + +\newboolean{technote@truenumbers} +\setboolean{technote@truenumbers}{false} +\DeclareOption{truenumbers}{\setboolean{technote@truenumbers}{true}} + +\ProcessOptions + +\newcommand{\N}{\ifthenelse{\boolean{technote@truenumbers}}% + {\mbox{\rm I\hspace{-.5em}N}}% + {\mathbb{N}}} + +\newcommand{\R}{\ifthenelse{\boolean{technote@truenumbers}}% + {\mbox{\rm I\hspace{-.2em}R}}% + {\mathbb{R}}} + +\newcommand{\Z}{\mathbb{Z}} + +\newcommand{\set}[1]{\left\{#1\right\}} +\newcommand{\mathsc}[1]{\mbox{\sc #1}} +\newcommand{\li}[1]{\langle#1\rangle} +\newcommand{\st}{\;s.t.\;} +\newcommand{\Real}{\R} +\newcommand{\Natural}{\N} +\newcommand{\Integer}{\Z} + +% edit commands +\newcommand{\newedit}[2]{ + \newcommand{#1}[2][default]{% + \ifthenelse{\boolean{technote@noedit}}{}{ + \par\vspace{2mm} + \noindent + \begin{tabular}{|l|}\hline + \parbox{\linewidth-\tabcolsep*2}{{\bf #2:}\hfill\ifthenelse{\equal{##1}{default}}{}{##1}}\\\hline + \parbox{\linewidth-\tabcolsep*2}{\rule{0pt}{5mm}##2\rule[-2mm]{0pt}{2mm}}\\\hline + \end{tabular} + \par\vspace{2mm} + } + } +} + +\newedit{\note}{Note} +\newedit{\comment}{Comment} +\newedit{\question}{Question} +\newedit{\content}{Content} +\newedit{\problem}{Problem} + +\newcommand{\mnote}[1]{\marginpar{\scriptsize\it + \begin{minipage}[t]{0.8 in} + \raggedright #1 + \end{minipage}}} + 
+\newcommand{\Insert}[1]{\underline{#1}\marginpar{$|$}} + +\newcommand{\Delete}[1]{\marginpar{$|$} +} + +% lemma, theorem, etc. +\newtheorem{lemma}{Lemma} +\newtheorem{proposition}{Proposition} +\newtheorem{theorem}{Theorem} +\newtheorem{corollary}{Corollary} +\newtheorem{assumption}{Assumption} +\newtheorem{definition}{Definition} + +\gdef\op|{\,|\;} +\gdef\op:{\,:\;} +\newcommand{\assign}{\leftarrow} +\newcommand{\inc}[1]{#1 \assign #1 + 1} +\newcommand{\isdef}{:=} + +\newcommand{\ident}[1]{\mathit{#1}} +\def\newident#1{\expandafter\def\csname #1\endcsname{\ident{#1}}} + +\newcommand{\eg}{{\it e.g.}} +\newcommand{\ie}{{\it i.e.}} +\newcommand{\apriori}{{\it apriori}} +\newcommand{\etal}{{\it et al.}} + +\newcommand\ps@technote{% + \renewcommand\@oddhead{\theheader}% + \let\@evenhead\@oddhead + \renewcommand\@evenfoot + {\hfil\normalfont\textrm{\thepage}\hfil}% + \let\@oddfoot\@evenfoot +} diff --git a/spec/consensus/consensus.md b/spec/consensus/consensus.md new file mode 100644 index 0000000000..e4b7bdecc5 --- /dev/null +++ b/spec/consensus/consensus.md @@ -0,0 +1,352 @@ +--- +order: 1 +--- +# Byzantine Consensus Algorithm + +## Terms + +- The network is composed of optionally connected _nodes_. Nodes + directly connected to a particular node are called _peers_. +- The consensus process in deciding the next block (at some _height_ + `H`) is composed of one or many _rounds_. +- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit` + represent state machine states of a round. (aka `RoundStep` or + just "step"). +- A node is said to be _at_ a given height, round, and step, or at + `(H,R,S)`, or at `(H,R)` in short to omit the step. +- To _prevote_ or _precommit_ something means to broadcast a [prevote + vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote) + or [first precommit + vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit) + for something. 
+- A vote _at_ `(H,R)` is a vote signed with the bytes for `H` and `R`
+  included in its [sign-bytes](../core/data_structures.md#vote).
+- _+2/3_ is short for "more than 2/3"
+- _1/3+_ is short for "1/3 or more"
+- A set of +2/3 of prevotes for a particular block or `<nil>` at
+  `(H,R)` is called a _proof-of-lock-change_ or _PoLC_ for short.
+
+## State Machine Overview
+
+At each height of the blockchain a round-based protocol is run to
+determine the next block. Each round is composed of three _steps_
+(`Propose`, `Prevote`, and `Precommit`), along with two special steps
+`Commit` and `NewHeight`.
+
+In the optimal scenario, the order of steps is:
+
+```md
+NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
+```
+
+The sequence `(Propose -> Prevote -> Precommit)` is called a _round_.
+There may be more than one round required to commit a block at a given
+height. Examples for why more rounds may be required include:
+
+- The designated proposer was not online.
+- The block proposed by the designated proposer was not valid.
+- The block proposed by the designated proposer did not propagate
+  in time.
+- The block proposed was valid, but +2/3 of prevotes for the proposed
+  block were not received in time for enough validator nodes by the
+  time they reached the `Precommit` step. Even though +2/3 of prevotes
+  are necessary to progress to the next step, at least one validator
+  may have voted `<nil>` or maliciously voted for something else.
+- The block proposed was valid, and +2/3 of prevotes were received for
+  enough nodes, but +2/3 of precommits for the proposed block were not
+  received for enough validator nodes.
+
+Some of these problems are resolved by moving onto the next round &
+proposer. Others are resolved by increasing certain round timeout
+parameters over each successive round. 
+ +## State Machine Diagram + +```md + +-------------------------------------+ + v |(Wait til `CommmitTime+timeoutCommit`) + +-----------+ +-----+-----+ + +----------> | Propose +--------------+ | NewHeight | + | +-----------+ | +-----------+ + | | ^ + |(Else, after timeoutPrecommit) v | ++-----+-----+ +-----------+ | +| Precommit | <------------------------+ Prevote | | ++-----+-----+ +-----------+ | + |(When +2/3 Precommits for block found) | + v | ++--------------------------------------------------------------------+ +| Commit | +| | +| * Set CommitTime = now; | +| * Wait for block, then stage/save/commit block; | ++--------------------------------------------------------------------+ +``` + +# Background Gossip + +A node may not have a corresponding validator private key, but it +nevertheless plays an active role in the consensus process by relaying +relevant meta-data, proposals, blocks, and votes to its peers. A node +that has the private keys of an active validator and is engaged in +signing votes is called a _validator-node_. All nodes (not just +validator-nodes) have an associated state (the current height, round, +and step) and work to make progress. + +Between two nodes there exists a `Connection`, and multiplexed on top of +this connection are fairly throttled `Channel`s of information. An +epidemic gossip protocol is implemented among some of these channels to +bring peers up to speed on the most recent state of consensus. For +example, + +- Nodes gossip `PartSet` parts of the current round's proposer's + proposed block. A LibSwift inspired algorithm is used to quickly + broadcast blocks across the gossip network. +- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead + of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s + current (or future) round to enable it to progress forward. +- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change) + round if one is proposed. 
+- Nodes gossip to nodes lagging in blockchain height with block + [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit) + for older blocks. +- Nodes opportunistically gossip `ReceivedVote` messages to hint peers what + votes it already has. +- Nodes broadcast their current state to all neighboring peers. (but + is not gossiped further) + +There's more, but let's not get ahead of ourselves here. + +## Proposals + +A proposal is signed and published by the designated proposer at each +round. The proposer is chosen by a deterministic and non-choking round +robin selection algorithm that selects proposers in proportion to their +voting power (see +[implementation](https://github.com/tendermint/tendermint/blob/v0.34.x/types/validator_set.go)). + +A proposal at `(H,R)` is composed of a block and an optional latest +`PoLC-Round < R` which is included iff the proposer knows of one. This +hints the network to allow nodes to unlock (when safe) to ensure the +liveness property. + +## State Machine Spec + +### Propose Step (height:H,round:R) + +Upon entering `Propose`: + +- The designated proposer proposes a block at `(H,R)`. + +The `Propose` step ends: + +- After `timeoutProposeR` after entering `Propose`. --> goto + `Prevote(H,R)` +- After receiving proposal block and all prevotes at `PoLC-Round`. --> + goto `Prevote(H,R)` +- After [common exit conditions](#common-exit-conditions) + +### Prevote Step (height:H,round:R) + +Upon entering `Prevote`, each validator broadcasts its prevote vote. + +- First, if the validator is locked on a block since `LastLockRound` + but now has a PoLC for something else at round `PoLC-Round` where + `LastLockRound < PoLC-Round < R`, then it unlocks. +- If the validator is still locked on a block, it prevotes that. +- Else, if the proposed block from `Propose(H,R)` is good, it + prevotes that. +- Else, if the proposal is invalid or wasn't received on time, it + prevotes ``. 
+
+The `Prevote` step ends:
+
+- After +2/3 prevotes for a particular block or `<nil>`. --> goto
+  `Precommit(H,R)`
+- After `timeoutPrevote` after receiving any +2/3 prevotes. --> goto
+  `Precommit(H,R)`
+- After [common exit conditions](#common-exit-conditions)
+
+### Precommit Step (height:H,round:R)
+
+Upon entering `Precommit`, each validator broadcasts its precommit vote.
+
+- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
+  (re)locks (or changes lock to) and precommits `B` and sets
+  `LastLockRound = R`.
+- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks
+  and precommits `<nil>`.
+- Else, it keeps the lock unchanged and precommits `<nil>`.
+
+A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I
+did get +2/3 prevotes and waited a bit".
+
+The Precommit step ends:
+
+- After +2/3 precommits for `<nil>`. --> goto `Propose(H,R+1)`
+- After `timeoutPrecommit` after receiving any +2/3 precommits. --> goto
+  `Propose(H,R+1)`
+- After [common exit conditions](#common-exit-conditions)
+
+### Common exit conditions
+
+- After +2/3 precommits for a particular block. --> goto
+  `Commit(H)`
+- After any +2/3 prevotes received at `(H,R+x)`. --> goto
+  `Prevote(H,R+x)`
+- After any +2/3 precommits received at `(H,R+x)`. --> goto
+  `Precommit(H,R+x)`
+
+### Commit Step (height:H)
+
+- Set `CommitTime = now()`
+- Wait until block is received. --> goto `NewHeight(H+1)`
+
+### NewHeight Step (height:H)
+
+- Move `Precommits` to `LastCommit` and increment height.
+- Set `StartTime = CommitTime+timeoutCommit`
+- Wait until `StartTime` to receive straggler commits. --> goto
+  `Propose(H,0)`
+
+## Proofs
+
+### Proof of Safety
+
+Assume that at most -1/3 of the voting power of validators is byzantine.
+If a validator commits block `B` at round `R`, it's because it saw +2/3
+of precommits at round `R`. This implies that 1/3+ of honest nodes are
+still locked at round `R' > R`. 
These locked
+validators will remain locked until they see a PoLC at `R' > R`, but this won't happen because
+1/3+ are locked and honest, so at most -2/3 are available to vote for
+anything other than `B`.
+
+### Proof of Liveness
+
+If 1/3+ honest validators are locked on two different blocks from
+different rounds, a proposer's `PoLC-Round` will eventually cause nodes
+locked from the earlier round to unlock. Eventually, the designated
+proposer will be one that is aware of a PoLC at the later round. Also,
+`timeoutProposeR` increments with round `R`, while the size of a
+proposal is capped, so eventually the network is able to "fully gossip"
+the whole proposal (e.g. the block & PoLC).
+
+### Proof of Fork Accountability
+
+Define the JSet (justification-vote-set) at height `H` of a validator
+`V1` to be all the votes signed by the validator at `H` along with
+justification PoLC prevotes for each lock change. For example, if `V1`
+signed the following precommits: `Precommit(B1 @ round 0)`,
+`Precommit(<nil> @ round 1)`, `Precommit(B2 @ round 4)` (note that no
+precommits were signed for rounds 2 and 3, and that's ok),
+`Precommit(B1 @ round 0)` must be justified by a PoLC at round 0, and
+`Precommit(B2 @ round 4)` must be justified by a PoLC at round 4; but
+the precommit for `<nil>` at round 1 is not a lock-change by definition
+so the JSet for `V1` need not include any prevotes at round 1, 2, or 3
+(unless `V1` happened to have prevoted for those rounds).
+
+Further, define the JSet at height `H` of a set of validators `VSet` to
+be the union of the JSets for each validator in `VSet`. For a given
+commit by honest validators at round `R` for block `B` we can construct
+a JSet to justify the commit for `B` at `R`. We say that a JSet
+_justifies_ a commit at `(H,R)` if all the committers (validators in the
+commit-set) are each justified in the JSet with no duplicitous vote
+signatures (by the committers). 
+ +- **Lemma**: When a fork is detected by the existence of two + conflicting [commits](../core/data_structures.md#commit), the + union of the JSets for both commits (if they can be compiled) must + include double-signing by at least 1/3+ of the validator set. + **Proof**: The commit cannot be at the same round, because that + would immediately imply double-signing by 1/3+. Take the union of + the JSets of both commits. If there is no double-signing by at least + 1/3+ of the validator set in the union, then no honest validator + could have precommitted any different block after the first commit. + Yet, +2/3 did. Reductio ad absurdum. + +As a corollary, when there is a fork, an external process can determine +the blame by requiring each validator to justify all of its round votes. +Either we will find 1/3+ who cannot justify at least one of their votes, +and/or, we will find 1/3+ who had double-signed. + +### Alternative algorithm + +Alternatively, we can take the JSet of a commit to be the "full commit". +That is, if light clients and validators do not consider a block to be +committed unless the JSet of the commit is also known, then we get the +desirable property that if there ever is a fork (e.g. there are two +conflicting "full commits"), then 1/3+ of the validators are immediately +punishable for double-signing. + +There are many ways to ensure that the gossip network efficiently share +the JSet of a commit. One solution is to add a new message type that +tells peers that this node has (or does not have) a +2/3 majority for B +(or) at (H,R), and a bitarray of which votes contributed towards that +majority. Peers can react by responding with appropriate votes. + +We will implement such an algorithm for the next iteration of the +Tendermint consensus protocol. 
+ +Other potential improvements include adding more data in votes such as +the last known PoLC round that caused a lock change, and the last voted +round/step (or, we may require that validators not skip any votes). This +may make JSet verification/gossip logic easier to implement. + +### Censorship Attacks + +Due to the definition of a block +[commit](https://github.com/tendermint/tendermint/blob/v0.34.x/docs/tendermint-core/validators.md), any 1/3+ coalition of +validators can halt the blockchain by not broadcasting their votes. Such +a coalition can also censor particular transactions by rejecting blocks +that include these transactions, though this would result in a +significant proportion of block proposals to be rejected, which would +slow down the rate of block commits of the blockchain, reducing its +utility and value. The malicious coalition might also broadcast votes in +a trickle so as to grind blockchain block commits to a near halt, or +engage in any combination of these attacks. + +If a global active adversary were also involved, it can partition the +network in such a way that it may appear that the wrong subset of +validators were responsible for the slowdown. This is not just a +limitation of Tendermint, but rather a limitation of all consensus +protocols whose network is potentially controlled by an active +adversary. + +### Overcoming Forks and Censorship Attacks + +For these types of attacks, a subset of the validators through external +means should coordinate to sign a reorg-proposal that chooses a fork +(and any evidence thereof) and the initial subset of validators with +their signatures. Validators who sign such a reorg-proposal forego its +collateral on all other forks. Clients should verify the signatures on +the reorg-proposal, verify any evidence, and make a judgement or prompt +the end-user for a decision. 
For example, a phone wallet app may prompt +the user with a security warning, while a refrigerator may accept any +reorg-proposal signed by +1/2 of the original validators. + +No non-synchronous Byzantine fault-tolerant algorithm can come to +consensus when 1/3+ of validators are dishonest, yet a fork assumes that +1/3+ of validators have already been dishonest by double-signing or +lock-changing without justification. So, signing the reorg-proposal is a +coordination problem that cannot be solved by any non-synchronous +protocol (i.e. automatically, and without making assumptions about the +reliability of the underlying network). It must be provided by means +external to the weakly-synchronous Tendermint consensus algorithm. For +now, we leave the problem of reorg-proposal coordination to human +coordination via internet media. Validators must take care to ensure +that there are no significant network partitions, to avoid situations +where two conflicting reorg-proposals are signed. + +Assuming that the external coordination medium and protocol is robust, +it follows that forks are less of a concern than [censorship +attacks](#censorship-attacks). + +### Canonical vs subjective commit + +We distinguish between "canonical" and "subjective" commits. A subjective commit is what +each validator sees locally when they decide to commit a block. The canonical commit is +what is included by the proposer of the next block in the `LastCommit` field of +the block. This is what makes it canonical and ensures every validator agrees on the canonical commit, +even if it is different from the +2/3 votes a validator has seen, which caused the validator to +commit the respective block. Each block contains a canonical +2/3 commit for the previous +block. 
diff --git a/spec/consensus/creating-proposal.md b/spec/consensus/creating-proposal.md
new file mode 100644
index 0000000000..cb43c8ebb4
--- /dev/null
+++ b/spec/consensus/creating-proposal.md
@@ -0,0 +1,43 @@
+---
+order: 2
+---
+# Creating a proposal
+
+A block consists of a header, transactions, votes (the commit),
+and a list of evidence of malfeasance (ie. signing conflicting votes).
+
+We include no more than 1/10th of the maximum block size
+(`ConsensusParams.Block.MaxBytes`) of evidence with each block.
+
+## Reaping transactions from the mempool
+
+When we reap transactions from the mempool, we calculate maximum data
+size by subtracting maximum header size (`MaxHeaderBytes`), the maximum
+amino overhead for a block (`MaxAminoOverheadForBlock`), the size of
+the last commit (if present) and evidence (if present). While reaping
+we account for amino overhead for each transaction.
+
+```go
+func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 {
+	return maxBytes -
+		MaxOverheadForBlock -
+		MaxHeaderBytes -
+		int64(valsCount)*MaxVoteBytes -
+		int64(evidenceCount)*MaxEvidenceBytes
+}
+```
+
+## Validating transactions in the mempool
+
+Before we accept a transaction in the mempool, we check if its size is no more
+than {MaxDataSize}. {MaxDataSize} is calculated using the same formula as
+above, except we subtract the maximum number of evidence, {MaxNum}, multiplied by the maximum size of evidence
+
+```go
+func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 {
+	return maxBytes -
+		MaxOverheadForBlock -
+		MaxHeaderBytes -
+		(maxNumEvidence * MaxEvidenceBytes)
+}
+```
diff --git a/spec/consensus/evidence.md b/spec/consensus/evidence.md
new file mode 100644
index 0000000000..84d51a01b5
--- /dev/null
+++ b/spec/consensus/evidence.md
@@ -0,0 +1,199 @@
+# Evidence
+
+Evidence is an important component of Tendermint's security model. 
Whilst the core +consensus protocol provides correctness guarantees for state machine replication +that can tolerate less than 1/3 failures, the evidence system looks to detect and +gossip byzantine faults whose combined power is greater than or equal to 1/3. It is worth noting that +the evidence system is designed purely to detect possible attacks, gossip them, +commit them on chain and inform the application running on top of Tendermint. +Evidence in itself does not punish "bad actors"; this is left to the discretion +of the application. A common form of punishment is slashing where the validators +that were caught violating the protocol have all or a portion of their voting +power removed. Evidence, given the assumption that 1/3+ of the network is still +byzantine, is susceptible to censorship and should therefore be considered added +security on a "best effort" basis. + +This document walks through the various forms of evidence, how they are detected, +gossiped, verified and committed. + +> NOTE: Evidence here is internal to tendermint and should not be confused with +> application evidence + +## Detection + +### Equivocation + +Equivocation is the most fundamental of byzantine faults. Simply put, to prevent +replication of state across all nodes, a validator tries to convince some subset +of nodes to commit one block whilst convincing another subset to commit a +different block. This is achieved by double voting (hence +`DuplicateVoteEvidence`). A successful duplicate vote attack requires greater +than 1/3 voting power and a (temporary) network partition between the aforementioned +subsets. This is because in consensus, votes are gossiped around. When a node +observes two conflicting votes from the same peer, it will use the two votes of +evidence and begin gossiping this evidence to other nodes. [Verification](#duplicatevoteevidence) is addressed further down.
+ +```go +type DuplicateVoteEvidence struct { + VoteA Vote + VoteB Vote + + // and abci specific fields +} +``` + +### Light Client Attacks + +Light clients also comply with the 1/3+ security model, however, by using a +different, more lightweight verification method they are subject to a +different kind of 1/3+ attack whereby the byzantine validators could sign an +alternative light block that the light client will think is valid. Detection, +explained in greater detail +[here](../light-client/detection/detection_003_reviewed.md), involves comparison +with multiple other nodes in the hope that at least one is "honest". An "honest" +node will return a challenging light block for the light client to validate. If +this challenging light block also meets the +[validation criteria](../light-client/verification/verification_001_published.md) +then the light client sends the "forged" light block to the node. +[Verification](#lightclientattackevidence) is addressed further down. + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 + + // and abci specific fields +} +``` + +## Verification + +If a node receives evidence, it will first try to verify it, then persist it. +Evidence of byzantine behavior should only be committed once (uniqueness) and +should be committed within a certain period from the point that it occurred +(timely). Timeliness is defined by the `EvidenceParams`: `MaxAgeNumBlocks` and +`MaxAgeDuration`. In Proof of Stake chains where validators are bonded, evidence +age should be less than the unbonding period so validators still can be +punished. Given these two properties the following initial checks are made. + +1. Has the evidence expired? This is done by taking the height of the `Vote` + within `DuplicateVoteEvidence` or `CommonHeight` within + `LightClientAttackEvidence`. The evidence height is then used to retrieve the + header and thus the time of the block that corresponds to the evidence.
If + `CurrentHeight - MaxAgeNumBlocks > EvidenceHeight` && `CurrentTime - + MaxAgeDuration > EvidenceTime`, the evidence is considered expired and + ignored. + +2. Has the evidence already been committed? The evidence pool tracks the hash of + all committed evidence and uses this to determine uniqueness. If a new + evidence has the same hash as a committed one, the new evidence will be + ignored. + +### DuplicateVoteEvidence + +Valid `DuplicateVoteEvidence` must adhere to the following rules: + +- Validator Address, Height, Round and Type must be the same for both votes + +- BlockID must be different for both votes (BlockID can be for a nil block) + +- Validator must have been in the validator set at that height + +- Vote signature must be correctly signed. This also uses `ChainID` so we know + that the fault occurred on this chain + +### LightClientAttackEvidence + +Valid Light Client Attack Evidence must adhere to the following rules: + +- If the header of the light block is invalid, thus indicating a lunatic attack, + the node must check that they can use `verifySkipping` from their header at + the common height to the conflicting header + +- If the header is valid, then the validator sets are the same and this is + either a form of equivocation or amnesia. We therefore check that 2/3 of the + validator set also signed the conflicting header. + +- The node's own header at the same height as the conflicting header must have a + different hash to the conflicting header. + +- If the node's latest header is lower in height than the conflicting header, then + the node must check that the conflicting block has a time that is less than + this latest header (This is a forward lunatic attack). + +## Gossiping + +If a node verifies evidence it then broadcasts it to all peers, continuously sending +the same evidence once every 10 seconds until the evidence is seen on chain or +expires.
+ +## Committing on Chain + +Evidence takes strict priority over regular transactions, thus a block is filled +with evidence first and transactions take up the remainder of the space. To +mitigate the threat of an already punished node from spamming the network with +more evidence, the size of the evidence in a block can be capped by +`EvidenceParams.MaxBytes`. Nodes receiving blocks with evidence will validate +the evidence before sending `Prevote` and `Precommit` votes. The evidence pool +will usually cache verifications so that this process is much quicker. + +## Sending Evidence to the Application + +After evidence is committed, the block is then processed by the block executor +which delivers the evidence to the application via `EndBlock`. Evidence is +stripped of the actual proof, split up per faulty validator and only the +validator, height, time and evidence type is sent. + +```proto +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} + +message Evidence { + EvidenceType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [ + (gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} +``` + +`DuplicateVoteEvidence` and `LightClientAttackEvidence` are self-contained in +the sense that the evidence can be used to derive the `abci.Evidence` that is +sent to the application.
Because of this, extra fields are necessary: + +```go +type DuplicateVoteEvidence struct { + VoteA *Vote + VoteB *Vote + + // abci specific information + TotalVotingPower int64 + ValidatorPower int64 + Timestamp time.Time +} + +type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock + CommonHeight int64 + + // abci specific information + ByzantineValidators []*Validator + TotalVotingPower int64 + Timestamp time.Time +} +``` + +These ABCI specific fields don't affect validity of the evidence itself but must +be consistent amongst nodes and agreed upon on chain. If evidence with the +incorrect abci information is sent, a node will create new evidence from it and +replace the ABCI fields with the correct information. diff --git a/spec/consensus/light-client/README.md b/spec/consensus/light-client/README.md new file mode 100644 index 0000000000..44b9e0c762 --- /dev/null +++ b/spec/consensus/light-client/README.md @@ -0,0 +1,9 @@ +--- +order: 1 +parent: + title: Light Client + order: false +--- +# Tendermint Light Client Protocol + +Deprecated, please see [light-client](../../light-client/README.md). diff --git a/spec/consensus/light-client/accountability.md b/spec/consensus/light-client/accountability.md new file mode 100644 index 0000000000..5cf46b0b43 --- /dev/null +++ b/spec/consensus/light-client/accountability.md @@ -0,0 +1,3 @@ +# Fork accountability + +Deprecated, please see [light-client/accountability](../../light-client/accountability.md). 
diff --git a/spec/consensus/light-client/assets/light-node-image.png b/spec/consensus/light-client/assets/light-node-image.png new file mode 100644 index 0000000000..f0b93c6e41 Binary files /dev/null and b/spec/consensus/light-client/assets/light-node-image.png differ diff --git a/spec/consensus/light-client/detection.md b/spec/consensus/light-client/detection.md new file mode 100644 index 0000000000..5c87562ba5 --- /dev/null +++ b/spec/consensus/light-client/detection.md @@ -0,0 +1,3 @@ +# Detection + +Deprecated, please see [light-client/detection](../../light-client/detection.md). diff --git a/spec/consensus/light-client/verification.md b/spec/consensus/light-client/verification.md new file mode 100644 index 0000000000..1f0104a402 --- /dev/null +++ b/spec/consensus/light-client/verification.md @@ -0,0 +1,3 @@ +# Core Verification + +Deprecated, please see [light-client/verification](../../light-client/verification.md). diff --git a/spec/consensus/proposer-based-timestamp/README.md b/spec/consensus/proposer-based-timestamp/README.md new file mode 100644 index 0000000000..82421d99da --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/README.md @@ -0,0 +1,20 @@ +# Proposer-Based Timestamps + +This section describes a version of the Tendermint consensus protocol, +which uses proposer-based timestamps.
+ +## Contents + +- [Proposer-Based Time][main] (entry point) +- [Part I - System Model and Properties][sysmodel] +- [Part II - Protocol Specification][algorithm] +- [TLA+ Specification][proposertla] + + +[algorithm]: ./pbts-algorithm_001_draft.md + +[sysmodel]: ./pbts-sysmodel_001_draft.md + +[main]: ./pbts_001_draft.md + +[proposertla]: ./tla/TendermintPBT_001_draft.tla diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md new file mode 100644 index 0000000000..9791ff86bc --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md @@ -0,0 +1,163 @@ +# Proposer-Based Time - Part II + +## Updated Consensus Algorithm + +### Outline + +The algorithm in the [arXiv paper][arXiv] evaluates rules of the received messages without making explicit how these messages are received. In our solution, we will make some message filtering explicit. We will assume that there are message reception steps (where messages are received and possibly stored locally for later evaluation of rules) and processing steps (the latter roughly as described in a way similar to the pseudo code of the arXiv paper). + +In contrast to the original algorithm the field `proposal` in the `PROPOSE` message is a pair `(v, time)`, of the proposed consensus value `v` and the proposed time `time`. + +#### **[PBTS-RECEPTION-STEP.0]** + +In the reception step at process `p` at local time `now_p`, upon receiving a message `m`: + +- if the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely` + +> if `m` does not satisfy the constraint consider it `untimely` + + +#### **[PBTS-PROCESSING-STEP.0]** + +In the processing step, based on the messages stored, the rules of the algorithms are +executed. Note that the processing step only operates on messages +for the current height. 
The consensus algorithm rules are defined by the following updates to arXiv paper. + +#### New `StartRound` + +There are two additions + +- in case the proposer's local time is smaller than the time of the previous block, the proposer waits until this is not the case anymore (to ensure the block time is monotonically increasing) +- the proposer sends its time `now_p` as part of its proposal + +We update the timeout for the `PROPOSE` step according to the following reasoning: + +- If a correct proposer needs to wait to make sure its proposed time is larger than the `blockTime` of the previous block, then it sends by realtime `blockTime + ACCURACY` (By this time, its local clock must exceed `blockTime`) +- the receiver will receive a `PROPOSE` message by `blockTime + ACCURACY + MSGDELAY` +- the receiver's local clock will be `<= blockTime + 2 * ACCURACY + MSGDELAY` +- thus when the receiver `p` enters this round it can set its timeout to a value `waitingTime => blockTime + 2 * ACCURACY + MSGDELAY - now_p` + +So we should set the timeout to `max(timeoutPropose(round_p), waitingTime)`. + +> If, in the future, a block delay parameter `BLOCKDELAY` is introduced, this means +that the proposer should wait for `now_p > blockTime + BLOCKDELAY` before sending a `PROPOSE` message. +Also, `BLOCKDELAY` needs to be added to `waitingTime`. 
+ +#### **[PBTS-ALG-STARTROUND.0]** + +```go +function StartRound(round) { + blockTime ← block time of block h_p - 1 + waitingTime ← blockTime + 2 * ACCURACY + MSGDELAY - now_p + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + wait until now_p > blockTime // new wait condition + if validValue_p != nil { + proposal ← (validValue_p, now_p) // added "now_p" + } + else { + proposal ← (getValue(), now_p) // added "now_p" + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } + else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after max(timeoutPropose(round_p), waitingTime) + } +} +``` + +#### New Rule Replacing Lines 22 - 27 + +- a validator prevotes for the consensus value `v` **and** the time `t` +- the code changes as the `PROPOSAL` message carries time (while `lockedValue` does not) + +#### **[PBTS-ALG-UPON-PROP.0]** + +```go +upon timely(⟨PROPOSAL, h_p, round_p, (v,t), −1⟩) from proposer(h_p, round_p) while step_p = propose do { + if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v,t)⟩ + } + else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +#### New Rule Replacing Lines 28 - 33 + +In case consensus is not reached in round 1, in `StartRound` the proposer of future rounds may propose the same value but with a different time. +Thus, the time `tprop` in the `PROPOSAL` message need not match the time `tvote` in the (old) `PREVOTE` messages. +A validator may send `PREVOTE` for the current round as long as the value `v` matches. 
+This gives the following rule: + +#### **[PBTS-ALG-OLD-PREVOTE.0]** + +```go +upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, vr, id((v, tvote)⟩ +while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, roundp, id(v, tprop)⟩ + } + else { + broadcast ⟨PREVOTE, hp, roundp, nil⟩ + } + step_p ← prevote +} +``` + +#### New Rule Replacing Lines 36 - 43 + +- As above, in the following `(v,t)` is part of the message rather than `v` +- the stored values (i.e., `lockedValue`, `validValue`) do not contain the time + +#### **[PBTS-ALG-NEW-PREVOTE.0]** + +```go +upon timely(⟨PROPOSAL, h_p, round_p, (v,t), ∗⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, round_p, id(v,t)⟩ while valid(v) ∧ step_p ≥ prevote for the first time do { + if step_p = prevote { + lockedValue_p ← v + lockedRound_p ← round_p + broadcast ⟨PRECOMMIT, h_p, round_p, id(v,t))⟩ + step_p ← precommit + } + validValue_p ← v + validRound_p ← round_p +} +``` + +#### New Rule Replacing Lines 49 - 54 + +- we decide on `v` as well as on the time from the proposal message +- here we do not care whether the proposal was received timely. + +> In particular we need to take care of the case where the proposer is untimely to one correct validator only. We need to ensure that this validator decides if all decide. + +#### **[PBTS-ALG-DECIDE.0]** + +```go +upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v,t)⟩ while decisionp[h_p] = nil do { + if valid(v) { + decision_p [h_p] = (v,t) // decide on time too + h_p ← h_p + 1 + reset lockedRound_p , lockedValue_p, validRound_p and validValue_p to initial values and empty message log + StartRound(0) + } +} +``` + +**All other rules remains unchanged.** + +Back to [main document][main]. 
+ +[main]: ./pbts_001_draft.md + +[arXiv]: https://arxiv.org/abs/1807.04938 + +[tlatender]: https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md + +[bfttime]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/spec/consensus/bft-time.md + +[lcspec]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/rust-spec/lightclient/README.md diff --git a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md new file mode 100644 index 0000000000..2eb32bc7df --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md @@ -0,0 +1,198 @@ +# Proposer-Based Time - Part I + +## System Model + +### Time and Clocks + +#### **[PBTS-CLOCK-NEWTON.0]** + +There is a reference Newtonian real-time `t` (UTC). + +Every correct validator `V` maintains a synchronized clock `C_V` that ensures: + +#### **[PBTS-CLOCK-PRECISION.0]** + +There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, +`|C_V(t) - C_W(t)| < PRECISION` + + +### Message Delays + +We do not want to interfere with the Tendermint timing assumptions. We will postulate a timing restriction, which, if satisfied, ensures that liveness is preserved. + +In general the local clock may drift from the global time. (It may progress faster, e.g., one second of clock time might take 1.005 seconds of real-time). As a result the local clock and the global clock may be measured in different time units. Usually, the message delay is measured in global clock time units. To estimate the correct local timeout precisely, we would need to estimate the clock time duration of a message delay taking into account the clock drift. For simplicity we ignore this, and directly postulate the message delay assumption in terms of local time. 
+ + +#### **[PBTS-MSG-D.0]** + +There exists a system parameter `MSGDELAY` for message end-to-end delays **counted in clock-time**. + +> Observe that [PBTS-MSG-D.0] imposes constraints on message delays as well as on the clock. + +#### **[PBTS-MSG-FAIR.0]** + +The message end-to-end delay between a correct proposer and a correct validator (for `PROPOSE` messages) is less than `MSGDELAY`. + + +## Problem Statement + +In this section we define the properties of Tendermint consensus (cf. the [arXiv paper][arXiv]) in this new system model. + +#### **[PBTS-PROPOSE.0]** + +A proposer proposes a pair `(v,t)` of consensus value `v` and time `t`. + +> We then restrict the allowed decisions along the following lines: + +#### **[PBTS-INV-AGREEMENT.0]** + +[Agreement] No two correct validators decide on different values `v`. + +#### **[PBTS-INV-TIME-VAL.0]** + +[Time-Validity] If a correct validator decides on `t` then `t` is "OK" (we will formalize this below), even if up to `2f` validators are faulty. + +However, the properties of Tendermint consensus are of more interest with respect to the blocks, that is, what is written into a block and when. We therefore, in the following, will give the safety and liveness properties from this block-centric viewpoint. +For this, observe that the time `t` decided at consensus height `k` will be written in the block of height `k+1`, and will be supported by `2f + 1` `PRECOMMIT` messages of the same consensus round `r`. The time written in the block, we will denote by `b.time` (to distinguish it from the term `bfttime` used for median-based time). For this, it is important to have the following consensus algorithm property: + +#### **[PBTS-INV-TIME-AGR.0]** + +[Time-Agreement] If two correct validators decide in the same round, then they decide on the same `t`. 
+ +#### **[PBTS-DECISION-ROUND.0]** + +Note that the relation between consensus decisions, on the one hand, and blocks, on the other hand, is not immediate; in particular if we consider time: In the proposed solution, +as validators may decide in different rounds, they may decide on different times. +The proposer of the next block, may pick a commit (at least `2f + 1` `PRECOMMIT` messages from one round), and thus it picks a decision round that is going to become "canonic". +As a result, the proposer implicitly has a choice of one of the times that belong to rounds in which validators decided. Observe that this choice was implicitly the case already in the median-based `bfttime`. +However, as most consensus instances terminate within one round on the Cosmos hub, this is hardly ever observed in practice. + + + +Finally, observe that the agreement ([Agreement] and [Time-Agreement]) properties are based on the Tendermint security model [TMBC-FM-2THIRDS.0] of more than 2/3 correct validators, while [Time-Validity] is based on more than 1/3 correct validators. + +### SAFETY + +Here we will provide specifications that relate local time to block time. However, since we do not assume (by now) that local time is linked to real-time, these specifications also do not provide a relation between block time and real-time. Such properties are given [later](#REAL-TIME-SAFETY). + +For a correct validator `V`, let `beginConsensus(V,k)` be the local time when it sets its height to `k`, and let `endConsensus(V,k)` be the time when it sets its height to `k + 1`. + +Let + +- `beginConsensus(k)` be the minimum over `beginConsensus(V,k)`, and +- `last-beginConsensus(k)` be the maximum over `beginConsensus(V,k)`, and +- `endConsensus(k)` the maximum over `endConsensus(V,k)` + +for all correct validators `V`. + +> Observe that `beginConsensus(k) <= last-beginConsensus(k)` and if local clocks are monotonic, then `last-beginConsensus(k) <= endConsensus(k)`. 
+ +#### **[PBTS-CLOCK-GROW.0]** + +We assume that during one consensus instance, local clocks are not set back, in particular for each correct validator `V` and each height `k`, we have `beginConsensus(V,k) < endConsensus(V,k)`. + + +#### **[PBTS-CONSENSUS-TIME-VALID.0]** + +If + +- there is a valid commit `c` for height `k`, and +- `c` contains a `PRECOMMIT` message by at least one correct validator, + +then the time `b.time` in the block `b` that is signed by `c` satisfies + +- `beginConsensus(k) - PRECISION <= b.time < endConsensus(k) + PRECISION + MSGDELAY`. + + +> [PBTS-CONSENSUS-TIME-VALID.0] is based on an analysis where the proposer is faulty (and does not count towards `beginConsensus(k)` and `endConsensus(k)`), and we estimate the times at which correct validators receive and `accept` the `propose` message. If the proposer is correct we obtain + +#### **[PBTS-CONSENSUS-LIVE-VALID-CORR-PROP.0]** + +If the proposer of round 1 is correct, and + +- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and +- [PBTS-MSG-FAIR.0], and +- [PBTS-CLOCK-PRECISION.0], and +- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?) + +then eventually (within bounded time) every correct validator decides in round 1. + +#### **[PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0]** + +If the proposer of round 1 is correct, and + +- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and +- [PBTS-MSG-FAIR.0], and +- [PBTS-CLOCK-PRECISION.0], and +- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?) + +then `beginConsensus_k <= b.time <= last-beginConsensus_k`. + + +> For the above two properties we will assume that a correct proposer `v` sends its `PROPOSAL` at its local time `beginConsensus(v,k)`. + +### LIVENESS + +If + +- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and +- [PBTS-MSG-FAIR.0], +- [PBTS-CLOCK.0], and +- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?) + +then eventually there is a valid commit `c` for height `k`.
+ + +### REAL-TIME SAFETY + +> We want to give a property that can be exploited from the outside, that is, given a block with some time stored in it, what is the estimate at which real-time the block was generated. To do so, we need to link clock-time to real-time; which is not the case with [PBTS-CLOCK.0]. For this, we introduce the following assumption on the clocks: + +#### **[PBTS-CLOCKSYNC-EXTERNAL.0]** + +There is a system parameter `ACCURACY`, such that for all real-times `t` and all correct validators `V`, + +- `| C_V(t) - t | < ACCURACY`. + +> `ACCURACY` is not necessarily visible at the code level. The properties below just show that the smaller +its value, the closer the block time will be to real-time + +#### **[PBTS-CONSENSUS-PTIME.0]** + +LET `m` be a propose message. We consider the following two real-times `proposalTime(m)` and `propRecvTime(m)`: + +- if the proposer is correct and sends `m` at time `t`, we write `proposalTime(m)` for real-time `t`. +- if first correct validator receives `m` at time `t`, we write `propRecvTime(m)` for real-time `t`. + + +#### **[PBTS-CONSENSUS-REALTIME-VALID.0]** + +Let `b` be a block with a valid commit that contains at least one `precommit` message by a correct validator (and `proposalTime` is the time for the height/round `propose` message `m` that triggered the `precommit`). Then: + +`propRecvTime(m) - ACCURACY - PRECISION < b.time < propRecvTime(m) + ACCURACY + PRECISION + MSGDELAY` + + +#### **[PBTS-CONSENSUS-REALTIME-VALID-CORR.0]** + +Let `b` be a block with a valid commit that contains at least one `precommit` message by a correct validator (and `proposalTime` is the time for the height/round `propose` message `m` that triggered the `precommit`). 
Then, if the proposer is correct: + +`proposalTime(m) - ACCURACY < b.time < proposalTime(m) + ACCURACY` + +> by the algorithm at time `proposalTime(m)` the proposer fixes `m.time <- now_p(proposalTime(m))` + +> "triggered the `PRECOMMIT`" implies that the data in `m` and `b` are "matching", that is, `m` proposed the values that are actually stored in `b`. + +Back to [main document][main]. + +[main]: ./pbts_001_draft.md + +[arXiv]: https://arxiv.org/abs/1807.04938 + +[tlatender]: https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md + +[bfttime]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/spec/consensus/bft-time.md + +[lcspec]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/rust-spec/lightclient/README.md + +[algorithm]: ./pbts-algorithm_001_draft.md + +[sysmodel]: ./pbts-sysmodel_001_draft.md diff --git a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md new file mode 100644 index 0000000000..a4d876b28c --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md @@ -0,0 +1,270 @@ +# Proposer-Based Time + +## Current BFTTime + +### Description + +In Tendermint consensus, the first version of how time is computed and stored in a block works as follows: + +- validators send their current local time as part of `precommit` messages +- upon collecting the `precommit` messages that the proposer uses to build a commit to be put in the next block, the proposer computes the `time` of the next block as the median (weighted over voting power) of the times in the `precommit` messages. + +### Analysis + +1. 
**Fault tolerance.** The computed median time is called [`bfttime`][bfttime] as it is indeed fault-tolerant: if **less than a third** of the validators is faulty (counted in voting power), it is guaranteed that the computed time lies between the minimum and the maximum times sent by correct validators. +1. **Effect of faulty validators.** If more than `1/2` of the voting power (which is in fact more than one third and less than two thirds of the voting power) is held by faulty validators, then the time is under total control of the faulty validators. (This is particularly challenging in the context of [lightclient][lcspec] security.) +1. **Proposer influence on block time.** The proposer of the next block has a degree of freedom in choosing the `bfttime`, since it computes the median time based on the timestamps from `precommit` messages sent by + `2f + 1` correct validators. + 1. If there are `n` different timestamps in the `precommit` messages, the proposer can use any subset of timestamps that add up to `2f + 1` + of the voting power in order to compute the median. + 1. If the validators decide in different rounds, the proposer can decide on which round the median computation is based. +1. **Liveness.** The liveness of the protocol: + 1. does not depend on clock synchronization, + 1. depends on bounded message delays. +1. **Relation to real time.** There is no clock synchronization, which implies that there is **no relation** between the computed block `time` and real time. +1. **Aggregate signatures.** As the `precommit` messages contain the local times, all these `precommit` messages typically differ in the time field, which **prevents** the use of aggregate signatures.
+ +## Suggested Proposer-Based Time + +### Outline + +An alternative approach to time has been discussed: Rather than having the validators send the time in the `precommit` messages, the proposer in the consensus algorithm sends its time in the `propose` message, and the validators locally check whether the time is OK (by comparing to their local clock). + +This proposed solution adds the requirement of having synchronized clocks, and other implicit assumptions. + +### Comparison of the Suggested Method to the Old One + +1. **Fault tolerance.** Maintained in the suggested protocol. +1. **Effect of faulty validators.** Eliminated in the suggested protocol, + that is, the block `time` can be corrupted only in the extreme case when + `>2/3` of the validators are faulty. +1. **Proposer influence on block time.** The proposer of the next block + has less freedom when choosing the block time. + 1. This scenario is eliminated in the suggested protocol, provided that there are `<1/3` faulty validators. + 1. This scenario is still there. +1. **Liveness.** The liveness of the suggested protocol: + 1. depends on the introduced assumptions on synchronized clocks (see below), + 1. still depends on the message delays (unavoidable). +1. **Relation to real time.** We formalize clock synchronization, and obtain a **well-defined relation** between the block `time` and real time. +1. **Aggregate signatures.** The `precommit` messages are free of time, which **allows** for aggregate signatures. + +### Protocol Overview + +#### Proposed Time + +We assume that the field `proposal` in the `PROPOSE` message is a pair `(v, time)`, of the proposed consensus value `v` and the proposed time `time`. + +#### Reception Step + +In the reception step at node `p` at local time `now_p`, upon receiving a message `m`: + +- **if** the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely`.
+(`PRECISION` and `MSGDELAY` being system parameters, see [below](#safety-and-liveness)) + +> after the presentation in the dev session, we realized that different semantics for the reception step is closer aligned to the implementation. Instead of dropping propose messages, we keep all of them, and mark timely ones. + +#### Processing Step + +- Start round + + + + + + + + + + + + +
+<tr>
+<th>arXiv paper</th>
+<th>Proposer-based time</th>
+</tr>
+ +```go +function StartRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + + + if validValue_p != nil { + + proposal ← validValue_p + } else { + + proposal ← getValue() + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to + be executed after timeoutPropose(round_p) + } +} +``` + + + +```go +function StartRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + // new wait condition + wait until now_p > block time of block h_p - 1 + if validValue_p != nil { + // add "now_p" + proposal ← (validValue_p, now_p) + } else { + // add "now_p" + proposal ← (getValue(), now_p) + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to + be executed after timeoutPropose(round_p) + } +} +``` + +
+ +- Rule on lines 28-35 + + + + + + + + + + + + +
+<tr>
+<th>arXiv paper</th>
+<th>Proposer-based time</th>
+</tr>
+ +```go +upon timely(⟨PROPOSAL, h_p, round_p, v, vr⟩) + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ +while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, hp, round_p, nil⟩ + } +} +``` + + + +```go +upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v, tvote)⟩ + while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + // send hash of v and tprop in PREVOTE message + broadcast ⟨PREVOTE, h_p, round_p, id(v, tprop)⟩ + } else { + broadcast ⟨PREVOTE, hp, round_p, nil⟩ + } + } +``` + +
+ +- Rule on lines 49-54 + + + + + + + + + + + + +
+<tr>
+<th>arXiv paper</th>
+<th>Proposer-based time</th>
+</tr>
+ +```go +upon ⟨PROPOSAL, h_p, r, v, ∗⟩ from proposer(h_p, r) + AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v)⟩ + while decisionp[h_p] = nil do { + if valid(v) { + + decision_p [h_p] = v + h_p ← h_p + 1 + reset lockedRound_p , lockedValue_p, validRound_p and + validValue_p to initial values and empty message log + StartRound(0) + } + } +``` + + + +```go +upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) + AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v,t)⟩ + while decisionp[h_p] = nil do { + if valid(v) { + // decide on time too + decision_p [h_p] = (v,t) + h_p ← h_p + 1 + reset lockedRound_p , lockedValue_p, validRound_p and + validValue_p to initial values and empty message log + StartRound(0) + } + } +``` + +
+ +- Other rules are extended in a similar way, or remain unchanged + +### Property Overview + +#### Safety and Liveness + +For safety (Point 1, Point 2, Point 3i) and liveness (Point 4) we need +the following assumptions: + +- There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, their local times `C_V(t)` and `C_W(t)` differ by less than `PRECISION` time units, +i.e., `|C_V(t) - C_W(t)| < PRECISION` +- The message end-to-end delay between a correct proposer and a correct validator (for `PROPOSE` messages) is less than `MSGDELAY`. + +#### Relation to Real-Time + +For analyzing real-time safety (Point 5), we use a system parameter `ACCURACY`, such that for all real-times `t` and all correct validators `V`, we have `| C_V(t) - t | < ACCURACY`. + +> `ACCURACY` is not necessarily visible at the code level. We might even view `ACCURACY` as variable over time. The smaller it is during a consensus instance, the closer the block time will be to real-time. +> +> Note that `PRECISION` and `MSGDELAY` show up in the code. + +### Detailed Specification + +This specification describes the changes needed to be done to the Tendermint consensus algorithm as described in the [arXiv paper][arXiv] and the simplified specification in [TLA+][tlatender], and makes precise the underlying assumptions and the required properties. 
+ +- [Part I - System Model and Properties][sysmodel] +- [Part II - Protocol specification][algorithm] +- [TLA+ Specification][proposertla] + +[arXiv]: https://arxiv.org/abs/1807.04938 + +[tlatender]: https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md + +[bfttime]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/spec/consensus/bft-time.md + +[lcspec]: https://github.com/tendermint/spec/blob/439a5bcacb5ef6ef1118566d7b0cd68fff3553d4/rust-spec/lightclient/README.md + +[algorithm]: ./pbts-algorithm_001_draft.md + +[sysmodel]: ./pbts-sysmodel_001_draft.md + +[main]: ./pbts_001_draft.md + +[proposertla]: ./tla/TendermintPBT_001_draft.tla diff --git a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla new file mode 100644 index 0000000000..c84b024e75 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla @@ -0,0 +1,597 @@ +-------------------- MODULE TendermintPBT_001_draft --------------------------- +(* + A TLA+ specification of a simplified Tendermint consensus, with added clocks + and proposer-based timestamps. This TLA+ specification extends and modifies + the Tendermint TLA+ specification for fork accountability: + https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/TendermintAcc_004_draft.tla + + * Version 1. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. + Ilina Stoilkovska, Josef Widder, Informal Systems, 2021. 
+ *) + +EXTENDS Integers, FiniteSets + +(********************* PROTOCOL PARAMETERS **********************************) +CONSTANTS + Corr, \* the set of correct processes + Faulty, \* the set of Byzantine processes, may be empty + N, \* the total number of processes: correct, defective, and Byzantine + T, \* an upper bound on the number of Byzantine processes + ValidValues, \* the set of valid values, proposed both by correct and faulty + InvalidValues, \* the set of invalid values, never proposed by the correct ones + MaxRound, \* the maximal round number + MaxTimestamp, \* the maximal value of the clock tick + Delay, \* message delay + Precision, \* clock precision: the maximal difference between two local clocks + Accuracy, \* clock accuracy: the maximal difference between a local clock and the real time + Proposer, \* the proposer function from 0..NRounds to 1..N + ClockDrift \* is there clock drift between the local clocks and the global clock + +ASSUME(N = Cardinality(Corr \union Faulty)) + +(*************************** DEFINITIONS ************************************) +AllProcs == Corr \union Faulty \* the set of all processes +Rounds == 0..MaxRound \* the set of potential rounds +Timestamps == 0..MaxTimestamp \* the set of clock ticks +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +NilTimestamp == -1 \* a special value to denote a nil timestamp, outside of Ticks +RoundsOrNil == Rounds \union {NilRound} +Values == ValidValues \union InvalidValues \* the set of all values +NilValue == "None" \* a special value for a nil round, outside of Values +Proposals == Values \X Timestamps +NilProposal == <> +ValuesOrNil == Values \union {NilValue} +Decisions == Values \X Timestamps \X Rounds +NilDecision == <> + + +\* a value hash is modeled as identity +Id(v) == v + +\* The validity predicate +IsValid(v) == v \in ValidValues + +\* the two thresholds that are used in the algorithm +THRESHOLD1 == T + 1 \* at least one process is not faulty 
+THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +Min(S) == CHOOSE x \in S : \A y \in S : x <= y + +Max(S) == CHOOSE x \in S : \A y \in S : y <= x + +(********************* TYPE ANNOTATIONS FOR APALACHE **************************) +\* the operator for type annotations +a <: b == a + +\* the type of message records +MT == [type |-> STRING, src |-> STRING, round |-> Int, + proposal |-> <>, validRound |-> Int, id |-> <>] + +RP == <> + +\* a type annotation for a message +AsMsg(m) == m <: MT +\* a type annotation for a set of messages +SetOfMsgs(S) == S <: {MT} +\* a type annotation for an empty set of messages +EmptyMsgSet == SetOfMsgs({}) + +SetOfRcvProp(S) == S <: {RP} +EmptyRcvProp == SetOfRcvProp({}) + +SetOfProc(S) == S <: {STRING} +EmptyProcSet == SetOfProc({}) + +(********************* PROTOCOL STATE VARIABLES ******************************) +VARIABLES + round, \* a process round number: Corr -> Rounds + localClock, \* a process local clock: Corr -> Ticks + realTime, \* a reference Newtonian real time + step, \* a process step: Corr -> { "PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" } + decision, \* process decision: Corr -> ValuesOrNil + lockedValue, \* a locked value: Corr -> ValuesOrNil + lockedRound, \* a locked round: Corr -> RoundsOrNil + validValue, \* a valid value: Corr -> ValuesOrNil + validRound \* a valid round: Corr -> RoundsOrNil + +\* book-keeping variables +VARIABLES + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + receivedTimelyProposal, \* used to keep track when a process receives a timely PROPOSAL message, {<>} + inspectedProposal, \* used to keep track when a process tries to receive a message, [Rounds -> <>] + evidence, \* the messages that were used by the correct processes to make transitions + action, \* we use this variable to see 
which action was taken + beginConsensus, \* the minimum of the local clocks in the initial state, Int + endConsensus, \* the local time when a decision is made, [Corr -> Int] + lastBeginConsensus, \* the maximum of the local clocks in the initial state, Int + proposalTime, \* the real time when a proposer proposes in a round, [Rounds -> Int] + proposalReceivedTime \* the real time when a correct process first receives a proposal message in a round, [Rounds -> Int] + +(* to see a type invariant, check TendermintAccInv3 *) + +\* a handy definition used in UNCHANGED +vars == <> + +(********************* PROTOCOL INITIALIZATION ******************************) +FaultyProposals(r) == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: {r}, proposal: Proposals, validRound: RoundsOrNil]) + +AllFaultyProposals == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: Rounds, proposal: Proposals, validRound: RoundsOrNil]) + +FaultyPrevotes(r) == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: {r}, id: Proposals]) + +AllFaultyPrevotes == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: Rounds, id: Proposals]) + +FaultyPrecommits(r) == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: {r}, id: Proposals]) + +AllFaultyPrecommits == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: Rounds, id: Proposals]) + +AllProposals == + SetOfMsgs([type: {"PROPOSAL"}, src: AllProcs, + round: Rounds, proposal: Proposals, validRound: RoundsOrNil]) + +RoundProposals(r) == + SetOfMsgs([type: {"PROPOSAL"}, src: AllProcs, + round: {r}, proposal: Proposals, validRound: RoundsOrNil]) + +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* The initial states of the protocol. Some faults can be in the system already. 
+Init == + /\ round = [p \in Corr |-> 0] + /\ \/ /\ ~ClockDrift + /\ localClock \in [Corr -> 0..Accuracy] + \/ /\ ClockDrift + /\ localClock = [p \in Corr |-> 0] + /\ realTime = 0 + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilDecision] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ receivedTimelyProposal = EmptyRcvProp + /\ inspectedProposal = [r \in Rounds |-> EmptyProcSet] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence = EmptyMsgSet + /\ action' = "Init" + /\ beginConsensus = Min({localClock[p] : p \in Corr}) + /\ endConsensus = [p \in Corr |-> NilTimestamp] + /\ lastBeginConsensus = Max({localClock[p] : p \in Corr}) + /\ proposalTime = [r \in Rounds |-> NilTimestamp] + /\ proposalReceivedTime = [r \in Rounds |-> NilTimestamp] + +(************************ MESSAGE PASSING ********************************) +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET newMsg == + AsMsg([type |-> "PROPOSAL", src |-> pSrc, round |-> pRound, + proposal |-> pProposal, validRound |-> pValidRound]) + IN + msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +BroadcastPrevote(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PREVOTE", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +BroadcastPrecommit(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PRECOMMIT", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + + 
+(***************************** TIME **************************************) + +\* [PBTS-CLOCK-PRECISION.0] +SynchronizedLocalClocks == + \A p \in Corr : \A q \in Corr : + p /= q => + \/ /\ localClock[p] >= localClock[q] + /\ localClock[p] - localClock[q] < Precision + \/ /\ localClock[p] < localClock[q] + /\ localClock[q] - localClock[p] < Precision + +\* [PBTS-PROPOSE.0] +Proposal(v, t) == + <> + +\* [PBTS-DECISION-ROUND.0] +Decision(v, t, r) == + <> + +(**************** MESSAGE PROCESSING TRANSITIONS *************************) +\* lines 12-13 +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + +\* lines 14-19, a proposal may be sent later +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \E v \in ValidValues: + LET proposal == IF validValue[p] /= NilValue + THEN Proposal(validValue[p], localClock[p]) + ELSE Proposal(v, localClock[p]) IN + + /\ BroadcastProposal(p, round[p], proposal, validRound[p]) + /\ proposalTime' = [proposalTime EXCEPT ![r] = realTime] + /\ UNCHANGED <> + /\ action' = "InsertProposal" + +\* a new action used to filter messages that are not on time +\* [PBTS-RECEPTION-STEP.0] +ReceiveProposal(p) == + \E v \in Values, t \in Timestamps: + /\ LET r == round[p] IN + LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN + /\ msg \in msgsPropose[round[p]] + /\ p \notin inspectedProposal[r] + /\ <> \notin receivedTimelyProposal + /\ inspectedProposal' = [inspectedProposal EXCEPT ![r] = @ \union {p}] + /\ \/ /\ localClock[p] - Precision < t + /\ t < localClock[p] + Precision + Delay + /\ receivedTimelyProposal' = 
receivedTimelyProposal \union {<>} + /\ \/ /\ proposalReceivedTime[r] = NilTimestamp + /\ proposalReceivedTime' = [proposalReceivedTime EXCEPT ![r] = realTime] + \/ /\ proposalReceivedTime[r] /= NilTimestamp + /\ UNCHANGED proposalReceivedTime + \/ /\ \/ localClock[p] - Precision >= t + \/ t >= localClock[p] + Precision + Delay + /\ UNCHANGED <> + /\ UNCHANGED <> + /\ action' = "ReceiveProposal" + +\* lines 22-27 +UponProposalInPropose(p) == + \E v \in Values, t \in Timestamps: + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN + /\ <> \in receivedTimelyProposal \* updated line 22 + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF IsValid(v) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(Proposal(v, t)) + ELSE NilProposal + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +\* [PBTS-ALG-OLD-PREVOTE.0] +UponProposalInProposeAndPrevote(p) == + \E v \in Values, t1 \in Timestamps, t2 \in Timestamps, vr \in Rounds: + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < round[p] \* line 28, the while part + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t1), validRound |-> vr]) + IN + /\ <> \in receivedTimelyProposal \* updated line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(Proposal(v, t2)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(v) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(Proposal(v, t1)) + ELSE NilProposal + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInProposeAndPrevote" + + \* 
lines 34-35 + lines 61-64 (onTimeoutPrevote) +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +\* [PBTS-ALG-NEW-PREVOTE.0] +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, t \in Timestamps, vr \in RoundsOrNil: + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> vr]) IN + /\ <> \in receivedTimelyProposal \* updated line 36 + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(Proposal(v, t)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = round[p]] + /\ BroadcastPrecommit(p, round[p], Id(Proposal(v, t))) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = v] + /\ validRound' = [validRound EXCEPT ![p] = round[p]] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 
+ /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +\* [PBTS-ALG-DECIDE.0] +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilDecision \* line 49 + /\ \E v \in ValidValues, t \in Timestamps (* line 50*) , r \in Rounds, vr \in RoundsOrNil: + /\ LET msg == AsMsg([type |-> "PROPOSAL", src |-> Proposer[r], + round |-> r, proposal |-> Proposal(v, t), validRound |-> vr]) IN + /\ msg \in msgsPropose[r] \* line 49 + /\ p \in inspectedProposal[r] + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(Proposal(v, t)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = Decision(v, t, round[p])] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision. 
+ /\ endConsensus' = [endConsensus EXCEPT ![p] = localClock[p]] + /\ step' = [step EXCEPT ![p] = "DECIDED"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrecommitNoDecision" + +\* the actions below are not essential for safety, but added for completeness + +\* lines 20-21 + 57-60 +OnTimeoutPropose(p) == + /\ step[p] = "PROPOSE" + /\ p /= Proposer[round[p]] + /\ BroadcastPrevote(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "OnTimeoutPropose" + +\* lines 44-46 +OnQuorumOfNilPrevotes(p) == + /\ step[p] = "PREVOTE" + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilProposal) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union evidence + /\ BroadcastPrecommit(p, round[p], Id(NilProposal)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "OnQuorumOfNilPrevotes" + +\* lines 55-56 +OnRoundCatchup(p) == + \E r \in {rr \in Rounds: rr > round[p]}: + LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN + \E MyEvidence \in SUBSET RoundMsgs: + LET Faster == { m.src: m \in MyEvidence } IN + /\ Cardinality(Faster) >= THRESHOLD1 + /\ evidence' = MyEvidence \union evidence + /\ StartRound(p, r) + /\ UNCHANGED <> + /\ action' = "OnRoundCatchup" + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* advance the global clock +AdvanceRealTime == + /\ realTime < MaxTimestamp + /\ realTime' = realTime + 1 + /\ \/ /\ ~ClockDrift + /\ localClock' = [p \in Corr |-> localClock[p] + 1] + \/ /\ ClockDrift + /\ UNCHANGED localClock + /\ UNCHANGED <> + /\ action' = "AdvanceRealTime" + +\* advance the local clock of node p +AdvanceLocalClock(p) == + /\ localClock[p] < MaxTimestamp + /\ localClock' = [localClock EXCEPT ![p] = @ + 1] + /\ UNCHANGED <> + /\ action' = "AdvanceLocalClock" + +\* process timely messages +MessageProcessing(p) == + \* start round + \/ InsertProposal(p) + \* reception step + \/ 
ReceiveProposal(p) + \* processing step + \/ UponProposalInPropose(p) + \/ UponProposalInProposeAndPrevote(p) + \/ UponQuorumOfPrevotesAny(p) + \/ UponProposalInPrevoteOrCommitAndPrevote(p) + \/ UponQuorumOfPrecommitsAny(p) + \/ UponProposalInPrecommitNoDecision(p) + \* the actions below are not essential for safety, but added for completeness + \/ OnTimeoutPropose(p) + \/ OnQuorumOfNilPrevotes(p) + \/ OnRoundCatchup(p) + +(* + * A system transition. In this specificatiom, the system may eventually deadlock, + * e.g., when all processes decide. This is expected behavior, as we focus on safety. + *) +Next == + \/ AdvanceRealTime + \/ /\ ClockDrift + /\ \E p \in Corr: AdvanceLocalClock(p) + \/ /\ SynchronizedLocalClocks + /\ \E p \in Corr: MessageProcessing(p) + +----------------------------------------------------------------------------- + +(*************************** INVARIANTS *************************************) + +\* [PBTS-INV-AGREEMENT.0] +AgreementOnValue == + \A p, q \in Corr: + /\ decision[p] /= NilDecision + /\ decision[q] /= NilDecision + => \E v \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r1 \in Rounds, r2 \in Rounds : + /\ decision[p] = Decision(v, t1, r1) + /\ decision[q] = Decision(v, t2, r2) + +\* [PBTS-INV-TIME-AGR.0] +AgreementOnTime == + \A p, q \in Corr: + \A v1 \in ValidValues, v2 \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r \in Rounds : + /\ decision[p] = Decision(v1, t1, r) + /\ decision[q] = Decision(v2, t2, r) + => t1 = t2 + +\* [PBTS-CONSENSUS-TIME-VALID.0] +ConsensusTimeValid == + \A p \in Corr, t \in Timestamps : + \* if a process decides on v and t + (\E v \in ValidValues, r \in Rounds : decision[p] = Decision(v, t, r)) + \* then + => /\ beginConsensus - Precision <= t + /\ t < endConsensus[p] + Precision + Delay + +\* [PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0] +ConsensusSafeValidCorrProp == + \A v \in ValidValues, t \in Timestamps : + \* if the proposer in the first round is correct + (/\ Proposer[0] \in Corr + \* 
and there exists a process that decided on v, t + /\ \E p \in Corr, r \in Rounds : decision[p] = Decision(v, t, r)) + \* then t is between the minimal and maximal initial local time + => /\ beginConsensus <= t + /\ t <= lastBeginConsensus + +\* [PBTS-CONSENSUS-REALTIME-VALID-CORR.0] +ConsensusRealTimeValidCorr == + \A t \in Timestamps, r \in Rounds : + (/\ \E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r) + /\ proposalTime[r] /= NilTimestamp) + => /\ proposalTime[r] - Accuracy < t + /\ t < proposalTime[r] + Accuracy + +\* [PBTS-CONSENSUS-REALTIME-VALID.0] +ConsensusRealTimeValid == + \A t \in Timestamps, r \in Rounds : + (\E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r)) + => /\ proposalReceivedTime[r] - Accuracy - Precision < t + /\ t < proposalReceivedTime[r] + Accuracy + Precision + Delay + +\* [PBTS-MSG-FAIR.0] +BoundedDelay == + \A r \in Rounds : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime) + => inspectedProposal[r] = Corr + +\* [PBTS-CONSENSUS-TIME-LIVE.0] +ConsensusTimeLive == + \A r \in Rounds, p \in Corr : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime + /\ Proposer[r] \in Corr + /\ round[p] <= r) + => \E msg \in RoundProposals(r) : <> \in receivedTimelyProposal + +\* a conjunction of all invariants +Inv == + /\ AgreementOnValue + /\ AgreementOnTime + /\ ConsensusTimeValid + /\ ConsensusSafeValidCorrProp + /\ ConsensusRealTimeValid + /\ ConsensusRealTimeValidCorr + /\ BoundedDelay + +Liveness == + ConsensusTimeLive + +============================================================================= diff --git a/spec/consensus/proposer-selection.md b/spec/consensus/proposer-selection.md new file mode 100644 index 0000000000..3cea3d5cde --- /dev/null +++ b/spec/consensus/proposer-selection.md @@ -0,0 +1,323 @@ +--- +order: 3 +--- + +# Proposer Selection Procedure + +This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a 
round proposer. +As Tendermint is “leader-based protocol”, the proposer selection is critical for its correct functioning. + +At a given block height, the proposer selection algorithm runs with the same validator set at each round . +Between heights, an updated validator set may be specified by the application as part of the ABCIResponses' EndBlock. + +## Requirements for Proposer Selection + +This sections covers the requirements with Rx being mandatory and Ox optional requirements. +The following requirements must be met by the Proposer Selection procedure: + +### R1: Determinism + +Given a validator set `V`, and two honest validators `p` and `q`, for each height `h` and each round `r` the following must hold: + + `proposer_p(h,r) = proposer_q(h,r)` + +where `proposer_p(h,r)` is the proposer returned by the Proposer Selection Procedure at process `p`, at height `h` and round `r`. + +### R2: Fairness + +Given a validator set with total voting power P and a sequence S of elections. In any sub-sequence of S with length C*P, a validator v must be elected as proposer P/VP(v) times, i.e. with frequency: + + f(v) ~ VP(v) / P + +where C is a tolerance factor for validator set changes with following values: + +- C == 1 if there are no validator set changes +- C ~ k when there are validator changes + +*[this needs more work]* + +## Basic Algorithm + +At its core, the proposer selection procedure uses a weighted round-robin algorithm. + +A model that gives a good intuition on how/ why the selection algorithm works and it is fair is that of a priority queue. The validators move ahead in this queue according to their voting power (the higher the voting power the faster a validator moves towards the head of the queue). 
When the algorithm runs the following happens: + +- all validators move "ahead" according to their powers: for each validator, increase the priority by the voting power +- first in the queue becomes the proposer: select the validator with highest priority +- move the proposer back in the queue: decrease the proposer's priority by the total voting power + +Notation: + +- vset - the validator set +- n - the number of validators +- VP(i) - voting power of validator i +- A(i) - accumulated priority for validator i +- P - total voting power of set +- avg - average of all validator priorities +- prop - proposer + +Simple view at the Selection Algorithm: + +```md + def ProposerSelection (vset): + + // compute priorities and elect proposer + for each validator i in vset: + A(i) += VP(i) + prop = max(A) + A(prop) -= P +``` + +## Stable Set + +Consider the validator set: + +Validator | p1 | p2 +----------|----|--- +VP | 1 | 3 + +Assuming no validator changes, the following table shows the proposer priority computation over a few runs. Four runs of the selection procedure are shown, starting with the 5th the same values are computed. +Each row shows the priority queue and the process place in it. The proposer is the closest to the head, the rightmost validator. As priorities are updated, the validators move right in the queue. The proposer moves left as its priority is reduced after election. 
+ +| Priority Run | -2 | -1 | 0 | 1 | 2 | 3 | 4 | 5 | Alg step | +|----------------|----|----|-------|----|-------|----|----|----|------------------| +| | | | p1,p2 | | | | | | Initialized to 0 | +| run 1 | | | | p1 | | p2 | | | A(i)+=VP(i) | +| | | p2 | | p1 | | | | | A(p2)-= P | +| run 2 | | | | | p1,p2 | | | | A(i)+=VP(i) | +| | p1 | | | | p2 | | | | A(p1)-= P | +| run 3 | | p1 | | | | | | p2 | A(i)+=VP(i) | +| | | p1 | | p2 | | | | | A(p2)-= P | +| run 4 | | | p1 | | | | p2 | | A(i)+=VP(i) | +| | | | p1,p2 | | | | | | A(p2)-= P | + +It can be shown that: + +- At the end of each run k+1 the sum of the priorities is the same as at end of run k. If a new set's priorities are initialized to 0 then the sum of priorities will be 0 at each run while there are no changes. +- The max distance between priorites is (n-1) *P.*[formal proof not finished]* + +## Validator Set Changes + +Between proposer selection runs the validator set may change. Some changes have implications on the proposer election. + +### Voting Power Change + +Consider again the earlier example and assume that the voting power of p1 is changed to 4: + +Validator | p1 | p2 +----------|----|--- +VP | 4 | 3 + +Let's also assume that before this change the proposer priorites were as shown in first row (last run). As it can be seen, the selection could run again, without changes, as before. + +| Priority Run | -2 | -1 | 0 | 1 | 2 | Comment | +|----------------|----|----|---|----|----|-------------------| +| last run | | p2 | | p1 | | __update VP(p1)__ | +| next run | | | | | p2 | A(i)+=VP(i) | +| | p1 | | | | p2 | A(p1)-= P | + +However, when a validator changes power from a high to a low value, some other validator remain far back in the queue for a long time. This scenario is considered again in the Proposer Priority Range section. + +As before: + +- At the end of each run k+1 the sum of the priorities is the same as at run k. +- The max distance between priorites is (n-1) * P. 
+ +### Validator Removal + +Consider a new example with set: + +Validator | p1 | p2 | p3 +----------|----|----|--- +VP | 1 | 2 | 3 + +Let's assume that after the last run the proposer priorities were as shown in first row with their sum being 0. After p2 is removed, at the end of next proposer selection run (penultimate row) the sum of priorities is -2 (minus the priority of the removed process). + +The procedure could continue without modifications. However, after a sufficiently large number of modifications in validator set, the priority values would migrate towards maximum or minimum allowed values causing truncations due to overflow detection. +For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0. + +| Priority Run | -3 | -2 | -1 | 0 | 1 | 2 | 4 | Comment | +|----------------|----|----|----|---|----|----|---|-----------------------| +| last run | p3 | | | | p1 | p2 | | __remove p2__ | +| nextrun | | | | | | | | | +| __new step__ | | p3 | | | | p1 | | A(i) -= avg, avg = -1 | +| | | | | | p3 | p1 | | A(i)+=VP(i) | +| | | | p1 | | p3 | | | A(p1)-= P | + +The modified selection algorithm is: + +```md + def ProposerSelection (vset): + + // center priorities around zero + avg = sum(A(i) for i in vset)/len(vset) + for each validator i in vset: + A(i) -= avg + + // compute priorities and elect proposer + for each validator i in vset: + A(i) += VP(i) + prop = max(A) + A(prop) -= P +``` + +Observations: + +- The sum of priorities is now close to 0. Due to integer division the sum is an integer in (-n, n), where n is the number of validators. + +### New Validator + +When a new validator is added, same problem as the one described for removal appears, the sum of priorities in the new set is not zero. This is fixed with the centering step introduced above. + +One other issue that needs to be addressed is the following. 
A validator V that has just been elected is moved to the end of the queue. If the validator set is large and/or other validators have significantly higher power, V will have to wait many runs to be elected. If V removes and re-adds itself to the set, it would make a significant (albeit unfair) "jump" ahead in the queue. + +In order to prevent this, when a new validator is added, its initial priority is set to: + +```md + A(V) = -1.125 * P +``` + +where P is the total voting power of the set including V. + +Current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. + +If we consider the validator set where p3 has just been added: + +Validator | p1 | p2 | p3 +----------|----|----|--- +VP | 1 | 3 | 8 + +then p3 will start with proposer priority: + +```md + A(p3) = -1.125 * (1 + 3 + 8) ~ -13 +``` + +Note that since current computation uses integer division there is penalty loss when sum of the voting power is less than 8. + +In the next run, p3 will still be ahead in the queue, elected as proposer and moved back in the queue. + +| Priority Run | -13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 | Alg step | +|----------------|-----|----|----|----|----|---|---|----|----|----|----|-----------------------| +| last run | | | | p2 | | | | p1 | | | | __add p3__ | +| | p3 | | | p2 | | | | p1 | | | | A(p3) = -4 | +| next run | | p3 | | | | | | p2 | | p1 | | A(i) -= avg, avg = -4 | +| | | | | | p3 | | | | p2 | | p1 | A(i)+=VP(i) | +| | | | p1 | | p3 | | | | p2 | | | A(p1)-=P | + +## Proposer Priority Range + +With the introduction of centering, some interesting cases occur. Low power validators that bind early in a set that includes high power validator(s) benefit from subsequent additions to the set. 
This is because these early validators run through more right shift operations during centering, operations that increase their priority. + +As an example, consider the set where p2 is added after p1, with priority -1.125 * 80k = -90k. After the selection procedure runs once: + +Validator | p1 | p2 | Comment +----------|------|------|------------------ +VP | 80k | 10 | +A | 0 | -90k | __added p2__ +A | -45k | 45k | __run selection__ + +Then execute the following steps: + +1. Add a new validator p3: + + Validator | p1 | p2 | p3 + ----------|-----|----|--- + VP | 80k | 10 | 10 + +2. Run selection once. The notation '..p'/'p..' means very small deviations compared to column priority. + + | Priority Run | -90k.. | -60k | -45k | -15k | 0 | 45k | 75k | 155k | Comment | + |---------------|--------|------|------|------|---|-----|-----|------|--------------| + | last run | p3 | | p2 | | | p1 | | | __added p3__ | + | next run + | *right_shift*| | p3 | | p2 | | | p1 | | A(i) -= avg,avg=-30k + | | | ..p3| | ..p2| | | | p1 | A(i)+=VP(i) + | | | ..p3| | ..p2| | | p1.. | | A(p1)-=P, P=80k+20 + +3. Remove p1 and run selection once: + + Validator | p3 | p2 | Comment + ----------|--------|-------|------------------ + VP | 10 | 10 | + A | -60k | -15k | + A | -22.5k | 22.5k | __run selection__ + +At this point, while the total voting power is 20, the distance between priorities is 45k. It will take 4500 runs for p3 to catch up with p2. + +In order to prevent these types of scenarios, the selection algorithm performs scaling of priorities such that the difference between min and max values is smaller than two times the total voting power. 
+ +The modified selection algorithm is: + +```md + def ProposerSelection (vset): + + // scale the priority values + diff = max(A)-min(A) + threshold = 2 * P + if diff > threshold: + scale = diff/threshold + for each validator i in vset: + A(i) = A(i)/scale + + // center priorities around zero + avg = sum(A(i) for i in vset)/len(vset) + for each validator i in vset: + A(i) -= avg + + // compute priorities and elect proposer + for each validator i in vset: + A(i) += VP(i) + prop = max(A) + A(prop) -= P +``` + +Observations: + +- With this modification, the maximum distance between priorities becomes 2 * P. + +Note also that even during steady state the priority range may increase beyond 2 * P. The scaling introduced here helps to keep the range bounded. + +## Wrinkles + +### Validator Power Overflow Conditions + +The validator voting power is a positive number stored as an int64. When a validator is added the `1.125 * P` computation must not overflow. As a consequence the code handling validator updates (add and update) checks for overflow conditions making sure the total voting power is never larger than the largest int64 `MAX`, with the property that `1.125 * MAX` is still in the bounds of int64. A fatal error is returned when an overflow condition is detected. + +### Proposer Priority Overflow/Underflow Handling + +The proposer priority is stored as an int64. The selection algorithm performs additions and subtractions to these values and in the case of overflows and underflows it limits the values to: + +```go + MaxInt64 = 1 << 63 - 1 + MinInt64 = -1 << 63 +``` + +## Requirement Fulfillment Claims + +__[R1]__ + +The proposer algorithm is deterministic giving consistent results across executions with same transactions and validator set modifications. 
+[WIP - needs more detail] + +__[R2]__ + +Given a set of processes with the total voting power P, during a sequence of elections of length P, the number of times any process is selected as proposer is equal to its voting power. The sequence of the P proposers then repeats. If we consider the validator set: + +Validator | p1 | p2 +----------|----|--- +VP | 1 | 3 + +With no other changes to the validator set, the current implementation of proposer selection generates the sequence: +`p2, p1, p2, p2, p2, p1, p2, p2,...` or [`p2, p1, p2, p2`]* +A sequence that starts with any circular permutation of the [`p2, p1, p2, p2`] sub-sequence would also provide the same degree of fairness. In fact these circular permutations show in the sliding window (over the generated sequence) of size equal to the length of the sub-sequence. + +Assigning priorities to each validator based on the voting power and updating them at each run ensures the fairness of the proposer selection. In addition, every time a validator is elected as proposer its priority is decreased with the total voting power. + +Intuitively, a process v jumps ahead in the queue at most (max(A) - min(A))/VP(v) times until it reaches the head and is elected. The frequency is then: + +```md + f(v) ~ VP(v)/(max(A)-min(A)) = 1/k * VP(v)/P +``` + +For current implementation, this means v should be proposer at least VP(v) times out of k * P runs, with scaling factor k=2. diff --git a/spec/consensus/readme.md b/spec/consensus/readme.md new file mode 100644 index 0000000000..aa79ba1929 --- /dev/null +++ b/spec/consensus/readme.md @@ -0,0 +1,32 @@ +--- +order: 1 +parent: + title: Consensus + order: 4 +--- + +# Consensus + +Specification of the Tendermint consensus protocol. + +## Contents + +- [Consensus Paper](./consensus-paper) - Latex paper on + [arxiv](https://arxiv.org/abs/1807.04938) describing the + core Tendermint consensus state machine with proofs of safety and termination. 
+- [BFT Time](./bft-time.md) - How the timestamp in a Tendermint + block header is computed in a Byzantine Fault Tolerant manner +- [Creating Proposal](./creating-proposal.md) - How a proposer + creates a block proposal for consensus +- [Light Client Protocol](./light-client) - A protocol for lightweight consensus + verification and syncing to the latest state +- [Signing](./signing.md) - Rules for cryptographic signatures + produced by validators. +- [Write Ahead Log](./wal.md) - Write ahead log used by the + consensus state machine to recover from crashes. + +The protocol used to gossip consensus messages between peers, which is critical +for liveness, is described in the [reactors section](../reactors/consensus/consensus.md). + +There is also a [stale markdown description](consensus.md) of the consensus state machine +(TODO update this). diff --git a/spec/consensus/signing.md b/spec/consensus/signing.md new file mode 100644 index 0000000000..907a5a01af --- /dev/null +++ b/spec/consensus/signing.md @@ -0,0 +1,229 @@ +# Validator Signing + +Here we specify the rules for validating a proposal and vote before signing. +First we include some general notes on validating data structures common to both types. +We then provide specific validation rules for each. Finally, we include validation rules to prevent double-signing. + +## SignedMsgType + +The `SignedMsgType` is a single byte that refers to the type of the message +being signed. It is defined in Go as follows: + +```go +// SignedMsgType is a type of signed message in the consensus. +type SignedMsgType byte + +const ( + // Votes + PrevoteType SignedMsgType = 0x01 + PrecommitType SignedMsgType = 0x02 + + // Proposals + ProposalType SignedMsgType = 0x20 +) +``` + +All signed messages must correspond to one of these types. + +## Timestamp + +Timestamp validation is subtle and there are currently no bounds placed on the +timestamp included in a proposal or vote. 
It is expected that validators will honestly +report their local clock time. The median of all timestamps +included in a commit is used as the timestamp for the next block height. + +Timestamps are expected to be strictly monotonic for a given validator, though +this is not currently enforced. + +## ChainID + +ChainID is an unstructured string with a max length of 50-bytes. +In the future, the ChainID may become structured, and may take on longer lengths. +For now, it is recommended that signers be configured for a particular ChainID, +and to only sign votes and proposals corresponding to that ChainID. + +## BlockID + +BlockID is the structure used to represent the block: + +```go +type BlockID struct { + Hash []byte + PartsHeader PartSetHeader +} + +type PartSetHeader struct { + Hash []byte + Total int +} +``` + +To be included in a valid vote or proposal, BlockID must either represent a `nil` block, or a complete one. +We introduce two methods, `BlockID.IsZero()` and `BlockID.IsComplete()` for these cases, respectively. 
+ +`BlockID.IsZero()` returns true for BlockID `b` if each of the following +are true: + +```go +b.Hash == nil +b.PartsHeader.Total == 0 +b.PartsHeader.Hash == nil +``` + +`BlockID.IsComplete()` returns true for BlockID `b` if each of the following +are true: + +```go +len(b.Hash) == 32 +b.PartsHeader.Total > 0 +len(b.PartsHeader.Hash) == 32 +``` + +## Proposals + +The structure of a proposal for signing looks like: + +```go +type CanonicalProposal struct { + Type SignedMsgType // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + POLRound int64 `binary:"fixed64"` + BlockID BlockID + Timestamp time.Time + ChainID string +} +``` + +A proposal is valid if each of the following lines evaluates to true for proposal `p`: + +```go +p.Type == 0x20 +p.Height > 0 +p.Round >= 0 +p.POLRound >= -1 +p.BlockID.IsComplete() +``` + +In other words, a proposal is valid for signing if it contains the type of a Proposal +(0x20), has a positive, non-zero height, a +non-negative round, a POLRound not less than -1, and a complete BlockID. + +## Votes + +The structure of a vote for signing looks like: + +```go +type CanonicalVote struct { + Type SignedMsgType // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + BlockID BlockID + Timestamp time.Time + ChainID string +} +``` + +A vote is valid if each of the following lines evaluates to true for vote `v`: + +```go +v.Type == 0x1 || v.Type == 0x2 +v.Height > 0 +v.Round >= 0 +v.BlockID.IsZero() || v.BlockID.IsComplete() +``` + +In other words, a vote is valid for signing if it contains the type of a Prevote +or Precommit (0x1 or 0x2, respectively), has a positive, non-zero height, a +non-negative round, and an empty or valid BlockID. + +## Invalid Votes and Proposals + +Votes and proposals which do not satisfy the above rules are considered invalid. +Peers gossipping invalid votes and proposals may be disconnected from other peers on the network. 
+Note, however, that there is not currently any explicit mechanism to punish validators signing votes or proposals that fail +these basic validation rules. + +## Double Signing + +Signers must be careful not to sign conflicting messages, also known as "double signing" or "equivocating". +Tendermint has mechanisms to publish evidence of validators that signed conflicting votes, so they can be punished +by the application. Note Tendermint does not currently handle evidence of conflicting proposals, though it may in the future. + +### State + +To prevent such double signing, signers must track the height, round, and type of the last message signed. +Assume the signer keeps the following state, `s`: + +```go +type LastSigned struct { + Height int64 + Round int64 + Type SignedMsgType // byte +} +``` + +After signing a vote or proposal `m`, the signer sets: + +```go +s.Height = m.Height +s.Round = m.Round +s.Type = m.Type +``` + +### Proposals + +A signer should only sign a proposal `p` if any of the following lines are true: + +```go +p.Height > s.Height +p.Height == s.Height && p.Round > s.Round +``` + +In other words, a proposal should only be signed if it's at a higher height, or a higher round for the same height. +Once a proposal or vote has been signed for a given height and round, a proposal should never be signed for the same height and round. 
+ +### Votes + +A signer should only sign a vote `v` if any of the following lines are true: + +```go +v.Height > s.Height +v.Height == s.Height && v.Round > s.Round +v.Height == s.Height && v.Round == s.Round && v.Type == 0x1 && s.Type == 0x20 +v.Height == s.Height && v.Round == s.Round && v.Type == 0x2 && s.Type != 0x2 +``` + +In other words, a vote should only be signed if it's: + +- at a higher height +- at a higher round for the same height +- a prevote for the same height and round where we haven't signed a prevote or precommit (but have signed a proposal) +- a precommit for the same height and round where we haven't signed a precommit (but have signed a proposal and/or a prevote) + +This means that once a validator signs a prevote for a given height and round, the only other message it can sign for that height and round is a precommit. +And once a validator signs a precommit for a given height and round, it must not sign any other message for that same height and round. + +Note this includes votes for `nil`, ie. where `BlockID.IsZero()` is true. If a +signer has already signed a vote where `BlockID.IsZero()` is true, it cannot +sign another vote with the same type for the same height and round where +`BlockID.IsComplete()` is true. Thus only a single vote of a particular type +(ie. 0x01 or 0x02) can be signed for the same height and round. + +### Other Rules + +According to the rules of Tendermint consensus, once a validator precommits for +a block, they become "locked" on that block, which means they can't prevote for +another block unless they see sufficient justification (ie. a polka from a +higher round). For more details, see the [consensus +spec](https://arxiv.org/abs/1807.04938). + +Violating this rule is known as "amnesia". In contrast to equivocation, +which is easy to detect, amnesia is difficult to detect without access to votes +from all the validators, as this is what constitutes the justification for +"unlocking". 
Hence, amnesia is not punished within the protocol, and cannot +easily be prevented by a signer. If enough validators simultaneously commit an +amnesia attack, they may cause a fork of the blockchain, at which point an +off-chain protocol must be engaged to collect votes from all the validators and +determine who misbehaved. For more details, see [fork +detection](https://github.com/tendermint/tendermint/pull/3978). diff --git a/spec/consensus/wal.md b/spec/consensus/wal.md new file mode 100644 index 0000000000..597d1b79e1 --- /dev/null +++ b/spec/consensus/wal.md @@ -0,0 +1,32 @@ +# WAL + +Consensus module writes every message to the WAL (write-ahead log). + +It also issues fsync syscall through +[File#Sync](https://golang.org/pkg/os/#File.Sync) for messages signed by this +node (to prevent double signing). + +Under the hood, it uses +[autofile.Group](https://godoc.org/github.com/tendermint/tmlibs/autofile#Group), +which rotates files when those get too big (> 10MB). + +The total maximum size is 1GB. We only need the latest block and the block before it, +but if the former is dragging on across many rounds, we want all those rounds. + +## Replay + +Consensus module will replay all the messages of the last height written to WAL +before a crash (if such occurs). + +The private validator may try to sign messages during replay because it runs +somewhat autonomously and does not know about replay process. + +For example, if we got all the way to precommit in the WAL and then crash, +after we replay the proposal message, the private validator will try to sign a +prevote. But it will fail. That's ok because we’ll see the prevote later in the +WAL. Then it will go to precommit, and that time it will work because the +private validator contains the `LastSignBytes` and then we’ll replay the +precommit from the WAL. 
+ +Make sure to read about [WAL corruption](https://github.com/tendermint/tendermint/blob/v0.34.x/docs/tendermint-core/running-in-production.md#wal-corruption) +and recovery strategies. diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md new file mode 100644 index 0000000000..d89681dcd4 --- /dev/null +++ b/spec/core/data_structures.md @@ -0,0 +1,456 @@ +# Data Structures + +Here we describe the data structures in the Tendermint blockchain and the rules for validating them. + +The Tendermint blockchains consists of a short list of data types: + +- [Data Structures](#data-structures) + - [Block](#block) + - [Execution](#execution) + - [Header](#header) + - [Version](#version) + - [BlockID](#blockid) + - [PartSetHeader](#partsetheader) + - [Part](#part) + - [Time](#time) + - [Data](#data) + - [Commit](#commit) + - [CommitSig](#commitsig) + - [BlockIDFlag](#blockidflag) + - [Vote](#vote) + - [CanonicalVote](#canonicalvote) + - [Proposal](#proposal) + - [SignedMsgType](#signedmsgtype) + - [Signature](#signature) + - [EvidenceList](#evidencelist) + - [Evidence](#evidence) + - [DuplicateVoteEvidence](#duplicatevoteevidence) + - [LightClientAttackEvidence](#lightclientattackevidence) + - [LightBlock](#lightblock) + - [SignedHeader](#signedheader) + - [ValidatorSet](#validatorset) + - [Validator](#validator) + - [Address](#address) + - [ConsensusParams](#consensusparams) + - [BlockParams](#blockparams) + - [EvidenceParams](#evidenceparams) + - [ValidatorParams](#validatorparams) + - [VersionParams](#versionparams) + - [Proof](#proof) + + +## Block + +A block consists of a header, transactions, votes (the commit), +and a list of evidence of malfeasance (ie. signing conflicting votes). 
+ +| Name | Type | Description | Validation | +|--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| +| Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) | +| Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to Tendermint. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](../abci/abci.md#checktx). +| Evidence | [EvidenceList](#evidence_list) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidence_list) apply | +| LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). | + +## Execution + +Once a block is validated, it can be executed against the state. 
+ +The state follows this recursive equation: + +```go +state(initialHeight) = InitialState +state(h+1) <- Execute(state(h), ABCIApp, block(h)) +``` + +where `InitialState` includes the initial consensus parameters and validator set, +and `ABCIApp` is an ABCI application that can return results and changes to the validator +set (TODO). Execute is defined as: + +```go +func Execute(s State, app ABCIApp, block Block) State { + // Function ApplyBlock executes block of transactions against the app and returns the new root hash of the app state, + // modifications to the validator set and the changes of the consensus parameters. + AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block) + + nextConsensusParams := UpdateConsensusParams(state.ConsensusParams, ConsensusParamChanges) + return State{ + ChainID: state.ChainID, + InitialHeight: state.InitialHeight, + LastResults: abciResponses.DeliverTxResults, + AppHash: AppHash, + LastValidators: state.Validators, + Validators: state.NextValidators, + NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges), + ConsensusParams: nextConsensusParams, + Version: { + Consensus: { + AppVersion: nextConsensusParams.Version.AppVersion, + }, + }, + } +} +``` + +Validating a new block is first done prior to the `prevote`, `precommit` & `finalizeCommit` stages. + +The steps to validate a new block are: + +- Check the validity rules of the block and its fields. +- Check the versions (Block & App) are the same as in local state. +- Check the chainID's match. +- Check the height is correct. +- Check the `LastBlockID` corresponds to BlockID currently in state. +- Check the hashes in the header match those in state. +- Verify the LastCommit against state, this step is skipped for the initial height. + - This is where checking the signatures correspond to the correct block will be made. +- Make sure the proposer is part of the validator set. +- Validate block time. 
+ - Make sure the new blocks time is after the previous blocks time. + - Calculate the medianTime and check it against the blocks time. + - If the blocks height is the initial height then check if it matches the genesis time. +- Validate the evidence in the block. Note: Evidence can be empty + +## Header + +A block header contains metadata about the block and about the consensus, as well as commitments to +the data in the current block, the previous block, and the results returned by the application: + +| Name | Type | Description | Validation | +|-------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Version | [Version](#version) | Version defines the application and protocol version being used. | Must adhere to the validation rules of [Version](#version) | +| ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. | +| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 | +| Time | [Time](#time) | The timestamp is equal to the weighted median of validators present in the last commit. Read more on time in the [BFT-time section](../consensus/bft-time.md). Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. | Time must be >= previous header timestamp + consensus parameters TimeIotaMs. 
The timestamp of the first block must be equal to the genesis time (since there's no votes to compute the median). | +| LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. | +| LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash. | Must be of length 32 | +| DataHash | slice of bytes (`[]byte`) | MerkleRoot of the hash of transactions. **Note**: The transactions are hashed before being included in the merkle tree, the leaves of the Merkle tree are the hashes, not the transactions themselves. | Must be of length 32 | +| ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | +| NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | +| ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 | +| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and commiting the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, Tendermint can not perform validation on it. 
| +| LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. | +| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behaviour included in this block. | Must be of length 32 | +| ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 | + +## Version + +NOTE: that this is more specifically the consensus version and doesn't include information like the +P2P Version. (TODO: we should write a comprehensive document about +versioning that this can refer to) + +| Name | type | Description | Validation | +|-------|--------|-----------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------| +| Block | uint64 | This number represents the version of the block protocol and must be the same throughout an operational network | Must be equal to protocol version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) | +| App | uint64 | App version is decided on by the application. Read [here](../abci/abci.md#info) | `block.Version.App == state.Version.Consensus.App` | + +## BlockID + +The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` includes these two hashes, as well as the number of parts (ie. 
`len(MakeParts(block))`) + +| Name | Type | Description | Validation | +|---------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. | hash must be of length 32 | +| PartSetHeader | [PartSetHeader](#PartSetHeader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#PartSetHeader) | + +See [MerkleRoot](./encoding.md#MerkleRoot) for details. + +## PartSetHeader + +| Name | Type | Description | Validation | +|-------|---------------------------|-----------------------------------|----------------------| +| Total | int32 | Total amount of parts for a block | Must be > 0 | +| Hash | slice of bytes (`[]byte`) | MerkleRoot of a serialized block | Must be of length 32 | + +## Part + +Part defines a part of a block. In Tendermint blocks are broken into `parts` for gossip. + +| Name | Type | Description | Validation | +|-------|-----------------|-----------------------------------|----------------------| +| index | int32 | Total amount of parts for a block | Must be > 0 | +| bytes | bytes | MerkleRoot of a serialized block | Must be of length 32 | +| proof | [Proof](#proof) | MerkleRoot of a serialized block | Must be of length 32 | + +## Time + +Tendermint uses the [Google.Protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) +format, which uses two integers, one 64 bit integer for Seconds and a 32 bit integer for Nanoseconds. 
+ +## Data + +Data is just a wrapper for a list of transactions, where transactions are arbitrary byte arrays: + +| Name | Type | Description | Validation | +|------|----------------------------|------------------------|-----------------------------------------------------------------------------| +| Txs | Matrix of bytes ([][]byte) | Slice of transactions. | Validation does not occur on this field, this data is unknown to Tendermint | + +## Commit + +Commit is a simple wrapper for a list of signatures, with one for each validator. It also contains the relevant BlockID, height and round: + +| Name | Type | Description | Validation | +|------------|----------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------| +| Height | uint64 | Height at which this commit was created. | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). | +| Signatures | Array of [CommitSig](#commitsig) | Array of commit signatures that correspond to current validator set. | Length of signatures must be > 0 and adhere to the validation of each individual [Commitsig](#commitsig) | + +## CommitSig + +`CommitSig` represents a signature of a validator, who has voted either for nil, +a particular `BlockID` or was absent. It's a part of the `Commit` and can be used +to reconstruct the vote set given the validator set. 
+ +| Name | Type | Description | Validation | +|------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------| +| BlockIDFlag | [BlockIDFlag](#blockidflag) | Represents the validators participation in consensus: Either voted for the block that received the majority, voted for another block, voted nil or did not vote | Must be one of the fields in the [BlockIDFlag](#blockidflag) enum | +| ValidatorAddress | [Address](#address) | Address of the validator | Must be of length 20 | +| Timestamp | [Time](#time) | This field will vary from `CommitSig` to `CommitSig`. It represents the timestamp of the validator. | [Time](#time) | +| Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | The length of the signature must be > 0 and < than 64 | + +NOTE: `ValidatorAddress` and `Timestamp` fields may be removed in the future +(see [ADR-25](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-025-commit.md)). + +## BlockIDFlag + +BlockIDFlag represents which BlockID the [signature](#commitsig) is for. + +```go +enum BlockIDFlag { + BLOCK_ID_FLAG_UNKNOWN = 0; + BLOCK_ID_FLAG_ABSENT = 1; // signatures for other blocks are also considered absent + BLOCK_ID_FLAG_COMMIT = 2; + BLOCK_ID_FLAG_NIL = 3; +} +``` + +## Vote + +A vote is a signed message from a validator for a particular block. +The vote includes information about the validator signing it. When stored in the blockchain or propagated over the network, votes are encoded in Protobuf. 
+ +| Name | Type | Description | Validation | +|------------------|---------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| +| Type | [SignedMsgType](#signedmsgtype) | Either prevote or precommit. [SignedMsgType](#signedmsgtype) | A Vote is valid if its corresponding fields are included in the enum [signedMsgType](#signedmsgtype) | +| Height | uint64 | Height for which this vote was created for | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | +| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. | [Time](#time) | +| ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | +| ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | +| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | + +## CanonicalVote + +CanonicalVote is for validator signing. This type will not be present in a block. Votes are represented via `CanonicalVote` and also encoded using protobuf via `type.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. + +```proto +message CanonicalVote { + SignedMsgType type = 1; + fixed64 height = 2; + sfixed64 round = 3; + CanonicalBlockID block_id = 4; + google.protobuf.Timestamp timestamp = 5; + string chain_id = 6; +} +``` + +For signing, votes are represented via [`CanonicalVote`](#canonicalvote) and also encoded using protobuf via +`type.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. 
+ +We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` +using the given ChainID: + +```go +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + v := vote.ToProto() + if !pubKey.VerifyBytes(types.VoteSignBytes(chainID, v), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil +} +``` + +## Proposal + +Proposal contains height and round for which this proposal is made, BlockID as a unique identifier +of proposed block, timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for +termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that +is locked in POLRound. The message is signed by the validator private key. + +| Name | Type | Description | Validation | +|-----------|---------------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------| +| Type | [SignedMsgType](#signedmsgtype) | Represents a Proposal [SignedMsgType](#signedmsgtype) | Must be `ProposalType` [signedMsgType](#signedmsgtype) | +| Height | uint64 | Height for which this vote was created for | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| POLRound | int64 | Proof of lock | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | +| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. | [Time](#time) | +| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | + +## SignedMsgType + +Signed message type represents a signed messages in consensus. 
+ +```proto +enum SignedMsgType { + + SIGNED_MSG_TYPE_UNKNOWN = 0; + // Votes + SIGNED_MSG_TYPE_PREVOTE = 1; + SIGNED_MSG_TYPE_PRECOMMIT = 2; + + // Proposal + SIGNED_MSG_TYPE_PROPOSAL = 32; +} +``` + +## Signature + +Signatures in Tendermint are raw bytes representing the underlying signature. + +See the [signature spec](./encoding.md#key-types) for more. + +## EvidenceList + +EvidenceList is a simple wrapper for a list of evidence: + +| Name | Type | Description | Validation | +|----------|--------------------------------|----------------------------------------|-----------------------------------------------------------------| +| Evidence | Array of [Evidence](#evidence) | List of verified [evidence](#evidence) | Validation adheres to individual types of [Evidence](#evidence) | + +## Evidence + +Evidence in Tendermint is used to indicate breaches in the consensus by a validator. + +More information on how evidence works in Tendermint can be found [here](../consensus/evidence.md) + +### DuplicateVoteEvidence + +`DuplicateVoteEvidence` represents a validator that has voted for two different blocks +in the same round of the same height. Votes are lexicographically sorted on `BlockID`. 
+ +| Name | Type | Description | Validation | +|------------------|---------------|--------------------------------------------------------------------|-----------------------------------------------------| +| VoteA | [Vote](#vote) | One of the votes submitted by a validator when they equivocated | VoteA must adhere to [Vote](#vote) validation rules | +| VoteB | [Vote](#vote) | The second vote submitted by a validator when they equivocated | VoteB must adhere to [Vote](#vote) validation rules | +| TotalVotingPower | int64 | The total power of the validator set at the height of equivocation | Must be equal to nodes own copy of the data | +| ValidatorPower | int64 | Power of the equivocating validator at the height | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#Time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | + +### LightClientAttackEvidence + +`LightClientAttackEvidence` is a generalized evidence that captures all forms of known attacks on +a light client such that a full node can verify, propose and commit the evidence on-chain for +punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation +and Amnesia. These attacks are exhaustive. 
You can find a more detailed overview of this [here](../light-client/accountability#the_misbehavior_of_faulty_validators) + +| Name | Type | Description | Validation | +|----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| +| ConflictingBlock | [LightBlock](#LightBlock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | +| CommonHeight | int64 | Read Below | must be > 0 | +| Byzantine Validators | Array of [Validators](#Validators) | validators that acted maliciously | Read Below | +| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#Time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | + +## LightBlock + +LightBlock is the core data structure of the [light client](../light-client/README.md). It combines two data structures needed for verification ([signedHeader](#signedheader) & [validatorSet](#validatorset)). + +| Name | Type | Description | Validation | +|--------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| SignedHeader | [SignedHeader](#signedheader) | The header and commit, these are used for verification purposes. To find out more visit [light client docs](../light-client/README.md) | Must not be nil and adhere to the validation rules of [signedHeader](#signedheader) | +| ValidatorSet | [ValidatorSet](#validatorset) | The validatorSet is used to help with verify that the validators in that committed the infraction were truly in the validator set. 
| Must not be nil and adhere to the validation rules of [validatorSet](#validatorset) | + +The `SignedHeader` and `ValidatorSet` are linked by the hash of the validator set (`SignedHeader.ValidatorsHash == ValidatorSet.Hash()`). + +## SignedHeader + +The SignedHeader is the [header](#header) accompanied by the commit to prove it. + +| Name | Type | Description | Validation | +|--------|-------------------|-------------------|-----------------------------------------------------------------------------------| +| Header | [Header](#Header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#Header) validation criteria | +| Commit | [Commit](#commit) | [Commit](#commit) | Commit cannot be nil and must adhere to the [Commit](#commit) criteria | + +## ValidatorSet + +| Name | Type | Description | Validation | +|------------|----------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------| +| Validators | Array of [validator](#validator) | List of the active validators at a specific height | The list of validators can not be empty or nil and must adhere to the validation rules of [validator](#validator) | +| Proposer | [validator](#validator) | The block proposer for the corresponding block | The proposer cannot be nil and must adhere to the validation rules of [validator](#validator) | + +## Validator + +| Name | Type | Description | Validation | +|------------------|---------------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------| +| Address | [Address](#address) | Validators Address | Length must be of size 20 | +| Pubkey | slice of bytes (`[]byte`) | Validators Public Key | must be a length greater than 0 | +| VotingPower | int64 | Validators voting power | cannot be < 0 | +| ProposerPriority | 
int64 | Validators proposer priority. This is used to gauge when a validator is up next to propose blocks | No validation, value can be negative and positive | + +## Address + +Address is a type alias of a slice of bytes. The address is calculated by hashing the public key using sha256 and truncating it to only use the first 20 bytes of the slice. + +```go +const ( + TruncatedSize = 20 +) + +func SumTruncated(bz []byte) []byte { + hash := sha256.Sum256(bz) + return hash[:TruncatedSize] +} +``` + +## ConsensusParams + +| Name | Type | Description | Field Number | +|-----------|-------------------------------------|------------------------------------------------------------------------------|--------------| +| block | [BlockParams](#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | +| evidence | [EvidenceParams](#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | +| validator | [ValidatorParams](#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | +| version | [VersionParams](#versionparams) | The ABCI application version. | 4 | + +### BlockParams + +| Name | Type | Description | Field Number | +|--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| +| max_bytes | int64 | Max size of a block, in bytes. | 1 | +| max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! 
| 2 | + +### EvidenceParams + +| Name | Type | Description | Field Number | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| +| max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 | +| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 | +| max_bytes | int64 | maximum size in bytes of total evidence allowed to be entered into a block | 3 | + +### ValidatorParams + +| Name | Type | Description | Field Number | +|---------------|-----------------|-----------------------------------------------------------------------|--------------| +| pub_key_types | repeated string | List of accepted public key types. Uses same naming as `PubKey.Type`. | 1 | + +### VersionParams + +| Name | Type | Description | Field Number | +|-------------|--------|-------------------------------|--------------| +| app_version | uint64 | The ABCI application version. | 1 | + +## Proof + +| Name | Type | Description | Field Number | +|-----------|----------------|-----------------------------------------------|--------------| +| total | int64 | Total number of items. | 1 | +| index | int64 | Index item to prove. | 2 | +| leaf_hash | bytes | Hash of item value. 
| 3 | +| aunts | repeated bytes | Hashes from leaf's sibling to a root's child. | 4 | diff --git a/spec/core/encoding.md b/spec/core/encoding.md new file mode 100644 index 0000000000..c137575d78 --- /dev/null +++ b/spec/core/encoding.md @@ -0,0 +1,300 @@ +# Encoding + +## Protocol Buffers + +Tendermint uses [Protocol Buffers](https://developers.google.com/protocol-buffers), specifically proto3, for all data structures. + +Please see the [Proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) for more details. + +## Byte Arrays + +The encoding of a byte array is simply the raw-bytes prefixed with the length of +the array as a `UVarint` (what proto calls a `Varint`). + +For details on varints, see the [protobuf +spec](https://developers.google.com/protocol-buffers/docs/encoding#varints). + +For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`, +while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would +be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300. + +## Hashing + +Tendermint uses `SHA256` as its hash function. +Objects are always serialized before being hashed. +So `SHA256(obj)` is short for `SHA256(ProtoEncoding(obj))`. + +## Public Key Cryptography + +Tendermint uses Protobuf [Oneof](https://developers.google.com/protocol-buffers/docs/proto3#oneof) +to distinguish between different types of public keys and signatures. +Additionally, for each public key, Tendermint +defines an Address function that can be used as a more compact identifier in +place of the public key. Here we list the concrete types, their names, +and prefix bytes for public keys and signatures, as well as the address schemes +for each PubKey. Note for brevity we don't +include details of the private keys beyond their type and name. + +### Key Types + +Each type specifies its own pubkey, address, and signature format. 
+ +#### Ed25519 + +The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key: + +```go +address = SHA256(pubkey)[:20] +``` + +The signature is the raw 64-byte ED25519 signature. + +Tendermint adopted [zip215](https://zips.z.cash/zip-0215) for verification of ed25519 signatures. + +> Note: This change will be released in the next major release of Tendermint-Go (0.35). + +#### Secp256k1 + +The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key: + +```go +address = SHA256(pubkey)[:20] +``` + +## Other Common Types + +### BitArray + +The BitArray is used in some consensus messages to represent votes received from +validators, or parts received in a block. It is represented +with a struct containing the number of bits (`Bits`) and the bit-array itself +encoded in base64 (`Elems`). + +| Name | Type | +|-------|----------------------------| +| bits | int64 | +| elems | slice of int64 (`[]int64`) | + +Note BitArray receives a special JSON encoding in the form of `x` and `_` +representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as +`"x_xx_"` + +### Part + +Part is used to break up blocks into pieces that can be gossiped in parallel +and securely verified using a Merkle tree of the parts. + +Part contains the index of the part (`Index`), the actual +underlying data of the part (`Bytes`), and a Merkle proof that the part is contained in +the set (`Proof`). + +| Name | Type | +|-------|---------------------------| +| index | uint32 | +| bytes | slice of bytes (`[]byte`) | +| proof | [proof](#merkle-proof) | + +See details of SimpleProof, below. + +### MakeParts + +Encode an object using Protobuf and slice it into parts. +Tendermint uses a part size of 65536 bytes, and allows a maximum of 1601 parts +(see `types.MaxBlockPartsCount`). This corresponds to the hard-coded block size +limit of 100MB. 
+ +```go +func MakeParts(block Block) []Part +``` + +## Merkle Trees + +For an overview of Merkle trees, see +[wikipedia](https://en.wikipedia.org/wiki/Merkle_tree) + +We use the RFC 6962 specification of a merkle tree, with sha256 as the hash function. +Merkle trees are used throughout Tendermint to compute a cryptographic digest of a data structure. +The differences between RFC 6962 and the simplest form a merkle tree are that: + +1. leaf nodes and inner nodes have different hashes. + This is for "second pre-image resistance", to prevent the proof to an inner node being valid as the proof of a leaf. + The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`. + +2. When the number of items isn't a power of two, the left half of the tree is as big as it could be. + (The largest power of two less than the number of items) This allows new leaves to be added with less + recomputation. For example: + +```md + Simple Tree with 6 items Simple Tree with 7 items + + * * + / \ / \ + / \ / \ + / \ / \ + / \ / \ + * * * * + / \ / \ / \ / \ + / \ / \ / \ / \ + / \ / \ / \ / \ + * * h4 h5 * * * h6 + / \ / \ / \ / \ / \ +h0 h1 h2 h3 h0 h1 h2 h3 h4 h5 +``` + +### MerkleRoot + +The function `MerkleRoot` is a simple recursive function defined as follows: + +```go +// SHA256([]byte{}) +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + +// SHA256(0x00 || leaf) +func leafHash(leaf []byte) []byte { + return tmhash.Sum(append(0x00, leaf...)) +} + +// SHA256(0x01 || left || right) +func innerHash(left []byte, right []byte) []byte { + return tmhash.Sum(append(0x01, append(left, right...)...)) +} + +// largest power of 2 less than k +func getSplitPoint(k int) { ... 
} + +func MerkleRoot(items [][]byte) []byte{ + switch len(items) { + case 0: + return emptyHash() + case 1: + return leafHash(items[0]) + default: + k := getSplitPoint(len(items)) + left := MerkleRoot(items[:k]) + right := MerkleRoot(items[k:]) + return innerHash(left, right) + } +} +``` + +Note: `MerkleRoot` operates on items which are arbitrary byte arrays, not +necessarily hashes. For items which need to be hashed first, we introduce the +`Hashes` function: + +```go +func Hashes(items [][]byte) [][]byte { + return SHA256 of each item +} +``` + +Note: we will abuse notation and invoke `MerkleRoot` with arguments of type `struct` or type `[]struct`. +For `struct` arguments, we compute a `[][]byte` containing the protobuf encoding of each +field in the struct, in the same order the fields appear in the struct. +For `[]struct` arguments, we compute a `[][]byte` by protobuf encoding the individual `struct` elements. + +### Merkle Proof + +Proof that a leaf is in a Merkle tree is composed as follows: + +| Name | Type | +|----------|----------------------------| +| total | int64 | +| index | int64 | +| leafHash | slice of bytes (`[]byte`) | +| aunts | Matrix of bytes ([][]byte) | + +Which is verified as follows: + +```golang +func (proof Proof) Verify(rootHash []byte, leaf []byte) bool { + assert(proof.LeafHash, leafHash(leaf)) + + computedHash := computeHashFromAunts(proof.Index, proof.Total, proof.LeafHash, proof.Aunts) + return computedHash == rootHash +} + +func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte{ + assert(index < total && index >= 0 && total > 0) + + if total == 1{ + assert(len(proof.Aunts) == 0) + return leafHash + } + + assert(len(innerHashes) > 0) + + numLeft := getSplitPoint(total) // largest power of 2 less than total + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + assert(leftHash != nil) + return innerHash(leftHash, 
innerHashes[len(innerHashes)-1]) + } + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + assert(rightHash != nil) + return innerHash(innerHashes[len(innerHashes)-1], rightHash) +} +``` + +The number of aunts is limited to 100 (`MaxAunts`) to protect the node against DOS attacks. +This limits the tree size to 2^100 leaves, which should be sufficient for any +conceivable purpose. + +### IAVL+ Tree + +Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/ae77f0080a724b159233bd9b289b2e91c0de21b5/docs/interfaces/lite/specification.md) + +## JSON + +Tendermint has its own JSON encoding in order to keep backwards compatibility with the previous RPC layer. + +Registered types are encoded as: + +```json +{ + "type": "", + "value": +} +``` + +For instance, an ED25519 PubKey would look like: + +```json +{ + "type": "tendermint/PubKeyEd25519", + "value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk=" +} +``` + +Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the +`"type"` is the type name for Ed25519 pubkeys. + +### Signed Messages + +Signed messages (eg. votes, proposals) in the consensus are encoded using protobuf. + +When signing, the elements of a message are re-ordered so the fixed-length fields +are first, making it easy to quickly check the type, height, and round. +The `ChainID` is also appended to the end. +We call this encoding the SignBytes. 
For instance, SignBytes for a vote is the protobuf encoding of the following struct: + +```protobuf +message CanonicalVote { + SignedMsgType type = 1; + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + CanonicalBlockID block_id = 4; + google.protobuf.Timestamp timestamp = 5; + string chain_id = 6; +} +``` + +The field ordering and the fixed sized encoding for the first three fields is optimized to ease parsing of SignBytes +in HSMs. It creates fixed offsets for relevant fields that need to be read in this context. + +> Note: All canonical messages are length prefixed. + +For more details, see the [signing spec](../consensus/signing.md). +Also, see the motivating discussion in +[#1622](https://github.com/tendermint/tendermint/issues/1622). diff --git a/spec/core/genesis.md b/spec/core/genesis.md new file mode 100644 index 0000000000..1dc019e77e --- /dev/null +++ b/spec/core/genesis.md @@ -0,0 +1,34 @@ +# Genesis + +The genesis file is the starting point of a chain. An application will populate the `app_state` field in the genesis with their required fields. Tendermint is not able to validate this section because it is unaware what application state consists of. + +## Genesis Fields + +- `genesis_time`: The genesis time is the time the blockchain started or will start. If nodes are started before this time they will sit idle until the time specified. +- `chain_id`: The chainid is the chain identifier. Every chain should have a unique identifier. When conducting a fork based upgrade, we recommend changing the chainid to avoid network or consensus errors. +- `initial_height`: This field is the starting height of the blockchain. When conducting a chain restart to avoid restarting at height 1, the network is able to start at a specified height. +- `consensus_params` + - `block` + - `max_bytes`: The max amount of bytes a block can be. 
+ - `max_gas`: The maximum amount of gas that a block can have. + - `time_iota_ms`: This parameter has no value anymore in Tendermint-core. + +- `evidence` + - `max_age_num_blocks`: After this preset amount of blocks has passed a single piece of evidence is considered invalid + - `max_age_duration`: After this preset amount of time has passed a single piece of evidence is considered invalid. + - `max_bytes`: The max amount of bytes of all evidence included in a block. + +> Note: For evidence to be considered invalid, evidence must be older than both `max_age_num_blocks` and `max_age_duration` + +- `validator` + - `pub_key_types`: Defines which curves are to be accepted as a valid validator consensus key. Tendermint supports ed25519, sr25519 and secp256k1. + +- `version` + - `app_version`: The version of the application. This is set by the application and is used to identify which version of the app a user should be using in order to operate a node. + +- `validators` + - This is an array of validators. This validator set is used as the starting validator set of the chain. This field can be empty, if the application sets the validator set in `InitChain`. + +- `app_hash`: The applications state root hash. This field does not need to be populated at the start of the chain, the application may provide the needed information via `Initchain`. + +- `app_state`: This section is filled in by the application and is unknown to Tendermint. diff --git a/spec/core/readme.md b/spec/core/readme.md new file mode 100644 index 0000000000..46f95f1b76 --- /dev/null +++ b/spec/core/readme.md @@ -0,0 +1,13 @@ +--- +order: 1 +parent: + title: Core + order: 3 +--- + +This section describes the core types and functionality of the Tendermint protocol implementation. 
+ +- [Core Data Structures](./data_structures.md) +- [Encoding](./encoding.md) +- [Genesis](./genesis.md) +- [State](./state.md) diff --git a/spec/core/state.md b/spec/core/state.md new file mode 100644 index 0000000000..5138c09506 --- /dev/null +++ b/spec/core/state.md @@ -0,0 +1,121 @@ +# State + +The state contains information whose cryptographic digest is included in block headers, and thus is +necessary for validating new blocks. For instance, the validators set and the results of +transactions are never included in blocks, but their Merkle roots are: +the state keeps track of them. + +The `State` object itself is an implementation detail, since it is never +included in a block or gossiped over the network, and we never compute +its hash. The persistence or query interface of the `State` object +is an implementation detail and not included in the specification. +However, the types in the `State` object are part of the specification, since +the Merkle roots of the `State` objects are included in blocks and values are used during +validation. + +```go +type State struct { + ChainID string + InitialHeight int64 + + LastBlockHeight int64 + LastBlockID types.BlockID + LastBlockTime time.Time + + Version Version + LastResults []Result + AppHash []byte + + LastValidators ValidatorSet + Validators ValidatorSet + NextValidators ValidatorSet + + ConsensusParams ConsensusParams +} +``` + +The chain ID and initial height are taken from the genesis file, and not changed again. The +initial height will be `1` in the typical case, `0` is an invalid value. + +Note there is a hard-coded limit of 10000 validators. This is inherited from the +limit on the number of votes in a commit. 
+ +Further information on [`Validator`'s](./data_structures.md#validator), +[`ValidatorSet`'s](./data_structures.md#validatorset) and +[`ConsensusParams`'s](./data_structures.md#consensusparams) can +be found in [data structures](./data_structures.md) + +## Execution + +State gets updated at the end of executing a block. Of specific interest is `ResponseEndBlock` and +`ResponseCommit` + +```go +type ResponseEndBlock struct { + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` +} +``` + +where + +```go +type ValidatorUpdate struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` +} +``` + +and + +```go +type ResponseCommit struct { + // reserve 1 + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +} +``` + +`ValidatorUpdates` are used to add and remove validators to the current set as well as update +validator power. Setting validator power to 0 in `ValidatorUpdate` will cause the validator to be +removed. `ConsensusParams` are safely copied across (i.e. if a field is nil it gets ignored) and the +`Data` from the `ResponseCommit` is used as the `AppHash` + +## Version + +```go +type Version struct { + consensus Consensus + software string +} +``` + +[`Consensus`](./data_structures.md#version) contains the protocol version for the blockchain and the +application. 
+ +## Block + +The total size of a block is limited in bytes by the `ConsensusParams.Block.MaxBytes`. +Proposed blocks must be less than this size, and will be considered invalid +otherwise. + +Blocks should additionally be limited by the amount of "gas" consumed by the +transactions in the block, though this is not yet implemented. + +## Evidence + +For evidence in a block to be valid, it must satisfy: + +```go +block.Header.Time-evidence.Time < ConsensusParams.Evidence.MaxAgeDuration && + block.Header.Height-evidence.Height < ConsensusParams.Evidence.MaxAgeNumBlocks +``` + +A block must not contain more than `ConsensusParams.Evidence.MaxBytes` of evidence. This is +implemented to mitigate spam attacks. + +## Validator + +Validators from genesis file and `ResponseEndBlock` must have pubkeys of type ∈ +`ConsensusParams.Validator.PubKeyTypes`. diff --git a/spec/ivy-proofs/Dockerfile b/spec/ivy-proofs/Dockerfile new file mode 100644 index 0000000000..be60151fd2 --- /dev/null +++ b/spec/ivy-proofs/Dockerfile @@ -0,0 +1,37 @@ +# we need python2 support, which was dropped after buster: +FROM debian:buster + +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections +RUN apt-get update +RUN apt-get install -y apt-utils + +# Install and configure locale `en_US.UTF-8` +RUN apt-get install -y locales && \ + sed -i -e "s/# $en_US.*/en_US.UTF-8 UTF-8/" /etc/locale.gen && \ + dpkg-reconfigure --frontend=noninteractive locales && \ + update-locale LANG=en_US.UTF-8 +ENV LANG=en_US.UTF-8 + +RUN apt-get update +RUN apt-get install -y git python2 python-pip g++ cmake python-ply python-tk tix pkg-config libssl-dev python-setuptools + +# create a user: +RUN useradd -ms /bin/bash user +USER user +WORKDIR /home/user + +RUN git clone --recurse-submodules https://github.com/kenmcmil/ivy.git +WORKDIR /home/user/ivy/ +RUN git checkout 271ee38980699115508eb90a0dd01deeb750a94b + +RUN python2.7 build_submodules.py +RUN mkdir -p 
"/home/user/python/lib/python2.7/site-packages" +ENV PYTHONPATH="/home/user/python/lib/python2.7/site-packages" +# need to install pyparsing manually because otherwise wrong version found +RUN pip install pyparsing +RUN python2.7 setup.py install --prefix="/home/user/python/" +ENV PATH=$PATH:"/home/user/python/bin/" +WORKDIR /home/user/tendermint-proof/ + +ENTRYPOINT ["/home/user/tendermint-proof/check_proofs.sh"] + diff --git a/spec/ivy-proofs/README.md b/spec/ivy-proofs/README.md new file mode 100644 index 0000000000..00a4bed259 --- /dev/null +++ b/spec/ivy-proofs/README.md @@ -0,0 +1,33 @@ +# Ivy Proofs + +```copyright +Copyright (c) 2020 Galois, Inc. +SPDX-License-Identifier: Apache-2.0 +``` + +## Contents + +This folder contains: + +* `tendermint.ivy`, a specification of Tendermint algorithm as described in *The latest gossip on BFT consensus* by E. Buchman, J. Kwon, Z. Milosevic. +* `abstract_tendermint.ivy`, a more abstract specification of Tendermint that is more verification-friendly. +* `classic_safety.ivy`, a proof that Tendermint satisfies the classic safety property of BFT consensus: if every two quorums have a well-behaved node in common, then no two well-behaved nodes ever disagree. +* `accountable_safety_1.ivy`, a proof that, assuming every quorum contains at least one well-behaved node, if two well-behaved nodes disagree, then there is evidence demonstrating at least f+1 nodes misbehaved. +* `accountable_safety_2.ivy`, a proof that, regardless of any assumption about quorums, well-behaved nodes cannot be framed by malicious nodes. In other words, malicious nodes can never construct evidence that incriminates a well-behaved node. +* `network_shim.ivy`, the network model and a convenience `shim` object to interface with the Tendermint specification. +* `domain_model.ivy`, a specification of the domain model underlying the Tendermint specification, i.e. rounds, value, quorums, etc. 
+ +All specifications and proofs are written in [Ivy](https://github.com/kenmcmil/ivy). + +The license above applies to all files in this folder. + + +## Building and running + +The easiest way to check the proofs is to use [Docker](https://www.docker.com/). + +1. Install [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/). +2. Build a Docker image: `docker-compose build` +3. Run the proofs inside the Docker container: `docker-compose run +tendermint-proof`. This will check all the proofs with the `ivy_check` +command and write the output of `ivy_check` to a subdirectory of `./output/` diff --git a/spec/ivy-proofs/abstract_tendermint.ivy b/spec/ivy-proofs/abstract_tendermint.ivy new file mode 100644 index 0000000000..4a160be2a7 --- /dev/null +++ b/spec/ivy-proofs/abstract_tendermint.ivy @@ -0,0 +1,178 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Abstract specification of Tendermint in Ivy +# --- + +# Here we define an abstract version of the Tendermint specification. We use +# two main forms of abstraction: a) We abstract over how information is +# transmitted (there is no network). b) We abstract functions using relations. +# For example, we abstract over a node's current round, instead only tracking +# with a relation which rounds the node has left. We do something similar for +# the `lockedRound` variable. This is in order to avoid using a function from +# node to round, and it allows us to emit verification conditions that are +# efficiently solvable by Z3. + +# This specification also defines the observations that are used to adjudicate +# misbehavior. Well-behaved nodes faithfully observe every message that they +# use to take a step, while Byzantine nodes can fake observations about +# themselves (including withholding observations). Misbehavior is defined using +# the collection of all observations made (in reality, those observations must +# be collected first, but we do not model this process). 
+ +include domain_model + +module abstract_tendermint = { + +# Protocol state +# ############## + + relation left_round(N:node, R:round) + relation prevoted(N:node, R:round, V:value) + relation precommitted(N:node, R:round, V:value) + relation decided(N:node, R:round, V:value) + relation locked(N:node, R:round, V:value) + +# Accountability relations +# ######################## + + relation observed_prevoted(N:node, R:round, V:value) + relation observed_precommitted(N:node, R:round, V:value) + +# relations that are defined in terms of the previous two: + relation observed_equivocation(N:node) + relation observed_unlawful_prevote(N:node) + relation agreement + relation accountability_violation + + object defs = { # we hide those definitions and use them only when needed + private { + definition [observed_equivocation_def] observed_equivocation(N) = exists V1,V2,R . + V1 ~= V2 & (observed_precommitted(N,R,V1) & observed_precommitted(N,R,V2) | observed_prevoted(N,R,V1) & observed_prevoted(N,R,V2)) + + definition [observed_unlawful_prevote_def] observed_unlawful_prevote(N) = exists V1,V2,R1,R2 . + V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & R1 < R2 & observed_precommitted(N,R1,V1) & observed_prevoted(N,R2,V2) + & forall Q,R . R1 <= R & R < R2 & nset.is_quorum(Q) -> exists N2 . nset.member(N2,Q) & ~observed_prevoted(N2,R,V2) + + definition [agreement_def] agreement = forall N1,N2,R1,R2,V1,V2 . well_behaved(N1) & well_behaved(N2) & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2 + + definition [accountability_violation_def] accountability_violation = exists Q1,Q2 . nset.is_quorum(Q1) & nset.is_quorum(Q2) & (forall N . 
nset.member(N,Q1) & nset.member(N,Q2) -> observed_equivocation(N) | observed_unlawful_prevote(N)) + } + } + +# Protocol transitions +# #################### + + after init { + left_round(N,R) := R < 0; + prevoted(N,R,V) := false; + precommitted(N,R,V) := false; + decided(N,R,V) := false; + locked(N,R,V) := false; + + observed_prevoted(N,R,V) := false; + observed_precommitted(N,R,V) := false; + } + +# Actions are named after the corresponding line numbers in the Tendermint +# arXiv paper. + + action l_11(n:node, r:round) = { # start round r + require ~left_round(n,r); + left_round(n,R) := R < r; + } + + action l_22(n:node, rp:round, v:value) = { + require ~left_round(n,rp); + require ~prevoted(n,rp,V) & ~precommitted(n,rp,V); + require (forall R,V . locked(n,R,V) -> V = v) | v = value.nil; + prevoted(n, rp, v) := true; + left_round(n, R) := R < rp; # leave all lower rounds. + + observed_prevoted(n, rp, v) := observed_prevoted(n, rp, v) | well_behaved(n); # the node observes itself + } + + action l_28(n:node, rp:round, v:value, vr:round, q:nset) = { + require ~left_round(n,rp) & ~prevoted(n,rp,V); + require ~prevoted(n,rp,V) & ~precommitted(n,rp,V); + require vr < rp; + require nset.is_quorum(q) & (forall N . nset.member(N,q) -> (prevoted(N,vr,v) | ~well_behaved(N))); + var proposal:value; + if value.valid(v) & ((forall R0,V0 . locked(n,R0,V0) -> R0 <= vr) | (forall R,V . locked(n,R,V) -> V = v)) { + proposal := v; + } + else { + proposal := value.nil; + }; + prevoted(n, rp, proposal) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(N, vr, v) := observed_prevoted(N, vr, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_prevoted(n, rp, proposal) := observed_prevoted(n, rp, proposal) | well_behaved(n); # the node observes itself + } + + action l_36(n:node, rp:round, v:value, q:nset) = { + require v ~= value.nil; + require ~left_round(n,rp); + require exists V . 
prevoted(n,rp,V); + require ~precommitted(n,rp,V); + require nset.is_quorum(q) & (forall N . nset.member(N,q) -> (prevoted(N,rp,v) | ~well_behaved(N))); + precommitted(n, rp, v) := true; + left_round(n, R) := R < rp; # leave all lower rounds + locked(n,R,V) := R <= rp & V = v; + + observed_prevoted(N, rp, v) := observed_prevoted(N, rp, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_precommitted(n, rp, v) := observed_precommitted(n, rp, v) | well_behaved(n); # the node observes itself + } + + action l_44(n:node, rp:round, q:nset) = { + require ~left_round(n,rp); + require ~precommitted(n,rp,V); + require nset.is_quorum(q) & (forall N .nset.member(N,q) -> (prevoted(N,rp,value.nil) | ~well_behaved(N))); + precommitted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(N, rp, value.nil) := observed_prevoted(N, rp, value.nil) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action l_57(n:node, rp:round) = { + require ~left_round(n,rp); + require ~prevoted(n,rp,V); + prevoted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(n, rp, value.nil) := observed_prevoted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action l_61(n:node, rp:round) = { + require ~left_round(n,rp); + require ~precommitted(n,rp,V); + precommitted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action decide(n:node, r:round, v:value, q:nset) = { + require v ~= value.nil; + require nset.is_quorum(q) & (forall N . 
nset.member(N, q) -> (precommitted(N, r, v) | ~well_behaved(N))); + decided(n, r, v) := true; + + observed_precommitted(N, r, v) := observed_precommitted(N, r, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the precommits of quorum q + + } + + action misbehave = { +# Byzantine nodes can claim they observed whatever they want about themselves, +# but they cannot remove observations. Note that we use assume because we don't +# want those to be checked; we just want them to be true (that's the model of +# Byzantine behavior). + observed_prevoted(N,R,V) := *; + assume (old observed_prevoted(N,R,V)) -> observed_prevoted(N,R,V); + assume well_behaved(N) -> old observed_prevoted(N,R,V) = observed_prevoted(N,R,V); + observed_precommitted(N,R,V) := *; + assume (old observed_precommitted(N,R,V)) -> observed_precommitted(N,R,V); + assume well_behaved(N) -> old observed_precommitted(N,R,V) = observed_precommitted(N,R,V); + } +} diff --git a/spec/ivy-proofs/accountable_safety_1.ivy b/spec/ivy-proofs/accountable_safety_1.ivy new file mode 100644 index 0000000000..02bdf1add8 --- /dev/null +++ b/spec/ivy-proofs/accountable_safety_1.ivy @@ -0,0 +1,143 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Proof of Classic Safety +# --- + +include tendermint +include abstract_tendermint + +# Here we prove the first accountability property: if two well-behaved nodes +# disagree, then there are two quorums Q1 and Q2 such that all members of the +# intersection of Q1 and Q2 have violated the accountability properties. + +# The proof is done in two steps: first we prove the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. 
+ +# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_1 accountable_safety_1.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety_1 accountable_safety_1.ivy` +# To check the whole proof, use `ivy_check accountable_safety_1.ivy`. + + +# Proof of the accountability property in the abstract specification +# ================================================================== + +# We prove with tactics (see `lemma_1` and `lemma_2`) that, if some basic +# invariants hold (see `invs` below), then the accountability property holds. + +isolate abstract_accountable_safety = { + + instantiate abstract_tendermint + +# The main property +# ----------------- + +# If there is disagreement, then there is evidence that a third of the nodes +# have violated the protocol: + invariant [accountability] agreement | accountability_violation + proof { + apply lemma_1.thm # this reduces to goal to three subgoals: p1, p2, and p3 (see their definition below) + proof [p1] { + assume invs.inv1 + } + proof [p2] { + assume invs.inv2 + } + proof [p3] { + assume invs.inv3 + } + } + +# The invariants +# -------------- + + isolate invs = { + + # well-behaved nodes observe their own actions faithfully: + invariant [inv1] well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + # if a value is precommitted by a well-behaved node, then a quorum is observed to prevote it: + invariant [inv2] (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + # if a value is decided by a well-behaved node, then a quorum is observed to precommit it: + invariant [inv3] (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . 
nset.member(N2,Q) -> observed_precommitted(N2,R,V) + private { + invariant (precommitted(N,R,V) | prevoted(N,R,V)) -> 0 <= R + invariant R < 0 -> left_round(N,R) + } + + } with this, nset, round, accountable_bft.max_2f_byzantine + +# The theorems proved with tactics +# -------------------------------- + +# Using complete induction on rounds, we prove that, assuming that the +# invariants inv1, inv2, and inv3 hold, the accountability property holds. + +# For technical reasons, we separate the proof in two steps + isolate lemma_1 = { + + specification { + theorem [thm] { + property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + property [p3] forall R,V. (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_precommitted(N2,R,V) + #------------------------------------------------------------------------------------------------------------------------------------------- + property agreement | accountability_violation + } + proof { + assume inductive_property # the theorem follows from what we prove by induction below + } + } + + implementation { + # complete induction is not built-in, so we introduce it with an axiom. Note that this only holds for a type where 0 is the smallest element + axiom [complete_induction] { + relation p(X:round) + { # base case + property p(0) + } + { # inductive step: show that if the property is true for all X lower or equal to x and y=x+1, then the property is true of y + individual a:round + individual b:round + property (forall X. 0 <= X & X <= a -> p(X)) & round.succ(a,b) -> p(b) + } + #-------------------------- + property forall X . 
0 <= X -> p(X) + } + + # The main lemma: if inv1 and inv2 below hold and a quorum is observed to + # precommit V1 at R1 and another quorum is observed to precommit V2~=V1 at + # R2>=R1, then the intersection of two quorums (i.e. f+1 nodes) is observed to + # violate the protocol. We prove this by complete induction on R2. + theorem [inductive_property] { + property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) -> V = value.nil | exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + #----------------------------------------------------------------------------------------------------------------------- + property forall R2. 0 <= R2 -> ((exists V2,Q1,R1,V1,Q1 . V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & 0 <= R1 & R1 <= R2 & nset.is_quorum(Q1) & (forall N . nset.member(N,Q1) -> observed_precommitted(N,R1,V1)) & (exists Q2 . nset.is_quorum(Q2) & forall N . nset.member(N,Q2) -> observed_prevoted(N,R2,V2))) -> accountability_violation) + } + proof { + apply complete_induction # the two subgoals (base case and inductive case) are then discharged automatically + # NOTE: this can take a long time depending on the SMT random seed (to try a different seed, use `ivy_check seed=$RANDOM` + } + } + } with this, round, nset, accountable_bft.max_2f_byzantine, defs.observed_equivocation_def, defs.observed_unlawful_prevote_def, defs.accountability_violation_def, defs.agreement_def + +} with round + +# The final proof +# =============== + +isolate accountable_safety_1 = { + +# First we instantiate the concrete protocol: + instantiate tendermint(abstract_accountable_safety) + +# We then define what we mean by agreement + relation agreement + definition [agreement_def] agreement = forall N1,N2. 
well_behaved(N1) & well_behaved(N2) & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2) + + invariant abstract_accountable_safety.agreement -> agreement + + invariant [accountability] agreement | abstract_accountable_safety.accountability_violation + +} with value, round, proposers, shim, abstract_accountable_safety, abstract_accountable_safety.defs.agreement_def, accountable_safety_1.agreement_def diff --git a/spec/ivy-proofs/accountable_safety_2.ivy b/spec/ivy-proofs/accountable_safety_2.ivy new file mode 100644 index 0000000000..7fb928909a --- /dev/null +++ b/spec/ivy-proofs/accountable_safety_2.ivy @@ -0,0 +1,52 @@ +#lang ivy1.7 + +include tendermint +include abstract_tendermint + +# Here we prove the second accountability property: no well-behaved node is +# ever observed to violate the accountability properties. + +# The proof is done in two steps: first we prove the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. + +# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_2 accountable_safety_2.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety_2 accountable_safety_2.ivy` +# To check the whole proof, use `ivy_check complete=fo accountable_safety_2.ivy`. + +# Proof that the property holds in the abstract specification +# ============================================================ + +isolate abstract_accountable_safety_2 = { + + instantiate abstract_tendermint + +# the main property: + invariant [wb_never_punished] well_behaved(N) -> ~(observed_equivocation(N) | observed_unlawful_prevote(N)) + +# the main invariant for proving wb_not_punished: + invariant well_behaved(N) & precommitted(N,R,V) & ~locked(N,R,V) & V ~= value.nil -> exists R2,V2 . 
V2 ~= value.nil & R < R2 & precommitted(N,R2,V2) & locked(N,R2,V2) + + invariant (exists N . well_behaved(N) & precommitted(N,R,V) & V ~= value.nil) -> exists Q . nset.is_quorum(Q) & forall N . nset.member(N,Q) -> observed_prevoted(N,R,V) + + invariant well_behaved(N) -> (observed_prevoted(N,R,V) <-> prevoted(N,R,V)) + invariant well_behaved(N) -> (observed_precommitted(N,R,V) <-> precommitted(N,R,V)) + +# nodes stop prevoting or precommitting in lower rounds when doing so in a higher round: + invariant well_behaved(N) & prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + invariant well_behaved(N) & locked(N,R2,V2) & R1 < R2 -> left_round(N,R1) + + invariant [precommit_unique_per_round] well_behaved(N) & precommitted(N,R,V1) & precommitted(N,R,V2) -> V1 = V2 + +} with nset, round, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def + +# Proof that the property holds in the concrete specification +# =========================================================== + +isolate accountable_safety_2 = { + + instantiate tendermint(abstract_accountable_safety_2) + + invariant well_behaved(N) -> ~(abstract_accountable_safety_2.observed_equivocation(N) | abstract_accountable_safety_2.observed_unlawful_prevote(N)) + +} with round, value, shim, abstract_accountable_safety_2, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def diff --git a/spec/ivy-proofs/check_proofs.sh b/spec/ivy-proofs/check_proofs.sh new file mode 100755 index 0000000000..6afd1a962d --- /dev/null +++ b/spec/ivy-proofs/check_proofs.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# returns non-zero error code if any proof fails + +success=0 +log_dir=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 6) +cmd="ivy_check seed=$RANDOM" +mkdir -p output/$log_dir + +echo "Checking classic safety:" +res=$($cmd classic_safety.ivy | tee "output/$log_dir/classic_safety.txt" | tail -n 1) +if [ 
"$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo "Checking accountable safety 1:" +res=$($cmd accountable_safety_1.ivy | tee "output/$log_dir/accountable_safety_1.txt" | tail -n 1) +if [ "$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo "Checking accountable safety 2:" +res=$($cmd complete=fo accountable_safety_2.ivy | tee "output/$log_dir/accountable_safety_2.txt" | tail -n 1) +if [ "$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo +echo "See ivy_check output in the output/ folder" +exit $success diff --git a/spec/ivy-proofs/classic_safety.ivy b/spec/ivy-proofs/classic_safety.ivy new file mode 100644 index 0000000000..b422a2c175 --- /dev/null +++ b/spec/ivy-proofs/classic_safety.ivy @@ -0,0 +1,85 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Proof of Classic Safety +# --- + +include tendermint +include abstract_tendermint + +# Here we prove the classic safety property: assuming that every two quorums +# have a well-behaved node in common, no two well-behaved nodes ever disagree. + +# The proof is done in two steps: first we prove the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. + +# To see what is checked in the refinement proof, use `ivy_show isolate=classic_safety classic_safety.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_classic_safety classic_safety.ivy` + +# To check the whole proof, use `ivy_check classic_safety.ivy`. + +# Note that all the verification conditions sent to Z3 for this proof are in +# EPR. + +# Classic safety in the abstract model +# ==================================== + +# We start by proving that classic safety holds in the abstract model. 
+ +isolate abstract_classic_safety = { + + instantiate abstract_tendermint + + invariant [classic_safety] classic_bft.quorum_intersection & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2 + +# The notion of choosable value +# ----------------------------- + + relation choosable(R:round, V:value) + definition choosable(R,V) = exists Q . nset.is_quorum(Q) & forall N . well_behaved(N) & nset.member(N,Q) -> ~left_round(N,R) | precommitted(N,R,V) + +# Main invariants +# --------------- + +# `classic_safety` is inductive relative to those invariants + + invariant [decision_is_quorum_precommit] (exists N1 . decided(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> precommitted(N2,R,V) + + invariant [precommitted_is_quorum_prevote] V ~= value.nil & (exists N1 . precommitted(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> prevoted(N2,R,V) + + invariant [prevote_unique_per_round] prevoted(N,R,V1) & prevoted(N,R,V2) -> V1 = V2 + +# This is the core invariant: as long as a precommitted value is still choosable, it remains protected by a lock and prevents any new value from being prevoted: + invariant [locks] classic_bft.quorum_intersection & V ~= value.nil & precommitted(N,R,V) & choosable(R,V) -> locked(N,R,V) & forall R2,V2 . 
R < R2 & prevoted(N,R2,V2) -> V2 = V | V2 = value.nil + +# Supporting invariants +# --------------------- + +# The main invariants are inductive relative to those + + invariant decided(N,R,V) -> V ~= value.nil + + invariant left_round(N,R2) & R1 < R2 -> left_round(N,R1) # if a node left round R2>R1, then it also left R1: + + invariant prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + invariant precommitted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + +} with round, nset, classic_bft.quorum_intersection_def + +# The refinement proof +# ==================== + +# Now, thanks to the refinement relation that we establish in +# `concrete_tendermint.ivy`, we prove that classic safety transfers to the +# concrete specification: +isolate classic_safety = { + + # We instantiate the `tendermint` module providing `abstract_classic_safety` as abstract model. + instantiate tendermint(abstract_classic_safety) + + # We prove that if every two quorums have a well-behaved node in common, + # then well-behaved nodes never disagree: + invariant [classic_safety] classic_bft.quorum_intersection & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2) + +} with value, round, proposers, shim, abstract_classic_safety # here we list all the specifications that we rely on for this proof diff --git a/spec/ivy-proofs/count_lines.sh b/spec/ivy-proofs/count_lines.sh new file mode 100755 index 0000000000..b2c457e21a --- /dev/null +++ b/spec/ivy-proofs/count_lines.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +r='^\s*$\|^\s*\#\|^\s*\}\s*$\|^\s*{\s*$' # removes comments and blank lines and lines that contain only { or } +N1=`cat tendermint.ivy domain_model.ivy network_shim.ivy | grep -v $r'\|.*invariant.*' | wc -l` +N2=`cat abstract_tendermint.ivy | grep "observed_" | wc -l` # the observed_* variables specify the observations of the nodes +SPEC_LINES=`expr $N1 + $N2` +echo "spec lines: $SPEC_LINES" +N3=`cat abstract_tendermint.ivy | grep -v 
$r'\|.*observed_.*' | wc -l` +N4=`cat accountable_safety_1.ivy | grep -v $r | wc -l` +PROOF_LINES=`expr $N3 + $N4` +echo "proof lines: $PROOF_LINES" +RATIO=`bc <<< "scale=2;$PROOF_LINES / $SPEC_LINES"` +echo "proof-to-code ratio for the accountable-safety property: $RATIO" diff --git a/spec/ivy-proofs/docker-compose.yml b/spec/ivy-proofs/docker-compose.yml new file mode 100644 index 0000000000..1d4a8ffe14 --- /dev/null +++ b/spec/ivy-proofs/docker-compose.yml @@ -0,0 +1,8 @@ +version: '3' +services: + tendermint-proof: + build: . + volumes: + - ./:/home/user/tendermint-proof:ro + - ./output:/home/user/tendermint-proof/output:rw + diff --git a/spec/ivy-proofs/domain_model.ivy b/spec/ivy-proofs/domain_model.ivy new file mode 100644 index 0000000000..0f12f7288a --- /dev/null +++ b/spec/ivy-proofs/domain_model.ivy @@ -0,0 +1,143 @@ +#lang ivy1.7 + +include order # this is a file from the standard library (`ivy/ivy/include/1.7/order.ivy`) + +isolate round = { + type this + individual minus_one:this + relation succ(R1:round, R2:round) + action incr(i:this) returns (j:this) + specification { +# to simplify verification, we treat rounds as an abstract totally ordered set with a successor relation. + instantiate totally_ordered(this) + property minus_one < 0 + property succ(X,Z) -> (X < Z & ~(X < Y & Y < Z)) + after incr { + ensure succ(i,j) + } + } + implementation { +# here we prove that the abstraction is sound. + interpret this -> int # rounds are integers in the Tendermint specification. + definition minus_one = 0-1 + definition succ(R1,R2) = R2 = R1 + 1 + implement incr { + j := i+1; + } + } +} + +instance node : iterable # nodes are a set with an order, that can be iterated over (see order.ivy in the standard library) + +relation well_behaved(N:node) # whether a node is well-behaved or not. NOTE: Used only in the proof and the Byzantine model; Nodes do not know who is well-behaved and who is not. 
+ +isolate proposers = { + # each round has a unique proposer in Tendermint. In order to avoid a + # function from round to node (which makes verification more difficult), we + # abstract over this function using a relation. + relation is_proposer(N:node, R:round) + export action get_proposer(r:round) returns (n:node) + specification { + property is_proposer(N1,R) & is_proposer(N2,R) -> N1 = N2 + after get_proposer { + ensure is_proposer(n,r); + } + } + implementation { + function f(R:round):node + definition f(r:round) = <<>> + definition is_proposer(N,R) = N = f(R) + implement get_proposer { + n := f(r); + } + } +} + +isolate value = { # the type of values + type this + relation valid(V:value) + individual nil:value + specification { + property ~valid(nil) + } + implementation { + interpret value -> bv[2] + definition nil = <<< -1 >>> # let's say nil is -1 + definition valid(V) = V ~= nil + } +} + +object nset = { # the type of node sets + type this # a set of N=3f+i nodes for 0 + #include + namespace hash_space { + template + class hash > { + public: + size_t operator()(const std::set &s) const { + hash h; + size_t res = 0; + for (const T &e : s) + res += h(e); + return res; + } + }; + } + >>> + interpret nset -> <<< std::set<`node`> >>> + definition member(n:node, s:nset) = <<< `s`.find(`n`) != `s`.end() >>> + definition is_quorum(s:nset) = <<< 3*`s`.size() > 2*`node.size` >>> + definition is_blocking(s:nset) = <<< 3*`s`.size() > `node.size` >>> + implement empty { + <<< + >>> + } + implement insert { + <<< + `t` = `s`; + `t`.insert(`n`); + >>> + } + <<< encode `nset` + + std::ostream &operator <<(std::ostream &s, const `nset` &a) { + s << "{"; + for (auto iter = a.begin(); iter != a.end(); iter++) { + if (iter != a.begin()) s << ", "; + s << *iter; + } + s << "}"; + return s; + } + + template <> + `nset` _arg<`nset`>(std::vector &args, unsigned idx, long long bound) { + throw std::invalid_argument("Not implemented"); // no syntax for nset values in the REPL + 
} + + >>> + } +} + +object classic_bft = { + relation quorum_intersection + private { + definition [quorum_intersection_def] quorum_intersection = forall Q1,Q2. exists N. well_behaved(N) & nset.member(N, Q1) & nset.member(N, Q2) # every two quorums have a well-behaved node in common + } +} + +trusted isolate accountable_bft = { + # this is our baseline assumption about quorums: + private { + property [max_2f_byzantine] exists N . well_behaved(N) & nset.member(N,Q) # every quorum has a well-behaved member + } +} diff --git a/spec/ivy-proofs/network_shim.ivy b/spec/ivy-proofs/network_shim.ivy new file mode 100644 index 0000000000..ebc3a04fce --- /dev/null +++ b/spec/ivy-proofs/network_shim.ivy @@ -0,0 +1,133 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Network model and network shim +# --- + +# Here we define a network module, which is our model of the network, and a +# shim module that sits on top of the network and which, upon receiving a +# message, calls the appropriate protocol handler. + +include domain_model + +# Here we define an enumeration type for identifying the 3 different types of +# messages that nodes send. +object msg_kind = { # TODO: merge with step_t + type this = {proposal, prevote, precommit} +} + +# Here we define the type of messages `msg`. Its members are structs with the fields described below. +object msg = { + type this = struct { + m_kind : msg_kind, + m_src : node, + m_round : round, + m_value : value, + m_vround : round + } +} + +# This is our model of the network: +isolate net = { + + export action recv(dst:node,v:msg) + action send(src:node,dst:node,v:msg) + # Note that the `recv` action is exported, meaning that it can be called + # non-deterministically by the environment any time it is enabled. In other + # words, a packet that is in flight can be received at any time. In this + # sense, the network is fully asynchronous. Moreover, there is no + # requirement that a given message will be received at all. 
+ + # The state of the network consists of all the packets that have been + # sent so far, along with their destination. + relation sent(V:msg, N:node) + + after init { + sent(V, N) := false + } + + before send { + sent(v,dst) := true + } + + before recv { + require sent(v,dst) # only sent messages can be received. + } +} + +# The network shim sits on top of the network and, upon receiving a message, +# calls the appropriate protocol handler. It also exposes a `broadcast` action +# that sends to all nodes. + +isolate shim = { + + # In order not repeat the same code for each handler, we use a handler + # module parameterized by the type of message it will handle. Below we + # instantiate this module for the 3 types of messages of Tendermint + module handler(p_kind) = { + action handle(dst:node,m:msg) + object spec = { + before handle { + assert sent(m,dst) & m.m_kind = p_kind + } + } + } + + instance proposal_handler : handler(msg_kind.proposal) + instance prevote_handler : handler(msg_kind.prevote) + instance precommit_handler : handler(msg_kind.precommit) + + relation sent(M:msg,N:node) + + action broadcast(src:node,m:msg) + action send(src:node,dst:node,m:msg) + + specification { + after init { + sent(M,D) := false; + } + before broadcast { + sent(m,D) := true + } + before send { + sent(m,dst) := true + } + } + + # Here we give an implementation of it that satisfies its specification: + implementation { + + implement net.recv(dst:node,m:msg) { + + if m.m_kind = msg_kind.proposal { + call proposal_handler.handle(dst,m) + } + else if m.m_kind = msg_kind.prevote { + call prevote_handler.handle(dst,m) + } + else if m.m_kind = msg_kind.precommit { + call precommit_handler.handle(dst,m) + } + } + + implement broadcast { # broadcast sends to all nodes, including the sender. 
+ var iter := node.iter.create(0); + while ~iter.is_end + invariant net.sent(M,D) -> sent(M,D) + { + var n := iter.val; + call net.send(src,n,m); + iter := iter.next; + } + } + + implement send { + call net.send(src,dst,m) + } + + private { + invariant net.sent(M,D) -> sent(M,D) + } + } + +} with net, node # to prove that the shim implementation satisfies the shim specification, we rely on the specification of net and node. diff --git a/spec/ivy-proofs/output/.gitignore b/spec/ivy-proofs/output/.gitignore new file mode 100644 index 0000000000..5e7d2734cf --- /dev/null +++ b/spec/ivy-proofs/output/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore diff --git a/spec/ivy-proofs/tendermint.ivy b/spec/ivy-proofs/tendermint.ivy new file mode 100644 index 0000000000..b7678bef98 --- /dev/null +++ b/spec/ivy-proofs/tendermint.ivy @@ -0,0 +1,420 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Specification of Tendermint in Ivy +# --- + +# This specification closely follows the pseudo-code given in "The latest +# gossip on BFT consensus" by E. Buchman, J. Kwon, Z. Milosevic +# + +include domain_model +include network_shim + +# We model the Tendermint protocol as an Ivy object. Like in Object-Oriented +# Programming, the basic structuring unit in Ivy is the object. Objects have +# internal state and actions (i.e. methods in OO parlance) that modify their +# state. We model Tendermint as an object whose actions represent steps taken +# by individual nodes in the protocol. Actions in Ivy can have preconditions, +# and a valid execution is a sequence of actions whose preconditions are all +# satisfied in the state in which they are called. + +# For technical reasons, we define below a `tendermint` module instead of an +# object. Ivy modules are a little bit like classes in OO programs, and like +# classes they can be instantiated to obtain objects. 
To instantiate the +# `tendermint` module, we must provide an abstract-protocol object. This allows +# us to use different abstract-protocol objects for different parts of the +# proof, and to do so without too much notational burden (we could have used +# Ivy monitors, but then we would need to prefix every variable name by the +# name of the object containing it, which clutters things a bit compared to the +# approach we took). + +# The abstract-protocol object is called by the resulting tendermint object so +# as to run the abstract protocol alongside the concrete protocol. This allows +# us to transfer properties proved of the abstract protocol to the concrete +# protocol, as follows. First, we prove that running the abstract protocol in +# this way results in a valid execution of the abstract protocol. This is done +# by checking that all preconditions of the abstract actions are satisfied at +# their call sites. Second, we establish a relation between abstract state and +# concrete state (in the form of invariants of the resulting, two-object +# transition system) that allow us to transfer properties proved in the +# abstract protocol to the concrete protocol (for example, we prove that any +# decision made in the Tendermint protocol is also made in the abstract +# protocol; if the abstract protocol satisfies the agreement property, this +# allows us to conclude that the Tendermint protocol also does). + +# The abstract protocol object that we will use is always the same, and only +# the abstract properties that we prove about it change in the different +# instantiations of the `tendermint` module. Thus we provide common invariants +# that a) allow to prove that the abstract preconditions are met, and b) +# provide a refinement relation (see end of the module) relating the state of +# Tendermint to the state of the abstract protocol. + +# In the model, Byzantine nodes can send whatever messages they want, except +# that they cannot forge sender identities. 
This reflects the fact that, in +# practice, nodes use public key cryptography to sign their messages. + +# Finally, note that the observations that serve to adjudicate misbehavior are +# defined only in the abstract protocol (they happen in the abstract actions). + +module tendermint(abstract_protocol) = { + + # the initial value of a node: + function init_val(N:node): value + + # the three type of steps + object step_t = { + type this = {propose, prevote, precommit} + } # refer to those e.g. as step_t.propose + + object server(n:node) = { + + # the current round of a node + individual round_p: round + + individual step: step_t + + individual decision: value + + individual lockedValue: value + individual lockedRound: round + + individual validValue: value + individual validRound: round + + + relation done_l34(R:round) + relation done_l36(R:round, V:value) + relation done_l47(R:round) + + # variables for scheduling request + relation propose_timer_scheduled(R:round) + relation prevote_timer_scheduled(R:round) + relation precommit_timer_scheduled(R:round) + + relation _recved_proposal(Sender:node, R:round, V:value, VR:round) + relation _recved_prevote(Sender:node, R:round, V:value) + relation _recved_precommit(Sender:node, R:round, V:value) + + relation _has_started + + after init { + round_p := 0; + step := step_t.propose; + decision := value.nil; + + lockedValue := value.nil; + lockedRound := round.minus_one; + + validValue := value.nil; + validRound := round.minus_one; + + done_l34(R) := false; + done_l36(R, V) := false; + done_l47(R) := false; + + propose_timer_scheduled(R) := false; + prevote_timer_scheduled(R) := false; + precommit_timer_scheduled(R) := false; + + _recved_proposal(Sender, R, V, VR) := false; + _recved_prevote(Sender, R, V) := false; + _recved_precommit(Sender, R, V) := false; + + _has_started := false; + } + + action getValue returns (v:value) = { + v := init_val(n) + } + + export action start = { + require ~_has_started; + _has_started := 
true; + # line 10 + call startRound(0); + } + + # line 11-21 + action startRound(r:round) = { + # line 12 + round_p := r; + + # line 13 + step := step_t.propose; + + var proposal : value; + + # line 14 + if (proposers.get_proposer(r) = n) { + if validValue ~= value.nil { # line 15 + proposal := validValue; # line 16 + } else { + proposal := getValue(); # line 18 + }; + call broadcast_proposal(r, proposal, validRound); # line 19 + } else { + propose_timer_scheduled(r) := true; # line 21 + }; + + call abstract_protocol.l_11(n, r); + } + + # This action, as not exported, can only be called at specific call sites. + action broadcast_proposal(r:round, v:value, vr:round) = { + var m: msg; + m.m_kind := msg_kind.proposal; + m.m_src := n; + m.m_round := r; + m.m_value := v; + m.m_vround := vr; + call shim.broadcast(n,m); + } + + implement shim.proposal_handler.handle(msg:msg) { + _recved_proposal(msg.m_src, msg.m_round, msg.m_value, msg.m_vround) := true; + } + + # line 22-27 + export action l_22(v:value) = { + require _has_started; + require _recved_proposal(proposers.get_proposer(round_p), round_p, v, round.minus_one); + require step = step_t.propose; + + if (value.valid(v) & (lockedRound = round.minus_one | lockedValue = v)) { + call broadcast_prevote(round_p, v); # line 24 + call abstract_protocol.l_22(n, round_p, v); + } else { + call broadcast_prevote(round_p, value.nil); # line 26 + call abstract_protocol.l_22(n, round_p, value.nil); + }; + + # line 27 + step := step_t.prevote; + } + + # line 28-33 + export action l_28(r:round, v:value, vr:round, q:nset) = { + require _has_started; + require r = round_p; + require _recved_proposal(proposers.get_proposer(r), r, v, vr); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,vr,v); + require step = step_t.propose; + require vr >= 0 & vr < r; + + # line 29 + if (value.valid(v) & (lockedRound <= vr | lockedValue = v)) { + call broadcast_prevote(r, v); + } else { + call broadcast_prevote(r, 
value.nil); + }; + + call abstract_protocol.l_28(n,r,v,vr,q); + step := step_t.prevote; + } + + action broadcast_prevote(r:round, v:value) = { + var m: msg; + m.m_kind := msg_kind.prevote; + m.m_src := n; + m.m_round := r; + m.m_value := v; + call shim.broadcast(n,m); + } + + implement shim.prevote_handler.handle(msg:msg) { + _recved_prevote(msg.m_src, msg.m_round, msg.m_value) := true; + } + + # line 34-35 + export action l_34(r:round, q:nset) = { + require _has_started; + require round_p = r; + require nset.is_quorum(q); + require exists V . nset.member(N,q) -> _recved_prevote(N,r,V); + require step = step_t.prevote; + require ~done_l34(r); + done_l34(r) := true; + + prevote_timer_scheduled(r) := true; + } + + + # line 36-43 + export action l_36(r:round, v:value, q:nset) = { + require _has_started; + require r = round_p; + require exists VR . round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,r,v); + require value.valid(v); + require step = step_t.prevote | step = step_t.precommit; + + require ~done_l36(r,v); + done_l36(r, v) := true; + + if step = step_t.prevote { + lockedValue := v; # line 38 + lockedRound := r; # line 39 + call broadcast_precommit(r, v); # line 40 + step := step_t.precommit; # line 41 + call abstract_protocol.l_36(n, r, v, q); + }; + + validValue := v; # line 42 + validRound := r; # line 43 + } + + # line 44-46 + export action l_44(r:round, q:nset) = { + require _has_started; + require r = round_p; + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,r,value.nil); + require step = step_t.prevote; + + call broadcast_precommit(r, value.nil); # line 45 + step := step_t.precommit; # line 46 + + call abstract_protocol.l_44(n, r, q); + } + + action broadcast_precommit(r:round, v:value) = { + var m: msg; + m.m_kind := msg_kind.precommit; + m.m_src := n; + m.m_round := r; + m.m_value := v; + call 
shim.broadcast(n,m); + } + + implement shim.precommit_handler.handle(msg:msg) { + _recved_precommit(msg.m_src, msg.m_round, msg.m_value) := true; + } + + + # line 47-48 + export action l_47(r:round, q:nset) = { + require _has_started; + require round_p = r; + require nset.is_quorum(q); + require nset.member(N,q) -> exists V . _recved_precommit(N,r,V); + require ~done_l47(r); + done_l47(r) := true; + + precommit_timer_scheduled(r) := true; + } + + + # line 49-54 + export action l_49_decide(r:round, v:value, q:nset) = { + require _has_started; + require exists VR . round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_precommit(N,r,v); + require decision = value.nil; + + if value.valid(v) { + decision := v; + # MORE for next height + call abstract_protocol.decide(n, r, v, q); + } + } + + # line 55-56 + export action l_55(r:round, b:nset) = { + require _has_started; + require nset.is_blocking(b); + require nset.member(N,b) -> exists VR . round.minus_one <= VR & VR < r & exists V . 
_recved_proposal(N,r,V,VR) | _recved_prevote(N,r,V) | _recved_precommit(N,r,V); + require r > round_p; + call startRound(r); # line 56 + } + + # line 57-60 + export action onTimeoutPropose(r:round) = { + require _has_started; + require propose_timer_scheduled(r); + require r = round_p; + require step = step_t.propose; + call broadcast_prevote(r,value.nil); + step := step_t.prevote; + + call abstract_protocol.l_57(n,r); + + propose_timer_scheduled(r) := false; + } + + # line 61-64 + export action onTimeoutPrevote(r:round) = { + require _has_started; + require prevote_timer_scheduled(r); + require r = round_p; + require step = step_t.prevote; + call broadcast_precommit(r,value.nil); + step := step_t.precommit; + + call abstract_protocol.l_61(n,r); + + prevote_timer_scheduled(r) := false; + } + + # line 65-67 + export action onTimeoutPrecommit(r:round) = { + require _has_started; + require precommit_timer_scheduled(r); + require r = round_p; + call startRound(round.incr(r)); + + precommit_timer_scheduled(r) := false; + } + +# The Byzantine actions +# --------------------- + +# Byzantine nodes can send whatever they want, but they cannot send +# messages on behalf of well-behaved nodes. In practice this is implemented +# using cryptography (e.g. public-key cryptography). + + export action byzantine_send(m:msg, dst:node) = { + require ~well_behaved(n); + require ~well_behaved(m.m_src); # cannot forge the identity of well-behaved nodes + call shim.send(n,dst,m); + } + +# Byzantine nodes can also report fake observations, as defined in the abstract protocol. + export action fake_observations = { + call abstract_protocol.misbehave + } + +# Invariants +# ---------- + +# We provide common invariants that a) allow to prove that the abstract +# preconditions are met, and b) provide a refinement relation. + + + specification { + + invariant 0 <= round_p + invariant abstract_protocol.left_round(n,R) <-> R < round_p + + invariant lockedRound ~= round.minus_one -> forall R,V . 
abstract_protocol.locked(n,R,V) <-> R <= lockedRound & lockedValue = V + invariant lockedRound = round.minus_one -> forall R,V . ~abstract_protocol.locked(n,R,V) + + invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.prevote & shim.sent(M,N) -> abstract_protocol.prevoted(M.m_src,M.m_round,M.m_value) + invariant well_behaved(N) & _recved_prevote(N,R,V) -> abstract_protocol.prevoted(N,R,V) + invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.precommit & shim.sent(M,N) -> abstract_protocol.precommitted(M.m_src,M.m_round,M.m_value) + invariant well_behaved(N) & _recved_precommit(N,R,V) -> abstract_protocol.precommitted(N,R,V) + + invariant (step = step_t.prevote | step = step_t.propose) -> ~abstract_protocol.precommitted(n,round_p,V) + invariant step = step_t.propose -> ~abstract_protocol.prevoted(n,round_p,V) + invariant step = step_t.prevote -> exists V . abstract_protocol.prevoted(n,round_p,V) + + invariant round_p < R -> ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V)) + invariant ~_has_started -> step = step_t.propose & ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V)) & round_p = 0 + + invariant decision ~= value.nil -> exists R . abstract_protocol.decided(n,R,decision) + } + } +} diff --git a/spec/ivy-proofs/tendermint_test.ivy b/spec/ivy-proofs/tendermint_test.ivy new file mode 100644 index 0000000000..1299fc086d --- /dev/null +++ b/spec/ivy-proofs/tendermint_test.ivy @@ -0,0 +1,127 @@ +#lang ivy1.7 + +include tendermint +include abstract_tendermint + +isolate ghost_ = { + instantiate abstract_tendermint +} + +isolate protocol = { + instantiate tendermint(ghost_) # here we instantiate the parameter of the tendermint module with `ghost_`; however note that we don't extract any code for `ghost_` (it's not in the list of object in the extract, and it's thus sliced away). 
+ implementation { + definition init_val(n:node) = <<< `n`%2 >>> + } + # attribute test = impl +} with ghost_, shim, value, round, proposers + +# Here we run a simple scenario that exhibits an execution in which nodes make +# a decision. We do this to rule out trivial modeling errors. + +# One option to check that this scenario is valid is to run it in Ivy's REPL. +# For this, first compile the scenario: +#```ivyc target=repl isolate=code trace=true tendermint_test.ivy +# Then, run the produced binary (e.g. for 4 nodes): +#``` ./tendermint_test 4 +# Finally, call the action: +#``` scenarios.scenario_1 +# Note that Ivy will check at runtime that all action preconditions are +# satisfied. For example, running the scenario twice will cause a violation of +# the precondition of the `start` action, because a node cannot start twice +# (see `require ~_has_started` in action `start`). + +# Another possibility would be to run `ivy_check` on the scenario, but that +# does not seem to work at the moment. 
+ +isolate scenarios = { + individual all:nset # will be used as parameter to actions requiring a quorum + + after init { + var iter := node.iter.create(0); + while ~iter.is_end + { + all := all.insert(iter.val); + iter := iter.next; + }; + assert nset.is_quorum(all); # we can also use asserts to make sure we are getting what we expect + } + + export action scenario_1 = { + # all nodes start: + var iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.start(iter.val); + iter := iter.next; + }; + # all nodes receive the leader's proposal: + var m:msg; + m.m_kind := msg_kind.proposal; + m.m_src := 0; + m.m_round := 0; + m.m_value := 0; + m.m_vround := round.minus_one; + iter := node.iter.create(0); + while ~iter.is_end + { + call net.recv(iter.val,m); + iter := iter.next; + }; + # all nodes prevote: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_22(iter.val,0); + iter := iter.next; + }; + # all nodes receive each other's prevote messages; + m.m_kind := msg_kind.prevote; + m.m_vround := 0; + iter := node.iter.create(0); + while ~iter.is_end + { + var iter2 := node.iter.create(0); # the sender + while ~iter2.is_end + { + m.m_src := iter2.val; + call net.recv(iter.val,m); + iter2 := iter2.next; + }; + iter := iter.next; + }; + # all nodes precommit: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_36(iter.val,0,0,all); + iter := iter.next; + }; + # all nodes receive each other's pre-commits + m.m_kind := msg_kind.precommit; + iter := node.iter.create(0); + while ~iter.is_end + { + var iter2 := node.iter.create(0); # the sender + while ~iter2.is_end + { + m.m_src := iter2.val; + call net.recv(iter.val,m); + iter2 := iter2.next; + }; + iter := iter.next; + }; + # now all nodes can decide: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_49_decide(iter.val,0,0,all); + iter := iter.next; + }; + } + + # TODO: add more scenarios + +} with round, node, 
proposers, value, nset, protocol, shim, net + +# extract code = protocol, shim, round, node +extract code = round, node, proposers, value, nset, protocol, shim, net, scenarios diff --git a/spec/light-client/README.md b/spec/light-client/README.md new file mode 100644 index 0000000000..1fec7a5eab --- /dev/null +++ b/spec/light-client/README.md @@ -0,0 +1,205 @@ +--- +order: 1 +parent: + title: Light Client + order: 5 +--- + +# Light Client Specification + +This directory contains work-in-progress English and TLA+ specifications for the Light Client +protocol. Implementations of the light client can be found in +[Rust](https://github.com/informalsystems/tendermint-rs/tree/master/light-client) and +[Go](https://github.com/tendermint/tendermint/tree/v0.34.x/light). + +Light clients are assumed to be initialized once from a trusted source +with a trusted header and validator set. The light client +protocol allows a client to then securely update its trusted state by requesting and +verifying a minimal set of data from a network of full nodes (at least one of which is correct). + +The light client is decomposed into two main components: + +- [Commit Verification](#Commit-Verification) - verify signed headers and associated validator + set changes from a single full node, called primary +- [Attack Detection](#Attack-Detection) - verify commits across multiple full nodes (called secondaries) and detect conflicts (ie. the existence of a lightclient attack) + +In case a lightclient attack is detected, the lightclient submits evidence to a full node which is responsible for "accountability", that is, punishing attackers: + +- [Accountability](#Accountability) - given evidence for an attack, compute a set of validators that are responsible for it. 
+ +## Commit Verification + +The [English specification](verification/verification_001_published.md) describes the light client +commit verification problem in terms of the temporal properties +[LCV-DIST-SAFE.1](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification/verification_001_published.md#lcv-dist-safe1) and +[LCV-DIST-LIVE.1](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification/verification_001_published.md#lcv-dist-live1). +Commit verification is assumed to operate within the Tendermint Failure Model, where +2/3 of validators are correct for some time period and +validator sets can change arbitrarily at each height. + +A light client protocol is also provided, including all checks that +need to be performed on headers, commits, and validator sets +to satisfy the temporal properties - so a light client can continuously +synchronize with a blockchain. Clients can skip possibly +many intermediate headers by exploiting overlap in trusted and untrusted validator sets. +When there is not enough overlap, a bisection routine can be used to find a +minimal set of headers that do provide the required overlap. + +The [TLA+ specification ver. 001](verification/Lightclient_A_1.tla) +is a formal description of the +commit verification protocol executed by a client, including the safety and +termination, which can be model checked with Apalache. + +A more detailed TLA+ specification of +[Light client verification ver. 003](verification/Lightclient_003_draft.tla) +is currently under peer review. + +The `MC*.tla` files contain concrete parameters for the +[TLA+ specification](verification/Lightclient_A_1.tla), in order to do model checking. 
+For instance, [MC4_3_faulty.tla](verification/MC4_3_faulty.tla) contains the following parameters +for the nodes, heights, the trusting period, the clock drifts, +correctness of the primary node, and the ratio of the faulty processes: + +```tla +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* the trusting period in some time units +CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators +``` + +To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 002bmc-apalache-ok.csv $DIR/apalache . out +./out/run-all.sh +``` + +After the experiments have finished, you can collect the logs by executing the following command: + +```sh +cd ./out +$DIR/apalache-tests/scripts/parse-logs.py --human . +``` + +All lines in `results.csv` should report `Deadlock`, which means that the algorithm +has terminated and no invariant violation was found. + +Similar to [002bmc-apalache-ok.csv](verification/002bmc-apalache-ok.csv), +file [003bmc-apalache-error.csv](verification/003bmc-apalache-error.csv) specifies +the set of experiments that should result in counterexamples: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 003bmc-apalache-error.csv $DIR/apalache . out +./out/run-all.sh +``` + +All lines in `results.csv` should report `Error`. + +The following table summarizes the experimental results for Light client verification +version 001. The TLA+ properties can be found in the +[TLA+ specification](verification/Lightclient_A_1.tla). 
+ The experiments were run in an AWS instance equipped with 32GB +RAM and a 4-core Intel® Xeon® CPU E5-2686 v4 @ 2.30GHz CPU. +We write “✗=k” when a bug is reported at depth k, and “✓<=k” when +no bug is reported up to depth k. + +![Experimental results](experiments.png) + +The experimental results for version 003 are to be added. + +## Attack Detection + +The [English specification](detection/detection_003_reviewed.md) +defines light client attacks (and how they differ from blockchain +forks), and describes the problem of a light client detecting +these attacks by communicating with a network of full nodes, +where at least one is correct. + +The specification also contains a detection protocol that checks +whether the header obtained from the primary via the verification +protocol matches corresponding headers provided by the secondaries. +If this is not the case, the protocol analyses the verification traces +of the involved full nodes +and generates +[evidence](detection/detection_003_reviewed.md#tmbc-lc-evidence-data1) +of misbehavior that can be submitted to a full node so that +the faulty validators can be punished. + +The [TLA+ specification](detection/LCDetector_003_draft.tla) +is a formal description of the +detection protocol for two peers, including the safety and +termination, which can be model checked with Apalache. + +The `LCD_MC*.tla` files contain concrete parameters for the +[TLA+ specification](detection/LCDetector_003_draft.tla), +in order to run the model checker. 
+For instance, [LCD_MC4_4_faulty.tla](detection/MC4_4_faulty.tla) +contains the following parameters +for the nodes, heights, the trusting period, the clock drifts, +correctness of the nodes, and the ratio of the faulty processes: + +```tla +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* the trusting period in some time units +CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators +``` + +To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 004bmc-apalache-ok.csv $DIR/apalache . out +./out/run-all.sh +``` + +After the experiments have finished, you can collect the logs by executing the following command: + +```sh +cd ./out +$DIR/apalache-tests/scripts/parse-logs.py --human . +``` + +All lines in `results.csv` should report `Deadlock`, which means that the algorithm +has terminated and no invariant violation was found. + +Similar to [004bmc-apalache-ok.csv](verification/004bmc-apalache-ok.csv), +file [005bmc-apalache-error.csv](verification/005bmc-apalache-error.csv) specifies +the set of experiments that should result in counterexamples: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 005bmc-apalache-error.csv $DIR/apalache . out +./out/run-all.sh +``` + +All lines in `results.csv` should report `Error`. + +The detailed experimental results are to be added soon. 
+ +## Accountability + +The [English specification](attacks/isolate-attackers_002_reviewed.md) +defines the protocol that is executed on a full node upon receiving attack [evidence](detection/detection_003_reviewed.md#tmbc-lc-evidence-data1) from a lightclient. In particular, the protocol handles three types of attacks + +- lunatic +- equivocation +- amnesia + +We discussed in the [last part](attacks/isolate-attackers_002_reviewed.md#Part-III---Completeness) of the English specification +that the non-lunatic cases are defined by having the same validator set in the conflicting blocks. For these cases, +computer-aided analysis of [Tendermint Consensus in TLA+](./accountability/README.md) shows that equivocation and amnesia capture all non-lunatic attacks. + +The [TLA+ specification](attacks/Isolation_001_draft.tla) +is a formal description of the +protocol, including the safety property, which can be model checked with Apalache. + +Similar to the other specifications, [MC_5_3.tla](attacks/MC_5_3.tla) contains concrete parameters to run the model checker. The specification can be checked within seconds. 
+ +[tendermint-accountability](./accountability/README.md) diff --git a/spec/light-client/accountability/001indinv-apalache.csv b/spec/light-client/accountability/001indinv-apalache.csv new file mode 100644 index 0000000000..37c6aeda25 --- /dev/null +++ b/spec/light-client/accountability/001indinv-apalache.csv @@ -0,0 +1,13 @@ +no,filename,tool,timeout,init,inv,next,args +1,MC_n4_f1.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +2,MC_n4_f2.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +3,MC_n5_f1.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +4,MC_n5_f2.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +5,MC_n4_f1.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +6,MC_n4_f2.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +7,MC_n5_f1.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +8,MC_n5_f2.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +9,MC_n4_f1.tla,apalache,20h,TypedInv,Agreement,,--length=0 --cinit=ConstInit +10,MC_n4_f2.tla,apalache,20h,TypedInv,Accountability,,--length=0 --cinit=ConstInit +11,MC_n5_f1.tla,apalache,20h,TypedInv,Agreement,,--length=0 --cinit=ConstInit +12,MC_n5_f2.tla,apalache,20h,TypedInv,Accountability,,--length=0 --cinit=ConstInit diff --git a/spec/light-client/accountability/MC_n4_f1.tla b/spec/light-client/accountability/MC_n4_f1.tla new file mode 100644 index 0000000000..7a828b4986 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f1.tla @@ -0,0 +1,22 @@ +----------------------------- MODULE MC_n4_f1 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3"}, + Faulty <- {"f1"}, + N <- 4, + T <- 1, + 
ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f2.tla b/spec/light-client/accountability/MC_n4_f2.tla new file mode 100644 index 0000000000..893f18db63 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f2.tla @@ -0,0 +1,22 @@ +----------------------------- MODULE MC_n4_f2 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2"}, + Faulty <- {"f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f2_amnesia.tla b/spec/light-client/accountability/MC_n4_f2_amnesia.tla new file mode 100644 index 0000000000..434fffaeb4 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f2_amnesia.tla @@ -0,0 +1,40 @@ +---------------------- MODULE MC_n4_f2_amnesia ------------------------------- +EXTENDS Sequences + +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +\* the variable declared in TendermintAccTrace3 +VARIABLE + toReplay + +\* old 
apalache annotations, fix with the new release +a <: b == a + +INSTANCE TendermintAccTrace_004_draft WITH + Corr <- {"c1", "c2"}, + Faulty <- {"f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2, + Trace <- << + "UponProposalInPropose", + "UponProposalInPrevoteOrCommitAndPrevote", + "UponProposalInPrecommitNoDecision", + "OnRoundCatchup", + "UponProposalInPropose", + "UponProposalInPrevoteOrCommitAndPrevote", + "UponProposalInPrecommitNoDecision" + >> <: Seq(STRING) + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f3.tla b/spec/light-client/accountability/MC_n4_f3.tla new file mode 100644 index 0000000000..b794fff5ed --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f3.tla @@ -0,0 +1,22 @@ +----------------------------- MODULE MC_n4_f3 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1"}, + Faulty <- {"f2", "f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n5_f1.tla b/spec/light-client/accountability/MC_n5_f1.tla new file mode 100644 index 0000000000..d65673a58d --- /dev/null +++ b/spec/light-client/accountability/MC_n5_f1.tla @@ -0,0 +1,22 @@ 
+----------------------------- MODULE MC_n5_f1 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3", "c4"}, + Faulty <- {"f5"}, + N <- 5, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n5_f2.tla b/spec/light-client/accountability/MC_n5_f2.tla new file mode 100644 index 0000000000..c19aa98cc6 --- /dev/null +++ b/spec/light-client/accountability/MC_n5_f2.tla @@ -0,0 +1,22 @@ +----------------------------- MODULE MC_n5_f2 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3"}, + Faulty <- {"f4", "f5"}, + N <- 5, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n6_f1.tla b/spec/light-client/accountability/MC_n6_f1.tla new file mode 100644 index 0000000000..2e992974f5 --- /dev/null +++ b/spec/light-client/accountability/MC_n6_f1.tla @@ -0,0 +1,22 @@ 
+----------------------------- MODULE MC_n6_f1 ------------------------------- +CONSTANT Proposer \* the proposer function from 0..NRounds to 1..N + +\* the variables declared in TendermintAcc3 +VARIABLES + round, step, decision, lockedValue, lockedRound, validValue, validRound, + msgsPropose, msgsPrevote, msgsPrecommit, evidence, action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3", "c4", "c5"}, + Faulty <- {"f6"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/README.md b/spec/light-client/accountability/README.md new file mode 100644 index 0000000000..bb872649bb --- /dev/null +++ b/spec/light-client/accountability/README.md @@ -0,0 +1,308 @@ +--- +order: 1 +parent: + title: Accountability + order: 4 +--- + +# Fork accountability + +## Problem Statement + +Tendermint consensus guarantees the following specifications for all heights: + +* agreement -- no two correct full nodes decide differently. +* validity -- the decided block satisfies the predefined predicate *valid()*. +* termination -- all correct full nodes eventually decide, + +If the faulty validators have less than 1/3 of voting power in the current validator set. In the case where this assumption +does not hold, each of the specification may be violated. + +The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain, can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed. 
+ +However, faulty nodes may forge blocks and try to convince users (light clients) that the blocks had been correctly generated. In addition, Tendermint agreement might be violated in the case where 1/3 or more of the voting power belongs to faulty validators: Two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common) and each branch continues to generate blocks independently of the other. + +We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and incentivize therefore validators to behave according to the protocol specification. + +**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (+ other protocols in the infrastructure; e.g., full nodes and the Light Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Light Client specification. + +**Q:** Should we distinguish agreement for validators and full nodes for agreement? The case where all correct validators agree on a block, but a correct full node decides on a different block seems to be slightly less severe than the case where two correct validators decide on different blocks.
Still, if a contaminated full node becomes a validator that may be problematic later on. Also it is not clear how gossiping is impaired if a contaminated full node is on a different branch. + +*Remark.* In the case 1/3 or more of the voting power belongs to faulty validators, also validity and termination can be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages. + +## The Misbehavior of Faulty Validators + +Forks are the result of faulty validators deviating from the protocol. In principle several such deviations can be detected without a fork actually occurring: + +1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus. + +2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is a misbehavior. + +3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v)=false* this is misbehavior. + +*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks. + +4. amnesia: Tendermint consensus has a locking mechanism. If a validator has some value v locked, then it can only prevote/precommit for v or nil. Sending prevote/precommit message for a different value v' (that is not nil) while holding lock on value v is misbehavior. + +5.
spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send precommit. Faulty validators may send precommit without having received the prevote messages. + +Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if less than 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have 1/3 or more (or in some cases more than 2/3) of the voting power attackers have the incentive to not misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document. + +## Two types of forks + +* Fork-Full. Two correct validators decide on different blocks for the same height. Since also the next validator sets are decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain. + +As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can spread also to light clients. However, even without breaking this system invariant, light clients can be subject to a fork: + +* Fork-Light. All correct validators decide on the same block for height *h*, but faulty processes (validators or not), forge a different block for that height, in order to fool users (who use the light client). + +# Attack scenarios + +## On-chain attacks + +### Equivocation (one round) + +There are several scenarios in which forks might happen. The first is double signing within a round. + +* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height h. 
+ +### Flip-flopping + +Tendermint consensus implements a locking mechanism: If a correct validator *p* receives proposal for value v and *2f + 1* prevotes for a value *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*. +In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if in a future round *r' > r*, if the process receives proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways: + +* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for different value *id(v')* in higher round *r' > r* without previously correctly unlocking value *v*. In this case faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds. +Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full). + +* F3. Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain. +However, faulty validators may use the correct precommit messages from round *r* together with a posteriori generated faulty precommit messages for round *r* to forge a block for a value that was not decided on the main chain (Fork-Light). 
+ +## Off-chain attacks + +F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to light clients. +Similarly, without actually interfering with the main chain, we can have the following: + +* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) in heights in which they are not part of the validator sets (at the main chain). + +* F5. Lunatic validator: faulty validator that sign vote messages to support (arbitrary) application state that is different from the application state that resulted from valid state transitions. + +## Types of victims + +We consider three types of potential attack victims: + +* FN: full node +* LCS: light client with sequential header verification +* LCB: light client with bisection based header verification + +F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to light clients. + +*Remark.* If full nodes take a branch different from the one taken by the validators, it may be that the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety it is not a primary concern. + +F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected. + +In addition, without creating a fork on the main chain, light clients can be contaminated by more than a third of validators that are faulty and sign a forged header +F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. 
Hence, F4 is an attack against LCB that do not necessarily know the complete prefix of headers (Fork-Light), as they trust a header that is signed by at least one correct validator (trusting period method). + +The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a light client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the light client. + +F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled). + +| Attack | FN | LCS | LCB | +|:------:|:------:|:------:|:------:| +| F1 | direct | FN | FN | +| F2 | direct | FN | FN | +| F3 | direct | FN | FN | +| F4 | | | direct | +| F5 | | | direct | + +**Q:** Light clients are more vulnerable than full nodes, because the former do only verify headers but do not execute transactions. What kind of certainty is gained by a full node that executes a transaction? + +As a full node verifies all transactions, it can only be +contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching. + +## Detailed Attack Scenarios + +### Equivocation based attacks + +In case of equivocation based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same +round of some height. This attack can be executed on both full nodes and light clients. It requires 1/3 or more of voting power to be executed. 
+ +#### Scenario 1: Equivocation on the main chain + +Validators: + +* CA - a set of correct validators with less than 1/3 of the voting power +* CB - a set of correct validators with less than 1/3 of the voting power +* CA and CB are disjoint +* F - a set of faulty validators with 1/3 or more voting power + +Observe that this setting violates the Tendermint failure model. + +Execution: + +* A faulty proposer proposes block A to CA +* A faulty proposer proposes block B to CB +* Validators from the set CA and CB prevote for A and B, respectively. +* Faulty validators from the set F prevote both for A and B. +* The faulty prevote messages + * for A arrive at CA long before the B messages + * for B arrive at CB long before the A messages +* Therefore correct validators from set CA and CB will observe +more than 2/3 of prevotes for A and B and precommit for A and B, respectively. +* Faulty validators from the set F precommit both values A and B. +* Thus, we have more than 2/3 commits for both A and B. + +Consequences: + +* Creating evidence of misbehavior is simple in this case as we have multiple messages signed by the same faulty processes for different values in the same round. + +* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence. + +* This is an attack on the full node level (Fork-Full). +* It extends also to the light clients, +* For both we need a detection and recovery mechanism. + +#### Scenario 2: Equivocation to a light client (LCS) + +Validators: + +* a set F of faulty validators with more than 2/3 of the voting power. + +Execution: + +* for the main chain F behaves nicely +* F coordinates to sign a block B that is different from the one on the main chain. +* the light client obtains B and trusts it as it is signed by more than 2/3 of the voting power.
+ +Consequences: + +Once equivocation is used to attack light client it opens space +for different kind of attacks as application state can be diverged in any direction. For example, it can modify validator set such that it contains only validators that do not have any stake bonded. Note that after a light client is fooled by a fork, that means that an attacker can change application state and validator set arbitrarily. + +In order to detect such (equivocation-based attack), the light client would need to cross check its state with some correct validator (or to obtain a hash of the state from the main chain using out of band channels). + +*Remark.* The light client would be able to create evidence of misbehavior, but this would require to pull potentially a lot of data from correct full nodes. Maybe we need to figure out different architecture where a light client that is attacked will push all its data for the current unbonding period to a correct node that will inspect this data and submit corresponding evidence. There are also architectures that assumes a special role (sometimes called fisherman) whose goal is to collect as much as possible useful data from the network, to do analysis and create evidence transactions. That functionality is outside the scope of this document. + +*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince light client about arbitrary state. In case of LCB where security threshold is at minimum, an attacker can arbitrarily modify application state with 1/3 or more of voting power, while in case of LCS it requires more than 2/3 of the voting power. + +### Flip-flopping: Amnesia based attacks + +In case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used both on full nodes and light clients. 
+ +#### Scenario 3: At most 2/3 of faults + +Validators: + +* a set F of faulty validators with 1/3 or more but at most 2/3 of the voting power +* a set C of correct validators + +Execution: + +* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the + voting power (containing correct and faulty validators). +* All validators (correct and faulty) reach a round *r' > r*. +* Some correct validators in C do not lock any value before round *r'*. +* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*. +* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B. + +*Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1) as they only vote once per round in the execution. + +Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: . + +If a light client is attacked using this attack with 1/3 or more of voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: In the execution above, correct validators still find the value acceptable, however, the block the light client trusts deviates from the one on the main chain. + +#### Scenario 4: More than 2/3 of faults + +In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change application state. 
+ +Validators: + +* a set F1 of faulty validators with 1/3 or more of the voting power +* a set F2 of faulty validators with less than 1/3 of the voting power + +Execution + +* Similar to Scenario 3 (however, messages by correct validators are not needed) +* The faulty validators in F1 lock value A in round *r* +* They sign a different value in follow-up rounds +* F2 does not lock A in round *r* + +Consequences: + +* The validators in F1 will be detectable by the fork accountability mechanisms. +* The validators in F2 cannot be detected using this mechanism. +Only in case they signed something which conflicts with the application this can be used against them. Otherwise they do not do anything incorrect. +* This case is not covered by the report as it only assumes at most 2/3 of faulty validators. + +**Q:** do we need to define a special kind of attack for the case where a validator signs arbitrary state? It seems that detecting such an attack requires a different mechanism that would require as evidence a sequence of blocks that led to that state. This might be very tricky to implement. + +### Back to the past + +In this kind of attack, faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and a delayed message. This kind of attack can be used at both full nodes and light clients. + +#### Scenario 5 + +Validators: + +* C1 - a set of correct validators with over 1/3 of the voting power +* C2 - a set of correct validators with 1/3 of the voting power +* C1 and C2 are disjoint +* F - a set of faulty validators with less than 1/3 voting power +* one additional faulty process *q* +* F and *q* violate the Tendermint failure model. + +Execution: + +* in a round *r* of height *h* we have C1 precommitting a value A, +* C2 precommits nil, +* F does not send any message +* *q* precommits nil.
+* In some round *r' > r*, F and *q* and C2 commit some other value B different from A. +* F and *q* "go back to the past" and sign precommit message for value A in round *r*. +* Together with precommit messages of C1 this is sufficient for a commit for value A. + +Consequences: + +* Only a single faulty validator that previously precommitted nil did equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of amnesia attack. Detecting this kind of attack boils down to mechanisms for equivocation and amnesia. + +**Q:** should we keep this as a separate kind of attack? It seems that equivocation, amnesia and phantom validators are the only kind of attack we need to support and this gives us security also in other cases. This would not be surprising as equivocation and amnesia are attacks that followed from the protocol and phantom attack is not really an attack to Tendermint but more to the Proof of Stake module. + +### Phantom validators + +In case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and light clients. + +#### Scenario 6 + +Validators: + +* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k* + +Execution: + +* There is a fork, and there exist two different headers for height *h + k*, with different validator sets: + * VS2 on the main chain + * forged header VS2', signed by F (and others) + +* a light client has a trust in a header for height *h* (and the corresponding validator set VS1). +* As part of bisection header verification, it verifies the header at height *h + k* with new validator set VS2'. + +Consequences: + +* To detect this, a node needs to see both, the forged header and the canonical header from the chain.
+ +* If this is the case, detecting these kinds of attacks is easy as it just requires verifying if processes are signing messages in heights in which they are not part of the validator set. + +**Remark.** We can have phantom-validator-based attacks as a follow up of equivocation or amnesia based attack where forked state contains validators that are not part of the validator set at the main chain. In this case, they keep signing messages contributed to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack a full node during a period of time it is eclipsed. + +**Remark.** Phantom validator evidence has been removed from implementation as it was deemed, although possibly a plausible form of evidence, not relevant. Any attack on +the light client involving a phantom validator will have needed to be initiated by 1/3+ lunatic +validators that can forge a new validator set that includes the phantom validator. Only in +that case will the light client accept the phantom validator's vote. We need only worry about +punishing the 1/3+ lunatic cabal, that is the root cause of the attack. + +### Lunatic validator + +Lunatic validator agrees to sign commit messages for arbitrary application state. It is used to attack light clients. +Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by +referring to the block before the one in which the misbehavior happened. + +**Q:** can we say that in this case a validator declines to check if a proposed value is valid before voting for it? diff --git a/spec/light-client/accountability/Synopsis.md b/spec/light-client/accountability/Synopsis.md new file mode 100644 index 0000000000..76da3868c7 --- /dev/null +++ b/spec/light-client/accountability/Synopsis.md @@ -0,0 +1,105 @@ + +# Synopsis + + A TLA+ specification of a simplified Tendermint consensus, tuned for + fork accountability.
The simplifications are as follows: + +- the protocol runs for one height, that is, one-shot consensus + +- this specification focuses on safety, so timeouts are modelled with + non-determinism + +- the proposer function is non-deterministic, no fairness is assumed + +- the messages by the faulty processes are injected right in the initial states + +- every process has the voting power of 1 + +- hashes are modelled as identity + + Having the above assumptions in mind, the specification follows the pseudo-code + of the Tendermint paper: + + Byzantine processes can demonstrate arbitrary behavior, including + no communication. However, we have to show that under the collective evidence + collected by the correct processes, at least `f+1` Byzantine processes demonstrate + one of the following behaviors: + +- Equivocation: a Byzantine process sends two different values + in the same round. + +- Amnesia: a Byzantine process locks a value, although it has locked + another value in the past. + +# TLA+ modules + +- [TendermintAcc_004_draft](TendermintAcc_004_draft.tla) is the protocol + specification, + +- [TendermintAccInv_004_draft](TendermintAccInv_004_draft.tla) contains an + inductive invariant for establishing the protocol safety as well as the + forking cases, + +- `MC_n_f`, e.g., [MC_n4_f1](MC_n4_f1.tla), contains fixed constants for + model checking with the [Apalache model + checker](https://github.com/informalsystems/apalache), + +- [TendermintAccTrace_004_draft](TendermintAccTrace_004_draft.tla) shows how + to restrict the execution space to a fixed sequence of actions (e.g., to + instantiate a counterexample), + +- [TendermintAccDebug_004_draft](TendermintAccDebug_004_draft.tla) contains + the useful definitions for debugging the protocol specification with TLC and + Apalache. + +# Reasoning about fork scenarios + +The theorem statements can be found in +[TendermintAccInv_004_draft.tla](TendermintAccInv_004_draft.tla).
+ +First, we would like to show that `TypedInv` is an inductive invariant. +Formally, the statement looks as follows: + +```tla +THEOREM TypedInvIsInductive == + \/ FaultyQuorum + \//\ Init => TypedInv + /\ TypedInv /\ [Next]_vars => TypedInv' +``` + +When over two-thirds of processes are faulty, `TypedInv` is not inductive. +However, there is no hope to repair the protocol in this case. We run +[Apalache](https://github.com/informalsystems/apalache) to prove this theorem +only for fixed instances of 4 to 5 validators. Apalache does not parse theorem +statements at the moment, so we ran Apalache using a shell script. To find a +parameterized argument, one has to use a theorem prover, e.g., TLAPS. + +Second, we would like to show that the invariant implies `Agreement`, that is, +no fork, provided that less than one third of processes is faulty. By combining +this theorem with the previous theorem, we conclude that the protocol indeed +satisfies Agreement under the condition `LessThanThirdFaulty`. + +```tla +THEOREM AgreementWhenLessThanThirdFaulty == + LessThanThirdFaulty /\ TypedInv => Agreement +``` + +Third, in the general case, we either have no fork, or two fork scenarios: + +```tla +THEOREM AgreementOrFork == + ~FaultyQuorum /\ TypedInv => Accountability +``` + +# Model checking results + +Check the report on [model checking with Apalache](./results/001indinv-apalache-report.md). + +To run the model checking experiments, use the script: + +```console +./run.sh +``` + +This script assumes that the apalache build is available in +`~/devl/apalache-unstable`. 
diff --git a/spec/light-client/accountability/TendermintAccDebug_004_draft.tla b/spec/light-client/accountability/TendermintAccDebug_004_draft.tla new file mode 100644 index 0000000000..deaa990ea3 --- /dev/null +++ b/spec/light-client/accountability/TendermintAccDebug_004_draft.tla @@ -0,0 +1,100 @@ +------------------ MODULE TendermintAccDebug_004_draft ------------------------- +(* + A few definitions that we use for debugging TendermintAcc3, which do not belong + to the specification itself. + + * Version 3. Modular and parameterized definitions. + + Igor Konnov, 2020. + *) + +EXTENDS TendermintAccInv_004_draft + +\* make them parameters? +NFaultyProposals == 0 \* the number of injected faulty PROPOSE messages +NFaultyPrevotes == 6 \* the number of injected faulty PREVOTE messages +NFaultyPrecommits == 6 \* the number of injected faulty PRECOMMIT messages + +\* Given a set of allowed messages Msgs, this operator produces a function from +\* rounds to sets of messages. +\* Importantly, there will be exactly k messages in the image of msgFun. +\* We use this action to produce k faults in an initial state. 
+ProduceFaults(msgFun, From, k) == + \E f \in [1..k -> From]: + msgFun = [r \in Rounds |-> {m \in {f[i]: i \in 1..k}: m.round = r}] + +\* As TLC explodes with faults, we may have initial states without faults +InitNoFaults == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose = [r \in Rounds |-> EmptyMsgSet] + /\ msgsPrevote = [r \in Rounds |-> EmptyMsgSet] + /\ msgsPrecommit = [r \in Rounds |-> EmptyMsgSet] + /\ evidence = EmptyMsgSet + +(* + A specialized version of Init that injects NFaultyProposals proposals, + NFaultyPrevotes prevotes, NFaultyPrecommits precommits by the faulty processes + *) +InitFewFaults == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ ProduceFaults(msgsPrevote', + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: Rounds, id: Values]), + NFaultyPrevotes) + /\ ProduceFaults(msgsPrecommit', + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: Rounds, id: Values]), + NFaultyPrecommits) + /\ ProduceFaults(msgsPropose', + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, round: Rounds, + proposal: Values, validRound: Rounds \cup {NilRound}]), + NFaultyProposals) + /\ evidence = EmptyMsgSet + +\* Add faults incrementally +NextWithFaults == + \* either the protocol makes a step + \/ Next + \* or a faulty process sends a message + \//\ UNCHANGED <> + /\ \E p \in Faulty: + \E r \in Rounds: + \//\ UNCHANGED <> + /\ \E proposal \in ValidValues \union {NilValue}: + \E vr \in RoundsOrNil: + BroadcastProposal(p, r, proposal, vr) + \//\ UNCHANGED <> + /\ \E id 
\in ValidValues \union {NilValue}: + BroadcastPrevote(p, r, id) + \//\ UNCHANGED <> + /\ \E id \in ValidValues \union {NilValue}: + BroadcastPrecommit(p, r, id) + +(******************************** PROPERTIES ***************************************) +\* simple reachability properties to see that the spec is progressing +NoPrevote == \A p \in Corr: step[p] /= "PREVOTE" + +NoPrecommit == \A p \in Corr: step[p] /= "PRECOMMIT" + +NoValidPrecommit == + \A r \in Rounds: + \A m \in msgsPrecommit[r]: + m.id = NilValue \/ m.src \in Faulty + +NoHigherRounds == \A p \in Corr: round[p] < 1 + +NoDecision == \A p \in Corr: decision[p] = NilValue + +============================================================================= + diff --git a/spec/light-client/accountability/TendermintAccInv_004_draft.tla b/spec/light-client/accountability/TendermintAccInv_004_draft.tla new file mode 100644 index 0000000000..5dd15396de --- /dev/null +++ b/spec/light-client/accountability/TendermintAccInv_004_draft.tla @@ -0,0 +1,370 @@ +------------------- MODULE TendermintAccInv_004_draft -------------------------- +(* + An inductive invariant for TendermintAcc3, which capture the forked + and non-forked cases. + + * Version 3. Modular and parameterized definitions. + * Version 2. Bugfixes in the spec and an inductive invariant. + + Igor Konnov, 2020. 
+ *) + +EXTENDS TendermintAcc_004_draft + +(************************** TYPE INVARIANT ***********************************) +(* first, we define the sets of all potential messages *) +AllProposals == + SetOfMsgs([type: {"PROPOSAL"}, + src: AllProcs, + round: Rounds, + proposal: ValuesOrNil, + validRound: RoundsOrNil]) + +AllPrevotes == + SetOfMsgs([type: {"PREVOTE"}, + src: AllProcs, + round: Rounds, + id: ValuesOrNil]) + +AllPrecommits == + SetOfMsgs([type: {"PRECOMMIT"}, + src: AllProcs, + round: Rounds, + id: ValuesOrNil]) + +(* the standard type invariant -- importantly, it is inductive *) +TypeOK == + /\ round \in [Corr -> Rounds] + /\ step \in [Corr -> { "PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" }] + /\ decision \in [Corr -> ValidValues \union {NilValue}] + /\ lockedValue \in [Corr -> ValidValues \union {NilValue}] + /\ lockedRound \in [Corr -> RoundsOrNil] + /\ validValue \in [Corr -> ValidValues \union {NilValue}] + /\ validRound \in [Corr -> RoundsOrNil] + /\ msgsPropose \in [Rounds -> SUBSET AllProposals] + /\ BenignRoundsInMessages(msgsPropose) + /\ msgsPrevote \in [Rounds -> SUBSET AllPrevotes] + /\ BenignRoundsInMessages(msgsPrevote) + /\ msgsPrecommit \in [Rounds -> SUBSET AllPrecommits] + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence \in SUBSET (AllProposals \union AllPrevotes \union AllPrecommits) + /\ action \in { + "Init", + "InsertProposal", + "UponProposalInPropose", + "UponProposalInProposeAndPrevote", + "UponQuorumOfPrevotesAny", + "UponProposalInPrevoteOrCommitAndPrevote", + "UponQuorumOfPrecommitsAny", + "UponProposalInPrecommitNoDecision", + "OnTimeoutPropose", + "OnQuorumOfNilPrevotes", + "OnRoundCatchup" + } + +(************************** INDUCTIVE INVARIANT *******************************) +EvidenceContainsMessages == + \* evidence contains only the messages from: + \* msgsPropose, msgsPrevote, and msgsPrecommit + \A m \in evidence: + LET r == m.round + t == m.type + IN + CASE t = "PROPOSAL" -> m \in msgsPropose[r] + [] t = 
"PREVOTE" -> m \in msgsPrevote[r] + [] OTHER -> m \in msgsPrecommit[r] + +NoFutureMessagesForLargerRounds(p) == + \* a correct process does not send messages for the future rounds + \A r \in { rr \in Rounds: rr > round[p] }: + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \A m \in msgsPrevote[r]: m.src /= p + /\ \A m \in msgsPrecommit[r]: m.src /= p + +NoFutureMessagesForCurrentRound(p) == + \* a correct process does not send messages in the future + LET r == round[p] IN + /\ Proposer[r] = p \/ \A m \in msgsPropose[r]: m.src /= p + /\ \/ step[p] \in {"PREVOTE", "PRECOMMIT", "DECIDED"} + \/ \A m \in msgsPrevote[r]: m.src /= p + /\ \/ step[p] \in {"PRECOMMIT", "DECIDED"} + \/ \A m \in msgsPrecommit[r]: m.src /= p + +\* the correct processes never send future messages +AllNoFutureMessagesSent == + \A p \in Corr: + /\ NoFutureMessagesForCurrentRound(p) + /\ NoFutureMessagesForLargerRounds(p) + +\* a correct process in the PREVOTE state has sent a PREVOTE message +IfInPrevoteThenSentPrevote(p) == + step[p] = "PREVOTE" => + \E m \in msgsPrevote[round[p]]: + /\ m.id \in ValidValues \cup { NilValue } + /\ m.src = p + +AllIfInPrevoteThenSentPrevote == + \A p \in Corr: IfInPrevoteThenSentPrevote(p) + +\* a correct process in the PRECOMMIT state has sent a PRECOMMIT message +IfInPrecommitThenSentPrecommit(p) == + step[p] = "PRECOMMIT" => + \E m \in msgsPrecommit[round[p]]: + /\ m.id \in ValidValues \cup { NilValue } + /\ m.src = p + +AllIfInPrecommitThenSentPrecommit == + \A p \in Corr: IfInPrecommitThenSentPrecommit(p) + +\* a process in the PRECOMMIT state has sent a PRECOMMIT message +IfInDecidedThenValidDecision(p) == + step[p] = "DECIDED" <=> decision[p] \in ValidValues + +AllIfInDecidedThenValidDecision == + \A p \in Corr: IfInDecidedThenValidDecision(p) + +\* a decided process should have received a proposal on its decision +IfInDecidedThenReceivedProposal(p) == + step[p] = "DECIDED" => + \E r \in Rounds: \* r is not necessarily round[p] + /\ \E m \in msgsPropose[r] 
\intersect evidence: + /\ m.src = Proposer[r] + /\ m.proposal = decision[p] + \* not inductive: /\ m.src \in Corr => (m.validRound <= r) + +AllIfInDecidedThenReceivedProposal == + \A p \in Corr: + IfInDecidedThenReceivedProposal(p) + +\* a decided process has received two-thirds of precommit messages +IfInDecidedThenReceivedTwoThirds(p) == + step[p] = "DECIDED" => + \E r \in Rounds: + LET PV == + { m \in msgsPrecommit[r] \intersect evidence: m.id = decision[p] } + IN + Cardinality(PV) >= THRESHOLD2 + +AllIfInDecidedThenReceivedTwoThirds == + \A p \in Corr: + IfInDecidedThenReceivedTwoThirds(p) + +\* for a round r, there is proposal by the round proposer for a valid round vr +ProposalInRound(r, proposedVal, vr) == + \E m \in msgsPropose[r]: + /\ m.src = Proposer[r] + /\ m.proposal = proposedVal + /\ m.validRound = vr + +TwoThirdsPrevotes(vr, v) == + LET PV == { mm \in msgsPrevote[vr] \intersect evidence: mm.id = v } IN + Cardinality(PV) >= THRESHOLD2 + +\* if a process sends a PREVOTE, then there are three possibilities: +\* 1) the process is faulty, 2) the PREVOTE cotains Nil, +\* 3) there is a proposal in an earlier (valid) round and two thirds of PREVOTES +IfSentPrevoteThenReceivedProposalOrTwoThirds(r) == + \A mpv \in msgsPrevote[r]: + \/ mpv.src \in Faulty + \* lockedRound and lockedValue is beyond my comprehension + \/ mpv.id = NilValue + \//\ mpv.src \in Corr + /\ mpv.id /= NilValue + /\ \/ ProposalInRound(r, mpv.id, NilRound) + \/ \E vr \in { rr \in Rounds: rr < r }: + /\ ProposalInRound(r, mpv.id, vr) + /\ TwoThirdsPrevotes(vr, mpv.id) + +AllIfSentPrevoteThenReceivedProposalOrTwoThirds == + \A r \in Rounds: + IfSentPrevoteThenReceivedProposalOrTwoThirds(r) + +\* if a correct process has sent a PRECOMMIT, then there are two thirds, +\* either on a valid value, or a nil value +IfSentPrecommitThenReceivedTwoThirds == + \A r \in Rounds: + \A mpc \in msgsPrecommit[r]: + mpc.src \in Corr => + \/ /\ mpc.id \in ValidValues + /\ LET PV == + { m \in msgsPrevote[r] 
\intersect evidence: m.id = mpc.id } + IN + Cardinality(PV) >= THRESHOLD2 + \/ /\ mpc.id = NilValue + /\ Cardinality(msgsPrevote[r]) >= THRESHOLD2 + +\* if a correct process has sent a precommit message in a round, it should +\* have sent a prevote +IfSentPrecommitThenSentPrevote == + \A r \in Rounds: + \A mpc \in msgsPrecommit[r]: + mpc.src \in Corr => + \E m \in msgsPrevote[r]: + m.src = mpc.src + +\* there is a locked round if a only if there is a locked value +LockedRoundIffLockedValue(p) == + (lockedRound[p] = NilRound) <=> (lockedValue[p] = NilValue) + +AllLockedRoundIffLockedValue == + \A p \in Corr: + LockedRoundIffLockedValue(p) + +\* when a process locked a round, it must have sent a precommit on the locked value. +IfLockedRoundThenSentCommit(p) == + lockedRound[p] /= NilRound + => \E r \in { rr \in Rounds: rr <= round[p] }: + \E m \in msgsPrecommit[r]: + m.src = p /\ m.id = lockedValue[p] + +AllIfLockedRoundThenSentCommit == + \A p \in Corr: + IfLockedRoundThenSentCommit(p) + +\* a process always locks the latest round, for which it has sent a PRECOMMIT +LatestPrecommitHasLockedRound(p) == + LET pPrecommits == + {mm \in UNION { msgsPrecommit[r]: r \in Rounds }: mm.src = p /\ mm.id /= NilValue } + IN + pPrecommits /= {} <: {MT} + => LET latest == + CHOOSE m \in pPrecommits: + \A m2 \in pPrecommits: + m2.round <= m.round + IN + /\ lockedRound[p] = latest.round + /\ lockedValue[p] = latest.id + +AllLatestPrecommitHasLockedRound == + \A p \in Corr: + LatestPrecommitHasLockedRound(p) + +\* Every correct process sends only one value or NilValue. +\* This test has quantifier alternation -- a threat to all decision procedures. +\* Luckily, the sets Corr and ValidValues are small. 
+NoEquivocationByCorrect(r, msgs) == + \A p \in Corr: + \E v \in ValidValues \union {NilValue}: + \A m \in msgs[r]: + \/ m.src /= p + \/ m.id = v + +\* a proposer nevers sends two values +ProposalsByProposer(r, msgs) == + \* if the proposer is not faulty, it sends only one value + \E v \in ValidValues: + \A m \in msgs[r]: + \/ m.src \in Faulty + \/ m.src = Proposer[r] /\ m.proposal = v + +AllNoEquivocationByCorrect == + \A r \in Rounds: + /\ ProposalsByProposer(r, msgsPropose) + /\ NoEquivocationByCorrect(r, msgsPrevote) + /\ NoEquivocationByCorrect(r, msgsPrecommit) + +\* construct the set of the message senders +Senders(M) == { m.src: m \in M } + +\* The final piece by Josef Widder: +\* if T + 1 processes precommit on the same value in a round, +\* then in the future rounds there are less than 2T + 1 prevotes for another value +PrecommitsLockValue == + \A r \in Rounds: + \A v \in ValidValues \union {NilValue}: + \/ LET Precommits == {m \in msgsPrecommit[r]: m.id = v} + IN + Cardinality(Senders(Precommits)) < THRESHOLD1 + \/ \A fr \in { rr \in Rounds: rr > r }: \* future rounds + \A w \in (ValuesOrNil) \ {v}: + LET Prevotes == {m \in msgsPrevote[fr]: m.id = w} + IN + Cardinality(Senders(Prevotes)) < THRESHOLD2 + +\* a combination of all lemmas +Inv == + /\ EvidenceContainsMessages + /\ AllNoFutureMessagesSent + /\ AllIfInPrevoteThenSentPrevote + /\ AllIfInPrecommitThenSentPrecommit + /\ AllIfInDecidedThenReceivedProposal + /\ AllIfInDecidedThenReceivedTwoThirds + /\ AllIfInDecidedThenValidDecision + /\ AllLockedRoundIffLockedValue + /\ AllIfLockedRoundThenSentCommit + /\ AllLatestPrecommitHasLockedRound + /\ AllIfSentPrevoteThenReceivedProposalOrTwoThirds + /\ IfSentPrecommitThenSentPrevote + /\ IfSentPrecommitThenReceivedTwoThirds + /\ AllNoEquivocationByCorrect + /\ PrecommitsLockValue + +\* this is the inductive invariant we like to check +TypedInv == TypeOK /\ Inv + +\* UNUSED FOR SAFETY +ValidRoundNotSmallerThanLockedRound(p) == + validRound[p] >= 
lockedRound[p] + +\* UNUSED FOR SAFETY +ValidRoundIffValidValue(p) == + (validRound[p] = NilRound) <=> (validValue[p] = NilValue) + +\* UNUSED FOR SAFETY +AllValidRoundIffValidValue == + \A p \in Corr: ValidRoundIffValidValue(p) + +\* if validRound is defined, then there are two-thirds of PREVOTEs +IfValidRoundThenTwoThirds(p) == + \/ validRound[p] = NilRound + \/ LET PV == { m \in msgsPrevote[validRound[p]]: m.id = validValue[p] } IN + Cardinality(PV) >= THRESHOLD2 + +\* UNUSED FOR SAFETY +AllIfValidRoundThenTwoThirds == + \A p \in Corr: IfValidRoundThenTwoThirds(p) + +\* a valid round can be only set to a valid value that was proposed earlier +IfValidRoundThenProposal(p) == + \/ validRound[p] = NilRound + \/ \E m \in msgsPropose[validRound[p]]: + m.proposal = validValue[p] + +\* UNUSED FOR SAFETY +AllIfValidRoundThenProposal == + \A p \in Corr: IfValidRoundThenProposal(p) + +(******************************** THEOREMS ***************************************) +(* Under this condition, the faulty processes can decide alone *) +FaultyQuorum == Cardinality(Faulty) >= THRESHOLD2 + +(* The standard condition of the Tendermint security model *) +LessThanThirdFaulty == N > 3 * T /\ Cardinality(Faulty) <= T + +(* + TypedInv is an inductive invariant, provided that there is no faulty quorum. + We run Apalache to prove this theorem only for fixed instances of 4 to 10 processes. + (We run Apalache manually, as it does not parse theorem statements at the moment.) + To get a parameterized argument, one has to use a theorem prover, e.g., TLAPS. + *) +THEOREM TypedInvIsInductive == + \/ FaultyQuorum \* if there are 2 * T + 1 faulty processes, we give up + \//\ Init => TypedInv + /\ TypedInv /\ [Next]_vars => TypedInv' + +(* + There should be no fork, when there are less than 1/3 faulty processes. 
+ *) +THEOREM AgreementWhenLessThanThirdFaulty == + LessThanThirdFaulty /\ TypedInv => Agreement + +(* + In a more general case, when there are less than 2/3 faulty processes, + there is either Agreement (no fork), or two scenarios exist: + equivocation by Faulty, or amnesia by Faulty. + *) +THEOREM AgreementOrFork == + ~FaultyQuorum /\ TypedInv => Accountability + +============================================================================= + diff --git a/spec/light-client/accountability/TendermintAccTrace_004_draft.tla b/spec/light-client/accountability/TendermintAccTrace_004_draft.tla new file mode 100644 index 0000000000..436c2275a6 --- /dev/null +++ b/spec/light-client/accountability/TendermintAccTrace_004_draft.tla @@ -0,0 +1,33 @@ +------------------ MODULE TendermintAccTrace_004_draft ------------------------- +(* + When Apalache is running too slow and we have an idea of a counterexample, + we use this module to restrict the behaviors only to certain actions. + Once the whole trace is replayed, the system deadlocks. + + Version 1. + + Igor Konnov, 2020. + *) + +EXTENDS Sequences, Apalache, TendermintAcc_004_draft + +\* a sequence of action names that should appear in the given order, +\* excluding "Init" +CONSTANT Trace + +VARIABLE toReplay + +TraceInit == + /\ toReplay = Trace + /\ action' := "Init" + /\ Init + +TraceNext == + /\ Len(toReplay) > 0 + /\ toReplay' = Tail(toReplay) + \* Here is the trick. 
We restrict the action to the expected one, + \* so the other actions will be pruned + /\ action' := Head(toReplay) + /\ Next + +================================================================================ diff --git a/spec/light-client/accountability/TendermintAcc_004_draft.tla b/spec/light-client/accountability/TendermintAcc_004_draft.tla new file mode 100644 index 0000000000..9d3a543d4e --- /dev/null +++ b/spec/light-client/accountability/TendermintAcc_004_draft.tla @@ -0,0 +1,474 @@ +-------------------- MODULE TendermintAcc_004_draft --------------------------- +(* + A TLA+ specification of a simplified Tendermint consensus, tuned for + fork accountability. The simplifications are as follows: + + - the protocol runs for one height, that is, it is one-shot consensus + + - this specification focuses on safety, so timeouts are modelled + with non-determinism + + - the proposer function is non-deterministic, no fairness is assumed + + - the messages by the faulty processes are injected right in the initial states + + - every process has the voting power of 1 + + - hashes are modelled as identity + + Having the above assumptions in mind, the specification follows the pseudo-code + of the Tendermint paper: https://arxiv.org/abs/1807.04938 + + Byzantine processes can demonstrate arbitrary behavior, including + no communication. We show that if agreement is violated, then the Byzantine + processes demonstrate one of the two behaviours: + + - Equivocation: a Byzantine process may send two different values + in the same round. + + - Amnesia: a Byzantine process may lock a value without unlocking + the previous value that it has locked in the past. + + * Version 4. Remove defective processes, fix bugs, collect global evidence. + * Version 3. Modular and parameterized definitions. + * Version 2. Bugfixes in the spec and an inductive invariant. + * Version 1. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. 
+ *) + +EXTENDS Integers, FiniteSets + +(********************* PROTOCOL PARAMETERS **********************************) +CONSTANTS + Corr, \* the set of correct processes + Faulty, \* the set of Byzantine processes, may be empty + N, \* the total number of processes: correct, defective, and Byzantine + T, \* an upper bound on the number of Byzantine processes + ValidValues, \* the set of valid values, proposed both by correct and faulty + InvalidValues, \* the set of invalid values, never proposed by the correct ones + MaxRound, \* the maximal round number + Proposer \* the proposer function from 0..NRounds to 1..N + +ASSUME(N = Cardinality(Corr \union Faulty)) + +(*************************** DEFINITIONS ************************************) +AllProcs == Corr \union Faulty \* the set of all processes +Rounds == 0..MaxRound \* the set of potential rounds +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +RoundsOrNil == Rounds \union {NilRound} +Values == ValidValues \union InvalidValues \* the set of all values +NilValue == "None" \* a special value for a nil round, outside of Values +ValuesOrNil == Values \union {NilValue} + +\* a value hash is modeled as identity +Id(v) == v + +\* The validity predicate +IsValid(v) == v \in ValidValues + +\* the two thresholds that are used in the algorithm +THRESHOLD1 == T + 1 \* at least one process is not faulty +THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +(********************* TYPE ANNOTATIONS FOR APALACHE **************************) +\* the operator for type annotations +a <: b == a + +\* the type of message records +MT == [type |-> STRING, src |-> STRING, round |-> Int, + proposal |-> STRING, validRound |-> Int, id |-> STRING] + +\* a type annotation for a message +AsMsg(m) == m <: MT +\* a type annotation for a set of messages +SetOfMsgs(S) == S <: {MT} +\* a type annotation for an empty set of messages +EmptyMsgSet == SetOfMsgs({}) + +(********************* PROTOCOL STATE 
VARIABLES ******************************) +VARIABLES + round, \* a process round number: Corr -> Rounds + step, \* a process step: Corr -> { "PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" } + decision, \* process decision: Corr -> ValuesOrNil + lockedValue, \* a locked value: Corr -> ValuesOrNil + lockedRound, \* a locked round: Corr -> RoundsOrNil + validValue, \* a valid value: Corr -> ValuesOrNil + validRound \* a valid round: Corr -> RoundsOrNil + +\* book-keeping variables +VARIABLES + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + evidence, \* the messages that were used by the correct processes to make transitions + action \* we use this variable to see which action was taken + +(* to see a type invariant, check TendermintAccInv3 *) + +\* a handy definition used in UNCHANGED +vars == <> + +(********************* PROTOCOL INITIALIZATION ******************************) +FaultyProposals(r) == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: {r}, proposal: Values, validRound: RoundsOrNil]) + +AllFaultyProposals == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: Rounds, proposal: Values, validRound: RoundsOrNil]) + +FaultyPrevotes(r) == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: {r}, id: Values]) + +AllFaultyPrevotes == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: Rounds, id: Values]) + +FaultyPrecommits(r) == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: {r}, id: Values]) + +AllFaultyPrecommits == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: Rounds, id: Values]) + +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* The initial states of the protocol. Some faults can be in the system already. 
+Init == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence = EmptyMsgSet + /\ action' = "Init" + +(************************ MESSAGE PASSING ********************************) +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET newMsg == + AsMsg([type |-> "PROPOSAL", src |-> pSrc, round |-> pRound, + proposal |-> pProposal, validRound |-> pValidRound]) + IN + msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +BroadcastPrevote(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PREVOTE", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +BroadcastPrecommit(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PRECOMMIT", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* lines 12-13 +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + +\* lines 14-19, a proposal may be sent later +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct 
processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \E v \in ValidValues: + LET proposal == IF validValue[p] /= NilValue THEN validValue[p] ELSE v IN + BroadcastProposal(p, round[p], proposal, validRound[p]) + /\ UNCHANGED <> + /\ action' = "InsertProposal" + +\* lines 22-27 +UponProposalInPropose(p) == + \E v \in Values: + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> v, validRound |-> NilRound]) IN + /\ msg \in msgsPropose[round[p]] \* line 22 + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF IsValid(v) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(v) + ELSE NilValue + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +UponProposalInProposeAndPrevote(p) == + \E v \in Values, vr \in Rounds: + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < round[p] \* line 28, the while part + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> v, validRound |-> vr]) + IN + /\ msg \in msgsPropose[round[p]] \* line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(v) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(v) + ELSE NilValue + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInProposeAndPrevote" + + \* lines 34-35 + lines 61-64 (onTimeoutPrevote) +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the 
unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilValue) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, vr \in RoundsOrNil: + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> v, validRound |-> vr]) IN + /\ msg \in msgsPropose[round[p]] \* line 36 + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = round[p]] + /\ BroadcastPrecommit(p, round[p], Id(v)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = v] + /\ validRound' = [validRound EXCEPT ![p] = round[p]] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 + /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilValue \* line 49 + /\ \E v \in ValidValues (* line 50*) , r \in Rounds, vr \in RoundsOrNil: + /\ LET msg == AsMsg([type |-> "PROPOSAL", src |-> 
Proposer[r], + round |-> r, proposal |-> v, validRound |-> vr]) IN + /\ msg \in msgsPropose[r] \* line 49 + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = v] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision. + /\ step' = [step EXCEPT ![p] = "DECIDED"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrecommitNoDecision" + +\* the actions below are not essential for safety, but added for completeness + +\* lines 20-21 + 57-60 +OnTimeoutPropose(p) == + /\ step[p] = "PROPOSE" + /\ p /= Proposer[round[p]] + /\ BroadcastPrevote(p, round[p], NilValue) + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "OnTimeoutPropose" + +\* lines 44-46 +OnQuorumOfNilPrevotes(p) == + /\ step[p] = "PREVOTE" + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilValue) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union evidence + /\ BroadcastPrecommit(p, round[p], Id(NilValue)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "OnQuorumOfNilPrevotes" + +\* lines 55-56 +OnRoundCatchup(p) == + \E r \in {rr \in Rounds: rr > round[p]}: + LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN + \E MyEvidence \in SUBSET RoundMsgs: + LET Faster == { m.src: m \in MyEvidence } IN + /\ Cardinality(Faster) >= THRESHOLD1 + /\ evidence' = MyEvidence \union evidence + /\ StartRound(p, r) + /\ UNCHANGED <> + /\ action' = "OnRoundCatchup" + +(* + * A system transition. In this specificatiom, the system may eventually deadlock, + * e.g., when all processes decide. This is expected behavior, as we focus on safety. 
+ *) +Next == + \E p \in Corr: + \/ InsertProposal(p) + \/ UponProposalInPropose(p) + \/ UponProposalInProposeAndPrevote(p) + \/ UponQuorumOfPrevotesAny(p) + \/ UponProposalInPrevoteOrCommitAndPrevote(p) + \/ UponQuorumOfPrecommitsAny(p) + \/ UponProposalInPrecommitNoDecision(p) + \* the actions below are not essential for safety, but added for completeness + \/ OnTimeoutPropose(p) + \/ OnQuorumOfNilPrevotes(p) + \/ OnRoundCatchup(p) + + +(**************************** FORK SCENARIOS ***************************) + +\* equivocation by a process p +EquivocationBy(p) == + \E m1, m2 \in evidence: + /\ m1 /= m2 + /\ m1.src = p + /\ m2.src = p + /\ m1.round = m2.round + /\ m1.type = m2.type + +\* amnesic behavior by a process p +AmnesiaBy(p) == + \E r1, r2 \in Rounds: + /\ r1 < r2 + /\ \E v1, v2 \in ValidValues: + /\ v1 /= v2 + /\ AsMsg([type |-> "PRECOMMIT", src |-> p, + round |-> r1, id |-> Id(v1)]) \in evidence + /\ AsMsg([type |-> "PREVOTE", src |-> p, + round |-> r2, id |-> Id(v2)]) \in evidence + /\ \A r \in { rnd \in Rounds: r1 <= rnd /\ rnd < r2 }: + LET prevotes == + { m \in evidence: + m.type = "PREVOTE" /\ m.round = r /\ m.id = Id(v2) } + IN + Cardinality(prevotes) < THRESHOLD2 + +(******************************** PROPERTIES ***************************************) + +\* the safety property -- agreement +Agreement == + \A p, q \in Corr: + \/ decision[p] = NilValue + \/ decision[q] = NilValue + \/ decision[p] = decision[q] + +\* the protocol validity +Validity == + \A p \in Corr: decision[p] \in ValidValues \union {NilValue} + +(* + The protocol safety. Two cases are possible: + 1. There is no fork, that is, Agreement holds true. + 2. A subset of faulty processes demonstrates equivocation or amnesia. 
+ *) +Accountability == + \/ Agreement + \/ \E Detectable \in SUBSET Faulty: + /\ Cardinality(Detectable) >= THRESHOLD1 + /\ \A p \in Detectable: + EquivocationBy(p) \/ AmnesiaBy(p) + +(****************** FALSE INVARIANTS TO PRODUCE EXAMPLES ***********************) + +\* This property is violated. You can check it to see how amnesic behavior +\* appears in the evidence variable. +NoAmnesia == + \A p \in Faulty: ~AmnesiaBy(p) + +\* This property is violated. You can check it to see an example of equivocation. +NoEquivocation == + \A p \in Faulty: ~EquivocationBy(p) + +\* This property is violated. You can check it to see an example of agreement. +\* It is not exactly ~Agreement, as we do not want to see the states where +\* decision[p] = NilValue +NoAgreement == + \A p, q \in Corr: + (p /= q /\ decision[p] /= NilValue /\ decision[q] /= NilValue) + => decision[p] /= decision[q] + +\* Either agreement holds, or the faulty processes indeed demonstrate amnesia. +\* This property is violated. A counterexample should demonstrate equivocation. +AgreementOrAmnesia == + Agreement \/ (\A p \in Faulty: AmnesiaBy(p)) + +\* We expect this property to be violated. It shows us a protocol run, +\* where one faulty process demonstrates amnesia without equivocation. +\* However, the absence of amnesia +\* is a tough constraint for Apalache. It has not reported a counterexample +\* for n=4,f=2, length <= 5. +ShowMeAmnesiaWithoutEquivocation == + (~Agreement /\ \E p \in Faulty: ~EquivocationBy(p)) + => \A p \in Faulty: ~AmnesiaBy(p) + +\* This property is violated on n=4,f=2, length=4 in less than 10 min. +\* Two faulty processes may demonstrate amnesia without equivocation. +AmnesiaImpliesEquivocation == + (\E p \in Faulty: AmnesiaBy(p)) => (\E q \in Faulty: EquivocationBy(q)) + +(* + This property is violated. You can check it to see that all correct processes + may reach MaxRound without making a decision. 
+ *) +NeverUndecidedInMaxRound == + LET AllInMax == \A p \in Corr: round[p] = MaxRound + AllDecided == \A p \in Corr: decision[p] /= NilValue + IN + AllInMax => AllDecided + +============================================================================= + diff --git a/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg b/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg new file mode 100644 index 0000000000..5821418da4 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg @@ -0,0 +1,1063 @@ + + + + + + + + + 2020-12-11T20:07:39.617177 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spec/light-client/accountability/results/001indinv-apalache-mem.svg b/spec/light-client/accountability/results/001indinv-apalache-mem.svg new file mode 100644 index 0000000000..dc7213eaed --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-mem.svg @@ -0,0 +1,1141 @@ + + + + + + + + + 2020-12-11T20:07:40.321995 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spec/light-client/accountability/results/001indinv-apalache-ncells.svg b/spec/light-client/accountability/results/001indinv-apalache-ncells.svg new file mode 100644 index 0000000000..20c49f4f19 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-ncells.svg @@ -0,0 +1,1015 @@ + + + + + + + + + 2020-12-11T20:07:40.804886 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg b/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg new file mode 100644 index 0000000000..86d19143bf --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg @@ -0,0 +1,1133 @@ + + + + + + + + + 2020-12-11T20:07:41.276750 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spec/light-client/accountability/results/001indinv-apalache-report.md b/spec/light-client/accountability/results/001indinv-apalache-report.md new file mode 100644 index 0000000000..0c14742c53 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-report.md @@ -0,0 +1,61 @@ +# Results of 001indinv-apalache + +## 1. Awesome plots + +### 1.1. Time (logarithmic scale) + +![time-log](001indinv-apalache-time-log.svg "Time Log") + +### 1.2. Time (linear) + +![time-log](001indinv-apalache-time.svg "Time Log") + +### 1.3. Memory (logarithmic scale) + +![mem-log](001indinv-apalache-mem-log.svg "Memory Log") + +### 1.4. Memory (linear) + +![mem](001indinv-apalache-mem.svg "Memory Log") + +### 1.5. 
Number of arena cells (linear) + +![ncells](001indinv-apalache-ncells.svg "Number of arena cells") + +### 1.6. Number of SMT clauses (linear) + +![nclauses](001indinv-apalache-nclauses.svg "Number of SMT clauses") + +## 2. Input parameters + +no | filename | tool | timeout | init | inv | next | args +----|----------------|------------|-----------|------------|------------------|--------|------------------------------ +1 | MC_n4_f1.tla | apalache | 10h | TypedInv | TypedInv | | --length=1 --cinit=ConstInit +2 | MC_n4_f2.tla | apalache | 10h | TypedInv | TypedInv | | --length=1 --cinit=ConstInit +3 | MC_n5_f1.tla | apalache | 10h | TypedInv | TypedInv | | --length=1 --cinit=ConstInit +4 | MC_n5_f2.tla | apalache | 10h | TypedInv | TypedInv | | --length=1 --cinit=ConstInit +5 | MC_n4_f1.tla | apalache | 20h | Init | TypedInv | | --length=0 --cinit=ConstInit +6 | MC_n4_f2.tla | apalache | 20h | Init | TypedInv | | --length=0 --cinit=ConstInit +7 | MC_n5_f1.tla | apalache | 20h | Init | TypedInv | | --length=0 --cinit=ConstInit +8 | MC_n5_f2.tla | apalache | 20h | Init | TypedInv | | --length=0 --cinit=ConstInit +9 | MC_n4_f1.tla | apalache | 20h | TypedInv | Agreement | | --length=0 --cinit=ConstInit +10 | MC_n4_f2.tla | apalache | 20h | TypedInv | Accountability | | --length=0 --cinit=ConstInit +11 | MC_n5_f1.tla | apalache | 20h | TypedInv | Agreement | | --length=0 --cinit=ConstInit +12 | MC_n5_f2.tla | apalache | 20h | TypedInv | Accountability | | --length=0 --cinit=ConstInit + +## 3. 
Detailed results: 001indinv-apalache-unstable.csv + +01:no | 02:tool | 03:status | 04:time_sec | 05:depth | 05:mem_kb | 10:ninit_trans | 11:ninit_trans | 12:ncells | 13:nclauses | 14:navg_clause_len +-------|------------|-------------|---------------|------------|-------------|------------------|------------------|-------------|---------------|-------------------- +1 | apalache | NoError | 11m | 1 | 3.0GB | 0 | 0 | 217K | 1.0M | 89 +2 | apalache | NoError | 11m | 1 | 3.0GB | 0 | 0 | 207K | 1.0M | 88 +3 | apalache | NoError | 16m | 1 | 4.0GB | 0 | 0 | 311K | 2.0M | 101 +4 | apalache | NoError | 14m | 1 | 3.0GB | 0 | 0 | 290K | 1.0M | 103 +5 | apalache | NoError | 9s | 0 | 563MB | 0 | 0 | 2.0K | 14K | 42 +6 | apalache | NoError | 10s | 0 | 657MB | 0 | 0 | 2.0K | 28K | 43 +7 | apalache | NoError | 8s | 0 | 635MB | 0 | 0 | 2.0K | 17K | 44 +8 | apalache | NoError | 10s | 0 | 667MB | 0 | 0 | 3.0K | 32K | 45 +9 | apalache | NoError | 5m05s | 0 | 2.0GB | 0 | 0 | 196K | 889K | 108 +10 | apalache | NoError | 8m08s | 0 | 6.0GB | 0 | 0 | 2.0M | 3.0M | 34 +11 | apalache | NoError | 9m09s | 0 | 3.0GB | 0 | 0 | 284K | 1.0M | 128 +12 | apalache | NoError | 14m | 0 | 7.0GB | 0 | 0 | 4.0M | 5.0M | 38 diff --git a/spec/light-client/accountability/results/001indinv-apalache-time-log.svg b/spec/light-client/accountability/results/001indinv-apalache-time-log.svg new file mode 100644 index 0000000000..458d67c6c3 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-time-log.svg @@ -0,0 +1,1134 @@ + + + + + + + + + 2020-12-11T20:07:38.347583 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spec/light-client/accountability/results/001indinv-apalache-time.svg b/spec/light-client/accountability/results/001indinv-apalache-time.svg new file mode 100644 index 0000000000..a5db5a8b59 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-time.svg @@ -0,0 +1,957 @@ + + + + + + + + + 2020-12-11T20:07:39.136767 + image/svg+xml + + + Matplotlib v3.3.3, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spec/light-client/accountability/results/001indinv-apalache-unstable.csv b/spec/light-client/accountability/results/001indinv-apalache-unstable.csv new file mode 100644 index 
0000000000..db1a060938 --- /dev/null +++ b/spec/light-client/accountability/results/001indinv-apalache-unstable.csv @@ -0,0 +1,13 @@ +01:no,02:tool,03:status,04:time_sec,05:depth,05:mem_kb,10:ninit_trans,11:ninit_trans,12:ncells,13:nclauses,14:navg_clause_len +1,apalache,NoError,704,1,3215424,0,0,217385,1305718,89 +2,apalache,NoError,699,1,3195020,0,0,207969,1341979,88 +3,apalache,NoError,1018,1,4277060,0,0,311798,2028544,101 +4,apalache,NoError,889,1,4080012,0,0,290989,1951616,103 +5,apalache,NoError,9,0,577100,0,0,2045,14655,42 +6,apalache,NoError,10,0,673772,0,0,2913,28213,43 +7,apalache,NoError,8,0,651008,0,0,2214,17077,44 +8,apalache,NoError,10,0,683188,0,0,3082,32651,45 +9,apalache,NoError,340,0,3053848,0,0,196943,889859,108 +10,apalache,NoError,517,0,6424536,0,0,2856378,3802779,34 +11,apalache,NoError,587,0,4028516,0,0,284369,1343296,128 +12,apalache,NoError,880,0,7881148,0,0,4382556,5778072,38 diff --git a/spec/light-client/accountability/run.sh b/spec/light-client/accountability/run.sh new file mode 100755 index 0000000000..75e57a5f86 --- /dev/null +++ b/spec/light-client/accountability/run.sh @@ -0,0 +1,9 @@ +#!/bin/sh +# +# The script to run all experiments at once + +export SCRIPTS_DIR=~/devl/apalache-tests/scripts +export BUILDS="unstable" +export BENCHMARK=001indinv-apalache +export RUN_SCRIPT=./run-all.sh # alternatively, use ./run-parallel.sh +make -e -f ~/devl/apalache-tests/Makefile.common diff --git a/spec/light-client/assets/light-node-image.png b/spec/light-client/assets/light-node-image.png new file mode 100644 index 0000000000..f0b93c6e41 Binary files /dev/null and b/spec/light-client/assets/light-node-image.png differ diff --git a/spec/light-client/attacks/Blockchain_003_draft.tla b/spec/light-client/attacks/Blockchain_003_draft.tla new file mode 100644 index 0000000000..fb6e6e8e87 --- /dev/null +++ b/spec/light-client/attacks/Blockchain_003_draft.tla @@ -0,0 +1,166 @@ +------------------------ MODULE Blockchain_003_draft 
----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). + *) +EXTENDS Integers, FiniteSets, Apalache + +Min(a, b) == IF a < b THEN a ELSE b + +CONSTANT + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + ULTIMATE_HEIGHT, + (* a maximal height that can be ever reached (modelling artifact) *) + TRUSTING_PERIOD + (* the period within which the validators are trusted *) + +Heights == 1..ULTIMATE_HEIGHT (* possible heights *) + +(* A commit is just a set of nodes who have committed the block *) +Commits == SUBSET AllNodes + +(* The set of all block headers that can be on the blockchain. + This is a simplified version of the Block data structure in the actual implementation. *) +BlockHeaders == [ + height: Heights, + \* the block height + time: Int, + \* the block timestamp in some integer units + lastCommit: Commits, + \* the nodes who have voted on the previous block, the set itself instead of a hash + (* in the implementation, only the hashes of V and NextV are stored in a block, + as V and NextV are stored in the application state *) + VS: SUBSET AllNodes, + \* the validators of this bloc. We store the validators instead of the hash. + NextVS: SUBSET AllNodes + \* the validators of the next block. We store the next validators instead of the hash. +] + +(* A signed header is just a header together with a set of commits *) +LightBlocks == [header: BlockHeaders, Commits: Commits] + +VARIABLES + refClock, + (* the current global time in integer units as perceived by the reference chain *) + blockchain, + (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *) + Faulty + (* A set of faulty nodes, which can act as validators. 
We assume that the set + of faulty processes is non-decreasing. If a process has recovered, it should + connect using a different id. *) + +(* all variables, to be used with UNCHANGED *) +vars == <> + +(* The set of all correct nodes in a state *) +Corr == AllNodes \ Faulty + +(* APALACHE annotations *) +a <: b == a \* type annotation + +NT == STRING +NodeSet(S) == S <: {NT} +EmptyNodeSet == NodeSet({}) + +BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}] + +LBT == [header |-> BT, Commits |-> {NT}] +(* end of APALACHE annotations *) + +(****************************** BLOCKCHAIN ************************************) + +(* the header is still within the trusting period *) +InTrustingPeriod(header) == + refClock < header.time + TRUSTING_PERIOD + +(* + Given a function pVotingPower \in D -> Powers for some D \subseteq AllNodes + and pNodes \subseteq D, test whether the set pNodes \subseteq AllNodes has + more than 2/3 of voting power among the nodes in D. + *) +TwoThirds(pVS, pNodes) == + LET TP == Cardinality(pVS) + SP == Cardinality(pVS \intersect pNodes) + IN + 3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP + +(* + Given a set of FaultyNodes, test whether the voting power of the correct nodes in D + is more than 2/3 of the voting power of the faulty nodes in D. + + Parameters: + - pFaultyNodes is a set of nodes that are considered faulty + - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc. 
+ - pMaxFaultRatio is a pair <> that limits the ratio a / b of the faulty + validators from above (exclusive) + *) +FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) == + LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pNodes + CN == pVS \ pFaultyNodes \* correct nodes in pNodes + CP == Cardinality(CN) \* power of the correct nodes + FP == Cardinality(FN) \* power of the faulty nodes + IN + \* CP + FP = TP is the total voting power + LET TP == CP + FP IN + FP * maxRatio[2] < TP * maxRatio[1] + +(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *) +IsLightBlockAllowedByDigitalSignatures(ht, block) == + \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe) + \/ /\ block.Commits \subseteq Faulty + /\ block.header.height = ht + /\ block.header.time >= 0 \* signed only by faulty + +(* + Initialize the blockchain to the ultimate height right in the initial states. + We pick the faulty validators statically, but that should not affect the light client. 
+ + Parameters: + - pMaxFaultyRatioExclusive is a pair <> that bound the number of + faulty validators in each block by the ratio a / b (exclusive) + *) +InitToHeight(pMaxFaultyRatioExclusive) == + /\ \E Nodes \in SUBSET AllNodes: + Faulty := Nodes \* pick a subset of nodes to be faulty + \* pick the validator sets and last commits + /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]: + \E timestamp \in [Heights -> Int]: + \* refClock is at least as early as the timestamp in the last block + /\ \E tm \in Int: + refClock := tm /\ tm >= timestamp[ULTIMATE_HEIGHT] + \* the genesis starts on day 1 + /\ timestamp[1] = 1 + /\ vs[1] = AllNodes + /\ lastCommit[1] = EmptyNodeSet + /\ \A h \in Heights \ {1}: + /\ lastCommit[h] \subseteq vs[h - 1] \* the non-validators cannot commit + /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes + \* the faulty validators have the power below the threshold + /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive) + /\ timestamp[h] > timestamp[h - 1] \* the time grows monotonically + /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast + \* form the block chain out of validator sets and commits (this makes apalache faster) + /\ blockchain := [h \in Heights |-> + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes, + lastCommit |-> lastCommit[h]] + ] \****** + +(********************* BLOCKCHAIN ACTIONS ********************************) +(* + Advance the clock by zero or more time units. 
+ *) +AdvanceTime == + /\ \E tm \in Int: tm >= refClock /\ refClock' = tm + /\ UNCHANGED <> + +============================================================================= +\* Modification History +\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor +\* Created Fri Oct 11 15:45:11 CEST 2019 by igor diff --git a/spec/light-client/attacks/Isolation_001_draft.tla b/spec/light-client/attacks/Isolation_001_draft.tla new file mode 100644 index 0000000000..7406b89422 --- /dev/null +++ b/spec/light-client/attacks/Isolation_001_draft.tla @@ -0,0 +1,159 @@ +----------------------- MODULE Isolation_001_draft ---------------------------- +(** + * The specification of the attackers isolation at full node, + * when it has received an evidence from the light client. + * We check that the isolation spec produces a set of validators + * that have more than 1/3 of the voting power. + * + * It follows the English specification: + * + * https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/attacks/isolate-attackers_001_draft.md + * + * The assumptions made in this specification: + * + * - the voting power of every validator is 1 + * (add more validators, if you need more validators) + * + * - Tendermint security model is violated + * (there are Byzantine validators who signed a conflicting block) + * + * Igor Konnov, Zarko Milosevic, Josef Widder, Informal Systems, 2020 + *) + + +EXTENDS Integers, FiniteSets, Apalache + +\* algorithm parameters +CONSTANTS + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + COMMON_HEIGHT, + (* an index of the block header that two peers agree upon *) + CONFLICT_HEIGHT, + (* an index of the block header that two peers disagree upon *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + FAULTY_RATIO + (* a pair <> that limits that ratio of faulty validator in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. 
*) + +VARIABLES + blockchain, (* the chain at the full node *) + refClock, (* the reference clock at the full node *) + Faulty, (* the set of faulty validators *) + conflictingBlock, (* an evidence that two peers reported conflicting blocks *) + state, (* the state of the attack isolation machine at the full node *) + attackers (* the set of the identified attackers *) + +vars == <> + +\* instantiate the chain at the full node +ULTIMATE_HEIGHT == CONFLICT_HEIGHT + 1 +BC == INSTANCE Blockchain_003_draft + +\* use the light client API +TRUSTING_HEIGHT == COMMON_HEIGHT +TARGET_HEIGHT == CONFLICT_HEIGHT + +LC == INSTANCE LCVerificationApi_003_draft + WITH localClock <- refClock, REAL_CLOCK_DRIFT <- 0, CLOCK_DRIFT <- 0 + +\* old-style type annotations in apalache +a <: b == a + +\* [LCAI-NONVALID-OUTPUT.1::TLA.1] +ViolatesValidity(header1, header2) == + \/ header1.VS /= header2.VS + \/ header1.NextVS /= header2.NextVS + \/ header1.height /= header2.height + \/ header1.time /= header2.time + (* The English specification also checks the fields that we do not have + at this level of abstraction: + - header1.ConsensusHash != header2.ConsensusHash or + - header1.AppHash != header2.AppHash or + - header1.LastResultsHash header2 != ev.LastResultsHash + *) + +Init == + /\ state := "init" + \* Pick an arbitrary blockchain from 1 to COMMON_HEIGHT + 1. + /\ BC!InitToHeight(FAULTY_RATIO) \* initializes blockchain, Faulty, and refClock + /\ attackers := {} <: {STRING} \* attackers are unknown + \* Receive an arbitrary evidence. + \* Instantiate the light block fields one by one, + \* to avoid combinatorial explosion of records. 
+ /\ \E time \in Int: + \E VS, NextVS, lastCommit, Commits \in SUBSET AllNodes: + LET conflicting == + [ Commits |-> Commits, + header |-> + [height |-> CONFLICT_HEIGHT, + time |-> time, + VS |-> VS, + NextVS |-> NextVS, + lastCommit |-> lastCommit] ] + IN + LET refBlock == [ header |-> blockchain[COMMON_HEIGHT], + Commits |-> blockchain[COMMON_HEIGHT + 1].lastCommit ] + IN + /\ "SUCCESS" = LC!ValidAndVerifiedUntimed(refBlock, conflicting) + \* More than third of next validators in the common reference block + \* is faulty. That is a precondition for a fork. + /\ 3 * Cardinality(Faulty \intersect refBlock.header.NextVS) + > Cardinality(refBlock.header.NextVS) + \* correct validators cannot sign an invalid block + /\ ViolatesValidity(conflicting.header, refBlock.header) + => conflicting.Commits \subseteq Faulty + /\ conflictingBlock := conflicting + + +\* This is a specification of isolateMisbehavingProcesses. +\* +\* [LCAI-FUNC-MAIN.1::TLA.1] +Next == + /\ state = "init" + \* Extract the rounds from the reference block and the conflicting block. + \* In this specification, we just pick rounds non-deterministically. + \* The English specification calls RoundOf on the blocks. 
+ /\ \E referenceRound, evidenceRound \in Int: + /\ referenceRound >= 0 /\ evidenceRound >= 0 + /\ LET reference == blockchain[CONFLICT_HEIGHT] + referenceCommit == blockchain[CONFLICT_HEIGHT + 1].lastCommit + evidenceHeader == conflictingBlock.header + evidenceCommit == conflictingBlock.Commits + IN + IF ViolatesValidity(reference, evidenceHeader) + THEN /\ attackers' := blockchain[COMMON_HEIGHT].NextVS \intersect evidenceCommit + /\ state' := "Lunatic" + ELSE IF referenceRound = evidenceRound + THEN /\ attackers' := referenceCommit \intersect evidenceCommit + /\ state' := "Equivocation" + ELSE + \* This property is shown in property + \* Accountability of TendermintAcc3.tla + /\ state' := "Amnesia" + /\ \E Attackers \in SUBSET (Faulty \intersect reference.VS): + /\ 3 * Cardinality(Attackers) > Cardinality(reference.VS) + /\ attackers' := Attackers + /\ blockchain' := blockchain + /\ refClock' := refClock + /\ Faulty' := Faulty + /\ conflictingBlock' := conflictingBlock + +(********************************** INVARIANTS *******************************) + +\* This invariant ensure that the attackers have +\* more than 1/3 of the voting power +\* +\* [LCAI-INV-Output.1::TLA-DETECTION-COMPLETENESS.1] +DetectionCompleteness == + state /= "init" => + 3 * Cardinality(attackers) > Cardinality(blockchain[CONFLICT_HEIGHT].VS) + +\* This invariant ensures that only the faulty validators are detected +\* +\* [LCAI-INV-Output.1::TLA-DETECTION-ACCURACY.1] +DetectionAccuracy == + attackers \subseteq Faulty + +============================================================================== diff --git a/spec/light-client/attacks/LCVerificationApi_003_draft.tla b/spec/light-client/attacks/LCVerificationApi_003_draft.tla new file mode 100644 index 0000000000..909eab92b8 --- /dev/null +++ b/spec/light-client/attacks/LCVerificationApi_003_draft.tla @@ -0,0 +1,192 @@ +-------------------- MODULE LCVerificationApi_003_draft -------------------------- +(** + * The common interface of the 
light client verification and detection. + *) +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + CLOCK_DRIFT, + (* the assumed precision of the clock *) + REAL_CLOCK_DRIFT, + (* the actual clock drift, which under normal circumstances should not + be larger than CLOCK_DRIFT (otherwise, there will be a bug) *) + FAULTY_RATIO + (* a pair <> that limits that ratio of faulty validator in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. *) + +VARIABLES + localClock (* current time as measured by the light client *) + +(* the header is still within the trusting period *) +InTrustingPeriodLocal(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT + +(* the header is still within the trusting period, even if the clock can go backwards *) +InTrustingPeriodLocalSurely(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT + +(* ensure that the local clock does not drift far away from the global clock *) +IsLocalClockWithinDrift(local, global) == + /\ global - REAL_CLOCK_DRIFT <= local + /\ local <= global + REAL_CLOCK_DRIFT + +(** + * Check that the commits in an untrusted block form 1/3 of the next validators + * in a trusted header. + *) +SignedByOneThirdOfTrusted(trusted, untrusted) == + LET TP == Cardinality(trusted.header.NextVS) + SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS) + IN + 3 * SP > TP + +(** + The first part of the precondition of ValidAndVerified, which does not take + the current time into account. 
+ + [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1] + *) +ValidAndVerifiedPreUntimed(trusted, untrusted) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ thdr.height < uhdr.height + \* the trusted block has been created earlier + /\ thdr.time < uhdr.time + /\ untrusted.Commits \subseteq uhdr.VS + /\ LET TP == Cardinality(uhdr.VS) + SP == Cardinality(untrusted.Commits) + IN + 3 * SP > 2 * TP + /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS + (* As we do not have explicit hashes we ignore these three checks of the English spec: + + 1. "trusted.Commit is a commit is for the header trusted.Header, + i.e. it contains the correct hash of the header". + 2. untrusted.Validators = hash(untrusted.Header.Validators) + 3. untrusted.NextValidators = hash(untrusted.Header.NextValidators) + *) + +(** + Check the precondition of ValidAndVerified, including the time checks. + + [LCV-FUNC-VALID.1::TLA-PRE.1] + *) +ValidAndVerifiedPre(trusted, untrusted, checkFuture) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ InTrustingPeriodLocal(thdr) + \* The untrusted block is not from the future (modulo clock drift). + \* Do the check, if it is required. + /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT + /\ ValidAndVerifiedPreUntimed(trusted, untrusted) + + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. + This test does take current time into account, but only looks at the block structure. + + [LCV-FUNC-VALID.1::TLA-UNTIMED.1] + *) +ValidAndVerifiedUntimed(trusted, untrusted) == + IF ~ValidAndVerifiedPreUntimed(trusted, untrusted) + THEN "INVALID" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. 
+ + [LCV-FUNC-VALID.1::TLA.1] + *) +ValidAndVerified(trusted, untrusted, checkFuture) == + IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture) + THEN "INVALID" + ELSE IF ~InTrustingPeriodLocal(untrusted.header) + (* We leave the following test for the documentation purposes. + The implementation should do this test, as signature verification may be slow. + In the TLA+ specification, ValidAndVerified happens in no time. + *) + THEN "FAILED_TRUSTING_PERIOD" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + + +(** + The invariant of the light store that is not related to the blockchain + *) +LightStoreInv(fetchedLightBlocks, lightBlockStatus) == + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] /= "StateVerified" + \/ lightBlockStatus[rh] /= "StateVerified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ LET lhdr == fetchedLightBlocks[lh] + rhdr == fetchedLightBlocks[rh] + IN + \* we can verify the right one using the left one + "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr) + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + * When the light client terminates, there are no failed blocks. + * (Otherwise, someone lied to us.) 
+ *) +NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +(** + The expected post-condition of VerifyToTarget. + *) +VerifyToTargetPost(blockchain, isPeerCorrect, + fetchedLightBlocks, lightBlockStatus, + trustedHeight, targetHeight, finalState) == + LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN + \* The light client is not lying us on the trusted block. + \* It is straightforward to detect. + /\ lightBlockStatus[trustedHeight] = "StateVerified" + /\ trustedHeight \in DOMAIN fetchedLightBlocks + /\ trustedHeader = blockchain[trustedHeight] + \* the invariants we have found in the light client verification + \* there is a problem with trusting period + /\ isPeerCorrect + => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) + \* a correct peer should fail the light client, + \* if the trusted block is in the trusting period + /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader) + => finalState = "finishedSuccess" + /\ finalState = "finishedSuccess" => + /\ lightBlockStatus[targetHeight] = "StateVerified" + /\ targetHeight \in DOMAIN fetchedLightBlocks + /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) + /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus) + + +================================================================================== diff --git a/spec/light-client/attacks/MC_5_3.tla b/spec/light-client/attacks/MC_5_3.tla new file mode 100644 index 0000000000..552de49aee --- /dev/null +++ b/spec/light-client/attacks/MC_5_3.tla @@ -0,0 +1,18 @@ +------------------------- MODULE MC_5_3 ------------------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +COMMON_HEIGHT == 1 +CONFLICT_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +FAULTY_RATIO == <<1, 2>> \* < 1 / 2 faulty validators + +VARIABLES + blockchain, \* the reference blockchain + refClock, \* 
current time in the reference blockchain + Faulty, \* the set of faulty validators + state, \* the state of the light client detector + conflictingBlock, \* an evidence that two peers reported conflicting blocks + attackers + +INSTANCE Isolation_001_draft +============================================================================ diff --git a/spec/light-client/attacks/isolate-attackers_001_draft.md b/spec/light-client/attacks/isolate-attackers_001_draft.md new file mode 100644 index 0000000000..e4f585f4a8 --- /dev/null +++ b/spec/light-client/attacks/isolate-attackers_001_draft.md @@ -0,0 +1,221 @@ + +# Lightclient Attackers Isolation + +> Warning: This is the beginning of an unfinished draft. Don't continue reading! + +Adversarial nodes may have the incentive to lie to a lightclient about the state of a Tendermint blockchain. An attempt to do so is called attack. Light client [verification][verification] checks incoming data by checking a so-called "commit", which is a forwarded set of signed messages that is (supposedly) produced during executing Tendermint consensus. Thus, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules. + +As Tendermint consensus and light client verification is safe under the assumption of more than 2/3 of correct voting power per block [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], this implies that if there was an attack then [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] was violated, that is, there is a block such that + +- validators deviated from the protocol, and +- these validators represent more than 1/3 of the voting power in that block. 
+ +In the case of an [attack][node-based-attack-characterization], the lightclient [attack detection mechanism][detection] computes data, so called evidence [[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link], that can be used + +- to proof that there has been attack [[TMBC-LC-EVIDENCE-DATA.1]][TMBC-LC-EVIDENCE-DATA-link] and +- as basis to find the actual nodes that deviated from the Tendermint protocol. + +This specification considers how a full node in a Tendermint blockchain can isolate a set of attackers that launched the attack. The set should satisfy + +- the set does not contain a correct validator +- the set contains validators that represent more than 1/3 of the voting power of a block that is still within the unbonding period + +# Outline + +**TODO** when preparing a version for broader review. + +# Part I - Basics + +For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1), cf. [Light Client Verification][verification]. + +# Part II - Definition of the Problem + +The specification of the [detection mechanism][detection] describes + +- what is a light client attack, +- conditions under which the detector will detect a light client attack, +- and the format of the output data, called evidence, in the case an attack is detected. The format is defined in +[[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link] and looks as follows + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 +} +``` + +The isolator is a function that gets as input evidence `ev` +and a prefix of the blockchain `bc` at least up to height `ev.ConflictingBlock.Header.Height + 1`. The output is a set of *peerIDs* of validators. + +We assume that the full node is synchronized with the blockchain and has reached the height `ev.ConflictingBlock.Header.Height + 1`. 
+ +#### **[FN-INV-Output.1]** + +When an output is generated it satisfies the following properties: + +- If + - `bc[CommonHeight].bfttime` is within the unbonding period w.r.t. the time at the full node, + - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]` + - Validators in `ev.ConflictingBlock.Commit` represent more than 1/3 of the voting power in `bc[ev.CommonHeight].NextValidators` +- Then: A set of validators in `bc[CommonHeight].NextValidators` that + - represent more than 1/3 of the voting power in `bc[ev.commonHeight].NextValidators` + - signed Tendermint consensus messages for height `ev.ConflictingBlock.Header.Height` by violating the Tendermint consensus protocol. +- Else: the empty set. + +# Part IV - Protocol + +Here we discuss how to solve the problem of isolating misbehaving processes. We describe the function `isolateMisbehavingProcesses` as well as all the helping functions below. In [Part V](#part-v---Completeness), we discuss why the solution is complete based on result from analysis with automated tools. + +## Isolation + +### Outline + +> Describe solution (in English), decomposition into functions, where communication to other components happens. + +#### **[LCAI-FUNC-MAIN.1]** + +```go +func isolateMisbehavingProcesses(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress { + + reference := bc[ev.conflictingBlock.Header.Height].Header + ev_header := ev.conflictingBlock.Header + + ref_commit := bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit // + 1 !! 
+ ev_commit := ev.conflictingBlock.Commit + + if violatesTMValidity(reference, ev_header) { + // lunatic light client attack + signatories := Signers(ev.ConflictingBlock.Commit) + bonded_vals := Addresses(bc[ev.CommonHeight].NextValidators) + return intersection(signatories,bonded_vals) + + } + // If this point is reached the validator sets in reference and ev_header are identical + else if RoundOf(ref_commit) == RoundOf(ev_commit) { + // equivocation light client attack + return intersection(Signers(ref_commit), Signers(ev_commit)) + } + else { + // amnesia light client attack + return IsolateAmnesiaAttacker(ev, bc) + } +} +``` + +- Implementation comment + - If the full node has only reached height `ev.conflictingBlock.Header.Height` then `bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit` refers to the locally stored commit for this height. (This commit must be present by the precondition on `length(bc)`.) + - We check in the precondition that the unbonding period is not expired. However, since time moves on, before handing the validators over Cosmos SDK, the time needs to be checked again to satisfy the contract which requires that only bonded validators are reported. This passing of validators to the SDK is out of scope of this specification. +- Expected precondition + - `length(bc) >= ev.conflictingBlock.Header.Height` + - `ValidAndVerifiedUnbonding(bc[ev.CommonHeight], ev.ConflictingBlock) == SUCCESS` + - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]` + - TODO: input light blocks pass basic validation +- Expected postcondition + - [[FN-INV-Output.1]](#FN-INV-Output1) holds +- Error condition + - returns an error if precondition is violated. 
+ +### Details of the Functions + +#### **[LCAI-FUNC-VVU.1]** + +```go +func ValidAndVerifiedUnbonding(trusted LightBlock, untrusted LightBlock) Result +``` + +- Conditions are identical to [[LCV-FUNC-VALID.2]][LCV-FUNC-VALID.link] except the precondition "*trusted.Header.Time > now - trustingPeriod*" is substituted with + - `trusted.Header.Time > now - UnbondingPeriod` + +#### **[LCAI-FUNC-NONVALID.1]** + +```go +func violatesTMValidity(ref Header, ev Header) boolean +``` + +- Implementation remarks + - checks whether the evidence header `ev` violates the validity property of Tendermint Consensus, by checking agains a reference header +- Expected precondition + - `ref.Height == ev.Height` +- Expected postcondition + - returns evaluation of the following disjunction + **[[LCAI-NONVALID-OUTPUT.1]]** == + `ref.ValidatorsHash != ev.ValidatorsHash` or + `ref.NextValidatorsHash != ev.NextValidatorsHash` or + `ref.ConsensusHash != ev.ConsensusHash` or + `ref.AppHash != ev.AppHash` or + `ref.LastResultsHash != ev.LastResultsHash` + +```go +func IsolateAmnesiaAttacker(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress +``` + +- Implementation remarks + **TODO:** What should we do here? Refer to the accountability doc? +- Expected postcondition + **TODO:** What should we do here? Refer to the accountability doc? + +```go +func RoundOf(commit Commit) []ValidatorAddress +``` + +- Expected precondition + - `commit` is well-formed. In particular all votes are from the same round `r`. 
+- Expected postcondition + - returns round `r` that is encoded in all the votes of the commit + +```go +func Signers(commit Commit) []ValidatorAddress +``` + +- Expected postcondition + - returns all validator addresses in `commit` + +```go +func Addresses(vals Validator[]) ValidatorAddress[] +``` + +- Expected postcondition + - returns all validator addresses in `vals` + +# Part V - Completeness + +As discussed in the beginning of this document, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules. +The main function `isolateMisbehavingProcesses` distinguishes three kinds of wrongly signing messages, namely, + +- lunatic: signing invalid blocks +- equivocation: double-signing valid blocks in the same consensus round +- amnesia: signing conflicting blocks in different consensus rounds, without having seen a quorum of messages that would have allowed to do so. + +The question is whether this captures all attacks. +First observe that the first checking in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. If this check passes, that is, if `violatesTMValidity` returns `FALSE` this means that [FN-NONVALID-OUTPUT] evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence after `violatesTMValidity`, all the involved validators are the ones from the blockchain. It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also it is sufficient to consider two different valid consensus values, that is, binary consensus. + +**TODO** we have analyzed Tendermint consensus with TLA+ and have accompanied Galois in an independent study of the protocol based on [Ivy proofs](https://github.com/tendermint/spec/tree/master/ivy-proofs). + +# References + +[[supervisor]] The specification of the light client supervisor. 
+ +[[verification]] The specification of the light client verification protocol + +[[detection]] The specification of the light client attack detection mechanism. + +[supervisor]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md + +[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md + +[detection]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md + +[LC-DATA-EVIDENCE-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#lc-data-evidence1 + +[TMBC-LC-EVIDENCE-DATA-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#tmbc-lc-evidence-data1 + +[node-based-attack-characterization]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#node-based-characterization-of-attacks + +[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1 + +[LCV-FUNC-VALID.link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-valid2 diff --git a/spec/light-client/attacks/isolate-attackers_002_reviewed.md b/spec/light-client/attacks/isolate-attackers_002_reviewed.md new file mode 100644 index 0000000000..febcc10a82 --- /dev/null +++ b/spec/light-client/attacks/isolate-attackers_002_reviewed.md @@ -0,0 +1,223 @@ +# Lightclient Attackers Isolation + +Adversarial nodes may have the incentive to lie to a lightclient about the state of a Tendermint blockchain. An attempt to do so is called attack. 
Light client [verification][verification] checks incoming data by checking a so-called "commit", which is a forwarded set of signed messages that is (supposedly) produced during executing Tendermint consensus. Thus, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules. + +As Tendermint consensus and light client verification is safe under the assumption of more than 2/3 of correct voting power per block [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], this implies that if there was an attack then [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] was violated, that is, there is a block such that + +- validators deviated from the protocol, and +- these validators represent more than 1/3 of the voting power in that block. + +In the case of an [attack][node-based-attack-characterization], the lightclient [attack detection mechanism][detection] computes data, so called evidence [[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link], that can be used + +- to proof that there has been attack [[TMBC-LC-EVIDENCE-DATA.1]][TMBC-LC-EVIDENCE-DATA-link] and +- as basis to find the actual nodes that deviated from the Tendermint protocol. + +This specification considers how a full node in a Tendermint blockchain can isolate a set of attackers that launched the attack. The set should satisfy + +- the set does not contain a correct validator +- the set contains validators that represent more than 1/3 of the voting power of a block that is still within the unbonding period + +# Outline + +After providing the [problem statement](#Part-I---Basics-and-Definition-of-the-Problem), we specify the [isolator function](#Part-II---Protocol) and close with the discussion about its [correctness](#Part-III---Completeness) which is based on computer-aided analysis of Tendermint Consensus. 
+ +# Part I - Basics and Definition of the Problem + +For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1), we refer to the specification of [Light Client Verification][verification]. + +The specification of the [detection mechanism][detection] describes + +- what is a light client attack, +- conditions under which the detector will detect a light client attack, +- and the format of the output data, called evidence, in the case an attack is detected. The format is defined in +[[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link] and looks as follows + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 +} +``` + +The isolator is a function that gets as input evidence `ev` +and a prefix of the blockchain `bc` at least up to height `ev.ConflictingBlock.Header.Height + 1`. The output is a set of *peerIDs* of validators. + +We assume that the full node is synchronized with the blockchain and has reached the height `ev.ConflictingBlock.Header.Height + 1`. + +#### **[LCAI-INV-Output.1]** + +When an output is generated it satisfies the following properties: + +- If + - `bc[CommonHeight].bfttime` is within the unbonding period w.r.t. the time at the full node, + - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]` + - Validators in `ev.ConflictingBlock.Commit` represent more than 1/3 of the voting power in `bc[ev.CommonHeight].NextValidators` +- Then: The output is a set of validators in `bc[CommonHeight].NextValidators` that + - represent more than 1/3 of the voting power in `bc[ev.commonHeight].NextValidators` + - signed Tendermint consensus messages for height `ev.ConflictingBlock.Header.Height` by violating the Tendermint consensus protocol. +- Else: the empty set. 
+ +# Part II - Protocol + +Here we discuss how to solve the problem of isolating misbehaving processes. We describe the function `isolateMisbehavingProcesses` as well as all the helping functions below. In [Part III](#part-III---Completeness), we discuss why the solution is complete based on result from analysis with automated tools. + +## Isolation + +### Outline + +We first check whether the conflicting block can indeed be verified from the common height. We then first check whether it was a lunatic attack (violating validity). If this is not the case, we check for equivocation. If this also is not the case, we start the on-chain [accountability protocol](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit). + +#### **[LCAI-FUNC-MAIN.1]** + +```go +func isolateMisbehavingProcesses(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress { + + reference := bc[ev.conflictingBlock.Header.Height].Header + ev_header := ev.conflictingBlock.Header + + ref_commit := bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit // + 1 !! + ev_commit := ev.conflictingBlock.Commit + + if violatesTMValidity(reference, ev_header) { + // lunatic light client attack + signatories := Signers(ev.ConflictingBlock.Commit) + bonded_vals := Addresses(bc[ev.CommonHeight].NextValidators) + return intersection(signatories,bonded_vals) + + } + // If this point is reached the validator sets in reference and ev_header are identical + else if RoundOf(ref_commit) == RoundOf(ev_commit) { + // equivocation light client attack + return intersection(Signers(ref_commit), Signers(ev_commit)) + } + else { + // amnesia light client attack + return IsolateAmnesiaAttacker(ev, bc) + } +} +``` + +- Implementation comment + - If the full node has only reached height `ev.conflictingBlock.Header.Height` then `bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit` refers to the locally stored commit for this height. 
(This commit must be present by the precondition on `length(bc)`.) + - We check in the precondition that the unbonding period is not expired. However, since time moves on, before handing the validators over Cosmos SDK, the time needs to be checked again to satisfy the contract which requires that only bonded validators are reported. This passing of validators to the SDK is out of scope of this specification. +- Expected precondition + - `length(bc) >= ev.conflictingBlock.Header.Height` + - `ValidAndVerifiedUnbonding(bc[ev.CommonHeight], ev.ConflictingBlock) == SUCCESS` + - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]` + - `ev.conflictingBlock` satisfies basic validation (in particular all signed messages in the Commit are from the same round) +- Expected postcondition + - [[FN-INV-Output.1]](#FN-INV-Output1) holds +- Error condition + - returns an error if precondition is violated. + +### Details of the Functions + +#### **[LCAI-FUNC-VVU.1]** + +```go +func ValidAndVerifiedUnbonding(trusted LightBlock, untrusted LightBlock) Result +``` + +- Conditions are identical to [[LCV-FUNC-VALID.2]][LCV-FUNC-VALID.link] except the precondition "*trusted.Header.Time > now - trustingPeriod*" is substituted with + - `trusted.Header.Time > now - UnbondingPeriod` + +#### **[LCAI-FUNC-NONVALID.1]** + +```go +func violatesTMValidity(ref Header, ev Header) boolean +``` + +- Implementation remarks + - checks whether the evidence header `ev` violates the validity property of Tendermint Consensus, by checking against a reference header +- Expected precondition + - `ref.Height == ev.Height` +- Expected postcondition + - returns evaluation of the following disjunction + **[LCAI-NONVALID-OUTPUT.1]** == + `ref.ValidatorsHash != ev.ValidatorsHash` or + `ref.NextValidatorsHash != ev.NextValidatorsHash` or + `ref.ConsensusHash != ev.ConsensusHash` or + `ref.AppHash != ev.AppHash` or + `ref.LastResultsHash != ev.LastResultsHash` + +```go +func IsolateAmnesiaAttacker(ev 
LightClientAttackEvidence, bc Blockchain) []ValidatorAddress +``` + +- Implementation remarks + - This triggers the [query/response protocol](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit). +- Expected postcondition + - returns attackers according to [LCAI-INV-Output.1]. + +```go +func RoundOf(commit Commit) []ValidatorAddress +``` + +- Expected precondition + - `commit` is well-formed. In particular all votes are from the same round `r`. +- Expected postcondition + - returns round `r` that is encoded in all the votes of the commit +- Error condition + - reports error if precondition is violated + +```go +func Signers(commit Commit) []ValidatorAddress +``` + +- Expected postcondition + - returns all validator addresses in `commit` + +```go +func Addresses(vals Validator[]) ValidatorAddress[] +``` + +- Expected postcondition + - returns all validator addresses in `vals` + +# Part III - Completeness + +As discussed in the beginning of this document, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules. +The main function `isolateMisbehavingProcesses` distinguishes three kinds of wrongly signed messages, namely, + +- lunatic: signing invalid blocks +- equivocation: double-signing valid blocks in the same consensus round +- amnesia: signing conflicting blocks in different consensus rounds, without having seen a quorum of messages that would have allowed to do so. + +The question is whether this captures all attacks. +First observe that the first check in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. If this check passes, that is, if `violatesTMValidity` returns `FALSE` this means that [[LCAI-NONVALID-OUTPUT.1]](#LCAI-FUNC-NONVALID1]) evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence, after `violatesTMValidity`, all the involved validators are the ones from the blockchain. 
It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also, as we have two different blocks for the same height, it is sufficient to consider two different valid consensus values, that is, binary consensus. + +For this fixed group membership, we have analyzed the attacks using the TLA+ specification of [Tendermint Consensus in TLA+][tendermint-accountability]. We checked that indeed the only possible scenarios that can lead to violation of agreement are **equivocation** and **amnesia**. An independent study by Galois of the protocol based on [Ivy proofs](https://github.com/tendermint/spec/tree/master/ivy-proofs) led to the same conclusion. + +# References + +[[supervisor]] The specification of the light client supervisor. + +[[verification]] The specification of the light client verification protocol. + +[[detection]] The specification of the light client attack detection mechanism. + +[[tendermint-accountability]]: TLA+ specification to check the types of attacks + +[tendermint-accountability]: +https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md + +[supervisor]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md + +[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md + +[detection]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md + +[LC-DATA-EVIDENCE-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#lc-data-evidence1 + +[TMBC-LC-EVIDENCE-DATA-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#tmbc-lc-evidence-data1 + +[node-based-attack-characterization]: 
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#node-based-characterization-of-attacks + +[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1 + +[LCV-FUNC-VALID.link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-valid2 diff --git a/spec/light-client/attacks/notes-on-evidence-handling.md b/spec/light-client/attacks/notes-on-evidence-handling.md new file mode 100644 index 0000000000..4b7d819191 --- /dev/null +++ b/spec/light-client/attacks/notes-on-evidence-handling.md @@ -0,0 +1,219 @@ + +# Light client attacks + +We define a light client attack as detection of conflicting headers for a given height that can be verified +starting from the trusted light block. A light client attack is defined in the context of interactions of +light client with two peers. One of the peers (called primary) defines a trace of verified light blocks +(primary trace) that are being checked against trace of the other peer (called witness) that we call +witness trace. + +A light client attack is defined by the primary and witness traces +that have a common root (the same trusted light block for a common height) but forms +conflicting branches (end of traces is for the same height but with different headers). +Note that conflicting branches could be arbitrarily big as branches continue to diverge after +a bifurcation point. We propose an approach that allows us to define a valid light client attack +only with a common light block and a single conflicting light block. We rely on the fact that +we assume that the primary is under suspicion (therefore not trusted) and that the witness plays +support role to detect and process an attack (therefore trusted). 
Therefore, once a light client +detects an attack, it needs to send to a witness only missing data (common height +and conflicting light block) as it has its trace. Keeping light client attack data of constant size +saves bandwidth and reduces an attack surface. As we will explain below, although in the context of +light client core +[verification](https://github.com/informalsystems/tendermint-rs/tree/master/docs/spec/lightclient/verification) +the roles of primary and witness are clearly defined, +in case of the attack, we run the same attack detection procedure twice where the roles are swapped. +The rationale is that the light client does not know what peer is correct (on a right main branch) +so it tries to create and submit an attack evidence to both peers. + +Light client attack evidence consists of a conflicting light block and a common height. + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 +} +``` + +Full node can validate a light client attack evidence by executing the following procedure: + +```go +func IsValid(lcaEvidence LightClientAttackEvidence, bc Blockchain) boolean { + commonBlock = GetLightBlock(bc, lcaEvidence.CommonHeight) + if commonBlock == nil return false + + // Note that trustingPeriod in ValidAndVerified is set to UNBONDING_PERIOD + verdict = ValidAndVerified(commonBlock, lcaEvidence.ConflictingBlock) + conflictingHeight = lcaEvidence.ConflictingBlock.Header.Height + + return verdict == OK and bc[conflictingHeight].Header != lcaEvidence.ConflictingBlock.Header +} +``` + +## Light client attack creation + +Given a trusted light block `trusted`, a light node executes the bisection algorithm to verify header +`untrusted` at some height `h`. If the bisection algorithm succeeds, then the header `untrusted` is verified. +Headers that are downloaded as part of the bisection algorithm are stored in a store and they are also in +the verified state. 
Therefore, after the bisection algorithm successfully terminates we have a trace of +the light blocks ([] LightBlock) we obtained from the primary that we call primary trace. + +### Primary trace + +The following invariant holds for the primary trace: + +- Given a `trusted` light block, target height `h`, and `primary_trace` ([] LightBlock): + *primary_trace[0] == trusted* and *primary_trace[len(primary_trace)-1].Height == h* and + successive light blocks are passing light client verification logic. + +### Witness with a conflicting header + +The verified header at height `h` is cross-checked with every witness as part of +[detection](https://github.com/informalsystems/tendermint-rs/tree/master/docs/spec/lightclient/detection). +If a witness returns the conflicting header at the height `h` the following procedure is executed to verify +if the conflicting header comes from the valid trace and if that's the case to create an attack evidence: + +#### Helper functions + +We assume the following helper functions: + +```go +// Returns trace of verified light blocks starting from rootHeight and ending with targetHeight. 
+Trace(lightStore LightStore, rootHeight int64, targetHeight int64) LightBlock[] + +// Returns validator set for the given height +GetValidators(bc Blockchain, height int64) Validator[] + +// Returns validator set for the given height +GetValidators(bc Blockchain, height int64) Validator[] + +// Return validator addresses for the given validators +GetAddresses(vals Validator[]) ValidatorAddress[] +``` + +```go +func DetectLightClientAttacks(primary PeerID, + primary_trace []LightBlock, + witness PeerID) (LightClientAttackEvidence, LightClientAttackEvidence) { + primary_lca_evidence, witness_trace = DetectLightClientAttack(primary_trace, witness) + + witness_lca_evidence = nil + if witness_trace != nil { + witness_lca_evidence, _ = DetectLightClientAttack(witness_trace, primary) + } + return primary_lca_evidence, witness_lca_evidence +} + +func DetectLightClientAttack(trace []LightBlock, peer PeerID) (LightClientAttackEvidence, []LightBlock) { + + lightStore = new LightStore().Update(trace[0], StateTrusted) + + for i in 1..len(trace)-1 { + lightStore, result = VerifyToTarget(peer, lightStore, trace[i].Header.Height) + + if result == ResultFailure then return (nil, nil) + + current = lightStore.Get(trace[i].Header.Height) + + // if obtained header is the same as in the trace we continue with a next height + if current.Header == trace[i].Header continue + + // we have identified a conflicting header + commonBlock = trace[i-1] + conflictingBlock = trace[i] + + return (LightClientAttackEvidence { conflictingBlock, commonBlock.Header.Height }, + Trace(lightStore, trace[i-1].Header.Height, trace[i].Header.Height)) + } + return (nil, nil) +} +``` + +## Evidence handling + +As part of on chain evidence handling, full nodes identifies misbehaving processes and informs +the application, so they can be slashed. Note that only bonded validators should +be reported to the application. 
There are three types of attacks that can be executed against +Tendermint light client: + +- lunatic attack +- equivocation attack and +- amnesia attack. + +We now specify the evidence handling logic. + +```go +func detectMisbehavingProcesses(lcAttackEvidence LightClientAttackEvidence, bc Blockchain) []ValidatorAddress { + assume IsValid(lcaEvidence, bc) + + // lunatic light client attack + if !isValidBlock(current.Header, conflictingBlock.Header) { + conflictingCommit = lcAttackEvidence.ConflictingBlock.Commit + bondedValidators = GetNextValidators(bc, lcAttackEvidence.CommonHeight) + + return getSigners(conflictingCommit) intersection GetAddresses(bondedValidators) + + // equivocation light client attack + } else if current.Header.Round == conflictingBlock.Header.Round { + conflictingCommit = lcAttackEvidence.ConflictingBlock.Commit + trustedCommit = bc[conflictingBlock.Header.Height+1].LastCommit + + return getSigners(trustedCommit) intersection getSigners(conflictingCommit) + + // amnesia light client attack + } else { + HandleAmnesiaAttackEvidence(lcAttackEvidence, bc) + } +} + +// Block validity in this context is defined by the trusted header. +func isValidBlock(trusted Header, conflicting Header) boolean { + return trusted.ValidatorsHash == conflicting.ValidatorsHash and + trusted.NextValidatorsHash == conflicting.NextValidatorsHash and + trusted.ConsensusHash == conflicting.ConsensusHash and + trusted.AppHash == conflicting.AppHash and + trusted.LastResultsHash == conflicting.LastResultsHash +} + +func getSigners(commit Commit) []ValidatorAddress { + signers = []ValidatorAddress + for (i, commitSig) in commit.Signatures { + if commitSig.BlockIDFlag == BlockIDFlagCommit { + signers.append(commitSig.ValidatorAddress) + } + } + return signers +} +``` + +Note that amnesia attack evidence handling involves more complex processing, i.e., cannot be +defined simply on amnesia attack evidence. 
We explain in the following section a protocol +for handling amnesia attack evidence. + +### Amnesia attack evidence handling + +Detecting faulty processes in case of the amnesia attack is more complex and cannot be inferred +purely based on attack evidence data. In this case, in order to detect misbehaving processes we need +access to votes processes sent/received during the conflicting height. Therefore, amnesia handling assumes that +validators persist all votes received and sent during multi-round heights (as amnesia attack +is only possible in heights that execute over multiple rounds, i.e., commit round > 0). + +To simplify description of the algorithm we assume existence of a trusted oracle called monitor that will +drive the algorithm and output faulty processes at the end. Monitor can be implemented in a +distributed setting as an on-chain module. The algorithm works as follows: + 1) Monitor sends votesets request to validators of the conflicting height. Validators + are expected to send their votesets within predefined timeout. + 2) Upon receiving votesets request, validators send their votesets to a monitor. + 3) Validators which have not sent their votesets within timeout are considered faulty. + 4) The preprocessing of the votesets is done. That means that the received votesets are analyzed + and each vote (valid) sent by process p is added to the voteset of the sender p. This phase ensures that + votes sent by faulty processes observed by at least one correct validator cannot be excluded from the analysis. + 5) Votesets of every validator are analyzed independently to decide whether the validator is correct or faulty.
+ A faulty validator is one for which at least one of the following invalid transitions is found: + - More than one PREVOTE message is sent in a round + - More than one PRECOMMIT message is sent in a round + - PRECOMMIT message is sent without receiving +2/3 of voting-power equivalent + appropriate PREVOTE messages + - PREVOTE message is sent for the value V’ in round r’ and the PRECOMMIT message had + been sent for the value V in round r by the same process (r’ > r) and there are no + +2/3 of voting-power equivalent PREVOTE(vr, V’) messages (vr ≥ 0 and vr > r and vr < r’) + as the justification for sending PREVOTE(r’, V’) diff --git a/spec/light-client/detection/004bmc-apalache-ok.csv b/spec/light-client/detection/004bmc-apalache-ok.csv new file mode 100644 index 0000000000..bf4f53ea2a --- /dev/null +++ b/spec/light-client/detection/004bmc-apalache-ok.csv @@ -0,0 +1,10 @@ +no;filename;tool;timeout;init;inv;next;args +1;LCD_MC3_3_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +2;LCD_MC3_3_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +3;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +4;LCD_MC3_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +5;LCD_MC3_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +6;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +7;LCD_MC4_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +8;LCD_MC4_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +9;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 diff --git a/spec/light-client/detection/005bmc-apalache-error.csv b/spec/light-client/detection/005bmc-apalache-error.csv new file mode 100644 index 0000000000..1b9dd05ca9 --- /dev/null +++ b/spec/light-client/detection/005bmc-apalache-error.csv @@ -0,0 +1,4 @@ +no;filename;tool;timeout;init;inv;next;args +1;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 +2;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10
+3;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 diff --git a/spec/light-client/detection/Blockchain_003_draft.tla b/spec/light-client/detection/Blockchain_003_draft.tla new file mode 100644 index 0000000000..2b37c1b181 --- /dev/null +++ b/spec/light-client/detection/Blockchain_003_draft.tla @@ -0,0 +1,164 @@ +------------------------ MODULE Blockchain_003_draft ----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). + *) +EXTENDS Integers, FiniteSets + +Min(a, b) == IF a < b THEN a ELSE b + +CONSTANT + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + ULTIMATE_HEIGHT, + (* a maximal height that can be ever reached (modelling artifact) *) + TRUSTING_PERIOD + (* the period within which the validators are trusted *) + +Heights == 1..ULTIMATE_HEIGHT (* possible heights *) + +(* A commit is just a set of nodes who have committed the block *) +Commits == SUBSET AllNodes + +(* The set of all block headers that can be on the blockchain. + This is a simplified version of the Block data structure in the actual implementation. *) +BlockHeaders == [ + height: Heights, + \* the block height + time: Int, + \* the block timestamp in some integer units + lastCommit: Commits, + \* the nodes who have voted on the previous block, the set itself instead of a hash + (* in the implementation, only the hashes of V and NextV are stored in a block, + as V and NextV are stored in the application state *) + VS: SUBSET AllNodes, + \* the validators of this bloc. We store the validators instead of the hash. + NextVS: SUBSET AllNodes + \* the validators of the next block. We store the next validators instead of the hash. 
+] + +(* A signed header is just a header together with a set of commits *) +LightBlocks == [header: BlockHeaders, Commits: Commits] + +VARIABLES + refClock, + (* the current global time in integer units as perceived by the reference chain *) + blockchain, + (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *) + Faulty + (* A set of faulty nodes, which can act as validators. We assume that the set + of faulty processes is non-decreasing. If a process has recovered, it should + connect using a different id. *) + +(* all variables, to be used with UNCHANGED *) +vars == <> + +(* The set of all correct nodes in a state *) +Corr == AllNodes \ Faulty + +(* APALACHE annotations *) +a <: b == a \* type annotation + +NT == STRING +NodeSet(S) == S <: {NT} +EmptyNodeSet == NodeSet({}) + +BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}] + +LBT == [header |-> BT, Commits |-> {NT}] +(* end of APALACHE annotations *) + +(****************************** BLOCKCHAIN ************************************) + +(* the header is still within the trusting period *) +InTrustingPeriod(header) == + refClock < header.time + TRUSTING_PERIOD + +(* + Given a function pVotingPower \in D -> Powers for some D \subseteq AllNodes + and pNodes \subseteq D, test whether the set pNodes \subseteq AllNodes has + more than 2/3 of voting power among the nodes in D. + *) +TwoThirds(pVS, pNodes) == + LET TP == Cardinality(pVS) + SP == Cardinality(pVS \intersect pNodes) + IN + 3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP + +(* + Given a set of FaultyNodes, test whether the voting power of the correct nodes in D + is more than 2/3 of the voting power of the faulty nodes in D. + + Parameters: + - pFaultyNodes is a set of nodes that are considered faulty + - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc. 
+ - pMaxFaultRatio is a pair <> that limits the ratio a / b of the faulty + validators from above (exclusive) + *) +FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) == + LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pNodes + CN == pVS \ pFaultyNodes \* correct nodes in pNodes + CP == Cardinality(CN) \* power of the correct nodes + FP == Cardinality(FN) \* power of the faulty nodes + IN + \* CP + FP = TP is the total voting power + LET TP == CP + FP IN + FP * maxRatio[2] < TP * maxRatio[1] + +(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *) +IsLightBlockAllowedByDigitalSignatures(ht, block) == + \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe) + \/ /\ block.Commits \subseteq Faulty + /\ block.header.height = ht + /\ block.header.time >= 0 \* signed only by faulty + +(* + Initialize the blockchain to the ultimate height right in the initial states. + We pick the faulty validators statically, but that should not affect the light client. 
+ + Parameters: + - pMaxFaultyRatioExclusive is a pair <> that bound the number of + faulty validators in each block by the ratio a / b (exclusive) + *) +InitToHeight(pMaxFaultyRatioExclusive) == + /\ Faulty \in SUBSET AllNodes \* some nodes may fail + \* pick the validator sets and last commits + /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]: + \E timestamp \in [Heights -> Int]: + \* refClock is at least as early as the timestamp in the last block + /\ \E tm \in Int: refClock = tm /\ tm >= timestamp[ULTIMATE_HEIGHT] + \* the genesis starts on day 1 + /\ timestamp[1] = 1 + /\ vs[1] = AllNodes + /\ lastCommit[1] = EmptyNodeSet + /\ \A h \in Heights \ {1}: + /\ lastCommit[h] \subseteq vs[h - 1] \* the non-validators cannot commit + /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes + \* the faulty validators have the power below the threshold + /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive) + /\ timestamp[h] > timestamp[h - 1] \* the time grows monotonically + /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast + \* form the block chain out of validator sets and commits (this makes apalache faster) + /\ blockchain = [h \in Heights |-> + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes, + lastCommit |-> lastCommit[h]] + ] \****** + +(********************* BLOCKCHAIN ACTIONS ********************************) +(* + Advance the clock by zero or more time units. 
+ *) +AdvanceTime == + /\ \E tm \in Int: tm >= refClock /\ refClock' = tm + /\ UNCHANGED <> + +============================================================================= +\* Modification History +\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor +\* Created Fri Oct 11 15:45:11 CEST 2019 by igor diff --git a/spec/light-client/detection/LCD_MC3_3_faulty.tla b/spec/light-client/detection/LCD_MC3_3_faulty.tla new file mode 100644 index 0000000000..cef1df4d37 --- /dev/null +++ b/spec/light-client/detection/LCD_MC3_3_faulty.tla @@ -0,0 +1,27 @@ +------------------------- MODULE LCD_MC3_3_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == TRUE +FAULTY_RATIO == <<2, 3>> \* < 1 / 3 faulty validators + +VARIABLES + blockchain, (* the reference blockchain *) + localClock, (* current time in the light client *) + refClock, (* current time in the reference blockchain *) + Faulty, (* the set of faulty validators *) + state, (* the state of the light client detector *) + fetchedLightBlocks1, (* a function from heights to LightBlocks *) + fetchedLightBlocks2, (* a function from heights to LightBlocks *) + fetchedLightBlocks1b, (* a function from heights to LightBlocks *) + commonHeight, (* the height that is trusted in CreateEvidenceForPeer *) + nextHeightToTry, (* the index in CreateEvidenceForPeer *) + evidences + +INSTANCE LCDetector_003_draft +============================================================================ diff --git a/spec/light-client/detection/LCD_MC3_4_faulty.tla b/spec/light-client/detection/LCD_MC3_4_faulty.tla new file mode 100644 index 0000000000..06bcdee13a --- /dev/null +++ b/spec/light-client/detection/LCD_MC3_4_faulty.tla @@ 
-0,0 +1,27 @@ +------------------------- MODULE LCD_MC3_4_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == TRUE +FAULTY_RATIO == <<2, 3>> \* < 1 / 3 faulty validators + +VARIABLES + blockchain, (* the reference blockchain *) + localClock, (* current time in the light client *) + refClock, (* current time in the reference blockchain *) + Faulty, (* the set of faulty validators *) + state, (* the state of the light client detector *) + fetchedLightBlocks1, (* a function from heights to LightBlocks *) + fetchedLightBlocks2, (* a function from heights to LightBlocks *) + fetchedLightBlocks1b, (* a function from heights to LightBlocks *) + commonHeight, (* the height that is trusted in CreateEvidenceForPeer *) + nextHeightToTry, (* the index in CreateEvidenceForPeer *) + evidences + +INSTANCE LCDetector_003_draft +============================================================================ diff --git a/spec/light-client/detection/LCD_MC4_4_faulty.tla b/spec/light-client/detection/LCD_MC4_4_faulty.tla new file mode 100644 index 0000000000..fdb97d9616 --- /dev/null +++ b/spec/light-client/detection/LCD_MC4_4_faulty.tla @@ -0,0 +1,27 @@ +------------------------- MODULE LCD_MC4_4_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == TRUE +FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators + +VARIABLES + blockchain, (* the reference 
blockchain *) + localClock, (* current time in the light client *) + refClock, (* current time in the reference blockchain *) + Faulty, (* the set of faulty validators *) + state, (* the state of the light client detector *) + fetchedLightBlocks1, (* a function from heights to LightBlocks *) + fetchedLightBlocks2, (* a function from heights to LightBlocks *) + fetchedLightBlocks1b, (* a function from heights to LightBlocks *) + commonHeight, (* the height that is trusted in CreateEvidenceForPeer *) + nextHeightToTry, (* the index in CreateEvidenceForPeer *) + evidences + +INSTANCE LCDetector_003_draft +============================================================================ diff --git a/spec/light-client/detection/LCD_MC5_5_faulty.tla b/spec/light-client/detection/LCD_MC5_5_faulty.tla new file mode 100644 index 0000000000..fdbd87b8b9 --- /dev/null +++ b/spec/light-client/detection/LCD_MC5_5_faulty.tla @@ -0,0 +1,27 @@ +------------------------- MODULE LCD_MC5_5_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == TRUE +FAULTY_RATIO == <<2, 3>> \* < 1 / 3 faulty validators + +VARIABLES + blockchain, (* the reference blockchain *) + localClock, (* current time in the light client *) + refClock, (* current time in the reference blockchain *) + Faulty, (* the set of faulty validators *) + state, (* the state of the light client detector *) + fetchedLightBlocks1, (* a function from heights to LightBlocks *) + fetchedLightBlocks2, (* a function from heights to LightBlocks *) + fetchedLightBlocks1b, (* a function from heights to LightBlocks *) + commonHeight, (* the height that is trusted in CreateEvidenceForPeer *) + nextHeightToTry, (* the 
index in CreateEvidenceForPeer *) + evidences + +INSTANCE LCDetector_003_draft +============================================================================ diff --git a/spec/light-client/detection/LCDetector_003_draft.tla b/spec/light-client/detection/LCDetector_003_draft.tla new file mode 100644 index 0000000000..e2d32e996f --- /dev/null +++ b/spec/light-client/detection/LCDetector_003_draft.tla @@ -0,0 +1,373 @@ +-------------------------- MODULE LCDetector_003_draft ----------------------------- +(** + * This is a specification of the light client detector module. + * It follows the English specification: + * + * https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md + * + * The assumptions made in this specification: + * + * - light client connects to one primary and one secondary peer + * + * - the light client has its own local clock that can drift from the reference clock + * within the envelope [refClock - CLOCK_DRIFT, refClock + CLOCK_DRIFT]. + * The local clock may increase as well as decrease in the the envelope + * (similar to clock synchronization). + * + * - the ratio of the faulty validators is set as the parameter. 
+ * + * Igor Konnov, Josef Widder, 2020 + *) + +EXTENDS Integers + +\* the parameters of Light Client +CONSTANTS + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + TRUSTED_HEIGHT, + (* an index of the block header that the light client trusts by social consensus *) + TARGET_HEIGHT, + (* an index of the block header that the light client tries to verify *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + CLOCK_DRIFT, + (* the assumed precision of the clock *) + REAL_CLOCK_DRIFT, + (* the actual clock drift, which under normal circumstances should not + be larger than CLOCK_DRIFT (otherwise, there will be a bug) *) + FAULTY_RATIO, + (* a pair <> that limits that ratio of faulty validator in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. *) + IS_PRIMARY_CORRECT, + IS_SECONDARY_CORRECT + +VARIABLES + blockchain, (* the reference blockchain *) + localClock, (* the local clock of the light client *) + refClock, (* the reference clock in the reference blockchain *) + Faulty, (* the set of faulty validators *) + state, (* the state of the light client detector *) + fetchedLightBlocks1, (* a function from heights to LightBlocks *) + fetchedLightBlocks2, (* a function from heights to LightBlocks *) + fetchedLightBlocks1b, (* a function from heights to LightBlocks *) + commonHeight, (* the height that is trusted in CreateEvidenceForPeer *) + nextHeightToTry, (* the index in CreateEvidenceForPeer *) + evidences (* a set of evidences *) + +vars == <> + +\* (old) type annotations in Apalache +a <: b == a + + +\* instantiate a reference chain +ULTIMATE_HEIGHT == TARGET_HEIGHT + 1 +BC == INSTANCE Blockchain_003_draft + WITH ULTIMATE_HEIGHT <- (TARGET_HEIGHT + 1) + +\* use the light client API +LC == INSTANCE LCVerificationApi_003_draft + +\* evidence type +ET == [peer |-> STRING, conflictingBlock |-> BC!LBT, commonHeight |-> Int] + +\* is the algorithm in the terminating 
state +IsTerminated == + state \in { <<"NoEvidence", "PRIMARY">>, + <<"NoEvidence", "SECONDARY">>, + <<"FaultyPeer", "PRIMARY">>, + <<"FaultyPeer", "SECONDARY">>, + <<"FoundEvidence", "PRIMARY">> } + + +(********************************* Initialization ******************************) + +\* initialization for the light blocks data structure +InitLightBlocks(lb, Heights) == + \* BC!LightBlocks is an infinite set, as time is not restricted. + \* Hence, we initialize the light blocks by picking the sets inside. + \E vs, nextVS, lastCommit, commit \in [Heights -> SUBSET AllNodes]: + \* although [Heights -> Int] is an infinite set, + \* Apalache needs just one instance of this set, so it does not complain. + \E timestamp \in [Heights -> Int]: + LET hdr(h) == + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> nextVS[h], + lastCommit |-> lastCommit[h]] + IN + LET lightHdr(h) == + [header |-> hdr(h), Commits |-> commit[h]] + IN + lb = [ h \in Heights |-> lightHdr(h) ] + +\* initialize the detector algorithm +Init == + \* initialize the blockchain to TARGET_HEIGHT + 1 + /\ BC!InitToHeight(FAULTY_RATIO) + /\ \E tm \in Int: + tm >= 0 /\ LC!IsLocalClockWithinDrift(tm, refClock) /\ localClock = tm + \* start with the secondary looking for evidence + /\ state = <<"Init", "SECONDARY">> /\ commonHeight = 0 /\ nextHeightToTry = 0 + /\ evidences = {} <: {ET} + \* Precompute a possible result of light client verification for the primary. + \* It is the input to the detection algorithm. + /\ \E Heights1 \in SUBSET(TRUSTED_HEIGHT..TARGET_HEIGHT): + /\ TRUSTED_HEIGHT \in Heights1 + /\ TARGET_HEIGHT \in Heights1 + /\ InitLightBlocks(fetchedLightBlocks1, Heights1) + \* As we have a non-deterministic scheduler, for every trace that has + \* an unverified block, there is a filtered trace that only has verified + \* blocks. This is a deep observation. 
+ /\ LET status == [h \in Heights1 |-> "StateVerified"] IN + LC!VerifyToTargetPost(blockchain, IS_PRIMARY_CORRECT, + fetchedLightBlocks1, status, + TRUSTED_HEIGHT, TARGET_HEIGHT, "finishedSuccess") + \* initialize the other data structures to the default values + /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT] + trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes] + IN + /\ fetchedLightBlocks2 = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + /\ fetchedLightBlocks1b = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + + +(********************************* Transitions ******************************) + +\* a block should contain a copy of the block from the reference chain, +\* with a matching commit +CopyLightBlockFromChain(block, height) == + LET ref == blockchain[height] + lastCommit == + IF height < ULTIMATE_HEIGHT + THEN blockchain[height + 1].lastCommit + \* for the ultimate block, which we never use, + \* as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1 + ELSE blockchain[height].VS + IN + block = [header |-> ref, Commits |-> lastCommit] + +\* Either the primary is correct and the block comes from the reference chain, +\* or the block is produced by a faulty primary. +\* +\* [LCV-FUNC-FETCH.1::TLA.1] +FetchLightBlockInto(isPeerCorrect, block, height) == + IF isPeerCorrect + THEN CopyLightBlockFromChain(block, height) + ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block) + + +(** + * Pick the next height, for which there is a block. + *) +PickNextHeight(fetchedBlocks, height) == + LET largerHeights == { h \in DOMAIN fetchedBlocks: h > height } IN + IF largerHeights = ({} <: {Int}) + THEN -1 + ELSE CHOOSE h \in largerHeights: + \A h2 \in largerHeights: h <= h2 + + +(** + * Check, whether the target header matches at the secondary and primary. 
+ *) +CompareLast == + /\ state = <<"Init", "SECONDARY">> + \* fetch a block from the secondary: + \* non-deterministically pick a block that matches the constraints + /\ \E latest \in BC!LightBlocks: + \* for the moment, we ignore the possibility of a timeout when fetching a block + /\ FetchLightBlockInto(IS_SECONDARY_CORRECT, latest, TARGET_HEIGHT) + /\ IF latest.header = fetchedLightBlocks1[TARGET_HEIGHT].header + THEN \* if the headers match, CreateEvidence is not called + /\ state' = <<"NoEvidence", "SECONDARY">> + \* save the retrieved block for further analysis + /\ fetchedLightBlocks2' = + [h \in (DOMAIN fetchedLightBlocks2) \union {TARGET_HEIGHT} |-> + IF h = TARGET_HEIGHT THEN latest ELSE fetchedLightBlocks2[h]] + /\ UNCHANGED <> + ELSE \* prepare the parameters for CreateEvidence + /\ commonHeight' = TRUSTED_HEIGHT + /\ nextHeightToTry' = PickNextHeight(fetchedLightBlocks1, TRUSTED_HEIGHT) + /\ state' = IF nextHeightToTry' >= 0 + THEN <<"CreateEvidence", "SECONDARY">> + ELSE <<"FaultyPeer", "SECONDARY">> + /\ UNCHANGED fetchedLightBlocks2 + + /\ UNCHANGED <> + + +\* the actual loop in CreateEvidence +CreateEvidence(peer, isPeerCorrect, refBlocks, targetBlocks) == + /\ state = <<"CreateEvidence", peer>> + \* precompute a possible result of light client verification for the secondary + \* we have to introduce HeightRange, because Apalache can only handle a..b + \* for constant a and b + /\ LET HeightRange == { h \in TRUSTED_HEIGHT..TARGET_HEIGHT: + commonHeight <= h /\ h <= nextHeightToTry } IN + \E HeightsRange \in SUBSET(HeightRange): + /\ commonHeight \in HeightsRange /\ nextHeightToTry \in HeightsRange + /\ InitLightBlocks(targetBlocks, HeightsRange) + \* As we have a non-deterministic scheduler, for every trace that has + \* an unverified block, there is a filtered trace that only has verified + \* blocks. This is a deep observation. 
+ /\ \E result \in {"finishedSuccess", "finishedFailure"}: + LET targetStatus == [h \in HeightsRange |-> "StateVerified"] IN + \* call VerifyToTarget for (commonHeight, nextHeightToTry). + /\ LC!VerifyToTargetPost(blockchain, isPeerCorrect, + targetBlocks, targetStatus, + commonHeight, nextHeightToTry, result) + \* case 1: the peer has failed (or the trusting period has expired) + /\ \/ /\ result /= "finishedSuccess" + /\ state' = <<"FaultyPeer", peer>> + /\ UNCHANGED <> + \* case 2: success + \/ /\ result = "finishedSuccess" + /\ LET block1 == refBlocks[nextHeightToTry] IN + LET block2 == targetBlocks[nextHeightToTry] IN + IF block1.header /= block2.header + THEN \* the target blocks do not match + /\ state' = <<"FoundEvidence", peer>> + /\ evidences' = evidences \union + {[peer |-> peer, + conflictingBlock |-> block1, + commonHeight |-> commonHeight]} + /\ UNCHANGED <> + ELSE \* the target blocks match + /\ nextHeightToTry' = PickNextHeight(refBlocks, nextHeightToTry) + /\ commonHeight' = nextHeightToTry + /\ state' = IF nextHeightToTry' >= 0 + THEN state + ELSE <<"NoEvidence", peer>> + /\ UNCHANGED evidences + +SwitchToPrimary == + /\ state = <<"FoundEvidence", "SECONDARY">> + /\ nextHeightToTry' = PickNextHeight(fetchedLightBlocks2, commonHeight) + /\ state' = <<"CreateEvidence", "PRIMARY">> + /\ UNCHANGED <> + + +CreateEvidenceForSecondary == + /\ CreateEvidence("SECONDARY", IS_SECONDARY_CORRECT, + fetchedLightBlocks1, fetchedLightBlocks2') + /\ UNCHANGED <> + +CreateEvidenceForPrimary == + /\ CreateEvidence("PRIMARY", IS_PRIMARY_CORRECT, + fetchedLightBlocks2, + fetchedLightBlocks1b') + /\ UNCHANGED <> + +(* + The local and global clocks can be updated. They can also drift from each other. + Note that the local clock can actually go backwards in time. + However, it still stays in the drift envelope + of [refClock - REAL_CLOCK_DRIFT, refClock + REAL_CLOCK_DRIFT]. 
+ *) +AdvanceClocks == + /\ \E tm \in Int: + tm >= refClock /\ refClock' = tm + /\ \E tm \in Int: + /\ tm >= localClock + /\ LC!IsLocalClockWithinDrift(tm, refClock') + /\ localClock' = tm + +(** + Execute AttackDetector for one secondary. + + [LCD-FUNC-DETECTOR.2::LOOP.1] + *) +Next == + /\ AdvanceClocks + /\ \/ CompareLast + \/ CreateEvidenceForSecondary + \/ SwitchToPrimary + \/ CreateEvidenceForPrimary + + +\* simple invariants to see the progress of the detector +NeverNoEvidence == state[1] /= "NoEvidence" +NeverFoundEvidence == state[1] /= "FoundEvidence" +NeverFaultyPeer == state[1] /= "FaultyPeer" +NeverCreateEvidence == state[1] /= "CreateEvidence" + +NeverFoundEvidencePrimary == state /= <<"FoundEvidence", "PRIMARY">> + +NeverReachTargetHeight == nextHeightToTry < TARGET_HEIGHT + +EvidenceWhenFaultyInv == + (state[1] = "FoundEvidence") => (~IS_PRIMARY_CORRECT \/ ~IS_SECONDARY_CORRECT) + +NoEvidenceForCorrectInv == + IS_PRIMARY_CORRECT /\ IS_SECONDARY_CORRECT => evidences = {} <: {ET} + +(** + * If we find an evidence by peer A, peer B has ineded given us a corrupted + * header following the common height. Also, we have a verification trace by peer A. 
+ *) +CommonHeightOnEvidenceInv == + \A e \in evidences: + LET conflicting == e.conflictingBlock IN + LET conflictingHeader == conflicting.header IN + \* the evidence by suspectingPeer can be verified by suspectingPeer in one step + LET SoundEvidence(suspectingPeer, peerBlocks) == + \/ e.peer /= suspectingPeer + \* the conflicting block from another peer verifies against the common height + \/ /\ "SUCCESS" = + LC!ValidAndVerifiedUntimed(peerBlocks[e.commonHeight], conflicting) + \* and the headers of the same height by the two peers do not match + /\ peerBlocks[conflictingHeader.height].header /= conflictingHeader + IN + /\ SoundEvidence("PRIMARY", fetchedLightBlocks1b) + /\ SoundEvidence("SECONDARY", fetchedLightBlocks2) + +(** + * If the light client does not find an evidence, + * then there is no attack on the light client. + *) +AccuracyInv == + (LC!InTrustingPeriodLocal(fetchedLightBlocks1[TARGET_HEIGHT].header) + /\ state = <<"NoEvidence", "SECONDARY">>) + => + (fetchedLightBlocks1[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT] + /\ fetchedLightBlocks2[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT]) + +(** + * The primary reports a corrupted block at the target height. If the secondary is + * correct and the algorithm has terminated, we should get the evidence. + * This property is violated due to clock drift. VerifyToTarget may fail with + * the correct secondary within the trusting period (due to clock drift, locally + * we think that we are outside of the trusting period). + *) +PrecisionInvGrayZone == + (/\ fetchedLightBlocks1[TARGET_HEIGHT].header /= blockchain[TARGET_HEIGHT] + /\ BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ IS_SECONDARY_CORRECT + /\ IsTerminated) + => + evidences /= {} <: {ET} + +(** + * The primary reports a corrupted block at the target height. If the secondary is + * correct and the algorithm has terminated, we should get the evidence. 
+ * This invariant does not fail, as we are using the local clock to check the trusting + * period. + *) +PrecisionInvLocal == + (/\ fetchedLightBlocks1[TARGET_HEIGHT].header /= blockchain[TARGET_HEIGHT] + /\ LC!InTrustingPeriodLocalSurely(blockchain[TRUSTED_HEIGHT]) + /\ IS_SECONDARY_CORRECT + /\ IsTerminated) + => + evidences /= {} <: {ET} + +==================================================================================== diff --git a/spec/light-client/detection/LCVerificationApi_003_draft.tla b/spec/light-client/detection/LCVerificationApi_003_draft.tla new file mode 100644 index 0000000000..909eab92b8 --- /dev/null +++ b/spec/light-client/detection/LCVerificationApi_003_draft.tla @@ -0,0 +1,192 @@ +-------------------- MODULE LCVerificationApi_003_draft -------------------------- +(** + * The common interface of the light client verification and detection. + *) +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + CLOCK_DRIFT, + (* the assumed precision of the clock *) + REAL_CLOCK_DRIFT, + (* the actual clock drift, which under normal circumstances should not + be larger than CLOCK_DRIFT (otherwise, there will be a bug) *) + FAULTY_RATIO + (* a pair <> that limits that ratio of faulty validator in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. 
*) + +VARIABLES + localClock (* current time as measured by the light client *) + +(* the header is still within the trusting period *) +InTrustingPeriodLocal(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT + +(* the header is still within the trusting period, even if the clock can go backwards *) +InTrustingPeriodLocalSurely(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT + +(* ensure that the local clock does not drift far away from the global clock *) +IsLocalClockWithinDrift(local, global) == + /\ global - REAL_CLOCK_DRIFT <= local + /\ local <= global + REAL_CLOCK_DRIFT + +(** + * Check that the commits in an untrusted block form 1/3 of the next validators + * in a trusted header. + *) +SignedByOneThirdOfTrusted(trusted, untrusted) == + LET TP == Cardinality(trusted.header.NextVS) + SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS) + IN + 3 * SP > TP + +(** + The first part of the precondition of ValidAndVerified, which does not take + the current time into account. + + [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1] + *) +ValidAndVerifiedPreUntimed(trusted, untrusted) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ thdr.height < uhdr.height + \* the trusted block has been created earlier + /\ thdr.time < uhdr.time + /\ untrusted.Commits \subseteq uhdr.VS + /\ LET TP == Cardinality(uhdr.VS) + SP == Cardinality(untrusted.Commits) + IN + 3 * SP > 2 * TP + /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS + (* As we do not have explicit hashes we ignore these three checks of the English spec: + + 1. "trusted.Commit is a commit is for the header trusted.Header, + i.e. it contains the correct hash of the header". + 2. untrusted.Validators = hash(untrusted.Header.Validators) + 3. 
untrusted.NextValidators = hash(untrusted.Header.NextValidators) + *) + +(** + Check the precondition of ValidAndVerified, including the time checks. + + [LCV-FUNC-VALID.1::TLA-PRE.1] + *) +ValidAndVerifiedPre(trusted, untrusted, checkFuture) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ InTrustingPeriodLocal(thdr) + \* The untrusted block is not from the future (modulo clock drift). + \* Do the check, if it is required. + /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT + /\ ValidAndVerifiedPreUntimed(trusted, untrusted) + + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. + This test does take current time into account, but only looks at the block structure. + + [LCV-FUNC-VALID.1::TLA-UNTIMED.1] + *) +ValidAndVerifiedUntimed(trusted, untrusted) == + IF ~ValidAndVerifiedPreUntimed(trusted, untrusted) + THEN "INVALID" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. + + [LCV-FUNC-VALID.1::TLA.1] + *) +ValidAndVerified(trusted, untrusted, checkFuture) == + IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture) + THEN "INVALID" + ELSE IF ~InTrustingPeriodLocal(untrusted.header) + (* We leave the following test for the documentation purposes. + The implementation should do this test, as signature verification may be slow. + In the TLA+ specification, ValidAndVerified happens in no time. 
+ *) + THEN "FAILED_TRUSTING_PERIOD" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + + +(** + The invariant of the light store that is not related to the blockchain + *) +LightStoreInv(fetchedLightBlocks, lightBlockStatus) == + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] /= "StateVerified" + \/ lightBlockStatus[rh] /= "StateVerified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ LET lhdr == fetchedLightBlocks[lh] + rhdr == fetchedLightBlocks[rh] + IN + \* we can verify the right one using the left one + "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr) + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + * When the light client terminates, there are no failed blocks. + * (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +(** + The expected post-condition of VerifyToTarget. + *) +VerifyToTargetPost(blockchain, isPeerCorrect, + fetchedLightBlocks, lightBlockStatus, + trustedHeight, targetHeight, finalState) == + LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN + \* The light client is not lying us on the trusted block. 
+ \* It is straightforward to detect. + /\ lightBlockStatus[trustedHeight] = "StateVerified" + /\ trustedHeight \in DOMAIN fetchedLightBlocks + /\ trustedHeader = blockchain[trustedHeight] + \* the invariants we have found in the light client verification + \* there is a problem with trusting period + /\ isPeerCorrect + => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) + \* a correct peer should fail the light client, + \* if the trusted block is in the trusting period + /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader) + => finalState = "finishedSuccess" + /\ finalState = "finishedSuccess" => + /\ lightBlockStatus[targetHeight] = "StateVerified" + /\ targetHeight \in DOMAIN fetchedLightBlocks + /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) + /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus) + + +================================================================================== diff --git a/spec/light-client/detection/README.md b/spec/light-client/detection/README.md new file mode 100644 index 0000000000..50d8ab6fe1 --- /dev/null +++ b/spec/light-client/detection/README.md @@ -0,0 +1,75 @@ +--- +order: 1 +parent: + title: Fork Detection + order: 2 +--- + +# Tendermint fork detection and IBC fork detection + +## Status + +This is a work in progress. +This directory captures the ongoing work and discussion on fork +detection both in the context of a Tendermint light node and in the +context of IBC. It contains the following files + +### [detection.md](./detection.md) + +a draft of the light node fork detection including "proof of fork" + definition, that is, the data structure to submit evidence to full + nodes. 
+ +### [discussions.md](./discussions.md) + +A collection of ideas and intuitions from recent discussions + +- the outcome of recent discussion +- a sketch of the light client supervisor to provide the context in + which fork detection happens +- a discussion about lightstore semantics + +### [req-ibc-detection.md](./req-ibc-detection.md) + +- a collection of requirements for fork detection in the IBC + context. In particular it contains a section "Required Changes in + ICS 007" with necessary updates to ICS 007 to support Tendermint + fork detection + +### [draft-functions.md](./draft-functions.md) + +In order to address the collected requirements, we started to sketch +some functions that we will need in the future when we specify in more +detail the + +- fork detections +- proof of fork generation +- proof of fork verification + +on the following components. + +- IBC on-chain components +- Relayer + +### TODOs + +We decided to merge the files while there are still open points to +address to record the current state and move forward. In particular, +the following points need to be addressed: + +- + +- + +- + +- + +Most likely we will write a specification on the light client +supervisor along the outcomes of + +- + +that also addresses initialization + +- diff --git a/spec/light-client/detection/detection_001_reviewed.md b/spec/light-client/detection/detection_001_reviewed.md new file mode 100644 index 0000000000..db8c29a143 --- /dev/null +++ b/spec/light-client/detection/detection_001_reviewed.md @@ -0,0 +1,788 @@ +# ***This is an unfinished draft. Comments are welcome!*** + +**TODO:** We will need to do small adaptations to the verification +spec to reflect the semantics in the LightStore (verified, trusted, +untrusted, etc. not needed anymore). In more detail: + +- The state of the Lightstore needs to go. Functions like `LatestVerified` can +keep the name but will ignore state as it will not exist anymore. 
+ +- verification spec should be adapted to the second parameter of +`VerifyToTarget` +being a lightblock; new version number of function tag; + +- We should clarify what is the expectation of VerifyToTarget +so if it returns TimeoutError it can be assumed faulty. I guess that +VerifyToTarget with correct full node should never terminate with +TimeoutError. + +- We need to introduce a new version number for the new +specification. So we should decide how + to handle that. + +# Light Client Attack Detector + +In this specification, we strengthen the light client to be resistant +against so-called light client attacks. In a light client attack, all +the correct Tendermint full nodes agree on the sequence of generated +blocks (no fork), but a set of faulty full nodes attack a light client +by generating (signing) a block that deviates from the block of the +same height on the blockchain. In order to do so, some of these faulty +full nodes must have been validators before and violate +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link), as otherwise, if +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) would hold, +[verification](verification) would satisfy +[[LCV-SEQ-SAFE.1]](LCV-SEQ-SAFE-link). + +An attack detector (or detector for short) is a mechanism that is used +by the light client [supervisor](supervisor) after +[verification](verification) of a new light block +with the primary, to cross-check the newly learned light block with +other peers (secondaries). It expects as input a light block with some +height *root* (that serves as a root of trust), and a verification +trace (a sequence of lightblocks) that the primary provided. 
+ +In case the detector observes a light client attack, it computes +evidence data that can be used by Tendermint full nodes to isolate a +set of faulty full nodes that are still within the unbonding period +(more than 1/3 of the voting power of the validator set at some block of the chain), +and report them via ABCI to the application of a Tendermint blockchain +in order to punish faulty nodes. + +## Context of this document + +The light client [verification](verification) specification is +designed for the Tendermint failure model (1/3 assumption) +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link). It is safe under this +assumption, and live if it can reliably (that is, no message loss, no +duplication, and eventually delivered) and timely communicate with a +correct full node. If [[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) assumption is violated, the light client +can be fooled to trust a light block that was not generated by +Tendermint consensus. + +This specification, the attack detector, is a "second line of +defense", in case the 1/3 assumption is violated. Its goal is to +detect a light client attack (conflicting light blocks) and collect +evidence. However, it is impractical to probe all full nodes. At this +time we consider a simple scheme of maintaining an address book of +known full nodes from which a small subset (e.g., 4) are chosen +initially to communicate with. More involved book keeping with +probabilistic guarantees can be considered at later stages of the +project. + +The light client maintains a simple address book containing addresses +of full nodes that it can pick as primary and secondaries. To obtain +a new light block, the light client first does +[verification](verification) with the primary, and then cross-checks +the light block (and the trace of light blocks that led to it) with +the secondaries using this specification. 
+ +## Tendermint Consensus and Light Client Attacks + +In this section we will give some mathematical definitions of what we +mean by light client attacks (that are considered in this +specification) and how they differ from main-chain forks. To this end +we start by defining some properties of the sequence of blocks that is +decided upon by Tendermint consensus in normal operation (if the +Tendermint failure model holds +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link)), +and then define different +deviations that correspond to attack scenarios. + +#### **[TMBC-GENESIS.1]** + +Let *Genesis* be the agreed-upon initial block (file). + +#### **[TMBC-FUNC-SIGN.1]** + +Let *b* and *c* be two light blocks with *b.Header.Height + 1 = +c.Header.Height*. We define the predicate **signs(b,c)** to hold +iff *c.Header.LastCommit* is in *PossibleCommit(b)*. +[[TMBC-SOUND-DISTR-POSS-COMMIT.1]](TMBC-SOUND-DISTR-POSS-COMMIT-link). + +> The above encodes sequential verification, that is, intuitively, +> b.Header.NextValidators = c.Header.Validators and 2/3 of +> these Validators signed c? + +#### **[TMBC-FUNC-SUPPORT.1]** + +Let *b* and *c* be two light blocks. We define the predicate +**supports(b,c,t)** to hold iff + +- *t - trustingPeriod < b.Header.Time < t* +- the voting power in *b.NextValidators* of nodes in *c.Commit* + is more than 1/3 of *TotalVotingPower(b.Header.NextValidators)* + +> That is, if the [Tendermint failure model](TMBC-FM-2THIRDS-link) +> holds, then *c* has been signed by at least one correct full node, cf. +> [[TMBC-VAL-CONTAINS-CORR.1]](TMBC-VAL-CONTAINS-CORR-link). +> The following formalizes that *b* was properly generated by +> Tendermint; *b* can be traced back to genesis + +#### **[TMBC-SEQ-ROOTED.1]** + +Let *b* be a light block. +We define *sequ-rooted(b)* iff for all *i*, *1 <= i < h = b.Header.Height*, +there exist light blocks *a(i)* s.t. + +- *a(1) = Genesis* and +- *a(h) = b* and +- *signs( a(i) , a(i+1) )*. 
+ +> The following formalizes that *c* is trusted based on *b* in +> skipping verification. Observe that we do not require here (yet) +> that *b* was properly generated. + +#### **[TMBC-SKIP-TRACE.1]** + +Let *b* and *c* be light blocks. We define *skip-trace(b,c,t)* if at +time t there exists an *h* and a sequence *a(1)*, ... *a(h)* s.t. + +- *a(1) = b* and +- *a(h) = c* and +- *supports( a(i), a(i+1), t)*, for all i, *1 <= i < h*. + +We call such a sequence *a(1)*, ... *a(h)* a **verification trace**. + +> The following formalizes that two light blocks of the same height +> should agree on the content of the header. Observe that *b* and *c* +> may disagree on the Commit. This is a special case if the canonical +> commit has not been decided on, that is, if b.Header.Height is the +> maximum height of all blocks decided upon by Tendermint at this +> moment. + +#### **[TMBC-SIGN-SKIP-MATCH.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time, we define +*sign-skip-match(a,b,c,t) = true* iff the following implication +evaluates to true: + +- *sequ-rooted(a)* and +- *b.Header.Height = c.Header.Height* and +- *skip-trace(a,b,t)* +- *skip-trace(a,c,t)* + +implies *b.Header = c.Header*. + +> Observe that *sign-skip-match* is defined via an implication. If it +> evaluates to false this means that the left-hand-side of the +> implication evaluates to true, and the right-hand-side evaluates to +> false. In particular, there are two **different** headers *b* and +> *c* that both can be verified from a common block *a* from the +> chain. Thus, the following describes an attack. + +#### **[TMBC-ATTACK.1]** + +If there exists three light blocks a, b, and c, with +*sign-skip-match(a,b,c,t) = false* then we have an *attack*. We say +we have **an attack at height** *b.Header.Height* and write +*attack(a,b,c,t)*. + +> The lightblock *a* need not be unique, that is, there may be +> several blocks that satisfy the above requirement for the same +> blocks *b* and *c*. 
+ +[[TMBC-ATTACK.1]](#TMBC-ATTACK1) is a formalization of the violation +of the agreement property based on the result of consensus, that is, +the generated blocks. + +**Remark.** +Violation of agreement is only possible if more than 1/3 of the validators (or +next validators) of some previous block deviated from the protocol. The +upcoming "accountability" specification will describe how to compute +a set of at least 1/3 faulty nodes from two conflicting blocks. [] + +There are different ways to characterize forks +and attack scenarios. This specification uses the "node-based +characterization of attacks" which focuses on what kinds of nodes are +affected (light nodes vs. full nodes). For future reference and +discussion we also provide a +"block-based characterization of attacks" below. + +### Node-based characterization of attacks + +#### **[TMBC-MC-FORK.1]** + +We say there is a (main chain) fork at time *t* if + +- there are two correct full nodes *i* and *j* and +- *i* is different from *j* and +- *i* has decided on *b* and +- *j* has decided on *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +#### **[TMBC-LC-ATTACK.1]** + +We say there is a light client attack at time *t*, if + +- there is **no** (main chain) fork [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1), and +- there exist nodes that have computed light blocks *b* and *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +We say the attack is at height *a.Header.Height*. + +> In this specification we consider detection of light client +> attacks. Intuitively, the case we consider is that +> light block *b* is the one from the +> blockchain, and some attacker has computed *c* and tries to wrongly +> convince +> the light client that *c* is the block from the chain. 
+ +#### **[TMBC-LC-ATTACK-EVIDENCE.1]** + +We consider the following case of a light client attack +[[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1): + +- *attack(a,b,c,t)* +- there is a peer p1 that has a sequence *chain* of blocks from *a* to *b* +- *skip-trace(a,c,t)*: by [[TMBC-SKIP-TRACE.1]](#TMBC-SKIP-TRACE1) there is a + verification trace *v* of the form *a = v(1)*, ... *v(h) = c* + +Evidence for p1 (that proves an attack) consists for index i +of v(i) and v(i+1) such that + +- E1(i). v(i) is equal to the block of *chain* at height v(i).Height, and +- E2(i). v(i+1) that is different from the block of *chain* at + height v(i+1).height + +> Observe p1 can +> +> - check that v(i+1) differs from its block at that height, and +> - verify v(i+1) in one step from v(i) as v is a verification trace. + +**Proposition.** In the case of attack, evidence exists. +*Proof.* First observe that + +- (A). (NOT E2(i)) implies E1(i+1) + +Now by contradiction assume there is no evidence. Thus + +- for all i, we have NOT E1(i) or NOT E2(i) +- for i = 1 we have E1(1) and thus NOT E2(1) + thus by induction on i, by (A) we have for all i that **E1(i)** +- from attack we have E2(h-1), and as there is no evidence for + i = h - 1 we get **NOT E1(h-1)**. Contradiction. +QED. + +#### **[TMBC-LC-EVIDENCE-DATA.1]** + +To prove the attack to p1, because of Point E1, it is sufficient to +submit + +- v(i).Height (rather than v(i)). +- v(i+1) + +This information is *evidence for height v(i).Height*. + +### Block-based characterization of attacks + +In this section we provide a different characterization of attacks. It +is not defined on the nodes that are affected but purely on the +content of the blocks. In that sense these definitions are less +operational. + +> They might be relevant for a closer analysis of fork scenarios on the +> chain, which is out of the scope of this specification. 
+ +#### **[TMBC-SIGN-UNIQUE.1]** + +Let *b* and *c* be light blocks, we define the predicate +*sign-unique(b,c)* to evaluate to true iff the following implication +evaluates to true: + +- *b.Header.Height = c.Header.Height* and +- *sequ-rooted(b)* and +- *sequ-rooted(c)* + +implies *b = c*. + +#### **[TMBC-BLOCKS-MCFORK.1]** + +If there exists two light blocks b and c, with *sign-unique(b,c) = +false* then we have a *fork*. + +> The difference of the above definition to +> [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1) is subtle. The latter requires a +> full node being affected by a bad block while +> [[TMBC-BLOCKS-MCFORK.1]](#TMBC-BLOCKS-MCFORK1) just requires that a +> bad block exists, possibly in memory of an attacker. +> The following captures a light client fork. There is no fork up to +> the height of block b. However, c is of that height, is different, +> and passes skipping verification. It is a stricter property than +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1), as +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) requires that no correct full +> node is affected. + +#### **[TMBC-BLOCKS-LCFORK.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time. We define +*light-client-fork(a,b,c,t)* iff + +- *sign-skip-match(a,b,c,t) = false* and +- *sequ-rooted(b)* and +- *b* is "unique", that is, for all *d*, *sequ-rooted(d)* and + *d.Header.Height = b.Header.Height* implies *d = b* + +> Finally, let us also define bogus blocks that have no support. +> Observe that bogus is even defined if there is a fork. +> Also, for the definition it would be sufficient to restrict *a* to +> *a.height < b.height* (which is implied by the definitions which +> unfold until *supports()*). + +#### **[TMBC-BOGUS.1]** + +Let *b* be a light block and *t* a time. 
We define *bogus(b,t)* iff + +- *sequ-rooted(b) = false* and +- for all *a*, *sequ-rooted(a)* implies *skip-trace(a,b,t) = false* + +### Informal Problem statement + +There is no sequential specification: the detector only makes sense +in a distributed system where some nodes misbehave. + +We work under the assumption that full nodes and validators are +responsible for detecting attacks on the main chain, and the evidence +reactor takes care of broadcasting evidence to communicate +misbehaving nodes via ABCI to the application, and halt the chain in +case of a fork. The point of this specification is to shield light +clients against attacks that cannot be detected by full nodes, and +are fully addressed at light clients (and consequently IBC relayers, +which use the light client protocols to observe the state of a +blockchain). In order to provide full nodes the incentive to follow +the protocols when communicating with the light client, this +specification also considers the generation of evidence that will +also be processed by the Tendermint blockchain. + +#### **[LCD-IP-MODEL.1]** + +The detector is designed under the assumption that + +- [[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) may be violated +- there is no fork on the main chain. + +> As a result some faulty full nodes may launch an attack on a light +> client. + +The following requirements are operational in that they describe how +things should be done, rather than what should be done. However, they +do not constitute temporal logic verification conditions. For those, +see [LCD-DIST-*] below. 
+ +The detector is called in the [supervisor](supervisor) as follows + +```go +Evidences := AttackDetector(root_of_trust, verifiedLS);` +``` + +where + +- `root-of-trust` is a light block that is trusted (that is, +except upon initialization, the primary and the secondaries +agreed on in the past), and +- `verifiedLS` is a lightstore that contains a verification trace that + starts from a lightblock that can be verified with the + `root-of-trust` in one step and ends with a lightblock of the height + requested by the user +- `Evidences` is a list of evidences for misbehavior + +#### **[LCD-IP-STATEMENT.1]** + +Whenever AttackDetector is called, the detector should for each +secondary try to replay the verification trace `verifiedLS` with the +secondary + +- in case replaying leads to detection of a light client attack + (one of the lightblocks differ from the one in verifiedLS with + the same height), we should return evidence +- if the secondary cannot provide a verification trace, we have no + proof for an attack. Block *b* may be bogus. In this case the + secondary is faulty and it should be replaced. + +## Assumptions/Incentives/Environment + +It is not in the interest of faulty full nodes to talk to the +detector as long as the detector is connected to at least one +correct full node. This would only increase the likelihood of +misbehavior being detected. Also we cannot punish them easily +(cheaply). The absence of a response need not be the fault of the full +node. + +Correct full nodes have the incentive to respond, because the +detector may help them to understand whether their header is a good +one. We can thus base liveness arguments of the detector on +the assumptions that correct full nodes reliably talk to the +detector. + +### Assumptions + +#### **[LCD-A-CorrFull.1]** + +At all times there is at least one correct full +node among the primary and the secondaries. + +> For this version of the detection we take this assumption. 
It +> allows us to establish the invariant that the lightblock +> `root-of-trust` is always the one from the blockchain, and we can +> use it as starting point for the evidence computation. Moreover, it +> allows us to establish the invariant at the supervisor that any +> lightblock in the (top-level) lightstore is from the blockchain. +> In the future we might design a lightclient based on the assumption +> that at least in regular intervals the lightclient is connected to a +> correct full node. This will require the detector to reconsider +> `root-of-trust`, and remove lightblocks from the top-level +> lightstore. + +#### **[LCD-A-RelComm.1]** + +Communication between the detector and a correct full node is +reliable and bounded in time. Reliable communication means that +messages are not lost, not duplicated, and eventually delivered. There +is a (known) end-to-end delay *Delta*, such that if a message is sent +at time *t* then it is received and processed by time *t + Delta*. +This implies that we need a timeout of at least *2 Delta* for remote +procedure calls to ensure that the response of a correct peer arrives +before the timeout expires. + +## Definitions + +### Evidence + +Following the definition of +[[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1), by evidence +we refer to a variable of the following type + +#### **[LC-DATA-EVIDENCE.1]** + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 +} +``` + +As the above data is computed for a specific peer, the following +data structure wraps the evidence and adds the peerID. 
+ +#### **[LC-DATA-EVIDENCE-INT.1]** + +```go +type InternalEvidence struct { + Evidence LightClientAttackEvidence + Peer PeerID +} +``` + +#### **[LC-SUMBIT-EVIDENCE.1]** + +```go +func submitEvidence(Evidences []InternalEvidence) +``` + +- Expected postcondition + - for each `ev` in `Evidences`: submit `ev.Evidence` to `ev.Peer` + +--- + +### LightStore + +Lightblocks and LightStores are defined in the verification +specification [LCV-DATA-LIGHTBLOCK.1] and [LCV-DATA-LIGHTSTORE.1]. See +the [verification specification][verification] for details. + +## (Distributed) Problem statement + +> As the attack detector is there to reduce the impact of faulty +> nodes, and faulty nodes imply that there is a distributed system, +> there is no sequential specification to which this distributed +> problem statement may refer to. + +The detector gets as input a trusted lightblock called *root* and an +auxiliary lightstore called *primary_trace* with lightblocks that have +been verified before, and that were provided by the primary. + +#### **[LCD-DIST-INV-ATTACK.1]** + +If the detector returns evidence for height *h* +[[TMBC-LC-EVIDENCE-DATA.1]](#TMBC-LC-EVIDENCE-DATA1), then there is an +attack at height *h*. [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) + +#### **[LCD-DIST-INV-STORE.1]** + +If the detector does not return evidence, then *primary_trace* +contains only blocks from the blockchain. + +#### **[LCD-DIST-LIVE.1]** + +The detector eventually terminates. + +#### **[LCD-DIST-TERM-NORMAL.1]** + +If + +- the *primary_trace* contains only blocks from the blockchain, and +- there is no attack, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector does not return evidence. 
+ +#### **[LCD-DIST-TERM-ATTACK.1]** + +If + +- there is an attack, and +- a secondary reports a block that conflicts + with one of the blocks in *primary_trace*, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector returns evidence. + +> Observe that above we require that "a secondary reports a block that +> conflicts". If there is an attack, but no secondary tries to launch +> it against the detector (or the message from the secondary is lost +> by the network), then there is nothing to detect for us. + +#### **[LCD-DIST-SAFE-SECONDARY.1]** + +No correct secondary is ever replaced. + +#### **[LCD-DIST-SAFE-BOGUS.1]** + +If + +- a secondary reports a bogus lightblock, +- the age of *root* is always less than the trusting period, + +then the secondary is replaced before the detector terminates. + +> The above property is quite operational ("reports"), but it captures +> quite closely the requirement. As the +> detector only makes sense in a distributed setting, and does +> not have a sequential specification, less "pure" +> specification are acceptable. + +# Protocol + +## Functions and Data defined in other Specifications + +### From the supervisor + +```go +Replace_Secondary(addr Address, root-of-trust LightBlock) +``` + +### From the verifier + +```go +func VerifyToTarget(primary PeerID, root LightBlock, + targetHeight Height) (LightStore, Result) +``` + +> Note: the above differs from the current version in the second +> parameter. verification will be revised. + +Observe that `VerifyToTarget` does communication with the secondaries +via the function [FetchLightBlock](fetch). + +### Shared data of the light client + +- a pool of full nodes *FullNodes* that have not been contacted before +- peer set called *Secondaries* +- primary + +> Note that the lightStore is not needed to be shared. 
+ +## Outline + +The problem laid out is solved by calling the function `AttackDetector` +with a lightstore that contains a light block that has just been +verified by the verifier. + +Then `AttackDetector` downloads headers from the secondaries. In case +a conflicting header is downloaded from a secondary, +`CreateEvidenceForPeer` is called, which computes evidence in the case that +indeed an attack is confirmed. It could be that the secondary reports +a bogus block, which means that there need not be an attack, and the +secondary is replaced. + +## Details of the functions + +#### **[LCD-FUNC-DETECTOR.1]:** + +```go +func AttackDetector(root LightBlock, primary_trace []LightBlock) + ([]InternalEvidence) { + + Evidences := new []InternalEvidence; + + for each secondary in Secondaries { + // we replay the primary trace with the secondary, in + // order to generate evidence that we can submit to the + // secondary. We return the evidence + the trace the + // secondary told us that spans the evidence at its local store + + EvidenceForSecondary, newroot, secondary_trace, result := + CreateEvidenceForPeer(secondary, + root, + primary_trace); + if result == FaultyPeer { + Replace_Secondary(root); + } + else if result == FoundEvidence { + // the conflict is not bogus + Evidences.Add(EvidenceForSecondary); + // we replay the secondary trace with the primary, ... + EvidenceForPrimary, _, result := + CreateEvidenceForPeer(primary, + newroot, + secondary_trace); + if result == FoundEvidence { + Evidences.Add(EvidenceForPrimary); + } + // At this point we do not care about the other error + // codes. We already have generated evidence for an + // attack and need to stop the lightclient. It does not + // help to call replace_primary. 
Also we will use the + // same primary to check with other secondaries in + // later iterations of the loop + } + // In the case where the secondary reports NoEvidence + // we do nothing + } + return Evidences; +} +``` + +- Expected precondition + - root and primary trace are a verification trace +- Expected postcondition + - solves the problem statement (if attack found, then evidence is reported) +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]](LCV-INV-TP1-link) + - `ErrorNoPeers`: if no peers are left to replace secondaries, and + no evidence was found before that happened + +--- + +```go +func CreateEvidenceForPeer(peer PeerID, root LightBlock, trace LightStore) + (Evidence, LightBlock, LightStore, result) { + + common := root; + + for i in 1 .. len(trace) { + auxLS, result := VerifyToTarget(peer, common, trace[i].Header.Height) + + if result != ResultSuccess { + // something went wrong; peer did not provide a verifyable block + return (nil, nil, nil, FaultyPeer) + } + else { + if auxLS.LatestVerified().Header != trace[i].Header { + // the header reported by the peer differs from the + // reference header in trace but both could be + // verified from common in one step. 
+
+                // we can create evidence for submission to the secondary
+                ev := new InternalEvidence;
+                ev.Evidence.ConflictingBlock := trace[i];
+                ev.Evidence.CommonHeight := common.Height;
+                ev.Peer := peer
+                return (ev, common, auxLS, FoundEvidence)
+            }
+            else {
+                // the peer agrees with the trace, we move common forward
+                // we could delete auxLS as it will be overwritten in
+                // the next iteration
+                common := trace[i]
+            }
+        }
+    }
+    return (nil, nil, nil, NoEvidence)
+}
+```
+
+- Expected precondition
+    - root and trace are a verification trace
+- Expected postcondition
+    - finds evidence where trace and peer diverge
+- Error condition
+    - `ErrorTrustExpired`: fails if root expires (outside trusting
+      period) [[LCV-INV-TP.1]](LCV-INV-TP1-link)
+    - If `VerifyToTarget` returns error but root is not expired then return
+      `FaultyPeer`
+
+---
+
+## Correctness arguments
+
+#### Argument for [[LCD-DIST-INV-ATTACK.1]](#LCD-DIST-INV-ATTACK1)
+
+Under the assumption that root and trace are a verification trace,
+when in `CreateEvidenceForPeer` the detector creates
+evidence, then the lightclient has seen two different headers (one via
+`trace` and one via `VerifyToTarget`) for the same height that can both
+be verified in one step.
+
+#### Argument for [[LCD-DIST-INV-STORE.1]](#LCD-DIST-INV-STORE1)
+
+We assume that there is at least one correct peer, and there is no
+fork. As a result the correct peer has the correct sequence of
+blocks. Since the primary_trace is checked block-by-block also against
+each secondary, and at no point evidence was generated that means at
+no point there were conflicting blocks.
+
+#### Argument for [[LCD-DIST-LIVE.1]](#LCD-DIST-LIVE1)
+
+At the latest when [[LCV-INV-TP.1]](LCV-INV-TP1-link) is violated,
+`AttackDetector` terminates.
+
+#### Argument for [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1)
+
+As there are finitely many peers, eventually the main loop
+terminates. As there is no attack no evidence can be generated.
+ +#### Argument for [[LCD-DIST-TERM-ATTACK.1]](#LCD-DIST-TERM-ATTACK1) + +Argument similar to [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +#### Argument for [[LCD-DIST-SAFE-SECONDARY.1]](#LCD-DIST-SAFE-SECONDARY1) + +Secondaries are only replaced if they time-out or if they report bogus +blocks. The former is ruled out by the timing assumption, the latter +by correct peers only reporting blocks from the chain. + +#### Argument for [[LCD-DIST-SAFE-BOGUS.1]](#LCD-DIST-SAFE-BOGUS1) + +Once a bogus block is recognized as such the secondary is removed. + +# References + +> links to other specifications/ADRs this document refers to + +[[verification]] The specification of the light client verification. + +[[supervisor]] The specification of the light client supervisor. + +[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md + +[supervisor]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor.md + +[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md + +[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-fm-2thirds1 + +[TMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-sound-distr-poss-commit1 + +[LCV-SEQ-SAFE-link]:https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-seq-safe1 + +[TMBC-VAL-CONTAINS-CORR-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-val-contains-corr1 + +[fetch]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-func-fetch1 + +[LCV-INV-TP1-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-inv-tp1 
diff --git a/spec/light-client/detection/detection_003_reviewed.md b/spec/light-client/detection/detection_003_reviewed.md new file mode 100644 index 0000000000..13f6e9716a --- /dev/null +++ b/spec/light-client/detection/detection_003_reviewed.md @@ -0,0 +1,839 @@ +# Light Client Attack Detector + +In this specification, we strengthen the light client to be resistant +against so-called light client attacks. In a light client attack, all +the correct Tendermint full nodes agree on the sequence of generated +blocks (no fork), but a set of faulty full nodes attack a light client +by generating (signing) a block that deviates from the block of the +same height on the blockchain. In order to do so, some of these faulty +full nodes must have been validators before and violate the assumption +of more than two thirds of "correct voting power" +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], as otherwise, if +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] would hold, +[verification][verification] would satisfy +[[LCV-SEQ-SAFE.1]][LCV-SEQ-SAFE-link]. + +An attack detector (or detector for short) is a mechanism that is used +by the light client [supervisor][supervisor] after +[verification][verification] of a new light block +with the primary, to cross-check the newly learned light block with +other peers (secondaries). It expects as input a light block with some +height *root* (that serves as a root of trust), and a verification +trace (a sequence of lightblocks) that the primary provided. + +In case the detector observes a light client attack, it computes +evidence data that can be used by Tendermint full nodes to isolate a +set of faulty full nodes that are still within the unbonding period +(more than 1/3 of the voting power of the validator set at some block +of the chain), and report them via ABCI (application/blockchain +interface) +to the application of a +Tendermint blockchain in order to punish faulty nodes. 
+ +## Context of this document + +The light client [verification][verification] specification is +designed for the Tendermint failure model (1/3 assumption) +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link]. It is safe under this +assumption, and live if it can reliably (that is, no message loss, no +duplication, and eventually delivered) and timely communicate with a +correct full node. If [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] +assumption is violated, the light client can be fooled to trust a +light block that was not generated by Tendermint consensus. + +This specification, the attack detector, is a "second line of +defense", in case the 1/3 assumption is violated. Its goal is to +detect a light client attack (conflicting light blocks) and collect +evidence. However, it is impractical to probe all full nodes. At this +time we consider a simple scheme of maintaining an address book of +known full nodes from which a small subset (e.g., 4) are chosen +initially to communicate with. More involved book keeping with +probabilistic guarantees can be considered at later stages of the +project. + +The light client maintains a simple address book containing addresses +of full nodes that it can pick as primary and secondaries. To obtain +a new light block, the light client first does +[verification][verification] with the primary, and then cross-checks +the light block (and the trace of light blocks that led to it) with +the secondaries using this specification. + +# Outline + +- [Part I](#part-i---Tendermint-Consensus-and-Light-Client-Attacks): + Formal definitions of lightclient attacks, based on basic + properties of Tendermint consensus. + - [Node-based characterization of + attacks](#Node-based-characterization-of-attacks). The + definition of attacks used in the problem statement of + this specification. + + - [Block-based characterization of attacks](#Block-based-characterization-of-attacks). Alternative definitions + provided for future reference. 
+ +- [Part II](#part-ii---problem-statement): Problem statement of + lightclient attack detection + + - [Informal Problem Statement](#informal-problem-statement) + - [Assumptions](#Assumptions) + - [Definitions](#definitions) + - [Distributed Problem statement](#Distributed-Problem-statement) + +- [Part III](#part-iii---protocol): The protocol + + - [Functions and Data defined in other Specifications](#Functions-and-Data-defined-in-other-Specifications) + - [Outline of Solution](#Outline-of-solution) + - [Details of the functions](#Details-of-the-functions) + - [Correctness arguments](#Correctness-arguments) + +# Part I - Tendermint Consensus and Light Client Attacks + +In this section we will give some mathematical definitions of what we +mean by light client attacks (that are considered in this +specification) and how they differ from main-chain forks. To this end, +we start by defining some properties of the sequence of blocks that is +decided upon by Tendermint consensus in normal operation (if the +Tendermint failure model holds +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link]), +and then define different +deviations that correspond to attack scenarios. We consider the notion +of [light blocks][LCV-LB-link] and [headers][LVC-HD-link]. + +#### **[TMBC-GENESIS.1]** + +Let *Genesis* be the agreed-upon initial block (file). + +#### **[TMBC-FUNC-SIGN.1]** + +Let *b* and *c* be two light blocks with *b.Header.Height + 1 = +c.Header.Height*. We define the predicate **signs(b,c)** to hold +iff *c.Header.LastCommit* is in *PossibleCommit(b)*. +[[TMBC-SOUND-DISTR-POSS-COMMIT.1]][TMBC-SOUND-DISTR-POSS-COMMIT-link]. + +> The above encodes sequential verification, that is, intuitively, +> b.Header.NextValidators = c.Header.Validators and 2/3 of +> these Validators signed c. + +#### **[TMBC-FUNC-SUPPORT.1]** + +Let *b* and *c* be two light blocks. 
We define the predicate +**supports(b,c,t)** to hold iff + +- *t - trustingPeriod < b.Header.Time < t* +- the voting power in *b.NextValidators* of nodes in *c.Commit* + is more than 1/3 of *TotalVotingPower(b.Header.NextValidators)* + +> That is, if the [Tendermint failure model][TMBC-FM-2THIRDS-link] +> holds, then *c* has been signed by at least one correct full node, cf. +> [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link]. +> The following formalizes that *b* was properly generated by +> Tendermint; *b* can be traced back to genesis. + +#### **[TMBC-SEQ-ROOTED.1]** + +Let *b* be a light block. +We define *sequ-rooted(b)* iff for all *i*, *1 <= i < h = b.Header.Height*, +there exist light blocks *a(i)* s.t. + +- *a(1) = Genesis* and +- *a(h) = b* and +- *signs( a(i) , a(i+1) )*. + +> The following formalizes that *c* is trusted based on *b* in +> skipping verification. Observe that we do not require here (yet) +> that *b* was properly generated. + +#### **[TMBC-SKIP-TRACE.1]** + +Let *b* and *c* be light blocks. We define *skip-trace(b,c,t)* if at +time t there exists an integer *h* and a sequence *a(1)*, ... *a(h)* s.t. + +- *a(1) = b* and +- *a(h) = c* and +- *supports( a(i), a(i+1), t)*, for all i, *1 <= i < h*. + +We call such a sequence *a(1)*, ... *a(h)* a **verification trace**. + +> The following formalizes that two light blocks of the same height +> should agree on the content of the header. Observe that *b* and *c* +> may disagree on the Commit. This is a special case if the canonical +> commit has not been decided on yet, that is, if b.Header.Height is the +> maximum height of all blocks decided upon by Tendermint at this +> moment. 
+ +#### **[TMBC-SIGN-SKIP-MATCH.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time, we define +*sign-skip-match(a,b,c,t) = true* iff the following implication +evaluates to true: + +- *sequ-rooted(a)* and +- *b.Header.Height = c.Header.Height* and +- *skip-trace(a,b,t)* +- *skip-trace(a,c,t)* + +implies *b.Header = c.Header*. + +> Observe that *sign-skip-match* is defined via an implication. If it +> evaluates to false this means that the left-hand-side of the +> implication evaluates to true, and the right-hand-side evaluates to +> false. In particular, there are two **different** headers *b* and +> *c* that both can be verified from a common block *a* from the +> chain. Thus, the following describes an attack. + +#### **[TMBC-ATTACK.1]** + +If there exists three light blocks a, b, and c, with +*sign-skip-match(a,b,c,t) = false* then we have an *attack*. We say +we have **an attack at height** *b.Header.Height* and write +*attack(a,b,c,t)*. + +> The lightblock *a* need not be unique, that is, there may be +> several blocks that satisfy the above requirement for the same +> blocks *b* and *c*. + +[[TMBC-ATTACK.1]](#TMBC-ATTACK1) is a formalization of the violation +of the agreement property based on the result of consensus, that is, +the generated blocks. + +**Remark.** +Violation of agreement is only possible if more than 1/3 of the validators (or +next validators) of some previous block deviated from the protocol. The +upcoming "accountability" specification will describe how to compute +a set of at least 1/3 faulty nodes from two conflicting blocks. [] + +There are different ways to characterize forks +and attack scenarios. This specification uses the "node-based +characterization of attacks" which focuses on what kinds of nodes are +affected (light nodes vs. full nodes). For future reference and +discussion we also provide a +"block-based characterization of attacks" below. 
+ +## Node-based characterization of attacks + +#### **[TMBC-MC-FORK.1]** + +We say there is a (main chain) fork at time *t* if + +- there are two correct full nodes *i* and *j* and +- *i* is different from *j* and +- *i* has decided on *b* and +- *j* has decided on *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +#### **[TMBC-LC-ATTACK.1]** + +We say there is a light client attack at time *t*, if + +- there is **no** (main chain) fork [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1), and +- there exist nodes that have computed light blocks *b* and *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +We say the attack is at height *a.Header.Height*. + +> In this specification we consider detection of light client +> attacks. Intuitively, the case we consider is that +> light block *b* is the one from the +> blockchain, and some attacker has computed *c* and tries to wrongly +> convince +> the light client that *c* is the block from the chain. + +#### **[TMBC-LC-ATTACK-EVIDENCE.1]** + +We consider the following case of a light client attack +[[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1): + +- *attack(a,b,c,t)* +- there is a peer p1 that has a sequence *chain* of blocks from *a* to *b* +- *skip-trace(a,c,t)*: by [[TMBC-SKIP-TRACE.1]](#TMBC-SKIP-TRACE1) there is a + verification trace *v* of the form *a = v(1)*, ... *v(h) = c* + +Evidence for p1 (that proves an attack to p1) consists for index i +of v(i) and v(i+1) such that + +- E1(i). v(i) is equal to the block of *chain* at height v(i).Height, and +- E2(i). v(i+1) that is different from the block of *chain* at + height v(i+1).height + +> Observe p1 can +> +> - check that v(i+1) differs from its block at that height, and +> - verify v(i+1) in one step from v(i) as v is a verification trace. + +#### **[TMBC-LC-EVIDENCE-DATA.1]** + +To prove the attack to p1, because of Point E1, it is sufficient to +submit + +- v(i).Height (rather than v(i)). +- v(i+1) + +This information is *evidence for height v(i).Height*. 
+ +## Block-based characterization of attacks + +In this section we provide a different characterization of attacks. It +is not defined on the nodes that are affected but purely on the +content of the blocks. In that sense these definitions are less +operational. + +> They might be relevant for a closer analysis of fork scenarios on the +> chain, which is out of the scope of this specification. + +#### **[TMBC-SIGN-UNIQUE.1]** + +Let *b* and *c* be light blocks, we define the predicate +*sign-unique(b,c)* to evaluate to true iff the following implication +evaluates to true: + +- *b.Header.Height = c.Header.Height* and +- *sequ-rooted(b)* and +- *sequ-rooted(c)* + +implies *b = c*. + +#### **[TMBC-BLOCKS-MCFORK.1]** + +If there exists two light blocks b and c, with *sign-unique(b,c) = +false* then we have a *fork*. + +> The difference of the above definition to +> [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1) is subtle. The latter requires a +> full node being affected by a bad block while +> [[TMBC-BLOCKS-MCFORK.1]](#TMBC-BLOCKS-MCFORK1) just requires that a +> bad block exists, possibly in memory of an attacker. +> The following captures a light client fork. There is no fork up to +> the height of block b. However, c is of that height, is different, +> and passes skipping verification. It is a stricter property than +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1), as +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) requires that no correct full +> node is affected. + +#### **[TMBC-BLOCKS-LCFORK.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time. We define +*light-client-fork(a,b,c,t)* iff + +- *sign-skip-match(a,b,c,t) = false* and +- *sequ-rooted(b)* and +- *b* is "unique", that is, for all *d*, *sequ-rooted(d)* and + *d.Header.Height = b.Header.Height* implies *d = b* + +> Finally, let us also define bogus blocks that have no support. +> Observe that bogus is even defined if there is a fork. 
+
+> Also, for the definition it would be sufficient to restrict *a* to
+> *a.height < b.height* (which is implied by the definitions which
+> unfold until *supports()*).
+
+#### **[TMBC-BOGUS.1]**
+
+Let *b* be a light block and *t* a time. We define *bogus(b,t)* iff
+
+- *sequ-rooted(b) = false* and
+- for all *a*, *sequ-rooted(a)* implies *skip-trace(a,b,t) = false*
+
+# Part II - Problem Statement
+
+## Informal Problem statement
+
+There is no sequential specification: the detector only makes sense
+in a distributed system where some nodes misbehave.
+
+We work under the assumption that full nodes and validators are
+responsible for detecting attacks on the main chain, and the evidence
+reactor takes care of broadcasting evidence to communicate
+misbehaving nodes via ABCI to the application, and halt the chain in
+case of a fork. The point of this specification is to shield light
+clients against attacks that cannot be detected by full nodes, and
+are fully addressed at light clients (and consequently IBC relayers,
+which use the light client protocols to observe the state of a
+blockchain). In order to provide full nodes the incentive to follow
+the protocols when communicating with the light client, this
+specification also considers the generation of evidence that will
+also be processed by the Tendermint blockchain.
+
+#### **[LCD-IP-MODEL.1]**
+
+The detector is designed under the assumption that
+
+- [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] may be violated
+- there is no fork on the main chain.
+
+> As a result some faulty full nodes may launch an attack on a light
+> client.
+
+The following requirements are operational in that they describe how
+things should be done, rather than what should be done. However, they
+do not constitute temporal logic verification conditions. For those,
+see [LCD-DIST-*] below.
+
+The detector is called in the [supervisor][supervisor] as follows
+
+```go
+Evidences := AttackDetector(root_of_trust, verifiedLS);
+```
+
+where
+
+- `root-of-trust` is a light block that is trusted (that is,
+except upon initialization, the primary and the secondaries
+agreed on in the past), and
+- `verifiedLS` is a lightstore that contains a verification trace that
+  starts from a lightblock that can be verified with the
+  `root-of-trust` in one step and ends with a lightblock of the height
+  requested by the user
+- `Evidences` is a list of evidences for misbehavior
+
+#### **[LCD-IP-STATEMENT.1]**
+
+Whenever AttackDetector is called, the detector should for each
+secondary cross check the largest header in verifiedLS with the
+corresponding header of the same height provided by the secondary. If
+there is a deviation, the detector should
+try to replay the verification trace `verifiedLS` with the
+secondary
+
+- in case replaying leads to detection of a light client attack
+  (one of the lightblocks differs from the one in verifiedLS with
+  the same height), we should return evidence
+- if the secondary cannot provide a verification trace, we have no
+  proof for an attack. Block *b* may be bogus. In this case the
+  secondary is faulty and it should be replaced.
+
+## Assumptions
+
+It is not in the interest of faulty full nodes to talk to the
+detector as long as the detector is connected to at least one
+correct full node. This would only increase the likelihood of
+misbehavior being detected. Also we cannot punish them easily
+(cheaply). The absence of a response need not be the fault of the full
+node.
+
+Correct full nodes have the incentive to respond, because the
+detector may help them to understand whether their header is a good
+one. We can thus base liveness arguments of the detector on
+the assumption that correct full nodes reliably talk to the
+detector.
+ +#### **[LCD-A-CorrFull.1]** + +At all times there is at least one correct full +node among the primary and the secondaries. + +> For this version of the detection we take this assumption. It +> allows us to establish the invariant that the lightblock +> `root-of-trust` is always the one from the blockchain, and we can +> use it as starting point for the evidence computation. Moreover, it +> allows us to establish the invariant at the supervisor that any +> lightblock in the (top-level) lightstore is from the blockchain. +> In the future we might design a lightclient based on the assumption +> that at least in regular intervals the lightclient is connected to a +> correct full node. This will require the detector to reconsider +> `root-of-trust`, and remove lightblocks from the top-level +> lightstore. + +#### **[LCD-A-RelComm.1]** + +Communication between the detector and a correct full node is +reliable and bounded in time. Reliable communication means that +messages are not lost, not duplicated, and eventually delivered. There +is a (known) end-to-end delay *Delta*, such that if a message is sent +at time *t* then it is received and processed by time *t + Delta*. +This implies that we need a timeout of at least *2 Delta* for remote +procedure calls to ensure that the response of a correct peer arrives +before the timeout expires. + +## Definitions + +### Evidence + +Following the definition of +[[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1), by evidence +we refer to a variable of the following type + +#### **[LC-DATA-EVIDENCE.1]** + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 + + // Evidence also includes application specific data which is not + // part of verification but is sent to the application once the + // evidence gets committed on chain. +} +``` + +As the above data is computed for a specific peer, the following +data structure wraps the evidence and adds the peerID. 
+ +#### **[LC-DATA-EVIDENCE-INT.1]** + +```go +type InternalEvidence struct { + Evidence LightClientAttackEvidence + Peer PeerID +} +``` + +#### **[LC-SUMBIT-EVIDENCE.1]** + +```go +func submitEvidence(Evidences []InternalEvidence) +``` + +- Expected postcondition + - for each `ev` in `Evidences`: submit `ev.Evidence` to `ev.Peer` + +--- + +### LightStore + +Lightblocks and LightStores are defined in the verification +specification [[LCV-DATA-LIGHTBLOCK.1]][LCV-LB-link] +and [[LCV-DATA-LIGHTSTORE.2]][LCV-LS-link]. See +the [verification specification][verification] for details. + +## Distributed Problem statement + +> As the attack detector is there to reduce the impact of faulty +> nodes, and faulty nodes imply that there is a distributed system, +> there is no sequential specification to which this distributed +> problem statement may refer to. + +The detector gets as input a trusted lightblock called *root* and an +auxiliary lightstore called *primary_trace* with lightblocks that have +been verified before, and that were provided by the primary. + +#### **[LCD-DIST-INV-ATTACK.1]** + +If the detector returns evidence for height *h* +[[TMBC-LC-EVIDENCE-DATA.1]](#TMBC-LC-EVIDENCE-DATA1), then there is an +attack at height *h*. [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) + +#### **[LCD-DIST-INV-STORE.1]** + +If the detector does not return evidence, then *primary_trace* +contains only blocks from the blockchain. + +#### **[LCD-DIST-LIVE.1]** + +The detector eventually terminates. + +#### **[LCD-DIST-TERM-NORMAL.1]** + +If + +- the *primary_trace* contains only blocks from the blockchain, and +- there is no attack, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector does not return evidence. 
+
+#### **[LCD-DIST-TERM-ATTACK.1]**
+
+If
+
+- there is an attack, and
+- a secondary reports a block that conflicts
+  with one of the blocks in *primary_trace*, and
+- *Secondaries* is always non-empty, and
+- the age of *root* is always less than the trusting period,
+
+then the detector returns evidence.
+
+> Observe that above we require that "a secondary reports a block that
+> conflicts". If there is an attack, but no secondary tries to launch
+> it against the detector (or the message from the secondary is lost
+> by the network), then there is nothing to detect for us.
+
+#### **[LCD-DIST-SAFE-SECONDARY.1]**
+
+No correct secondary is ever replaced.
+
+#### **[LCD-DIST-SAFE-BOGUS.1]**
+
+If
+
+- a secondary reports a bogus lightblock,
+- the age of *root* is always less than the trusting period,
+
+then the secondary is replaced before the detector terminates.
+
+> The above property is quite operational (e.g., the usage of
+> "reports"), but it captures the requirement closely. As the
+> detector only makes sense in a distributed setting, and does not
+> have a sequential specification, a less "pure" specification is
+> acceptable.
+
+# Part III - Protocol
+
+## Functions and Data defined in other Specifications
+
+### From the [supervisor][supervisor]
+
+[[LC-FUNC-REPLACE-SECONDARY.1]][repl]
+
+```go
+Replace_Secondary(addr Address, root-of-trust LightBlock)
+```
+
+### From the [verifier][verification]
+
+[[LCV-FUNC-MAIN.2]][vtt]
+
+```go
+func VerifyToTarget(primary PeerID, root LightBlock,
+                    targetHeight Height) (LightStore, Result)
+```
+
+Observe that `VerifyToTarget` does communication with the secondaries
+via the function [FetchLightBlock][fetch].
+
+### Shared data of the light client
+
+- a pool of full nodes *FullNodes* that have not been contacted before
+- peer set called *Secondaries*
+- primary
+
+> Note that the lightStore does not need to be shared.
+ +## Outline of solution + +The problem laid out is solved by calling the function `AttackDetector` +with a lightstore that contains a light block that has just been +verified by the verifier. + +Then `AttackDetector` downloads headers from the secondaries. In case +a conflicting header is downloaded from a secondary, it calls +`CreateEvidenceForPeer` which computes evidence in the case that +indeed an attack is confirmed. It could be that the secondary reports +a bogus block, which means that there need not be an attack, and the +secondary is replaced. + +## Details of the functions + +#### **[LCD-FUNC-DETECTOR.2]:** + +```go +func AttackDetector(root LightBlock, primary_trace []LightBlock) + ([]InternalEvidence) { + + Evidences := new []InternalEvidence; + + for each secondary in Secondaries { + lb, result := FetchLightBlock(secondary,primary_trace.Latest().Header.Height); + if result != ResultSuccess { + Replace_Secondary(root); + } + else if lb.Header != primary_trace.Latest().Header { + + // we replay the primary trace with the secondary, in + // order to generate evidence that we can submit to the + // secondary. We return the evidence + the trace the + // secondary told us that spans the evidence at its local store + + EvidenceForSecondary, newroot, secondary_trace, result := + CreateEvidenceForPeer(secondary, + root, + primary_trace); + if result == FaultyPeer { + Replace_Secondary(root); + } + else if result == FoundEvidence { + // the conflict is not bogus + Evidences.Add(EvidenceForSecondary); + // we replay the secondary trace with the primary, ... + EvidenceForPrimary, _, result := + CreateEvidenceForPeer(primary, + newroot, + secondary_trace); + if result == FoundEvidence { + Evidences.Add(EvidenceForPrimary); + } + // At this point we do not care about the other error + // codes. We already have generated evidence for an + // attack and need to stop the lightclient. It does not + // help to call replace_primary. 
Also we will use the + // same primary to check with other secondaries in + // later iterations of the loop + } + // In the case where the secondary reports NoEvidence + // after initially it reported a conflicting header. + // secondary is faulty + Replace_Secondary(root); + } + } + return Evidences; +} +``` + +- Expected precondition + - root and primary trace are a verification trace +- Expected postcondition + - solves the problem statement (if attack found, then evidence is reported) +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]][LCV-INV-TP1-link] + - `ErrorNoPeers`: if no peers are left to replace secondaries, and + no evidence was found before that happened + +--- + +```go +func CreateEvidenceForPeer(peer PeerID, root LightBlock, trace LightStore) + (Evidence, LightBlock, LightStore, result) { + + common := root; + + for i in 1 .. len(trace) { + auxLS, result := VerifyToTarget(peer, common, trace[i].Header.Height) + + if result != ResultSuccess { + // something went wrong; peer did not provide a verifiable block + return (nil, nil, nil, FaultyPeer) + } + else { + if auxLS.LatestVerified().Header != trace[i].Header { + // the header reported by the peer differs from the + // reference header in trace but both could be + // verified from common in one step. + // we can create evidence for submission to the secondary + ev := new InternalEvidence; + ev.Evidence.ConflictingBlock := trace[i]; + // CommonHeight is used to indicate the type of attack + // if the CommonHeight != ConflictingBlock.Height this + // is by definition a lunatic attack else it is an + // equivocation attack + ev.Evidence.CommonHeight := common.Height; + ev.Peer := peer + return (ev, common, auxLS, FoundEvidence) + } + else { + // the peer agrees with the trace, we move common forward. 
+ // we could delete auxLS as it will be overwritten in + // the next iteration + common := trace[i] + } + } + } + return (nil, nil, nil, NoEvidence) +} +``` + +- Expected precondition + - root and trace are a verification trace +- Expected postcondition + - finds evidence where trace and peer diverge +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]][LCV-INV-TP1-link] + - If `VerifyToTarget` returns error but root is not expired then return + `FaultyPeer` + +--- + +## Correctness arguments + +#### On the existence of evidence + +**Proposition.** In the case of attack, +evidence [[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1) + exists. +*Proof.* First observe that + +- (A). (NOT E2(i)) implies E1(i+1) + +Now by contradiction assume there is no evidence. Thus + +- for all i, we have NOT E1(i) or NOT E2(i) +- for i = 1 we have E1(1) and thus NOT E2(1) + thus by induction on i, by (A) we have for all i that **E1(i)** +- from attack we have E2(h-1), and as there is no evidence for + i = h - 1 we get **NOT E1(h-1)**. Contradiction. +QED. + +#### Argument for [[LCD-DIST-INV-ATTACK.1]](#LCD-DIST-INV-ATTACK1) + +Under the assumption that root and trace are a verification trace, +when in `CreateEvidenceForPeer` the detector creates +evidence, then the lightclient has seen two different headers (one via +`trace` and one via `VerifyToTarget`) for the same height that can both +be verified in one step. + +#### Argument for [[LCD-DIST-INV-STORE.1]](#LCD-DIST-INV-STORE1) + +We assume that there is at least one correct peer, and there is no +fork. As a result, the correct peer has the correct sequence of +blocks. Since the primary_trace is checked block-by-block also against +each secondary, and at no point evidence was generated that means at +no point there were conflicting blocks. 
+ +#### Argument for [[LCD-DIST-LIVE.1]](#LCD-DIST-LIVE1) + +At the latest when [[LCV-INV-TP.1]][LCV-INV-TP1-link] is violated, +`AttackDetector` terminates. + +#### Argument for [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +As there are finitely many peers, eventually the main loop +terminates. As there is no attack no evidence can be generated. + +#### Argument for [[LCD-DIST-TERM-ATTACK.1]](#LCD-DIST-TERM-ATTACK1) + +Argument similar to [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +#### Argument for [[LCD-DIST-SAFE-SECONDARY.1]](#LCD-DIST-SAFE-SECONDARY1) + +Secondaries are only replaced if they time-out or if they report bogus +blocks. The former is ruled out by the timing assumption, the latter +by correct peers only reporting blocks from the chain. + +#### Argument for [[LCD-DIST-SAFE-BOGUS.1]](#LCD-DIST-SAFE-BOGUS1) + +Once a bogus block is recognized as such the secondary is removed. + +# References + +> links to other specifications/ADRs this document refers to + +[[verification]] The specification of the light client verification. + +[[supervisor]] The specification of the light client supervisor. 
+ +[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md + +[supervisor]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md + +[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md + +[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1 + +[TMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-sound-distr-poss-commit1 + +[LCV-SEQ-SAFE-link]:https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-seq-safe1 + +[TMBC-VAL-CONTAINS-CORR-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-val-contains-corr1 + +[fetch]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-fetch1 + +[LCV-INV-TP1-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-inv-tp1 + +[LCV-LB-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1 + +[LCV-LS-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightstore2 + +[LVC-HD-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-header-fields2 + +[repl]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md#lc-func-replace-secondary1 + +[vtt]: 
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-main2 diff --git a/spec/light-client/detection/discussions.md b/spec/light-client/detection/discussions.md new file mode 100644 index 0000000000..82702dd69d --- /dev/null +++ b/spec/light-client/detection/discussions.md @@ -0,0 +1,178 @@ +# Results of Discussions and Decisions + +- Generating a minimal proof of fork (as suggested in [Issue #5083](https://github.com/tendermint/tendermint/issues/5083)) is too costly at the light client + - we do not know all lightblocks from the primary + - therefore there are many scenarios. we might even need to ask + the primary again for additional lightblocks to isolate the + branch. + +> For instance, the light node starts with block at height 1 and the +> primary provides a block of height 10 that the light node can +> verify immediately. In cross-checking, a secondary now provides a +> conflicting header b10 of height 10 that needs another header b5 +> of height 5 to +> verify. Now, in order for the light node to convince the primary: +> +> - The light node cannot just sent b5, as it is not clear whether +> the fork happened before or after 5 +> - The light node cannot just send b10, as the primary would also +> need b5 for verification +> - In order to minimize the evidence, the light node may try to +> figure out where the branch happens, e.g., by asking the primary +> for height 5 (it might be that more queries are required, also +> to the secondary. However, assuming that in this scenario the +> primary is faulty it may not respond. + + As the main goal is to catch misbehavior of the primary, + evidence generation and punishment must not depend on their + cooperation. So the moment we have proof of fork (even if it + contains several light blocks) we should submit right away. 
+ +- decision: "full" proof of fork consists of two traces that originate in the + same lightblock and lead to conflicting headers of the same height. + +- For submission of proof of fork, we may do some optimizations, for + instance, we might just submit a trace of lightblocks that verifies a block + different from the one the full node knows (we do not send the trace + the primary gave us back to the primary) + +- The light client attack is via the primary. Thus we try to + catch if the primary installs a bad light block + - We do not check secondary against secondary + - For each secondary, we check the primary against one secondary + +- Observe that just two blocks for the same height are not +sufficient proof of fork. +One of the blocks may be bogus [TMBC-BOGUS.1] which does +not constitute slashable behavior. +Which leads to the question whether the light node should try to do +fork detection on its initial block (from subjective +initialization). This could be done by doing backwards verification +(with the hashes) until a bifurcation block is found. +While there are scenarios where a +fork could be found, there is also the scenario where a faulty full +node feeds the light node with bogus light blocks and forces the light +node to check hashes until a bogus chain is out of the trusting period. +As a result, the light client +should not try to detect a fork for its initial header. **The initial +header must be trusted as is.** + +# Light Client Sequential Supervisor + +**TODO:** decide where (into which specification) to put the +following: + +We describe the context on which the fork detector is called by giving +a sequential version of the supervisor function. +Roughly, it alternates two phases namely: + +- Light Client Verification. As a result, a header of the required + height has been downloaded from and verified with the primary. +- Light Client Fork Detections. As a result the header has been + cross-checked with the secondaries. 
In case there is a fork we + submit "proof of fork" and exit. + +#### **[LC-FUNC-SUPERVISOR.1]:** + +```go +func Sequential-Supervisor () (Error) { + loop { + // get the next height + nextHeight := input(); + + // Verify + result := NoResult; + while result != ResultSuccess { + lightStore,result := VerifyToTarget(primary, lightStore, nextHeight); + if result == ResultFailure { + // pick new primary (promote a secondary to primary) + /// and delete all lightblocks above + // LastTrusted (they have not been cross-checked) + Replace_Primary(); + } + } + + // Cross-check + PoFs := Forkdetector(lightStore, PoFs); + if PoFs.Empty { + // no fork detected with secondaries, we trust the new + // lightblock + LightStore.Update(testedLB, StateTrusted); + } + else { + // there is a fork, we submit the proofs and exit + for i, p range PoFs { + SubmitProofOfFork(p); + } + return(ErrorFork); + } + } +} +``` + +**TODO:** finish conditions + +- Implementation remark +- Expected precondition + - *lightStore* initialized with trusted header + - *PoFs* empty +- Expected postcondition + - runs forever, or + - is terminated by user and satisfies LightStore invariant, or **TODO** + - has submitted proof of fork upon detecting a fork +- Error condition + - none + +---- + +# Semantics of the LightStore + +Currently, a lightblock in the lightstore can be in one of the +following states: + +- StateUnverified +- StateVerified +- StateFailed +- StateTrusted + +The intuition is that `StateVerified` captures that the lightblock has +been verified with the primary, and `StateTrusted` is the state after +successful cross-checking with the secondaries. + +Assuming there is **always one correct node among primary and +secondaries**, and there is no fork on the blockchain, lightblocks that +are in `StateTrusted` can be used by the user with the guarantee of +"finality". If a block in `StateVerified` is used, it might be that +detection later finds a fork, and a roll-back might be needed. 
+ +**Remark:** The assumption of one correct node, does not render +verification useless. It is true that if the primary and the +secondaries return the same block we may trust it. However, if there +is a node that provides a different block, the light node still needs +verification to understand whether there is a fork, or whether the +different block is just bogus (without any support of some previous +validator set). + +**Remark:** A light node may choose the full nodes it communicates +with (the light node and the full node might even belong to the same +stakeholder) so the assumption might be justified in some cases. + +In the future, we will do the following changes + +- we assume that only from time to time, the light node is + connected to a correct full node +- this means for some limited time, the light node might have no + means to defend against light client attacks +- as a result we do not have finality +- once the light node reconnects with a correct full node, it + should detect the light client attack and submit evidence. + +Under these assumptions, `StateTrusted` loses its meaning. As a +result, it should be removed from the API. 
We suggest that we replace +it with a flag "trusted" that can be used + +- internally for efficiency reasons (to maintain + [LCD-INV-TRUSTED-AGREED.1] until a fork is detected) +- by light client based on the "one correct full node" assumption + +---- diff --git a/spec/light-client/detection/draft-functions.md b/spec/light-client/detection/draft-functions.md new file mode 100644 index 0000000000..c56594a533 --- /dev/null +++ b/spec/light-client/detection/draft-functions.md @@ -0,0 +1,289 @@ +# Draft of Functions for Fork detection and Proof of Fork Submisstion + +This document collects drafts of function for generating and +submitting proof of fork in the IBC context + +- [IBC](#on---chain-ibc-component) + +- [Relayer](#relayer) + +## On-chain IBC Component + +> The following is a suggestions to change the function defined in ICS 007 + +#### [TAG-IBC-MISBEHAVIOR.1] + +```go +func checkMisbehaviourAndUpdateState(cs: ClientState, PoF: LightNodeProofOfFork) +``` + +**TODO:** finish conditions + +- Implementation remark +- Expected precondition + - PoF.TrustedBlock.Header is equal to lightBlock on store with + same height + - both traces end with header of same height + - headers are different + - both traces are supported by PoF.TrustedBlock (`supports` + defined in [TMBC-FUNC]), that is, for `t = currentTimestamp()` (see + ICS 024) + - supports(PoF.TrustedBlock, PoF.PrimaryTrace[1], t) + - supports(PoF.PrimaryTrace[i], PoF.PrimaryTrace[i+1], t) for + *0 < i < length(PoF.PrimaryTrace)* + - supports(PoF.TrustedBlock, PoF.SecondaryTrace[1], t) + - supports(PoF.SecondaryTrace[i], PoF.SecondaryTrace[i+1], t) for + *0 < i < length(PoF.SecondaryTrace)* +- Expected postcondition + - set cs.FrozenHeight to min(cs.FrozenHeight, PoF.TrustedBlock.Header.Height) +- Error condition + - none + +---- + +> The following is a suggestions to add functionality to ICS 002 and 007. +> I suppose the above is the most efficient way to get the required +> information. 
Another option is to subscribe to "header install" +> events via CosmosSDK + +#### [TAG-IBC-HEIGHTS.1] + +```go +func QueryHeightsRange(id, from, to) ([]Height) +``` + +- Expected postcondition + - returns all heights *h*, with *from <= h <= to* for which the + IBC component has a consensus state. + +---- + +> This function can be used if the relayer has no information about +> the IBC component. This allows late-joining relayers to also +> participate in fork detection and the generation of proofs of +> fork. Alternatively, we may also postulate that relayers are not +> responsible to detect forks for heights before they started (and +> subscribed to the transactions reporting fresh headers being +> installed at the IBC component). + +## Relayer + +### Auxiliary Functions to be implemented in the Light Client + +#### [LCV-LS-FUNC-GET-PREV.1] + +```go +func (ls LightStore) GetPreviousVerified(height Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a verified LightBlock, whose height is maximal among all + verified lightblocks with height smaller than `height` + +---- + +### Relayer Submitting Proof of Fork to the IBC Component + +There are two ways the relayer can detect a fork + +- by the fork detector of one of its lightclients +- by checking the consensus state of the IBC component + +The following function ignores how the proof of fork was generated. +It takes a proof of fork as input and computes a proof of fork that + will be accepted by the IBC component. +The problem addressed here is that both, the relayer's light client + and the IBC component have incomplete light stores, that might + not have all light blocks in common. +Hence the relayer has to figure out what the IBC component knows + (intuitively, a meeting point between the two lightstores + computed in `commonRoot`) and compute a proof of fork + (`extendPoF`) that the IBC component will accept based on its + knowledge. 
+ +The auxiliary functions `commonRoot` and `extendPoF` are +defined below. + +#### [TAG-SUBMIT-POF-IBC.1] + +```go +func SubmitIBCProofOfFork( + lightStore LightStore, + PoF: LightNodeProofOfFork, + ibc IBCComponent) (Error) { + if ibc.queryChainConsensusState(PoF.TrustedBlock.Height) = PoF.TrustedBlock { + // IBC component has root of PoF on store, we can just submit + ibc.submitMisbehaviourToClient(ibc.id,PoF) + return Success + // not sure about the id parameter + } + else { + // the ibc component does not have the TrustedBlock and might + // even be on yet a different branch. We have to compute a PoF + // that the ibc component can verify based on its current + // knowledge + + ibcLightBlock, lblock, _, result := commonRoot(lightStore, ibc, PoF.TrustedBlock) + + if result = Success { + newPoF = extendPoF(ibcLightBlock, lblock, lightStore, PoF) + ibc.submitMisbehaviourToClient(ibc.id, newPoF) + return Success + } + else{ + return CouldNotGeneratePoF + } + } +} +``` + +**TODO:** finish conditions + +- Implementation remark +- Expected precondition +- Expected postcondition +- Error condition + - none + +---- + +### Auxiliary Functions at the Relayer + +> If the relayer detects a fork, it has to compute a proof of fork that +> will convince the IBC component. That is, it has to compare the +> relayer's local lightstore against the lightstore of the IBC +> component, and find common ancestor lightblocks. + +#### [TAG-COMMON-ROOT.1] + +```go +func commonRoot(lightStore LightStore, ibc IBCComponent, lblock +LightBlock) (LightBlock, LightBlock, LightStore, Result) { + + auxLS.Init + + // first we ask for the heights the ibc component is aware of + ibcHeights = ibc.QueryHeightsRange( + ibc.id, + lightStore.LowestVerified().Height, + lblock.Height - 1); + // this function does not exist yet. 
Alternatively, we may + // request all transactions that installed headers via CosmosSDK + + + for { + h, result = max(ibcHeights) + if result = Empty { + return (_, _, _, NoRoot) + } + ibcLightBlock = ibc.queryChainConsensusState(h) + auxLS.Update(ibcLightBlock, StateVerified); + connector, result := Connector(lightStore, ibcLightBlock, lblock.Header.Height) + if result = Success { + return (ibcLightBlock, connector, auxLS, Success) + } + else{ + ibcHeights.remove(h) + } + } +} +``` + +- Expected postcondition + - returns + - a lightBlock b1 from the IBC component, and + - a lightBlock b2 + from the local lightStore with height less than + lblock.Header.Height, s.t. b1 supports b2, and + - a lightstore with the blocks downloaded from + the ibc component + +---- + +#### [TAG-LS-FUNC-CONNECT.1] + +```go +func Connector (lightStore LightStore, lb LightBlock, h Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a verified LightBlock from lightStore with height less + than *h* that can be + verified by lb in one step. + +**TODO:** for the above to work we need an invariant that all verified +lightblocks form a chain of trust. Otherwise, we need a lightblock +that has a chain of trust to height. + +> Once the common root is found, a proof of fork that will be accepted +> by the IBC component needs to be generated. This is done in the +> following function. 
+ +#### [TAG-EXTEND-POF.1] + +```go +func extendPoF (root LightBlock, + connector LightBlock, + lightStore LightStore, + Pof LightNodeProofofFork) (LightNodeProofofFork} +``` + +- Implementation remark + - PoF is not sufficient to convince an IBC component, so we extend + the proof of fork farther in the past +- Expected postcondition + - returns a newPOF: + - newPoF.TrustedBlock = root + - let prefix = + connector + + lightStore.Subtrace(connector.Header.Height, PoF.TrustedBlock.Header.Height-1) + + PoF.TrustedBlock + - newPoF.PrimaryTrace = prefix + PoF.PrimaryTrace + - newPoF.SecondaryTrace = prefix + PoF.SecondaryTrace + +### Detection a fork at the IBC component + +The following functions is assumed to be called regularly to check +that latest consensus state of the IBC component. Alternatively, this +logic can be executed whenever the relayer is informed (via an event) +that a new header has been installed. + +#### [TAG-HANDLER-DETECT-FORK.1] + +```go +func DetectIBCFork(ibc IBCComponent, lightStore LightStore) (LightNodeProofOfFork, Error) { + cs = ibc.queryClientState(ibc); + lb, found := lightStore.Get(cs.Header.Height) + if !found { + **TODO:** need verify to target + lb, result = LightClient.Main(primary, lightStore, cs.Header.Height) + // [LCV-FUNC-IBCMAIN.1] + **TODO** decide what to do following the outcome of Issue #499 + + // I guess here we have to get into the light client + + } + if cs != lb { + // IBC component disagrees with my primary. + // I fetch the + ibcLightBlock, lblock, ibcStore, result := commonRoot(lightStore, ibc, lb) + pof = new LightNodeProofOfFork; + pof.TrustedBlock := ibcLightBlock + pof.PrimaryTrace := ibcStore + cs + pof.SecondaryTrace := lightStore.Subtrace(lblock.Header.Height, + lb.Header.Height); + return(pof, Fork) + } + return(nil , NoFork) +} +``` + +**TODO:** finish conditions + +- Implementation remark + - we ask the handler for the lastest check. Cross-check with the + chain. In case they deviate we generate PoF. 
+ - we assume IBC component is correct. It has verified the + consensus state +- Expected precondition +- Expected postcondition diff --git a/spec/light-client/detection/req-ibc-detection.md b/spec/light-client/detection/req-ibc-detection.md new file mode 100644 index 0000000000..439ca26b64 --- /dev/null +++ b/spec/light-client/detection/req-ibc-detection.md @@ -0,0 +1,345 @@ +# Requirements for Fork Detection in the IBC Context + +## What you need to know about IBC + +In the following, I distilled what I considered relevant from + + + +### Components and their interface + +#### Tendermint Blockchains + +> I assume you know what that is. + +#### An IBC/Tendermint correspondence + +| IBC Term | Tendermint-RS Spec Term | Comment | +|----------|-------------------------| --------| +| `CommitmentRoot` | AppState | app hash | +| `ConsensusState` | Lightblock | not all fields are there. NextValidator is definitly needed | +| `ClientState` | latest light block + configuration parameters (e.g., trusting period + `frozenHeight` | NextValidators missing; what is `proofSpecs`?| +| `frozenHeight` | height of fork | set when a fork is detected | +| "would-have-been-fooled" | light node fork detection | light node may submit proof of fork to IBC component to halt it | +| `Height` | (no epochs) | (epoch,height) pair in lexicographical order (`compare`) | +| `Header` | ~signed header | validatorSet explicit (no hash); nextValidators missing | +| `Evidence` | t.b.d. | definition unclear "which the light client would have considered valid". Data structure will need to change | +| `verify` | `ValidAndVerified` | signature does not match perfectly (ClientState vs. 
LightBlock) + in `checkMisbehaviourAndUpdateState` it is unclear whether it uses traces or goes to h1 and h2 in one step | + +#### Some IBC links + +- [QueryConsensusState](https://github.com/cosmos/cosmos-sdk/blob/2651427ab4c6ea9f81d26afa0211757fc76cf747/x/ibc/02-client/client/utils/utils.go#L68) + +#### Required Changes in ICS 007 + +- `assert(height > 0)` in definition of `initialise` doesn't match + definition of `Height` as *(epoch,height)* pair. + +- `initialise` needs to be updated to new data structures + +- `clientState.frozenHeight` semantics seem not totally consistent in + document. E.g., `min` needs to be defined over optional value in + `checkMisbehaviourAndUpdateState`. Also, if you are frozen, why do + you accept more evidence. + +- `checkValidityAndUpdateState` + - `verify`: it needs to be clarified that checkValidityAndUpdateState + does not perform "bisection" (as currently hinted in the text) but + performs a single step of "skipping verification", called, + `ValidAndVerified` + - `assert (header.height > clientState.latestHeight)`: no old + headers can be installed. This might be OK, but we need to check + interplay with misbehavior + - clienstState needs to be updated according to complete data + structure + +- `checkMisbehaviourAndUpdateState`: as evidence will contain a trace + (or two), the assertion that uses verify will need to change. + +- ICS 002 states w.r.t. `queryChainConsensusState` that "Note that + retrieval of past consensus states by height (as opposed to just the + current consensus state) is convenient but not required." For + Tendermint fork detection, this seems to be a necessity. + +- `Header` should become a lightblock + +- `Evidence` should become `LightNodeProofOfFork` [LCV-DATA-POF.1] + +- `upgradeClientState` what is the semantics (in particular what is + `height` doing?). 
+ +- `checkMisbehaviourAndUpdateState(cs: ClientState, PoF: + LightNodeProofOfFork)` needs to be adapted + +#### Handler + +A blockchain runs a **handler** that passively collects information about + other blockchains. It can be thought of as a state machine that takes + input events. + +- the state includes a lightstore (I guess called `ConsensusState` + in IBC) + +- The following function is used to pass a header to a handler + +```go +type checkValidityAndUpdateState = (Header) => Void +``` + + For Tendermint, it will perform + `ValidAndVerified`, that is, it does the trusting period check and the + +1/3 check (+2/3 for sequential headers). + If it verifies a header, it adds it to its lightstore, + if it does not pass verification it drops it. + Right now it only accepts a header more recent than the latest + header, + and drops older + ones or ones that could not be verified. + +> The above paragraph captures what I believe is the current + logic of `checkValidityAndUpdateState`. It may be subject to + change. E.g., maintain a lightstore with state (unverified, verified) + +- The following function is used to pass "evidence" (this we + will need to make precise eventually) to a handler + +```go +type checkMisbehaviourAndUpdateState = (bytes) => Void +``` + + We have to design this, and the data that the handler can use to + check that there was some misbehavior (fork) in order to react to + it, e.g., flagging a situation and + stopping the protocol. + +- The following function is used to query the light store (`ConsensusState`) + +```go +type queryChainConsensusState = (height: uint64) => ConsensusState +``` + +#### Relayer + +- The active components are called **relayer**. + +- a relayer contains light clients to two (or more?) blockchains + +- the relayer sends headers and data to the handler to invoke + `checkValidityAndUpdateState` and + `checkMisbehaviourAndUpdateState`. It may also query + `queryChainConsensusState`. 
+ +- multiple relayers may talk to one handler. Some relayers might be + faulty. We assume existence of at least single correct relayer. + +## Informal Problem Statement: Fork detection in IBC + +### Relayer requirement: Evidence for Handler + +- The relayer should provide the handler with + "evidence" that there was a fork. + +- The relayer can read the handler's consensus state. Thus the relayer can + feed the handler precisely the information the handler needs to detect a + fork. + What is this + information needs to be specified. + +- The information depends on the verification the handler does. It + might be necessary to provide a bisection proof (list of + lightblocks) so that the handler can verify based on its local + lightstore a header *h* that is conflicting with a header *h'* in the + local lightstore, that is, *h != h'* and *h.Height = h'.Height* + +### Relayer requirement: Fork detection + +Let's assume there is a fork at chain A. There are two ways the +relayer can figure that out: + +1. as the relayer contains a light client for A, it also includes a fork + detector that can detect a fork. + +2. the relayer may also detect a fork by observing that the + handler for chain A (on chain B) + is on a different branch than the relayer + +- in both detection scenarios, the relayer should submit evidence to + full nodes of chain A where there is a fork. As we assume a fullnode + has a complete list of blocks, it is sufficient to send "Bucky's + evidence" (), + that is, + - two lightblocks from different branches + + - a lightblock (perhaps just a height) from which both blocks + can be verified. 
+ +- in the scenario 2., the relayer must feed the A-handler (on chain B) + a proof of a fork on A so that chain B can react accordingly + +### Handler requirement + +- there are potentially many relayers, some correct some faulty + +- a handler cannot trust the information provided by the relayer, + but must verify + (Доверя́й, но проверя́й) + +- in case of a fork, we accept that the handler temporarily stores + headers (tagged as verified). + +- eventually, a handler should be informed + (`checkMisbehaviourAndUpdateState`) + by some relayer that it has + verified a header from a fork. Then the handler should do what is + required by IBC in this case (stop?) + +### Challenges in the handler requirement + +- handlers and relayers work on different lightstores. In principle + the lightstore need not intersect in any heights a priori + +- if a relayer sees a header *h* it doesn't know at a handler (`queryChainConsensusState`), the + relayer needs to + verify that header. If it cannot do it locally based on downloaded + and verified (trusted?) light blocks, it might need to use + `VerifyToTarget` (bisection). To call `VerifyToTarget` we might keep + *h* in the lightstore. If verification fails, we need to download the + "alternative" header of height *h.Height* to generate evidence for + the handler. + +- we have to specify what precisely `queryChainConsensusState` + returns. It cannot be the complete lightstore. Is the last header enough? + +- we would like to assume that every now and then (smaller than the + trusting period) a correct relayer checks whether the handler is on a + different branch than the relayer. + And we would like that this is enough to achieve + the Handler requirement. + + - here the correctness argument would be easy if a correct relayer is + based on a light client with a *trusted* state, that is, a light + client who never changes its opinion about trusted. 
Then if such a + correct relayer checks-in with a handler, it will detect a fork, and + act in time. + + - if the light client does not provide this interface, in the case of + a fork, we need some assumption about a correct relayer being on a + different branch than the handler, and we need such a relayer to + check-in not too late. Also + what happens if the relayer's light client is forced to roll-back + its lightstore? + Does it have to re-check all handlers? + +## On the interconnectedness of things + +In the broader discussion of so-called "fork accountability" there are +several subproblems + +- Fork detection + +- Evidence creation and submission + +- Isolating misbehaving nodes (and report them for punishment over abci) + +### Fork detection + +The preliminary specification ./detection.md formalizes the notion of +a fork. Roughly, a fork exists if there are two conflicting headers +for the same height, where both are supported by bonded full nodes +(that have been validators in the near past, that is, within the +trusting period). We distinguish between *fork on the chain* where two +conflicting blocks are signed by +2/3 of the validators of that +height, and a *light client fork* where one of the conflicting headers +is not signed by +2/3 of the current height, but by +1/3 of the +validators of some smaller height. + +In principle everyone can detect a fork + +- ./detection talks about the Tendermint light client with a focus on + light nodes. A relayer runs such light clients and may detect + forks in this way + +- in IBC, a relayer can see that a handler is on a conflicting branch + - the relayer should feed the handler the necessary information so + that it can halt + - the relayer should report the fork to a full node + +### Evidence creation and submission + +- the information sent from the relayer to the handler could be called + evidence, but this is perhaps a bad idea because the information sent to a + full node can also be called evidence. 
But this evidence might still + not be enough as the full node might need to run the "fork + accountability" protocol to generate evidence in the form of + consensus messages. So perhaps we should + introduce different terms for: + + - proof of fork for the handler (basically consisting of lightblocks) + - proof of fork for a full node (basically consisting of (fewer) lightblocks) + - proof of misbehavior (consensus messages) + +### Isolating misbehaving nodes + +- this is the job of a full node. + +- might be subjective in the future: the protocol depends on what the + full node believes is the "correct" chain. Right now we postulate + that every full node is on the correct chain, that is, there is no + fork on the chain. + +- The full node figures out which nodes are + - lunatic + - double signing + - amnesic; **using the challenge response protocol** + +- We do not punish "phantom" validators + - currently we understand a phantom validator as a node that + - signs a block for a height in which it is not in the + validator set + - the node is not part of the +1/3 of previous validators that + are used to support the header. Whether we call a validator + phantom might be subjective and depend on the header we + check against. Their formalization actually seems not so + clear. + - they can only do something if there are +1/3 faulty validators + that are either lunatic, double signing, or amnesic. + - abci requires that we only report bonded validators. So if a + node is a "phantom", we would need the check whether the node is + bonded, which currently is expensive, as it requires checking + blocks from the last three weeks. + - in the future, with state sync, a correct node might be + convinced by faulty nodes that it is in the validator set. Then + it might appear to be "phantom" although it behaves correctly + +## Next steps + +> The following points are subject to my limited knowledge of the +> state of the work on IBC. 
Some/most of it might already exist and we +> will just need to bring everything together. + +- "proof of fork for a full node" defines a clean interface between + fork detection and misbehavior isolation. So it should be produced + by protocols (light client, the relayer). So we should fix that + first. + +- Given the problems of not having a light client architecture spec, + for the relayer we should start with this. E.g. + + - the relayer runs light clients for two chains + - the relayer regularly queries consensus state of a handler + - the relayer needs to check the consensus state + - this involves local checks + - this involves calling the light client + - the relayer uses the light client to do IBC business (channels, + packets, connections, etc.) + - the relayer submits proof of fork to handlers and full nodes + +> the list is definitely not complete. I think part of this +> (perhaps all) is +> covered by what Anca presented recently. + +We will need to define what we expect from these components + +- for the parts where the relayer talks to the handler, we need to fix + the interface, and what the handler does + +- we write specs for these components. diff --git a/spec/light-client/experiments.png b/spec/light-client/experiments.png new file mode 100644 index 0000000000..94166ffa31 Binary files /dev/null and b/spec/light-client/experiments.png differ diff --git a/spec/light-client/supervisor/supervisor_001_draft.md b/spec/light-client/supervisor/supervisor_001_draft.md new file mode 100644 index 0000000000..eb0dd190b9 --- /dev/null +++ b/spec/light-client/supervisor/supervisor_001_draft.md @@ -0,0 +1,637 @@ +# Draft of Light Client Supervisor for discussion + +## TODOs + +This specification in done in parallel with updates on the +verification specification. So some hyperlinks have to be placed to +the correct files eventually. 
+ +# Light Client Sequential Supervisor + +The light client implements a read operation of a +[header](TMBC-HEADER-link) from the [blockchain](TMBC-SEQ-link), by +communicating with full nodes, a so-called primary and several +so-called witnesses. As some full nodes may be faulty, this +functionality must be implemented in a fault-tolerant way. + +In the Tendermint blockchain, the validator set may change with every +new block. The staking and unbonding mechanism induces a [security +model](TMBC-FM-2THIRDS-link): starting at time *Time* of the +[header](TMBC-HEADER-link), +more than two-thirds of the next validators of a new block are correct +for the duration of *TrustedPeriod*. + +[Light Client Verification](https://informal.systems) implements the fault-tolerant read +operation designed for this security model. That is, it is safe if the +model assumptions are satisfied and makes progress if it communicates +to a correct primary. + +However, if the [security model](TMBC-FM-2THIRDS-link) is violated, +faulty peers (that have been validators at some point in the past) may +launch attacks on the Tendermint network, and on the light +client. These attacks as well as an axiomatization of blocks in +general are defined in [a document that contains the definitions that +are currently in detection.md](https://informal.systems). + +If there is a light client attack (but no +successful attack on the network), the safety of the verification step +may be violated (as we operate outside its basic assumption). +The light client also +contains a defense mechanism against light clients attacks, called detection. + +[Light Client Detection](https://informal.systems) implements a cross check of the result +of the verification step. If there is a light client attack, and the +light client is connected to a correct peer, the light client as a +whole is safe, that is, it will not operate on invalid +blocks. 
However, in this case it cannot successfully read, as +inconsistent blocks are in the system. However, in this case the +detection performs a distributed computation that results in so-called +evidence. Evidence can be used to prove +to a correct full node that there has been a +light client attack. + +[Light Client Evidence Accountability](https://informal.systems) is a protocol run on a +full node to check whether submitted evidence indeed proves the +existence of a light client attack. Further, from the evidence and its +own knowledge about the blockchain, the full node computes a set of +bonded full nodes (that at some point had more than one third of the +voting power) that participated in the attack that will be reported +via ABCI to the application. + +In this document we specify + +- Initialization of the Light Client +- The interaction of [verification](https://informal.systems) and [detection](https://informal.systems) + +The details of these two protocols are captured in their own +documents, as is the [accountability](https://informal.systems) protocol. + +> Another related line is IBC attack detection and submission at the +> relayer, as well as attack verification at the IBC handler. This +> will call for yet another spec. + +# Status + +This document is work in progress. In order to develop the +specification step-by-step, +it assumes certain details of [verification](https://informal.systems) and +[detection](https://informal.systems) that are not specified in the respective current +versions yet. This inconsistencies will be addresses over several +upcoming PRs. + +# Part I - Tendermint Blockchain + +See [verification spec](addLinksWhenDone) + +# Part II - Sequential Problem Definition + +#### **[LC-SEQ-INIT-LIVE.1]** + +Upon initialization, the light client gets as input a header of the +blockchain, or the genesis file of the blockchain, and eventually +stores a header of the blockchain. 
+ +#### **[LC-SEQ-LIVE.1]** + +The light client gets a sequence of heights as inputs. For each input +height *targetHeight*, it eventually stores the header of height +*targetHeight*. + +#### **[LC-SEQ-SAFE.1]** + +The light client never stores a header which is not in the blockchain. + +# Part III - Light Client as Distributed System + +## Computational Model + +The light client communicates with remote processes only via the +[verification](TODO) and the [detection](TODO) protocols. The +respective assumptions are given there. + +## Distributed Problem Statement + +### Two Kinds of Liveness + +In case of light client attacks, the sequential problem statement +cannot always be satisfied. The lightclient cannot decide which block +is from the chain and which is not. As a result, the light client just +creates evidence, submits it, and terminates. +For the liveness property, we thus add the +possibility that instead of adding a lightblock, we also might terminate +in case there is an attack. + +#### **[LC-DIST-TERM.1]** + +The light client either runs forever or it *terminates on attack*. + +### Design choices + +#### [LC-DIST-STORE.1] + +The light client has a local data structure called LightStore +that contains light blocks (that contain a header). + +> The light store exposes functions to query and update it. They are +> specified [here](TODO:onceVerificationIsMerged). + +**TODO:** reference light store invariant [LCV-INV-LS-ROOT.2] once +verification is merged + +#### **[LC-DIST-SAFE.1]** + +It is always the case that every header in *LightStore* was +generated by an instance of Tendermint consensus. + +#### **[LC-DIST-LIVE.1]** + +Whenever the light client gets a new height *h* as input, + +- and there is +no light client attack up to height *h*, then the lightclient +eventually puts the lightblock of height *h* in the lightstore and +wait for another input. 
+- otherwise, that is, if there +is a light client attack on height *h*, then the light client +must perform one of the following: + - it terminates on attack. + - it eventually puts the lightblock of height *h* in the lightstore and +wait for another input. + +> Observe that the "existence of a lightclient attack" just means that some node has generated a conflicting block. It does not necessarily mean that a (faulty) peer sends such a block to "our" lightclient. Thus, even if there is an attack somewhere in the system, our lightclient might still continue to operate normally. + +### Solving the sequential specification + +[LC-DIST-SAFE.1] is guaranteed by the detector; in particular it +follows from +[[LCD-DIST-INV-STORE.1]](TODO) +[[LCD-DIST-LIVE.1]](TODO) + +# Part IV - Light Client Supervisor Protocol + +We provide a specification for a sequential Light Client Supervisor. +The local code for verification is presented by a sequential function +`Sequential-Supervisor` to highlight the control flow of this +functionality. Each lightblock is first verified with a primary, and then +cross-checked with secondaries, and if all goes well, the lightblock +is +added (with the attribute "trusted") to the +lightstore. Intermiate lightblocks that were used to verify the target +block but were not cross-checked are stored as "verified" + +> We note that if a different concurrency model is considered +> for an implementation, the semantics of the lightstore might change: +> In a concurrent implementation, we might do verification for some +> height *h*, add the +> lightblock to the lightstore, and start concurrent threads that +> +> - do verification for the next height *h' != h* +> - do cross-checking for height *h*. If we find an attack, we remove +> *h* from the lightstore. 
+> - the user might already start to use *h* +> +> Thus, this concurrency model changes the semantics of the +> lightstore (not all lightblocks that are read by the user are +> trusted; they may be removed if +> we find a problem). Whether this is desirable, and whether the gain in +> performance is worth it, we keep for future versions/discussion of +> lightclient protocols. + +## Definitions + +### Peers + +#### **[LC-DATA-PEERS.1]:** + +A fixed set of full nodes is provided in the configuration upon +initialization. Initially this set is partitioned into + +- one full node that is the *primary* (singleton set), +- a set *Secondaries* (of fixed size, e.g., 3), +- a set *FullNodes*; it excludes *primary* and *Secondaries* nodes. +- A set *FaultyNodes* of nodes that the light client suspects of + being faulty; it is initially empty + +#### **[LC-INV-NODES.1]:** + +The detector shall maintain the following invariants: + +- *FullNodes \intersect Secondaries = {}* +- *FullNodes \intersect FaultyNodes = {}* +- *Secondaries \intersect FaultyNodes = {}* + +and the following transition invariant + +- *FullNodes' \union Secondaries' \union FaultyNodes' = FullNodes + \union Secondaries \union FaultyNodes* + +#### **[LC-FUNC-REPLACE-PRIMARY.1]:** + +```go +Replace_Primary(root-of-trust LightBlock) +``` + +- Implementation remark + - the primary is replaced by a secondary + - to maintain a constant size of secondaries, need to + - pick a new secondary *nsec* while ensuring [LC-INV-ROOT-AGREED.1] + - that is, we need to ensure that root-of-trust = FetchLightBlock(nsec, root-of-trust.Header.Height) +- Expected precondition + - *FullNodes* is nonempty +- Expected postcondition + - *primary* is moved to *FaultyNodes* + - a secondary *s* is moved from *Secondaries* to primary +- Error condition + - if precondition is violated + +#### **[LC-FUNC-REPLACE-SECONDARY.1]:** + +```go +Replace_Secondary(addr Address, root-of-trust LightBlock) +``` + +- Implementation remark + - maintain 
[LC-INV-ROOT-AGREED.1], that is, + ensure root-of-trust = FetchLightBlock(nsec, root-of-trust.Header.Height) +- Expected precondition + - *FullNodes* is nonempty +- Expected postcondition + - addr is moved from *Secondaries* to *FaultyNodes* + - an address *nsec* is moved from *FullNodes* to *Secondaries* +- Error condition + - if precondition is violated + +### Data Types + +The core data structure of the protocol is the LightBlock. + +#### **[LC-DATA-LIGHTBLOCK.1]** + +```go +type LightBlock struct { + Header Header + Commit Commit + Validators ValidatorSet + NextValidators ValidatorSet + Provider PeerID +} +``` + +#### **[LC-DATA-LIGHTSTORE.1]** + +LightBlocks are stored in a structure which stores all LightBlock from +initialization or received from peers. + +```go +type LightStore struct { + ... +} + +``` + +We use the functions that the LightStore exposes, which +are defined in the [verification specification](TODO). + +### Inputs + +The lightclient is initialized with LCInitData + +#### **[LC-DATA-INIT.1]** + +```go +type LCInitData struct { + lightBlock LightBlock + genesisDoc GenesisDoc +} +``` + +where only one of the components must be provided. `GenesisDoc` is +defined in the [Tendermint +Types](https://github.com/tendermint/tendermint/blob/v0.34.x/types/genesis.go). + +#### **[LC-DATA-GENESIS.1]** + +```go +type GenesisDoc struct { + GenesisTime time.Time `json:"genesis_time"` + ChainID string `json:"chain_id"` + InitialHeight int64 `json:"initial_height"` + ConsensusParams *tmproto.ConsensusParams `json:"consensus_params,omitempty"` + Validators []GenesisValidator `json:"validators,omitempty"` + AppHash tmbytes.HexBytes `json:"app_hash"` + AppState json.RawMessage `json:"app_state,omitempty"` +} +``` + +We use the following function +`makeblock` so that we create a lightblock from the genesis +file in order to do verification based on the data from the genesis +file using the same verification function we use in normal operation. 
+ +#### **[LC-FUNC-MAKEBLOCK.1]** + +```go +func makeblock (genesisDoc GenesisDoc) (lightBlock LightBlock)) +``` + +- Implementation remark + - none +- Expected precondition + - none +- Expected postcondition + - lightBlock.Header.Height = genesisDoc.InitialHeight + - lightBlock.Header.Time = genesisDoc.GenesisTime + - lightBlock.Header.LastBlockID = nil + - lightBlock.Header.LastCommit = nil + - lightBlock.Header.Validators = genesisDoc.Validators + - lightBlock.Header.NextValidators = genesisDoc.Validators + - lightBlock.Header.Data = nil + - lightBlock.Header.AppState = genesisDoc.AppState + - lightBlock.Header.LastResult = nil + - lightBlock.Commit = nil + - lightBlock.Validators = genesisDoc.Validators + - lightBlock.NextValidators = genesisDoc.Validators + - lightBlock.Provider = nil +- Error condition + - none + +---- + +### Configuration Parameters + +#### **[LC-INV-ROOT-AGREED.1]** + +In the Sequential-Supervisor, it is always the case that the primary +and all secondaries agree on lightStore.Latest(). + +### Assumptions + +We have to assume that the initialization data (the lightblock or the +genesis file) are consistent with the blockchain. This is subjective +initialization and it cannot be checked locally. + +### Invariants + +#### **[LC-INV-PEERLIST.1]:** + +The peer list contains a primary and a secondary. + +> If the invariant is violated, the light client does not have enough +> peers to download headers from. As a result, the light client +> needs to terminate in case this invariant is violated. + +## Supervisor + +### Outline + +The supervisor implements the functionality of the lightclient. It is +initialized with a genesis file or with a lightblock the user +trusts. This initialization is subjective, that is, the security of +the lightclient is based on the validity of the input. If the genesis +file or the lightblock deviate from the actual ones on the blockchain, +the lightclient provides no guarantees. 
+ +After initialization, the supervisor awaits an input, that is, the +height of the next lightblock that should be obtained. Then it +downloads, verifies, and cross-checks a lightblock, and if all tests +go through, the light block (and possibly other lightblocks) are added +to the lightstore, which is returned in an output event to the user. + +The following main loop does the interaction with the user (input, +output) and calls the following two functions: + +- `InitLightClient`: it initializes the lightstore either with the + provided lightblock or with the lightblock that corresponds to the + first block generated by the blockchain (by the validators defined + by the genesis file) +- `VerifyAndDetect`: takes as input a lightstore and a height and + returns the updated lightstore. + +#### **[LC-FUNC-SUPERVISOR.1]:** + +```go +func Sequential-Supervisor (initdata LCInitData) (Error) { + + lightStore,result := InitLightClient(initData); + if result != OK { + return result; + } + + loop { + // get the next height + nextHeight := input(); + + lightStore,result := VerifyAndDetect(lightStore, nextHeight); + + if result == OK { + output(LightStore.Get(targetHeight)); + // we only output a trusted lightblock + } + else { + return result + } + // QUESTION: is it OK to generate output event in normal case, + // and terminate with failure in the (light client) attack case? + } +} +``` + +- Implementation remark + - infinite loop unless a light client attack is detected + - In typical implementations (e.g., the one in Rust), + there are mutliple input actions: + `VerifytoLatest`, `LatestTrusted`, and `GetStatus`. The + information can be easily obtained from the lightstore, so that + we do not treat these requests explicitly here but just consider + the request for a block of a given height which requires more + involved computation and communication. +- Expected precondition + - *LCInitData* contains a genesis file or a lightblock. 
+- Expected postcondition + - if a light client attack is detected: it stops and submits + evidence (in `InitLightClient` or `VerifyAndDetect`) + - otherwise: non. It runs forever. +- Invariant: *lightStore* contains trusted lightblocks only. +- Error condition + - if `InitLightClient` or `VerifyAndDetect` fails (if a attack is + detected, or if [LCV-INV-TP.1] is violated) + +---- + +### Details of the Functions + +#### Initialization + +The light client is based on subjective initialization. It has to +trust the initial data given to it by the user. It cannot do any +detection of attack. So either upon initialization we obtain a +lightblock and just initialize the lightstore with it. Or in case of a +genesis file, we download, verify, and cross-check the first block, to +initialize the lightstore with this first block. The reason is that +we want to maintain [LCV-INV-TP.1] from the beginning. + +> If the lightclient is initialized with a lightblock, one might think +> it may increase trust, when one cross-checks the initial light +> block. However, if a peer provides a conflicting +> lightblock, the question is to distinguish the case of a +> [bogus](https://informal.systems) block (upon which operation should proceed) from a +> [light client attack](https://informal.systems) (upon which operation should stop). In +> case of a bogus block, the lightclient might be forced to do +> backwards verification until the blocks are out of the trusting +> period, to make sure no previous validator set could have generated +> the bogus block, which effectively opens up a DoS attack on the lightclient +> without adding effective robustness. + +#### **[LC-FUNC-INIT.1]:** + +```go +func InitLightClient (initData LCInitData) (LightStore, Error) { + + if LCInitData.LightBlock != nil { + // we trust the provided initial block. 
+ newblock := LCInitData.LightBlock + } + else { + genesisBlock := makeblock(initData.genesisDoc); + + result := NoResult; + while result != ResultSuccess { + current = FetchLightBlock(PeerList.primary(), genesisBlock.Header.Height + 1) + // QUESTION: is the height with "+1" OK? + + if CANNOT_VERIFY = ValidAndVerify(genesisBlock, current) { + Replace_Primary(); + } + else { + result = ResultSuccess + } + } + + // cross-check + auxLS := new LightStore + auxLS.Add(current) + Evidences := AttackDetector(genesisBlock, auxLS) + if Evidences.Empty { + newBlock := current + } + else { + // [LC-SUMBIT-EVIDENCE.1] + submitEvidence(Evidences); + return(nil, ErrorAttack); + } + } + + lightStore := new LightStore; + lightStore.Add(newBlock); + return (lightStore, OK); +} + +``` + +- Implementation remark + - none +- Expected precondition + - *LCInitData* contains either a genesis file of a lightblock + - if genesis it passes `ValidateAndComplete()` see [Tendermint](https://informal.systems) +- Expected postcondition + - *lightStore* initialized with trusted lightblock. It has either been + cross-checked (from genesis) or it has initial trust from the + user. +- Error condition + - if precondition is violated + - empty peerList + +---- + +#### Main verification and detection logic + +#### **[LC-FUNC-MAIN-VERIF-DETECT.1]:** + +```go +func VerifyAndDetect (lightStore LightStore, targetHeight Height) + (LightStore, Result) { + + b1, r1 = lightStore.Get(targetHeight) + if r1 == true { + if b1.State == StateTrusted { + // block already there and trusted + return (lightStore, ResultSuccess) + } + else { + // We have a lightblock in the store, but it has not been + // cross-checked by now. We do that now. 
+ root_of_trust, auxLS := lightstore.TraceTo(b1); + + // Cross-check + Evidences := AttackDetector(root_of_trust, auxLS); + if Evidences.Empty { + // no attack detected, we trust the new lightblock + lightStore.Update(auxLS.Latest(), + StateTrusted, + verfiedLS.Latest().verification-root); + return (lightStore, OK); + } + else { + // there is an attack, we exit + submitEvidence(Evidences); + return(lightStore, ErrorAttack); + } + } + } + + // get the lightblock with maximum height smaller than targetHeight + // would typically be the heighest, if we always move forward + root_of_trust, r2 = lightStore.LatestPrevious(targetHeight); + + if r2 = false { + // there is no lightblock from which we can do forward + // (skipping) verification. Thus we have to go backwards. + // No cross-check needed. We trust hashes. Therefore, we + // directly return the result + return Backwards(primary, lightStore.Lowest(), targetHeight) + } + else { + // Forward verification + detection + result := NoResult; + while result != ResultSuccess { + verifiedLS,result := VerifyToTarget(primary, + root_of_trust, + nextHeight); + if result == ResultFailure { + // pick new primary (promote a secondary to primary) + Replace_Primary(root_of_trust); + } + else if result == ResultExpired { + return (lightStore, result) + } + } + + // Cross-check + Evidences := AttackDetector(root_of_trust, verifiedLS); + if Evidences.Empty { + // no attack detected, we trust the new lightblock + verifiedLS.Update(verfiedLS.Latest(), + StateTrusted, + verfiedLS.Latest().verification-root); + lightStore.store_chain(verifidLS); + return (lightStore, OK); + } + else { + // there is an attack, we exit + return(lightStore, ErrorAttack); + } + } +} +``` + +- Implementation remark + - none +- Expected precondition + - none +- Expected postcondition + - lightblock of height *targetHeight* (and possibly additional blocks) added to *lightStore* +- Error condition + - an attack is detected + - [LC-DATA-PEERLIST-INV.1] is 
violated + +---- diff --git a/spec/light-client/supervisor/supervisor_001_draft.tla b/spec/light-client/supervisor/supervisor_001_draft.tla new file mode 100644 index 0000000000..949a7c0200 --- /dev/null +++ b/spec/light-client/supervisor/supervisor_001_draft.tla @@ -0,0 +1,71 @@ +------------------------- MODULE supervisor_001_draft ------------------------ +(* +This is the beginning of a spec that will eventually use verification and detector API +*) + +EXTENDS Integers, FiniteSets + +VARIABLES + state, + output + +vars == <> + +CONSTANT + INITDATA + +Init == + /\ state = "Init" + /\ output = "none" + +NextInit == + /\ state = "Init" + /\ \/ state' = "EnterLoop" + \/ state' = "FailedToInitialize" + /\ UNCHANGED output + +NextVerifyToTarget == + /\ state = "EnterLoop" + /\ \/ state' = "EnterLoop" \* replace primary + \/ state' = "EnterDetect" + \/ state' = "ExhaustedPeersPrimary" + /\ UNCHANGED output + +NextAttackDetector == + /\ state = "EnterDetect" + /\ \/ state' = "NoEvidence" + \/ state' = "EvidenceFound" + \/ state' = "ExhaustedPeersSecondaries" + /\ UNCHANGED output + +NextVerifyAndDetect == + \/ NextVerifyToTarget + \/ NextAttackDetector + +NextOutput == + /\ state = "NoEvidence" + /\ state' = "EnterLoop" + /\ output' = "data" \* to generate a trace + +NextTerminated == + /\ \/ state = "FailedToInitialize" + \/ state = "ExhaustedPeersPrimary" + \/ state = "EvidenceFound" + \/ state = "ExhaustedPeersSecondaries" + /\ UNCHANGED vars + +Next == + \/ NextInit + \/ NextVerifyAndDetect + \/ NextOutput + \/ NextTerminated + +InvEnoughPeers == + /\ state /= "ExhaustedPeersPrimary" + /\ state /= "ExhaustedPeersSecondaries" + + +============================================================================= +\* Modification History +\* Last modified Sun Oct 18 11:48:45 CEST 2020 by widder +\* Created Sun Oct 18 11:18:53 CEST 2020 by widder diff --git a/spec/light-client/supervisor/supervisor_002_draft.md b/spec/light-client/supervisor/supervisor_002_draft.md new file 
mode 100644 index 0000000000..9286560e79 --- /dev/null +++ b/spec/light-client/supervisor/supervisor_002_draft.md @@ -0,0 +1,131 @@ +# Draft of Light Client Supervisor for discussion + +## Modification to the initialization + +The lightclient is initialized with LCInitData + +### **[LC-DATA-INIT.2]** + +```go +type LCInitData struct { + TrustedBlock LightBlock + Genesis GenesisDoc + TrustedHash []byte + TrustedHeight int64 +} +``` + +where only one of the components must be provided. `GenesisDoc` is +defined in the [Tendermint +Types](https://github.com/tendermint/tendermint/blob/v0.34.x/types/genesis.go). + + +### Initialization + +The light client is based on subjective initialization. It has to +trust the initial data given to it by the user. It cannot perform any +detection of an attack yet instead requires an initial point of trust. +There are three forms of initial data which are used to obtain the +first trusted block: + +- A trusted block from a prior initialization +- A trusted height and hash +- A genesis file + +The golang light client implementation checks this initial data in that +order; first attempting to find a trusted block from the trusted store, +then acquiring a light block from the primary at the trusted height and matching +the hash, or finally checking for a genesis file to verify the initial header. + +The light client doesn't need to check if the trusted block is within the +trusted period because it already trusts it, however, if the light block is +outside the trust period, there is a higher chance the light client won't be +able to verify anything. 
+ +Cross-checking this trusted block with providers upon initialization is helpful +for ensuring that the node is responsive and correctly configured but does not +increase trust since proving a conflicting block is a +[light client attack](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_003_reviewed.md#tmbc-lc-attack1) +and not just a [bogus](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_003_reviewed.md#tmbc-bogus1) block could result in +performing backwards verification beyond the trusted period, thus a fruitless +endeavour. + +However, with the notion of it's better to fail earlier than later, the golang +light client implementation will perform a consistency check on all providers +and will error if one returns a different header, allowing the user +the opportunity to reinitialize. + +#### **[LC-FUNC-INIT.2]:** + +```go +func InitLightClient(initData LCInitData) (LightStore, Error) { + var initialBlock LightBlock + + switch { + case LCInitData.TrustedBlock != nil: + // we trust the block from a prior initialization + initialBlock = LCInitData.TrustedBlock + + case LCInitData.TrustedHash != nil: + untrustedBlock := FetchLightBlock(PeerList.Primary(), LCInitData.TrustedHeight) + + + // verify that the hashes match + if untrustedBlock.Hash() != LCInitData.TrustedHash { + return nil, Error("Primary returned block with different hash") + } + // after checking the hash we now trust the block + initialBlock = untrustedBlock + } + case LCInitData.Genesis != nil: + untrustedBlock := FetchLightBlock(PeerList.Primary(), LCInitData.Genesis.InitialHeight) + + // verify that 2/3+ of the validator set signed the untrustedBlock + if err := VerifyCommitFull(untrustedBlock.Commit, LCInitData.Genesis.Validators); err != nil { + return nil, err + } + + // we can now trust the block + initialBlock = untrustedBlock + default: + return nil, Error("No initial data was provided") + + // This is 
done in the golang version but is optional and not strictly part of the protocol + if err := CrossCheck(initialBlock, PeerList.Witnesses()); err != nil { + return nil, err + } + + // initialize light store + lightStore := new LightStore; + lightStore.Add(newBlock); + return (lightStore, OK); +} + +func CrossCheck(lb LightBlock, witnesses []Provider) error { + for _, witness := range witnesses { + witnessBlock := FetchLightBlock(witness, lb.Height) + + if witnessBlock.Hash() != lb.Hash() { + return Error("Witness has different block") + } + } + return OK +} + +``` + +- Implementation remark + - none +- Expected precondition + - *LCInitData* contains either a genesis file of a lightblock + - if genesis it passes `ValidateAndComplete()` see [Tendermint](https://informal.systems) +- Expected postcondition + - *lightStore* initialized with trusted lightblock. It has either been + cross-checked (from genesis) or it has initial trust from the + user. +- Error condition + - if precondition is violated + - empty peerList + +---- + diff --git a/spec/light-client/verification/001bmc-apalache.csv b/spec/light-client/verification/001bmc-apalache.csv new file mode 100644 index 0000000000..8d5ad8ea3a --- /dev/null +++ b/spec/light-client/verification/001bmc-apalache.csv @@ -0,0 +1,49 @@ +no,filename,tool,timeout,init,inv,next,args +1,MC4_3_correct.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +2,MC4_3_correct.tla,apalache,1h,,CorrectnessInv,,--length=30 +3,MC4_3_correct.tla,apalache,1h,,PrecisionInv,,--length=30 +4,MC4_3_correct.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +5,MC4_3_correct.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +6,MC4_3_correct.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +7,MC4_3_correct.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +8,MC4_3_correct.tla,apalache,1h,,Complexity,,--length=30 
+9,MC4_3_faulty.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +10,MC4_3_faulty.tla,apalache,1h,,CorrectnessInv,,--length=30 +11,MC4_3_faulty.tla,apalache,1h,,PrecisionInv,,--length=30 +12,MC4_3_faulty.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +13,MC4_3_faulty.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +14,MC4_3_faulty.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +15,MC4_3_faulty.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +16,MC4_3_faulty.tla,apalache,1h,,Complexity,,--length=30 +17,MC5_5_correct.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +18,MC5_5_correct.tla,apalache,1h,,CorrectnessInv,,--length=30 +19,MC5_5_correct.tla,apalache,1h,,PrecisionInv,,--length=30 +20,MC5_5_correct.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +21,MC5_5_correct.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +22,MC5_5_correct.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +23,MC5_5_correct.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +24,MC5_5_correct.tla,apalache,1h,,Complexity,,--length=30 +25,MC5_5_faulty.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +26,MC5_5_faulty.tla,apalache,1h,,CorrectnessInv,,--length=30 +27,MC5_5_faulty.tla,apalache,1h,,PrecisionInv,,--length=30 +28,MC5_5_faulty.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +29,MC5_5_faulty.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +30,MC5_5_faulty.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +31,MC5_5_faulty.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +32,MC5_5_faulty.tla,apalache,1h,,Complexity,,--length=30 +33,MC7_5_faulty.tla,apalache,10h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +34,MC7_5_faulty.tla,apalache,10h,,CorrectnessInv,,--length=30 +35,MC7_5_faulty.tla,apalache,10h,,PrecisionInv,,--length=30 
+36,MC7_5_faulty.tla,apalache,10h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +37,MC7_5_faulty.tla,apalache,10h,,NoFailedBlocksOnSuccessInv,,--length=30 +38,MC7_5_faulty.tla,apalache,10h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +39,MC7_5_faulty.tla,apalache,10h,,CorrectPrimaryAndTimeliness,,--length=30 +40,MC7_5_faulty.tla,apalache,10h,,Complexity,,--length=30 +41,MC4_7_faulty.tla,apalache,10h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +42,MC4_7_faulty.tla,apalache,10h,,CorrectnessInv,,--length=30 +43,MC4_7_faulty.tla,apalache,10h,,PrecisionInv,,--length=30 +44,MC4_7_faulty.tla,apalache,10h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +45,MC4_7_faulty.tla,apalache,10h,,NoFailedBlocksOnSuccessInv,,--length=30 +46,MC4_7_faulty.tla,apalache,10h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +47,MC4_7_faulty.tla,apalache,10h,,CorrectPrimaryAndTimeliness,,--length=30 +48,MC4_7_faulty.tla,apalache,10h,,Complexity,,--length=30 diff --git a/spec/light-client/verification/002bmc-apalache-ok.csv b/spec/light-client/verification/002bmc-apalache-ok.csv new file mode 100644 index 0000000000..eb26aa89e5 --- /dev/null +++ b/spec/light-client/verification/002bmc-apalache-ok.csv @@ -0,0 +1,55 @@ +no;filename;tool;timeout;init;inv;next;args +1;MC4_3_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=5 +2;MC4_3_correct.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=5 +3;MC4_3_correct.tla;apalache;1h;;CorrectnessInv;;--length=5 +4;MC4_3_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=5 +5;MC4_3_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=5 +6;MC4_3_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=5 +7;MC4_3_correct.tla;apalache;1h;;Complexity;;--length=5 +8;MC4_3_correct.tla;apalache;1h;;ApiPostInv;;--length=5 +9;MC4_4_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=7 +10;MC4_4_correct.tla;apalache;1h;;CorrectnessInv;;--length=7 
+11;MC4_4_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=7 +12;MC4_4_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=7 +13;MC4_4_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=7 +14;MC4_4_correct.tla;apalache;1h;;Complexity;;--length=7 +15;MC4_4_correct.tla;apalache;1h;;ApiPostInv;;--length=7 +16;MC4_5_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +17;MC4_5_correct.tla;apalache;1h;;CorrectnessInv;;--length=11 +18;MC4_5_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +19;MC4_5_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +20;MC4_5_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +21;MC4_5_correct.tla;apalache;1h;;Complexity;;--length=11 +22;MC4_5_correct.tla;apalache;1h;;ApiPostInv;;--length=11 +23;MC5_5_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +24;MC5_5_correct.tla;apalache;1h;;CorrectnessInv;;--length=11 +25;MC5_5_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +26;MC5_5_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +27;MC5_5_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +28;MC5_5_correct.tla;apalache;1h;;Complexity;;--length=11 +29;MC5_5_correct.tla;apalache;1h;;ApiPostInv;;--length=11 +30;MC4_3_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=5 +31;MC4_3_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=5 +32;MC4_3_faulty.tla;apalache;1h;;CorrectnessInv;;--length=5 +33;MC4_3_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=5 +34;MC4_3_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=5 +35;MC4_3_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=5 +36;MC4_3_faulty.tla;apalache;1h;;Complexity;;--length=5 +37;MC4_3_faulty.tla;apalache;1h;;ApiPostInv;;--length=5 +38;MC4_4_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=7 +39;MC4_4_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=7 
+40;MC4_4_faulty.tla;apalache;1h;;CorrectnessInv;;--length=7 +41;MC4_4_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=7 +42;MC4_4_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=7 +43;MC4_4_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=7 +44;MC4_4_faulty.tla;apalache;1h;;Complexity;;--length=7 +45;MC4_4_faulty.tla;apalache;1h;;ApiPostInv;;--length=7 +46;MC4_5_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +47;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 +48;MC4_5_faulty.tla;apalache;1h;;CorrectnessInv;;--length=11 +49;MC4_5_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +50;MC4_5_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +51;MC4_5_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +52;MC4_5_faulty.tla;apalache;1h;;Complexity;;--length=11 +53;MC4_5_faulty.tla;apalache;1h;;ApiPostInv;;--length=11 + diff --git a/spec/light-client/verification/003bmc-apalache-error.csv b/spec/light-client/verification/003bmc-apalache-error.csv new file mode 100644 index 0000000000..ad5ef96548 --- /dev/null +++ b/spec/light-client/verification/003bmc-apalache-error.csv @@ -0,0 +1,45 @@ +no;filename;tool;timeout;init;inv;next;args +1;MC4_3_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=5 +2;MC4_3_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=5 +3;MC4_3_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=5 +4;MC4_3_correct.tla;apalache;1h;;PrecisionInv;;--length=5 +5;MC4_3_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=5 +6;MC4_3_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=5 +7;MC4_3_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=5 +8;MC4_4_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=7 +9;MC4_4_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=7 
+10;MC4_4_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=7 +11;MC4_4_correct.tla;apalache;1h;;PrecisionInv;;--length=7 +12;MC4_4_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=7 +13;MC4_4_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=7 +14;MC4_4_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=7 +15;MC4_5_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=11 +16;MC4_5_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=11 +17;MC4_5_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=11 +18;MC4_5_correct.tla;apalache;1h;;PrecisionInv;;--length=11 +19;MC4_5_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=11 +20;MC4_5_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=11 +21;MC4_5_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=11 +22;MC4_5_correct.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 +23;MC4_3_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=5 +24;MC4_3_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=5 +25;MC4_3_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=5 +26;MC4_3_faulty.tla;apalache;1h;;PrecisionInv;;--length=5 +27;MC4_3_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=5 +28;MC4_3_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=5 +29;MC4_3_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=5 +30;MC4_4_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=7 +31;MC4_4_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=7 +32;MC4_4_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=7 +33;MC4_4_faulty.tla;apalache;1h;;PrecisionInv;;--length=7 +34;MC4_4_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=7 +35;MC4_4_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=7 
+36;MC4_4_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=7 +37;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=11 +38;MC4_5_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=11 +39;MC4_5_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=11 +40;MC4_5_faulty.tla;apalache;1h;;PrecisionInv;;--length=11 +41;MC4_5_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=11 +42;MC4_5_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=11 +43;MC4_5_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=11 +44;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 diff --git a/spec/light-client/verification/004bmc-apalache-ok.csv b/spec/light-client/verification/004bmc-apalache-ok.csv new file mode 100644 index 0000000000..bf4f53ea2a --- /dev/null +++ b/spec/light-client/verification/004bmc-apalache-ok.csv @@ -0,0 +1,10 @@ +no;filename;tool;timeout;init;inv;next;args +1;LCD_MC3_3_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +2;LCD_MC3_3_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +3;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +4;LCD_MC3_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +5;LCD_MC3_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +6;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +7;LCD_MC4_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +8;LCD_MC4_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +9;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 diff --git a/spec/light-client/verification/005bmc-apalache-error.csv b/spec/light-client/verification/005bmc-apalache-error.csv new file mode 100644 index 0000000000..1b9dd05ca9 --- /dev/null +++ b/spec/light-client/verification/005bmc-apalache-error.csv @@ -0,0 +1,4 @@ +no;filename;tool;timeout;init;inv;next;args 
+1;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 +2;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 +3;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 diff --git a/spec/light-client/verification/Blockchain_002_draft.tla b/spec/light-client/verification/Blockchain_002_draft.tla new file mode 100644 index 0000000000..f2ca5aba5a --- /dev/null +++ b/spec/light-client/verification/Blockchain_002_draft.tla @@ -0,0 +1,171 @@ +------------------------ MODULE Blockchain_002_draft ----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). + *) +EXTENDS Integers, FiniteSets + +Min(a, b) == IF a < b THEN a ELSE b + +CONSTANT + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + ULTIMATE_HEIGHT, + (* a maximal height that can be ever reached (modelling artifact) *) + TRUSTING_PERIOD + (* the period within which the validators are trusted *) + +Heights == 1..ULTIMATE_HEIGHT (* possible heights *) + +(* A commit is just a set of nodes who have committed the block *) +Commits == SUBSET AllNodes + +(* The set of all block headers that can be on the blockchain. + This is a simplified version of the Block data structure in the actual implementation. *) +BlockHeaders == [ + height: Heights, + \* the block height + time: Int, + \* the block timestamp in some integer units + lastCommit: Commits, + \* the nodes who have voted on the previous block, the set itself instead of a hash + (* in the implementation, only the hashes of V and NextV are stored in a block, + as V and NextV are stored in the application state *) + VS: SUBSET AllNodes, + \* the validators of this bloc. We store the validators instead of the hash. 
+ NextVS: SUBSET AllNodes + \* the validators of the next block. We store the next validators instead of the hash. +] + +(* A signed header is just a header together with a set of commits *) +LightBlocks == [header: BlockHeaders, Commits: Commits] + +VARIABLES + now, + (* the current global time in integer units *) + blockchain, + (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *) + Faulty + (* A set of faulty nodes, which can act as validators. We assume that the set + of faulty processes is non-decreasing. If a process has recovered, it should + connect using a different id. *) + +(* all variables, to be used with UNCHANGED *) +vars == <> + +(* The set of all correct nodes in a state *) +Corr == AllNodes \ Faulty + +(* APALACHE annotations *) +a <: b == a \* type annotation + +NT == STRING +NodeSet(S) == S <: {NT} +EmptyNodeSet == NodeSet({}) + +BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}] + +LBT == [header |-> BT, Commits |-> {NT}] +(* end of APALACHE annotations *) + +(****************************** BLOCKCHAIN ************************************) + +(* the header is still within the trusting period *) +InTrustingPeriod(header) == + now < header.time + TRUSTING_PERIOD + +(* + Given a function pVotingPower \in D -> Powers for some D \subseteq AllNodes + and pNodes \subseteq D, test whether the set pNodes \subseteq AllNodes has + more than 2/3 of voting power among the nodes in D. + *) +TwoThirds(pVS, pNodes) == + LET TP == Cardinality(pVS) + SP == Cardinality(pVS \intersect pNodes) + IN + 3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP + +(* + Given a set of FaultyNodes, test whether the voting power of the correct nodes in D + is more than 2/3 of the voting power of the faulty nodes in D. 
+ *) +IsCorrectPower(pFaultyNodes, pVS) == + LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pNodes + CN == pVS \ pFaultyNodes \* correct nodes in pNodes + CP == Cardinality(CN) \* power of the correct nodes + FP == Cardinality(FN) \* power of the faulty nodes + IN + \* CP + FP = TP is the total voting power, so we write CP > 2.0 / 3 * TP as follows: + CP > 2 * FP \* Note: when FP = 0, this implies CP > 0. + +(* This is what we believe is the assumption about failures in Tendermint *) +FaultAssumption(pFaultyNodes, pNow, pBlockchain) == + \A h \in Heights: + pBlockchain[h].time + TRUSTING_PERIOD > pNow => + IsCorrectPower(pFaultyNodes, pBlockchain[h].NextVS) + +(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *) +IsLightBlockAllowedByDigitalSignatures(ht, block) == + \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe) + \/ block.Commits \subseteq Faulty /\ block.header.height = ht /\ block.header.time >= 0 \* signed only by faulty + +(* + Initialize the blockchain to the ultimate height right in the initial states. + We pick the faulty validators statically, but that should not affect the light client. 
+ *) +InitToHeight == + /\ Faulty \in SUBSET AllNodes \* some nodes may fail + \* pick the validator sets and last commits + /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]: + \E timestamp \in [Heights -> Int]: + \* now is at least as early as the timestamp in the last block + /\ \E tm \in Int: now = tm /\ tm >= timestamp[ULTIMATE_HEIGHT] + \* the genesis starts on day 1 + /\ timestamp[1] = 1 + /\ vs[1] = AllNodes + /\ lastCommit[1] = EmptyNodeSet + /\ \A h \in Heights \ {1}: + /\ lastCommit[h] \subseteq vs[h - 1] \* the non-validators cannot commit + /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes + /\ IsCorrectPower(Faulty, vs[h]) \* the correct validators have >2/3 of power + /\ timestamp[h] > timestamp[h - 1] \* the time grows monotonically + /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast + \* form the block chain out of validator sets and commits (this makes apalache faster) + /\ blockchain = [h \in Heights |-> + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes, + lastCommit |-> lastCommit[h]] + ] \****** + + +(* is the blockchain in the faulty zone where the Tendermint security model does not apply *) +InFaultyZone == + ~FaultAssumption(Faulty, now, blockchain) + +(********************* BLOCKCHAIN ACTIONS ********************************) +(* + Advance the clock by zero or more time units. + *) +AdvanceTime == + \E tm \in Int: tm >= now /\ now' = tm + /\ UNCHANGED <> + +(* + One more process fails. As a result, the blockchain may move into the faulty zone. + The light client is not using this action, as the faults are picked in the initial state. + However, this action may be useful when reasoning about fork detection. 
+ *) +OneMoreFault == + /\ \E n \in AllNodes \ Faulty: + /\ Faulty' = Faulty \cup {n} + /\ Faulty' /= AllNodes \* at least one process remains non-faulty + /\ UNCHANGED <> +============================================================================= +\* Modification History +\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor +\* Created Fri Oct 11 15:45:11 CEST 2019 by igor diff --git a/spec/light-client/verification/Blockchain_003_draft.tla b/spec/light-client/verification/Blockchain_003_draft.tla new file mode 100644 index 0000000000..2b37c1b181 --- /dev/null +++ b/spec/light-client/verification/Blockchain_003_draft.tla @@ -0,0 +1,164 @@ +------------------------ MODULE Blockchain_003_draft ----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). + *) +EXTENDS Integers, FiniteSets + +Min(a, b) == IF a < b THEN a ELSE b + +CONSTANT + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + ULTIMATE_HEIGHT, + (* a maximal height that can be ever reached (modelling artifact) *) + TRUSTING_PERIOD + (* the period within which the validators are trusted *) + +Heights == 1..ULTIMATE_HEIGHT (* possible heights *) + +(* A commit is just a set of nodes who have committed the block *) +Commits == SUBSET AllNodes + +(* The set of all block headers that can be on the blockchain. + This is a simplified version of the Block data structure in the actual implementation. 
*) +BlockHeaders == [ + height: Heights, + \* the block height + time: Int, + \* the block timestamp in some integer units + lastCommit: Commits, + \* the nodes who have voted on the previous block, the set itself instead of a hash + (* in the implementation, only the hashes of V and NextV are stored in a block, + as V and NextV are stored in the application state *) + VS: SUBSET AllNodes, + \* the validators of this bloc. We store the validators instead of the hash. + NextVS: SUBSET AllNodes + \* the validators of the next block. We store the next validators instead of the hash. +] + +(* A signed header is just a header together with a set of commits *) +LightBlocks == [header: BlockHeaders, Commits: Commits] + +VARIABLES + refClock, + (* the current global time in integer units as perceived by the reference chain *) + blockchain, + (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *) + Faulty + (* A set of faulty nodes, which can act as validators. We assume that the set + of faulty processes is non-decreasing. If a process has recovered, it should + connect using a different id. *) + +(* all variables, to be used with UNCHANGED *) +vars == <> + +(* The set of all correct nodes in a state *) +Corr == AllNodes \ Faulty + +(* APALACHE annotations *) +a <: b == a \* type annotation + +NT == STRING +NodeSet(S) == S <: {NT} +EmptyNodeSet == NodeSet({}) + +BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}] + +LBT == [header |-> BT, Commits |-> {NT}] +(* end of APALACHE annotations *) + +(****************************** BLOCKCHAIN ************************************) + +(* the header is still within the trusting period *) +InTrustingPeriod(header) == + refClock < header.time + TRUSTING_PERIOD + +(* + Given a function pVotingPower \in D -> Powers for some D \subseteq AllNodes + and pNodes \subseteq D, test whether the set pNodes \subseteq AllNodes has + more than 2/3 of voting power among the nodes in D. 
+ *) +TwoThirds(pVS, pNodes) == + LET TP == Cardinality(pVS) + SP == Cardinality(pVS \intersect pNodes) + IN + 3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP + +(* + Given a set of FaultyNodes, test whether the voting power of the correct nodes in D + is more than 2/3 of the voting power of the faulty nodes in D. + + Parameters: + - pFaultyNodes is a set of nodes that are considered faulty + - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc. + - pMaxFaultRatio is a pair <> that limits the ratio a / b of the faulty + validators from above (exclusive) + *) +FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) == + LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pNodes + CN == pVS \ pFaultyNodes \* correct nodes in pNodes + CP == Cardinality(CN) \* power of the correct nodes + FP == Cardinality(FN) \* power of the faulty nodes + IN + \* CP + FP = TP is the total voting power + LET TP == CP + FP IN + FP * maxRatio[2] < TP * maxRatio[1] + +(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *) +IsLightBlockAllowedByDigitalSignatures(ht, block) == + \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe) + \/ /\ block.Commits \subseteq Faulty + /\ block.header.height = ht + /\ block.header.time >= 0 \* signed only by faulty + +(* + Initialize the blockchain to the ultimate height right in the initial states. + We pick the faulty validators statically, but that should not affect the light client. 
+ + Parameters: + - pMaxFaultyRatioExclusive is a pair <> that bound the number of + faulty validators in each block by the ratio a / b (exclusive) + *) +InitToHeight(pMaxFaultyRatioExclusive) == + /\ Faulty \in SUBSET AllNodes \* some nodes may fail + \* pick the validator sets and last commits + /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]: + \E timestamp \in [Heights -> Int]: + \* refClock is at least as early as the timestamp in the last block + /\ \E tm \in Int: refClock = tm /\ tm >= timestamp[ULTIMATE_HEIGHT] + \* the genesis starts on day 1 + /\ timestamp[1] = 1 + /\ vs[1] = AllNodes + /\ lastCommit[1] = EmptyNodeSet + /\ \A h \in Heights \ {1}: + /\ lastCommit[h] \subseteq vs[h - 1] \* the non-validators cannot commit + /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes + \* the faulty validators have the power below the threshold + /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive) + /\ timestamp[h] > timestamp[h - 1] \* the time grows monotonically + /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast + \* form the block chain out of validator sets and commits (this makes apalache faster) + /\ blockchain = [h \in Heights |-> + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes, + lastCommit |-> lastCommit[h]] + ] \****** + +(********************* BLOCKCHAIN ACTIONS ********************************) +(* + Advance the clock by zero or more time units. 
+ *) +AdvanceTime == + /\ \E tm \in Int: tm >= refClock /\ refClock' = tm + /\ UNCHANGED <> + +============================================================================= +\* Modification History +\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor +\* Created Fri Oct 11 15:45:11 CEST 2019 by igor diff --git a/spec/light-client/verification/Blockchain_A_1.tla b/spec/light-client/verification/Blockchain_A_1.tla new file mode 100644 index 0000000000..70f59bf975 --- /dev/null +++ b/spec/light-client/verification/Blockchain_A_1.tla @@ -0,0 +1,171 @@ +------------------------ MODULE Blockchain_A_1 ----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). + *) +EXTENDS Integers, FiniteSets + +Min(a, b) == IF a < b THEN a ELSE b + +CONSTANT + AllNodes, + (* a set of all nodes that can act as validators (correct and faulty) *) + ULTIMATE_HEIGHT, + (* a maximal height that can be ever reached (modelling artifact) *) + TRUSTING_PERIOD + (* the period within which the validators are trusted *) + +Heights == 1..ULTIMATE_HEIGHT (* possible heights *) + +(* A commit is just a set of nodes who have committed the block *) +Commits == SUBSET AllNodes + +(* The set of all block headers that can be on the blockchain. + This is a simplified version of the Block data structure in the actual implementation. 
*) +BlockHeaders == [ + height: Heights, + \* the block height + time: Int, + \* the block timestamp in some integer units + lastCommit: Commits, + \* the nodes who have voted on the previous block, the set itself instead of a hash + (* in the implementation, only the hashes of V and NextV are stored in a block, + as V and NextV are stored in the application state *) + VS: SUBSET AllNodes, + \* the validators of this bloc. We store the validators instead of the hash. + NextVS: SUBSET AllNodes + \* the validators of the next block. We store the next validators instead of the hash. +] + +(* A signed header is just a header together with a set of commits *) +LightBlocks == [header: BlockHeaders, Commits: Commits] + +VARIABLES + now, + (* the current global time in integer units *) + blockchain, + (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *) + Faulty + (* A set of faulty nodes, which can act as validators. We assume that the set + of faulty processes is non-decreasing. If a process has recovered, it should + connect using a different id. *) + +(* all variables, to be used with UNCHANGED *) +vars == <> + +(* The set of all correct nodes in a state *) +Corr == AllNodes \ Faulty + +(* APALACHE annotations *) +a <: b == a \* type annotation + +NT == STRING +NodeSet(S) == S <: {NT} +EmptyNodeSet == NodeSet({}) + +BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}] + +LBT == [header |-> BT, Commits |-> {NT}] +(* end of APALACHE annotations *) + +(****************************** BLOCKCHAIN ************************************) + +(* the header is still within the trusting period *) +InTrustingPeriod(header) == + now <= header.time + TRUSTING_PERIOD + +(* + Given a function pVotingPower \in D -> Powers for some D \subseteq AllNodes + and pNodes \subseteq D, test whether the set pNodes \subseteq AllNodes has + more than 2/3 of voting power among the nodes in D. 
+ *) +TwoThirds(pVS, pNodes) == + LET TP == Cardinality(pVS) + SP == Cardinality(pVS \intersect pNodes) + IN + 3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP + +(* + Given a set of FaultyNodes, test whether the voting power of the correct nodes in D + is more than 2/3 of the voting power of the faulty nodes in D. + *) +IsCorrectPower(pFaultyNodes, pVS) == + LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pNodes + CN == pVS \ pFaultyNodes \* correct nodes in pNodes + CP == Cardinality(CN) \* power of the correct nodes + FP == Cardinality(FN) \* power of the faulty nodes + IN + \* CP + FP = TP is the total voting power, so we write CP > 2.0 / 3 * TP as follows: + CP > 2 * FP \* Note: when FP = 0, this implies CP > 0. + +(* This is what we believe is the assumption about failures in Tendermint *) +FaultAssumption(pFaultyNodes, pNow, pBlockchain) == + \A h \in Heights: + pBlockchain[h].time + TRUSTING_PERIOD > pNow => + IsCorrectPower(pFaultyNodes, pBlockchain[h].NextVS) + +(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *) +IsLightBlockAllowedByDigitalSignatures(ht, block) == + \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe) + \/ block.Commits \subseteq Faulty /\ block.header.height = ht \* signed only by faulty + +(* + Initialize the blockchain to the ultimate height right in the initial states. + We pick the faulty validators statically, but that should not affect the light client. 
+ *) +InitToHeight == + /\ Faulty \in SUBSET AllNodes \* some nodes may fail + \* pick the validator sets and last commits + /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]: + \E timestamp \in [Heights -> Int]: + \* now is at least as early as the timestamp in the last block + /\ \E tm \in Int: now = tm /\ tm >= timestamp[ULTIMATE_HEIGHT] + \* the genesis starts on day 1 + /\ timestamp[1] = 1 + /\ vs[1] = AllNodes + /\ lastCommit[1] = EmptyNodeSet + /\ \A h \in Heights \ {1}: + /\ lastCommit[h] \subseteq vs[h - 1] \* the non-validators cannot commit + /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes + /\ IsCorrectPower(Faulty, vs[h]) \* the correct validators have >2/3 of power + /\ timestamp[h] > timestamp[h - 1] \* the time grows monotonically + /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast + \* form the block chain out of validator sets and commits (this makes apalache faster) + /\ blockchain = [h \in Heights |-> + [height |-> h, + time |-> timestamp[h], + VS |-> vs[h], + NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes, + lastCommit |-> lastCommit[h]] + ] \****** + + +(* is the blockchain in the faulty zone where the Tendermint security model does not apply *) +InFaultyZone == + ~FaultAssumption(Faulty, now, blockchain) + +(********************* BLOCKCHAIN ACTIONS ********************************) +(* + Advance the clock by zero or more time units. + *) +AdvanceTime == + \E tm \in Int: tm >= now /\ now' = tm + /\ UNCHANGED <> + +(* + One more process fails. As a result, the blockchain may move into the faulty zone. + The light client is not using this action, as the faults are picked in the initial state. + However, this action may be useful when reasoning about fork detection. 
+ *) +OneMoreFault == + /\ \E n \in AllNodes \ Faulty: + /\ Faulty' = Faulty \cup {n} + /\ Faulty' /= AllNodes \* at least one process remains non-faulty + /\ UNCHANGED <> +============================================================================= +\* Modification History +\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor +\* Created Fri Oct 11 15:45:11 CEST 2019 by igor diff --git a/spec/light-client/verification/LCVerificationApi_003_draft.tla b/spec/light-client/verification/LCVerificationApi_003_draft.tla new file mode 100644 index 0000000000..909eab92b8 --- /dev/null +++ b/spec/light-client/verification/LCVerificationApi_003_draft.tla @@ -0,0 +1,192 @@ +-------------------- MODULE LCVerificationApi_003_draft -------------------------- +(** + * The common interface of the light client verification and detection. + *) +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + CLOCK_DRIFT, + (* the assumed precision of the clock *) + REAL_CLOCK_DRIFT, + (* the actual clock drift, which under normal circumstances should not + be larger than CLOCK_DRIFT (otherwise, there will be a bug) *) + FAULTY_RATIO + (* a pair <> that limits the ratio of faulty validators in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. 
*) + +VARIABLES + localClock (* current time as measured by the light client *) + +(* the header is still within the trusting period *) +InTrustingPeriodLocal(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT + +(* the header is still within the trusting period, even if the clock can go backwards *) +InTrustingPeriodLocalSurely(header) == + \* note that the assumption about the drift reduces the period of trust + localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT + +(* ensure that the local clock does not drift far away from the global clock *) +IsLocalClockWithinDrift(local, global) == + /\ global - REAL_CLOCK_DRIFT <= local + /\ local <= global + REAL_CLOCK_DRIFT + +(** + * Check that the commits in an untrusted block form 1/3 of the next validators + * in a trusted header. + *) +SignedByOneThirdOfTrusted(trusted, untrusted) == + LET TP == Cardinality(trusted.header.NextVS) + SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS) + IN + 3 * SP > TP + +(** + The first part of the precondition of ValidAndVerified, which does not take + the current time into account. + + [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1] + *) +ValidAndVerifiedPreUntimed(trusted, untrusted) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ thdr.height < uhdr.height + \* the trusted block has been created earlier + /\ thdr.time < uhdr.time + /\ untrusted.Commits \subseteq uhdr.VS + /\ LET TP == Cardinality(uhdr.VS) + SP == Cardinality(untrusted.Commits) + IN + 3 * SP > 2 * TP + /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS + (* As we do not have explicit hashes we ignore these three checks of the English spec: + + 1. "trusted.Commit is a commit is for the header trusted.Header, + i.e. it contains the correct hash of the header". + 2. untrusted.Validators = hash(untrusted.Header.Validators) + 3. 
untrusted.NextValidators = hash(untrusted.Header.NextValidators) + *) + +(** + Check the precondition of ValidAndVerified, including the time checks. + + [LCV-FUNC-VALID.1::TLA-PRE.1] + *) +ValidAndVerifiedPre(trusted, untrusted, checkFuture) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ InTrustingPeriodLocal(thdr) + \* The untrusted block is not from the future (modulo clock drift). + \* Do the check, if it is required. + /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT + /\ ValidAndVerifiedPreUntimed(trusted, untrusted) + + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. + This test does take current time into account, but only looks at the block structure. + + [LCV-FUNC-VALID.1::TLA-UNTIMED.1] + *) +ValidAndVerifiedUntimed(trusted, untrusted) == + IF ~ValidAndVerifiedPreUntimed(trusted, untrusted) + THEN "INVALID" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. + + [LCV-FUNC-VALID.1::TLA.1] + *) +ValidAndVerified(trusted, untrusted, checkFuture) == + IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture) + THEN "INVALID" + ELSE IF ~InTrustingPeriodLocal(untrusted.header) + (* We leave the following test for the documentation purposes. + The implementation should do this test, as signature verification may be slow. + In the TLA+ specification, ValidAndVerified happens in no time. 
+ *) + THEN "FAILED_TRUSTING_PERIOD" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + + +(** + The invariant of the light store that is not related to the blockchain + *) +LightStoreInv(fetchedLightBlocks, lightBlockStatus) == + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] /= "StateVerified" + \/ lightBlockStatus[rh] /= "StateVerified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ LET lhdr == fetchedLightBlocks[lh] + rhdr == fetchedLightBlocks[rh] + IN + \* we can verify the right one using the left one + "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr) + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + * When the light client terminates, there are no failed blocks. + * (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +(** + The expected post-condition of VerifyToTarget. + *) +VerifyToTargetPost(blockchain, isPeerCorrect, + fetchedLightBlocks, lightBlockStatus, + trustedHeight, targetHeight, finalState) == + LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN + \* The light client is not lying us on the trusted block. 
+ \* It is straightforward to detect. + /\ lightBlockStatus[trustedHeight] = "StateVerified" + /\ trustedHeight \in DOMAIN fetchedLightBlocks + /\ trustedHeader = blockchain[trustedHeight] + \* the invariants we have found in the light client verification + \* there is a problem with trusting period + /\ isPeerCorrect + => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) + \* a correct peer should fail the light client, + \* if the trusted block is in the trusting period + /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader) + => finalState = "finishedSuccess" + /\ finalState = "finishedSuccess" => + /\ lightBlockStatus[targetHeight] = "StateVerified" + /\ targetHeight \in DOMAIN fetchedLightBlocks + /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) + /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus) + + +================================================================================== diff --git a/spec/light-client/verification/Lightclient_002_draft.tla b/spec/light-client/verification/Lightclient_002_draft.tla new file mode 100644 index 0000000000..32c807f6e6 --- /dev/null +++ b/spec/light-client/verification/Lightclient_002_draft.tla @@ -0,0 +1,465 @@ +-------------------------- MODULE Lightclient_002_draft ---------------------------- +(** + * A state-machine specification of the lite client, following the English spec: + * + * https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification.md + *) + +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTED_HEIGHT, + (* an index of the block header that the light client trusts by social consensus *) + TARGET_HEIGHT, + (* an index of the block header that the light client tries to verify *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + IS_PRIMARY_CORRECT + (* is primary correct? 
*) + +VARIABLES (* see TypeOK below for the variable types *) + state, (* the current state of the light client *) + nextHeight, (* the next height to explore by the light client *) + nprobes (* the lite client iteration, or the number of block tests *) + +(* the light store *) +VARIABLES + fetchedLightBlocks, (* a function from heights to LightBlocks *) + lightBlockStatus, (* a function from heights to block statuses *) + latestVerified (* the latest verified block *) + +(* the variables of the lite client *) +lcvars == <> + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevNow, + prevVerdict + +InitMonitor(verified, current, now, verdict) == + /\ prevVerified = verified + /\ prevCurrent = current + /\ prevNow = now + /\ prevVerdict = verdict + +NextMonitor(verified, current, now, verdict) == + /\ prevVerified' = verified + /\ prevCurrent' = current + /\ prevNow' = now + /\ prevVerdict' = verdict + + +(******************* Blockchain instance ***********************************) + +\* the parameters that are propagated into Blockchain +CONSTANTS + AllNodes + (* a set of all nodes that can act as validators (correct and faulty) *) + +\* the state variables of Blockchain, see Blockchain.tla for the details +VARIABLES now, blockchain, Faulty + +\* All the variables of Blockchain. For some reason, BC!vars does not work +bcvars == <> + +(* Create an instance of Blockchain. + We could write EXTENDS Blockchain, but then all the constants and state variables + would be hidden inside the Blockchain module. 
+ *) +ULTIMATE_HEIGHT == TARGET_HEIGHT + 1 + +BC == INSTANCE Blockchain_002_draft WITH + now <- now, blockchain <- blockchain, Faulty <- Faulty + +(************************** Lite client ************************************) + +(* the heights on which the light client is working *) +HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT + +(* the control states of the lite client *) +States == { "working", "finishedSuccess", "finishedFailure" } + +(** + Check the precondition of ValidAndVerified. + + [LCV-FUNC-VALID.1::TLA-PRE.1] + *) +ValidAndVerifiedPre(trusted, untrusted) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ BC!InTrustingPeriod(thdr) + /\ thdr.height < uhdr.height + \* the trusted block has been created earlier (no drift here) + /\ thdr.time < uhdr.time + \* the untrusted block is not from the future + /\ uhdr.time < now + /\ untrusted.Commits \subseteq uhdr.VS + /\ LET TP == Cardinality(uhdr.VS) + SP == Cardinality(untrusted.Commits) + IN + 3 * SP > 2 * TP + /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS + (* As we do not have explicit hashes we ignore these three checks of the English spec: + + 1. "trusted.Commit is a commit is for the header trusted.Header, + i.e. it contains the correct hash of the header". + 2. untrusted.Validators = hash(untrusted.Header.Validators) + 3. untrusted.NextValidators = hash(untrusted.Header.NextValidators) + *) + +(** + * Check that the commits in an untrusted block form 1/3 of the next validators + * in a trusted header. + *) +SignedByOneThirdOfTrusted(trusted, untrusted) == + LET TP == Cardinality(trusted.header.NextVS) + SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS) + IN + 3 * SP > TP + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. 
+ + [LCV-FUNC-VALID.1::TLA.1] + *) +ValidAndVerified(trusted, untrusted) == + IF ~ValidAndVerifiedPre(trusted, untrusted) + THEN "INVALID" + ELSE IF ~BC!InTrustingPeriod(untrusted.header) + (* We leave the following test for the documentation purposes. + The implementation should do this test, as signature verification may be slow. + In the TLA+ specification, ValidAndVerified happens in no time. + *) + THEN "FAILED_TRUSTING_PERIOD" + ELSE IF untrusted.header.height = trusted.header.height + 1 + \/ SignedByOneThirdOfTrusted(trusted, untrusted) + THEN "SUCCESS" + ELSE "NOT_ENOUGH_TRUST" + +(* + Initial states of the light client. + Initially, only the trusted light block is present. + *) +LCInit == + /\ state = "working" + /\ nextHeight = TARGET_HEIGHT + /\ nprobes = 0 \* no tests have been done so far + /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT] + trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes] + IN + \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT + /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT + /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"] + \* the latest verified block the the trusted block + /\ latestVerified = trustedLightBlock + /\ InitMonitor(trustedLightBlock, trustedLightBlock, now, "SUCCESS") + +\* block should contain a copy of the block from the reference chain, with a matching commit +CopyLightBlockFromChain(block, height) == + LET ref == blockchain[height] + lastCommit == + IF height < ULTIMATE_HEIGHT + THEN blockchain[height + 1].lastCommit + \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1 + ELSE blockchain[height].VS + IN + block = [header |-> ref, Commits |-> lastCommit] + +\* Either the primary is correct and the block comes from the reference chain, +\* or the block is produced by a faulty primary. 
+\* +\* [LCV-FUNC-FETCH.1::TLA.1] +FetchLightBlockInto(block, height) == + IF IS_PRIMARY_CORRECT + THEN CopyLightBlockFromChain(block, height) + ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block) + +\* add a block into the light store +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateBlocks(lightBlocks, block) == + LET ht == block.header.height IN + [h \in DOMAIN lightBlocks \union {ht} |-> + IF h = ht THEN block ELSE lightBlocks[h]] + +\* update the state of a light block +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateStates(statuses, ht, blockState) == + [h \in DOMAIN statuses \union {ht} |-> + IF h = ht THEN blockState ELSE statuses[h]] + +\* Check, whether newHeight is a possible next height for the light client. +\* +\* [LCV-FUNC-SCHEDULE.1::TLA.1] +CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) == + LET ht == pLatestVerified.header.height IN + \/ /\ ht = pNextHeight + /\ ht < pTargetHeight + /\ pNextHeight < newHeight + /\ newHeight <= pTargetHeight + \/ /\ ht < pNextHeight + /\ ht < pTargetHeight + /\ ht < newHeight + /\ newHeight < pNextHeight + \/ /\ ht = pTargetHeight + /\ newHeight = pTargetHeight + +\* The loop of VerifyToTarget. 
+\* +\* [LCV-FUNC-MAIN.1::TLA-LOOP.1] +VerifyToTargetLoop == + \* the loop condition is true + /\ latestVerified.header.height < TARGET_HEIGHT + \* pick a light block, which will be constrained later + /\ \E current \in BC!LightBlocks: + \* Get next LightBlock for verification + /\ IF nextHeight \in DOMAIN fetchedLightBlocks + THEN \* copy the block from the light store + /\ current = fetchedLightBlocks[nextHeight] + /\ UNCHANGED fetchedLightBlocks + ELSE \* retrieve a light block and save it in the light store + /\ FetchLightBlockInto(current, nextHeight) + /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current) + \* Record that one more probe has been done (for complexity and model checking) + /\ nprobes' = nprobes + 1 + \* Verify the current block + /\ LET verdict == ValidAndVerified(latestVerified, current) IN + NextMonitor(latestVerified, current, now, verdict) /\ + \* Decide whether/how to continue + CASE verdict = "SUCCESS" -> + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified") + /\ latestVerified' = current + /\ state' = + IF latestVerified'.header.height < TARGET_HEIGHT + THEN "working" + ELSE "finishedSuccess" + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + + [] verdict = "NOT_ENOUGH_TRUST" -> + (* + do nothing: the light block current passed validation, but the validator + set is too different to verify it. We keep the state of + current at StateUnverified. For a later iteration, Schedule + might decide to try verification of that light block again. 
+ *) + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified") + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + /\ UNCHANGED <> + + [] OTHER -> + \* verdict is some error code + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed") + /\ state' = "finishedFailure" + /\ UNCHANGED <> + +\* The terminating condition of VerifyToTarget. +\* +\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1] +VerifyToTargetDone == + /\ latestVerified.header.height >= TARGET_HEIGHT + /\ state' = "finishedSuccess" + /\ UNCHANGED <> + /\ UNCHANGED <> + +(********************* Lite client + Blockchain *******************) +Init == + \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT + /\ BC!InitToHeight + \* the light client starts + /\ LCInit + +(* + The system step is very simple. + The light client is either executing VerifyToTarget, or it has terminated. + (In the latter case, a model checker reports a deadlock.) + Simultaneously, the global clock may advance. + *) +Next == + /\ state = "working" + /\ VerifyToTargetLoop \/ VerifyToTargetDone + /\ BC!AdvanceTime \* the global clock is advanced by zero or more time units + +(************************* Types ******************************************) +TypeOK == + /\ state \in States + /\ nextHeight \in HEIGHTS + /\ latestVerified \in BC!LightBlocks + /\ \E HS \in SUBSET HEIGHTS: + /\ fetchedLightBlocks \in [HS -> BC!LightBlocks] + /\ lightBlockStatus + \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}] + +(************************* Properties ******************************************) + +(* The properties to check *) +\* this invariant candidate is false +NeverFinish == + state = "working" + +\* this invariant candidate is false +NeverFinishNegative == + state /= "finishedFailure" + +\* This invariant holds true, when the primary is correct. 
+\* This invariant candidate is false when the primary is faulty. +NeverFinishNegativeWhenTrusted == + (*(minTrustedHeight <= TRUSTED_HEIGHT)*) + BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + => state /= "finishedFailure" + +\* this invariant candidate is false +NeverFinishPositive == + state /= "finishedSuccess" + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + Check that the sequence of the headers in storedLightBlocks satisfies ValidAndVerified = "SUCCESS" pairwise + This property is easily violated, whenever a header cannot be trusted anymore. + *) +StoredHeadersAreVerifiedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + +\* An improved version of StoredHeadersAreSound, assuming that a header may be not trusted. +\* This invariant candidate is also violated, +\* as there may be some unverified blocks left in the middle. 
+StoredHeadersAreVerifiedOrNotTrustedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + \* or the left header is outside the trusting period, so no guarantees + \/ ~BC!InTrustingPeriod(fetchedLightBlocks[lh].header) + +(** + * An improved version of StoredHeadersAreSoundOrNotTrusted, + * checking the property only for the verified headers. + * This invariant holds true. + *) +ProofOfChainOfTrustInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] = "StateUnverified" + \/ lightBlockStatus[rh] = "StateUnverified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ ~(BC!InTrustingPeriod(fetchedLightBlocks[lh].header)) + \* or we can verify the right one using the left one + \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + +(** + * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv == + state = "finishedSuccess" => + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +\* This property states that whenever the light client finishes with a positive outcome, +\* the trusted header is still within the trusting period. +\* We expect this property to be violated. And Apalache shows us a counterexample. 
+PositiveBeforeTrustedHeaderExpires == + (state = "finishedSuccess") => BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + +\* If the primary is correct and the initial trusted block has not expired, +\* then whenever the algorithm terminates, it reports "success" +CorrectPrimaryAndTimeliness == + (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + If the primary is correct and there is a trusted block that has not expired, + then whenever the algorithm terminates, it reports "success". + + [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1] + *) +SuccessOnCorrectPrimaryAndChainOfTrust == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. 
+PrecisionInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + \/ lightBlock.header /= blockchain[h] + \* the full node lied to the lite client about the commits + \/ lightBlock.Commits /= lightBlock.header.VS + +\* the old invariant that was found to be buggy by TLC +PrecisionBuggyInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + lightBlock.header /= blockchain[h] + +\* the worst complexity +Complexity == + LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN + state /= "working" => + (2 * nprobes <= N * (N - 1)) + +(* + We omit termination, as the algorithm deadlocks in the end. + So termination can be demonstrated by finding a deadlock. + Of course, one has to analyze the deadlocked state and see that + the algorithm has indeed terminated there. 
+*) +============================================================================= +\* Modification History +\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor +\* Created Wed Oct 02 16:39:42 CEST 2019 by igor diff --git a/spec/light-client/verification/Lightclient_003_draft.tla b/spec/light-client/verification/Lightclient_003_draft.tla new file mode 100644 index 0000000000..e17a88491b --- /dev/null +++ b/spec/light-client/verification/Lightclient_003_draft.tla @@ -0,0 +1,493 @@ +-------------------------- MODULE Lightclient_003_draft ---------------------------- +(** + * A state-machine specification of the lite client verification, + * following the English spec: + * + * https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification.md + *) + +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTED_HEIGHT, + (* an index of the block header that the light client trusts by social consensus *) + TARGET_HEIGHT, + (* an index of the block header that the light client tries to verify *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + CLOCK_DRIFT, + (* the assumed precision of the clock *) + REAL_CLOCK_DRIFT, + (* the actual clock drift, which under normal circumstances should not + be larger than CLOCK_DRIFT (otherwise, there will be a bug) *) + IS_PRIMARY_CORRECT, + (* is primary correct? *) + FAULTY_RATIO + (* a pair <> that limits that ratio of faulty validator in the blockchain + from above (exclusive). Tendermint security model prescribes 1 / 3. 
*) + +VARIABLES (* see TypeOK below for the variable types *) + localClock, (* the local clock of the light client *) + state, (* the current state of the light client *) + nextHeight, (* the next height to explore by the light client *) + nprobes (* the lite client iteration, or the number of block tests *) + +(* the light store *) +VARIABLES + fetchedLightBlocks, (* a function from heights to LightBlocks *) + lightBlockStatus, (* a function from heights to block statuses *) + latestVerified (* the latest verified block *) + +(* the variables of the lite client *) +lcvars == <> + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +InitMonitor(verified, current, pLocalClock, verdict) == + /\ prevVerified = verified + /\ prevCurrent = current + /\ prevLocalClock = pLocalClock + /\ prevVerdict = verdict + +NextMonitor(verified, current, pLocalClock, verdict) == + /\ prevVerified' = verified + /\ prevCurrent' = current + /\ prevLocalClock' = pLocalClock + /\ prevVerdict' = verdict + + +(******************* Blockchain instance ***********************************) + +\* the parameters that are propagated into Blockchain +CONSTANTS + AllNodes + (* a set of all nodes that can act as validators (correct and faulty) *) + +\* the state variables of Blockchain, see Blockchain.tla for the details +VARIABLES refClock, blockchain, Faulty + +\* All the variables of Blockchain. For some reason, BC!vars does not work +bcvars == <> + +(* Create an instance of Blockchain. + We could write EXTENDS Blockchain, but then all the constants and state variables + would be hidden inside the Blockchain module. 
+ *) +ULTIMATE_HEIGHT == TARGET_HEIGHT + 1 + +BC == INSTANCE Blockchain_003_draft WITH + refClock <- refClock, blockchain <- blockchain, Faulty <- Faulty + +(************************** Lite client ************************************) + +(* the heights on which the light client is working *) +HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT + +(* the control states of the lite client *) +States == { "working", "finishedSuccess", "finishedFailure" } + +\* The verification functions are implemented in the API +API == INSTANCE LCVerificationApi_003_draft + + +(* + Initial states of the light client. + Initially, only the trusted light block is present. + *) +LCInit == + /\ \E tm \in Int: + tm >= 0 /\ API!IsLocalClockWithinDrift(tm, refClock) /\ localClock = tm + /\ state = "working" + /\ nextHeight = TARGET_HEIGHT + /\ nprobes = 0 \* no tests have been done so far + /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT] + trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes] + IN + \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT + /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT + /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"] + \* the latest verified block the the trusted block + /\ latestVerified = trustedLightBlock + /\ InitMonitor(trustedLightBlock, trustedLightBlock, localClock, "SUCCESS") + +\* block should contain a copy of the block from the reference chain, with a matching commit +CopyLightBlockFromChain(block, height) == + LET ref == blockchain[height] + lastCommit == + IF height < ULTIMATE_HEIGHT + THEN blockchain[height + 1].lastCommit + \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1 + ELSE blockchain[height].VS + IN + block = [header |-> ref, Commits |-> lastCommit] + +\* Either the primary is correct and the block comes from the reference chain, +\* or 
the block is produced by a faulty primary. +\* +\* [LCV-FUNC-FETCH.1::TLA.1] +FetchLightBlockInto(block, height) == + IF IS_PRIMARY_CORRECT + THEN CopyLightBlockFromChain(block, height) + ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block) + +\* add a block into the light store +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateBlocks(lightBlocks, block) == + LET ht == block.header.height IN + [h \in DOMAIN lightBlocks \union {ht} |-> + IF h = ht THEN block ELSE lightBlocks[h]] + +\* update the state of a light block +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateStates(statuses, ht, blockState) == + [h \in DOMAIN statuses \union {ht} |-> + IF h = ht THEN blockState ELSE statuses[h]] + +\* Check, whether newHeight is a possible next height for the light client. +\* +\* [LCV-FUNC-SCHEDULE.1::TLA.1] +CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) == + LET ht == pLatestVerified.header.height IN + \/ /\ ht = pNextHeight + /\ ht < pTargetHeight + /\ pNextHeight < newHeight + /\ newHeight <= pTargetHeight + \/ /\ ht < pNextHeight + /\ ht < pTargetHeight + /\ ht < newHeight + /\ newHeight < pNextHeight + \/ /\ ht = pTargetHeight + /\ newHeight = pTargetHeight + +\* The loop of VerifyToTarget. 
+\* +\* [LCV-FUNC-MAIN.1::TLA-LOOP.1] +VerifyToTargetLoop == + \* the loop condition is true + /\ latestVerified.header.height < TARGET_HEIGHT + \* pick a light block, which will be constrained later + /\ \E current \in BC!LightBlocks: + \* Get next LightBlock for verification + /\ IF nextHeight \in DOMAIN fetchedLightBlocks + THEN \* copy the block from the light store + /\ current = fetchedLightBlocks[nextHeight] + /\ UNCHANGED fetchedLightBlocks + ELSE \* retrieve a light block and save it in the light store + /\ FetchLightBlockInto(current, nextHeight) + /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current) + \* Record that one more probe has been done (for complexity and model checking) + /\ nprobes' = nprobes + 1 + \* Verify the current block + /\ LET verdict == API!ValidAndVerified(latestVerified, current, TRUE) IN + NextMonitor(latestVerified, current, localClock, verdict) /\ + \* Decide whether/how to continue + CASE verdict = "SUCCESS" -> + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified") + /\ latestVerified' = current + /\ state' = + IF latestVerified'.header.height < TARGET_HEIGHT + THEN "working" + ELSE "finishedSuccess" + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + + [] verdict = "NOT_ENOUGH_TRUST" -> + (* + do nothing: the light block current passed validation, but the validator + set is too different to verify it. We keep the state of + current at StateUnverified. For a later iteration, Schedule + might decide to try verification of that light block again. 
+ *) + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified") + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + /\ UNCHANGED <> + + [] OTHER -> + \* verdict is some error code + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed") + /\ state' = "finishedFailure" + /\ UNCHANGED <> + +\* The terminating condition of VerifyToTarget. +\* +\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1] +VerifyToTargetDone == + /\ latestVerified.header.height >= TARGET_HEIGHT + /\ state' = "finishedSuccess" + /\ UNCHANGED <> + /\ UNCHANGED <> + +(* + The local and global clocks can be updated. They can also drift from each other. + Note that the local clock can actually go backwards in time. + However, it still stays in the drift envelope + of [refClock - REAL_CLOCK_DRIFT, refClock + REAL_CLOCK_DRIFT]. + *) +AdvanceClocks == + /\ BC!AdvanceTime + /\ \E tm \in Int: + /\ tm >= 0 + /\ API!IsLocalClockWithinDrift(tm, refClock') + /\ localClock' = tm + \* if you like the clock to always grow monotonically, uncomment the next line: + \*/\ localClock' > localClock + +(********************* Lite client + Blockchain *******************) +Init == + \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT + /\ BC!InitToHeight(FAULTY_RATIO) + \* the light client starts + /\ LCInit + +(* + The system step is very simple. + The light client is either executing VerifyToTarget, or it has terminated. + (In the latter case, a model checker reports a deadlock.) + Simultaneously, the global clock may advance. 
+ *) +Next == + /\ state = "working" + /\ VerifyToTargetLoop \/ VerifyToTargetDone + /\ AdvanceClocks + +(************************* Types ******************************************) +TypeOK == + /\ state \in States + /\ localClock \in Nat + /\ refClock \in Nat + /\ nextHeight \in HEIGHTS + /\ latestVerified \in BC!LightBlocks + /\ \E HS \in SUBSET HEIGHTS: + /\ fetchedLightBlocks \in [HS -> BC!LightBlocks] + /\ lightBlockStatus + \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}] + +(************************* Properties ******************************************) + +(* The properties to check *) +\* this invariant candidate is false +NeverFinish == + state = "working" + +\* this invariant candidate is false +NeverFinishNegative == + state /= "finishedFailure" + +\* This invariant holds true, when the primary is correct. +\* This invariant candidate is false when the primary is faulty. +NeverFinishNegativeWhenTrusted == + BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + => state /= "finishedFailure" + +\* this invariant candidate is false +NeverFinishPositive == + state /= "finishedSuccess" + + +(** + Check that the target height has been reached upon successful termination. + *) +TargetHeightOnSuccessInv == + state = "finishedSuccess" => + /\ TARGET_HEIGHT \in DOMAIN fetchedLightBlocks + /\ lightBlockStatus[TARGET_HEIGHT] = "StateVerified" + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + No faulty block was used to construct a proof. This invariant holds, + only if FAULTY_RATIO < 1/3. 
+ *) +NoTrustOnFaultyBlockInv == + (state = "finishedSuccess" + /\ fetchedLightBlocks[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT]) + => CorrectnessInv + +(** + Check that the sequence of the headers in storedLightBlocks satisfies ValidAndVerified = "SUCCESS" pairwise + This property is easily violated, whenever a header cannot be trusted anymore. + *) +StoredHeadersAreVerifiedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh], + fetchedLightBlocks[rh], FALSE) + +\* An improved version of StoredHeadersAreVerifiedInv, +\* assuming that a header may be not trusted. +\* This invariant candidate is also violated, +\* as there may be some unverified blocks left in the middle. +\* This property is violated under two conditions: +\* (1) the primary is faulty and there are at least 4 blocks, +\* (2) the primary is correct and there are at least 5 blocks. +StoredHeadersAreVerifiedOrNotTrustedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh], + fetchedLightBlocks[rh], FALSE) + \* or the left header is outside the trusting period, so no guarantees + \/ ~API!InTrustingPeriodLocal(fetchedLightBlocks[lh].header) + +(** + * An improved version of StoredHeadersAreSoundOrNotTrusted, + * checking the property only for the verified headers. + * This invariant holds true if CLOCK_DRIFT <= REAL_CLOCK_DRIFT. 
+ *) +ProofOfChainOfTrustInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] = "StateUnverified" + \/ lightBlockStatus[rh] = "StateUnverified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ ~(API!InTrustingPeriodLocal(fetchedLightBlocks[lh].header)) + \* or we can verify the right one using the left one + \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh], + fetchedLightBlocks[rh], FALSE) + +(** + * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv == + state = "finishedSuccess" => + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +\* This property states that whenever the light client finishes with a positive outcome, +\* the trusted header is still within the trusting period. +\* We expect this property to be violated. And Apalache shows us a counterexample. +PositiveBeforeTrustedHeaderExpires == + (state = "finishedSuccess") => + BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + +\* If the primary is correct and the initial trusted block has not expired, +\* then whenever the algorithm terminates, it reports "success". +\* This property fails. +CorrectPrimaryAndTimeliness == + (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + If the primary is correct and there is a trusted block that has not expired, + then whenever the algorithm terminates, it reports "success". + This property only holds true, if the local clock is always growing monotonically. 
+ If the local clock can go backwards in the envelope + [refClock - CLOCK_DRIFT, refClock + CLOCK_DRIFT], then the property fails. + + [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1] + *) +SuccessOnCorrectPrimaryAndChainOfTrustLocal == + (\E h \in DOMAIN fetchedLightBlocks: + /\ lightBlockStatus[h] = "StateVerified" + /\ API!InTrustingPeriodLocal(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + Similar to SuccessOnCorrectPrimaryAndChainOfTrust, but using the blockchain clock. + It fails because the local clock of the client drifted away, so it rejects a block + that has not expired yet (according to the local clock). + *) +SuccessOnCorrectPrimaryAndChainOfTrustGlobal == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. +\* +\* This property holds only when CLOCK_DRIFT = 0 and REAL_CLOCK_DRIFT = 0. 
+PrecisionInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + \/ lightBlock.header /= blockchain[h] + \* the full node lied to the lite client about the commits + \/ lightBlock.Commits /= lightBlock.header.VS + +\* the old invariant that was found to be buggy by TLC +PrecisionBuggyInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + lightBlock.header /= blockchain[h] + +\* the worst complexity +Complexity == + LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN + state /= "working" => + (2 * nprobes <= N * (N - 1)) + +(** + If the light client has terminated, then the expected postcondition holds true. + *) +ApiPostInv == + state /= "working" => + API!VerifyToTargetPost(blockchain, IS_PRIMARY_CORRECT, + fetchedLightBlocks, lightBlockStatus, + TRUSTED_HEIGHT, TARGET_HEIGHT, state) + +(* + We omit termination, as the algorithm deadlocks in the end. + So termination can be demonstrated by finding a deadlock. + Of course, one has to analyze the deadlocked state and see that + the algorithm has indeed terminated there. 
+*) +============================================================================= +\* Modification History +\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor +\* Created Wed Oct 02 16:39:42 CEST 2019 by igor diff --git a/spec/light-client/verification/Lightclient_A_1.tla b/spec/light-client/verification/Lightclient_A_1.tla new file mode 100644 index 0000000000..70e6caf002 --- /dev/null +++ b/spec/light-client/verification/Lightclient_A_1.tla @@ -0,0 +1,440 @@ +-------------------------- MODULE Lightclient_A_1 ---------------------------- +(** + * A state-machine specification of the lite client, following the English spec: + * + * ./verification_001_published.md + *) + +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTED_HEIGHT, + (* an index of the block header that the light client trusts by social consensus *) + TARGET_HEIGHT, + (* an index of the block header that the light client tries to verify *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + IS_PRIMARY_CORRECT + (* is primary correct? *) + +VARIABLES (* see TypeOK below for the variable types *) + state, (* the current state of the light client *) + nextHeight, (* the next height to explore by the light client *) + nprobes (* the lite client iteration, or the number of block tests *) + +(* the light store *) +VARIABLES + fetchedLightBlocks, (* a function from heights to LightBlocks *) + lightBlockStatus, (* a function from heights to block statuses *) + latestVerified (* the latest verified block *) + +(* the variables of the lite client *) +lcvars == <> + +(******************* Blockchain instance ***********************************) + +\* the parameters that are propagated into Blockchain +CONSTANTS + AllNodes + (* a set of all nodes that can act as validators (correct and faulty) *) + +\* the state variables of Blockchain, see Blockchain.tla for the details +VARIABLES now, blockchain, Faulty + +\* All the variables of Blockchain. 
For some reason, BC!vars does not work +bcvars == <> + +(* Create an instance of Blockchain. + We could write EXTENDS Blockchain, but then all the constants and state variables + would be hidden inside the Blockchain module. + *) +ULTIMATE_HEIGHT == TARGET_HEIGHT + 1 + +BC == INSTANCE Blockchain_A_1 WITH + now <- now, blockchain <- blockchain, Faulty <- Faulty + +(************************** Lite client ************************************) + +(* the heights on which the light client is working *) +HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT + +(* the control states of the lite client *) +States == { "working", "finishedSuccess", "finishedFailure" } + +(** + Check the precondition of ValidAndVerified. + + [LCV-FUNC-VALID.1::TLA-PRE.1] + *) +ValidAndVerifiedPre(trusted, untrusted) == + LET thdr == trusted.header + uhdr == untrusted.header + IN + /\ BC!InTrustingPeriod(thdr) + /\ thdr.height < uhdr.height + \* the trusted block has been created earlier (no drift here) + /\ thdr.time <= uhdr.time + /\ untrusted.Commits \subseteq uhdr.VS + /\ LET TP == Cardinality(uhdr.VS) + SP == Cardinality(untrusted.Commits) + IN + 3 * SP > 2 * TP + /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS + (* As we do not have explicit hashes we ignore these three checks of the English spec: + + 1. "trusted.Commit is a commit is for the header trusted.Header, + i.e. it contains the correct hash of the header". + 2. untrusted.Validators = hash(untrusted.Header.Validators) + 3. untrusted.NextValidators = hash(untrusted.Header.NextValidators) + *) + +(** + * Check that the commits in an untrusted block form 1/3 of the next validators + * in a trusted header. + *) +SignedByOneThirdOfTrusted(trusted, untrusted) == + LET TP == Cardinality(trusted.header.NextVS) + SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS) + IN + 3 * SP > TP + +(** + Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header. 
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted) ==
+ IF ~ValidAndVerifiedPre(trusted, untrusted)
+ THEN "FAILED_VERIFICATION"
+ ELSE IF ~BC!InTrustingPeriod(untrusted.header)
+ (* We leave the following test for the documentation purposes.
+ The implementation should do this test, as signature verification may be slow.
+ In the TLA+ specification, ValidAndVerified happens in no time.
+ *)
+ THEN "FAILED_TRUSTING_PERIOD"
+ ELSE IF untrusted.header.height = trusted.header.height + 1
+ \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+ THEN "OK"
+ ELSE "CANNOT_VERIFY"
+
+(*
+ Initial states of the light client.
+ Initially, only the trusted light block is present.
+ *)
+LCInit ==
+ /\ state = "working"
+ /\ nextHeight = TARGET_HEIGHT
+ /\ nprobes = 0 \* no tests have been done so far
+ /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT]
+ trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes]
+ IN
+ \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT
+ /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock]
+ \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT
+ /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"]
+ \* the latest verified block is the trusted block
+ /\ latestVerified = trustedLightBlock
+
+\* block should contain a copy of the block from the reference chain, with a matching commit
+CopyLightBlockFromChain(block, height) ==
+ LET ref == blockchain[height]
+ lastCommit ==
+ IF height < ULTIMATE_HEIGHT
+ THEN blockchain[height + 1].lastCommit
+ \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1
+ ELSE blockchain[height].VS
+ IN
+ block = [header |-> ref, Commits |-> lastCommit]
+
+\* Either the primary is correct and the block comes from the reference chain,
+\* or the block is produced by a faulty primary.
+\* +\* [LCV-FUNC-FETCH.1::TLA.1] +FetchLightBlockInto(block, height) == + IF IS_PRIMARY_CORRECT + THEN CopyLightBlockFromChain(block, height) + ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block) + +\* add a block into the light store +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateBlocks(lightBlocks, block) == + LET ht == block.header.height IN + [h \in DOMAIN lightBlocks \union {ht} |-> + IF h = ht THEN block ELSE lightBlocks[h]] + +\* update the state of a light block +\* +\* [LCV-FUNC-UPDATE.1::TLA.1] +LightStoreUpdateStates(statuses, ht, blockState) == + [h \in DOMAIN statuses \union {ht} |-> + IF h = ht THEN blockState ELSE statuses[h]] + +\* Check, whether newHeight is a possible next height for the light client. +\* +\* [LCV-FUNC-SCHEDULE.1::TLA.1] +CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) == + LET ht == pLatestVerified.header.height IN + \/ /\ ht = pNextHeight + /\ ht < pTargetHeight + /\ pNextHeight < newHeight + /\ newHeight <= pTargetHeight + \/ /\ ht < pNextHeight + /\ ht < pTargetHeight + /\ ht < newHeight + /\ newHeight < pNextHeight + \/ /\ ht = pTargetHeight + /\ newHeight = pTargetHeight + +\* The loop of VerifyToTarget. 
+\* +\* [LCV-FUNC-MAIN.1::TLA-LOOP.1] +VerifyToTargetLoop == + \* the loop condition is true + /\ latestVerified.header.height < TARGET_HEIGHT + \* pick a light block, which will be constrained later + /\ \E current \in BC!LightBlocks: + \* Get next LightBlock for verification + /\ IF nextHeight \in DOMAIN fetchedLightBlocks + THEN \* copy the block from the light store + /\ current = fetchedLightBlocks[nextHeight] + /\ UNCHANGED fetchedLightBlocks + ELSE \* retrieve a light block and save it in the light store + /\ FetchLightBlockInto(current, nextHeight) + /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current) + \* Record that one more probe has been done (for complexity and model checking) + /\ nprobes' = nprobes + 1 + \* Verify the current block + /\ LET verdict == ValidAndVerified(latestVerified, current) IN + \* Decide whether/how to continue + CASE verdict = "OK" -> + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified") + /\ latestVerified' = current + /\ state' = + IF latestVerified'.header.height < TARGET_HEIGHT + THEN "working" + ELSE "finishedSuccess" + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + + [] verdict = "CANNOT_VERIFY" -> + (* + do nothing: the light block current passed validation, but the validator + set is too different to verify it. We keep the state of + current at StateUnverified. For a later iteration, Schedule + might decide to try verification of that light block again. 
+ *) + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified") + /\ \E newHeight \in HEIGHTS: + /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT) + /\ nextHeight' = newHeight + /\ UNCHANGED <> + + [] OTHER -> + \* verdict is some error code + /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed") + /\ state' = "finishedFailure" + /\ UNCHANGED <> + +\* The terminating condition of VerifyToTarget. +\* +\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1] +VerifyToTargetDone == + /\ latestVerified.header.height >= TARGET_HEIGHT + /\ state' = "finishedSuccess" + /\ UNCHANGED <> + +(********************* Lite client + Blockchain *******************) +Init == + \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT + /\ BC!InitToHeight + \* the light client starts + /\ LCInit + +(* + The system step is very simple. + The light client is either executing VerifyToTarget, or it has terminated. + (In the latter case, a model checker reports a deadlock.) + Simultaneously, the global clock may advance. + *) +Next == + /\ state = "working" + /\ VerifyToTargetLoop \/ VerifyToTargetDone + /\ BC!AdvanceTime \* the global clock is advanced by zero or more time units + +(************************* Types ******************************************) +TypeOK == + /\ state \in States + /\ nextHeight \in HEIGHTS + /\ latestVerified \in BC!LightBlocks + /\ \E HS \in SUBSET HEIGHTS: + /\ fetchedLightBlocks \in [HS -> BC!LightBlocks] + /\ lightBlockStatus + \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}] + +(************************* Properties ******************************************) + +(* The properties to check *) +\* this invariant candidate is false +NeverFinish == + state = "working" + +\* this invariant candidate is false +NeverFinishNegative == + state /= "finishedFailure" + +\* This invariant holds true, when the primary is correct. 
+\* This invariant candidate is false when the primary is faulty. +NeverFinishNegativeWhenTrusted == + (*(minTrustedHeight <= TRUSTED_HEIGHT)*) + BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + => state /= "finishedFailure" + +\* this invariant candidate is false +NeverFinishPositive == + state /= "finishedSuccess" + +(** + Correctness states that all the obtained headers are exactly like in the blockchain. + + It is always the case that every verified header in LightStore was generated by + an instance of Tendermint consensus. + + [LCV-DIST-SAFE.1::CORRECTNESS-INV.1] + *) +CorrectnessInv == + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" => + fetchedLightBlocks[h].header = blockchain[h] + +(** + Check that the sequence of the headers in storedLightBlocks satisfies ValidAndVerified = "OK" pairwise + This property is easily violated, whenever a header cannot be trusted anymore. + *) +StoredHeadersAreVerifiedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + +\* An improved version of StoredHeadersAreSound, assuming that a header may be not trusted. +\* This invariant candidate is also violated, +\* as there may be some unverified blocks left in the middle. 
+StoredHeadersAreVerifiedOrNotTrustedInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers + \/ lh >= rh + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh + \* or we can verify the right one using the left one + \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + \* or the left header is outside the trusting period, so no guarantees + \/ ~BC!InTrustingPeriod(fetchedLightBlocks[lh].header) + +(** + * An improved version of StoredHeadersAreSoundOrNotTrusted, + * checking the property only for the verified headers. + * This invariant holds true. + *) +ProofOfChainOfTrustInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] = "StateUnverified" + \/ lightBlockStatus[rh] = "StateUnverified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ ~(BC!InTrustingPeriod(fetchedLightBlocks[lh].header)) + \* or we can verify the right one using the left one + \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + +(** + * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv == + state = "finishedSuccess" => + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +\* This property states that whenever the light client finishes with a positive outcome, +\* the trusted header is still within the trusting period. +\* We expect this property to be violated. And Apalache shows us a counterexample. 
+PositiveBeforeTrustedHeaderExpires == + (state = "finishedSuccess") => BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + +\* If the primary is correct and the initial trusted block has not expired, +\* then whenever the algorithm terminates, it reports "success" +CorrectPrimaryAndTimeliness == + (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + If the primary is correct and there is a trusted block that has not expired, + then whenever the algorithm terminates, it reports "success". + + [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1] + *) +SuccessOnCorrectPrimaryAndChainOfTrust == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. 
+PrecisionInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + \/ lightBlock.header /= blockchain[h] + \* the full node lied to the lite client about the commits + \/ lightBlock.Commits /= lightBlock.header.VS + +\* the old invariant that was found to be buggy by TLC +PrecisionBuggyInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + lightBlock.header /= blockchain[h] + +\* the worst complexity +Complexity == + LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN + state /= "working" => + (2 * nprobes <= N * (N - 1)) + +(* + We omit termination, as the algorithm deadlocks in the end. + So termination can be demonstrated by finding a deadlock. + Of course, one has to analyze the deadlocked state and see that + the algorithm has indeed terminated there. 
+*) +============================================================================= +\* Modification History +\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor +\* Created Wed Oct 02 16:39:42 CEST 2019 by igor diff --git a/spec/light-client/verification/MC4_3_correct.tla b/spec/light-client/verification/MC4_3_correct.tla new file mode 100644 index 0000000000..a27d8de05d --- /dev/null +++ b/spec/light-client/verification/MC4_3_correct.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_3_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_3_faulty.tla b/spec/light-client/verification/MC4_3_faulty.tla new file mode 100644 index 0000000000..74b278900b --- /dev/null +++ b/spec/light-client/verification/MC4_3_faulty.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_3_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 
3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_correct.tla b/spec/light-client/verification/MC4_4_correct.tla new file mode 100644 index 0000000000..0a8d96b59c --- /dev/null +++ b/spec/light-client/verification/MC4_4_correct.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_4_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC4_4_correct_drifted.tla b/spec/light-client/verification/MC4_4_correct_drifted.tla new file mode 100644 index 0000000000..7fefe349ea --- /dev/null +++ b/spec/light-client/verification/MC4_4_correct_drifted.tla @@ -0,0 +1,26 @@ +---------------------- MODULE MC4_4_correct_drifted --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two 
weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 30 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_faulty.tla b/spec/light-client/verification/MC4_4_faulty.tla new file mode 100644 index 0000000000..167fa61fb1 --- /dev/null +++ b/spec/light-client/verification/MC4_4_faulty.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_4_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_faulty_drifted.tla b/spec/light-client/verification/MC4_4_faulty_drifted.tla new file mode 100644 index 0000000000..e37c3cb404 --- /dev/null +++ 
b/spec/light-client/verification/MC4_4_faulty_drifted.tla @@ -0,0 +1,26 @@ +---------------------- MODULE MC4_4_faulty_drifted --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 30 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_5_correct.tla b/spec/light-client/verification/MC4_5_correct.tla new file mode 100644 index 0000000000..cffb22cc8f --- /dev/null +++ b/spec/light-client/verification/MC4_5_correct.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_5_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft 
+============================================================================ diff --git a/spec/light-client/verification/MC4_5_faulty.tla b/spec/light-client/verification/MC4_5_faulty.tla new file mode 100644 index 0000000000..3d3a002907 --- /dev/null +++ b/spec/light-client/verification/MC4_5_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_5_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC4_6_faulty.tla b/spec/light-client/verification/MC4_6_faulty.tla new file mode 100644 index 0000000000..64f164854b --- /dev/null +++ b/spec/light-client/verification/MC4_6_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_6_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 6 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, 
blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC4_7_faulty.tla b/spec/light-client/verification/MC4_7_faulty.tla new file mode 100644 index 0000000000..dc6a94eb1d --- /dev/null +++ b/spec/light-client/verification/MC4_7_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_7_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 7 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC5_5_correct.tla b/spec/light-client/verification/MC5_5_correct.tla new file mode 100644 index 0000000000..00b4151f7c --- /dev/null +++ b/spec/light-client/verification/MC5_5_correct.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC5_5_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting 
+IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla b/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla new file mode 100644 index 0000000000..d4212032fc --- /dev/null +++ b/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla @@ -0,0 +1,26 @@ +------------------- MODULE MC5_5_correct_peer_two_thirds_faulty ---------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<2, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC5_5_faulty.tla b/spec/light-client/verification/MC5_5_faulty.tla new file mode 100644 index 0000000000..f63d175a17 --- /dev/null +++ b/spec/light-client/verification/MC5_5_faulty.tla @@ -0,0 +1,26 @@ +----------------- MODULE MC5_5_faulty --------------------- + +AllNodes == 
{"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<2, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla b/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla new file mode 100644 index 0000000000..ef9974d062 --- /dev/null +++ b/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla @@ -0,0 +1,26 @@ +----------------- MODULE MC5_5_faulty_peer_two_thirds_faulty --------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git 
a/spec/light-client/verification/MC5_7_faulty.tla b/spec/light-client/verification/MC5_7_faulty.tla new file mode 100644 index 0000000000..63461b0c89 --- /dev/null +++ b/spec/light-client/verification/MC5_7_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC5_7_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 7 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC7_5_faulty.tla b/spec/light-client/verification/MC7_5_faulty.tla new file mode 100644 index 0000000000..860f9c0aa8 --- /dev/null +++ b/spec/light-client/verification/MC7_5_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC7_5_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5", "n6", "n7"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 5 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state 
components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC7_7_faulty.tla b/spec/light-client/verification/MC7_7_faulty.tla new file mode 100644 index 0000000000..79e328f141 --- /dev/null +++ b/spec/light-client/verification/MC7_7_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC7_7_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5", "n6", "n7"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 7 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/README.md b/spec/light-client/verification/README.md new file mode 100644 index 0000000000..8777374ac9 --- /dev/null +++ b/spec/light-client/verification/README.md @@ -0,0 +1,577 @@ +--- +order: 1 +parent: + title: Verification + order: 2 +--- +# Core Verification + +## Problem statement + +We assume that the light client knows a (base) header `inithead` it trusts (by social consensus or because +the light client has decided to trust the header before). The goal is to check whether another header +`newhead` can be trusted based on the data in `inithead`. 
+ +The correctness of the protocol is based on the assumption that `inithead` was generated by an instance of +Tendermint consensus. + +### Failure Model + +For the purpose of the following definitions we assume that there exists a function +`validators` that returns the corresponding validator set for the given hash. + +The light client protocol is defined with respect to the following failure model: + +Given a known bound `TRUSTED_PERIOD`, and a block `b` with header `h` generated at time `Time` +(i.e. `h.Time = Time`), a set of validators that hold more than 2/3 of the voting power +in `validators(b.Header.NextValidatorsHash)` is correct until time `b.Header.Time + TRUSTED_PERIOD`. + +*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time), +while `Header.Time` corresponds to the [BFT time](../consensus/bft-time.md). In this note, we assume that clocks of correct processes +are synchronized (for example using NTP), and therefore there is bounded clock drift (`CLOCK_DRIFT`) between local clocks and +BFT time. More precisely, for every correct light client process and every `header.Time` (i.e. BFT Time, for a header correctly +generated by the Tendermint consensus), the following inequality holds: `Header.Time < now + CLOCK_DRIFT`, +where `now` corresponds to the system clock at the light client process. + +Furthermore, we assume that `TRUSTED_PERIOD` is (several) order of magnitude bigger than `CLOCK_DRIFT` (`TRUSTED_PERIOD >> CLOCK_DRIFT`), +as `CLOCK_DRIFT` (using NTP) is in the order of milliseconds and `TRUSTED_PERIOD` is in the order of weeks. + +We expect a light client process defined in this document to be used in the context in which there is some +larger period during which misbehaving validators can be detected and punished (we normally refer to it as `UNBONDING_PERIOD` +due to the "bonding" mechanism in modern proof of stake systems). 
Furthermore, we assume that +`TRUSTED_PERIOD < UNBONDING_PERIOD` and that they are normally of the same order of magnitude, for example +`TRUSTED_PERIOD = UNBONDING_PERIOD / 2`. + +The specification in this document considers an implementation of the light client under the Failure Model defined above. +Mechanisms like `fork accountability` and `evidence submission` are defined in the context of `UNBONDING_PERIOD` and +they incentivize validators to follow the protocol specification defined in this document. If they don't, +and we have 1/3 (or more) faulty validators, safety may be violated. Our approach then is +to *detect* these cases (after the fact), and take suitable repair actions (automatic and social). +This is discussed in document on [Fork accountability](./accountability.md). + +The term "trusted" above indicates that the correctness of the protocol depends on +this assumption. It is in the responsibility of the user that runs the light client to make sure that the risk +of trusting a corrupted/forged `inithead` is negligible. + +*Remark*: This failure model might change to a hybrid version that takes heights into account in the future. + +### High Level Solution + +Upon initialization, the light client is given a header `inithead` it trusts (by +social consensus). When a light clients sees a new signed header `snh`, it has to decide whether to trust the new +header. Trust can be obtained by (possibly) the combination of three methods. + +1. **Uninterrupted sequence of headers.** Given a trusted header `h` and an untrusted header `h1`, +the light client trusts a header `h1` if it trusts all headers in between `h` and `h1`. + +2. **Trusted period.** Given a trusted header `h`, an untrusted header `h1 > h` and `TRUSTED_PERIOD` during which +the failure model holds, we can check whether at least one validator, that has been continuously correct +from `h.Time` until now, has signed `h1`. If this is the case, we can trust `h1`. + +3. 
**Bisection.** If a check according to 2. (trusted period) fails, the light client can try to +obtain a header `hp` whose height lies between `h` and `h1` in order to check whether `h` can be used to +get trust for `hp`, and `hp` can be used to get trust for `snh`. If this is the case we can trust `h1`; +if not, we continue recursively until either we found set of headers that can build (transitively) trust relation +between `h` and `h1`, or we failed as two consecutive headers don't verify against each other. + +## Definitions + +### Data structures + +In the following, only the details of the data structures needed for this specification are given. + + ```go + type Header struct { + Height int64 + Time Time // the chain time when the header (block) was generated + + LastBlockID BlockID // prev block info + ValidatorsHash []byte // hash of the validators for the current block + NextValidatorsHash []byte // hash of the validators for the next block + } + + type SignedHeader struct { + Header Header + Commit Commit // commit for the given header + } + + type ValidatorSet struct { + Validators []Validator + TotalVotingPower int64 + } + + type Validator struct { + Address Address // validator address (we assume validator's addresses are unique) + VotingPower int64 // validator's voting power + } + + type TrustedState { + SignedHeader SignedHeader + ValidatorSet ValidatorSet + } + ``` + +### Functions + +For the purpose of this light client specification, we assume that the Tendermint Full Node +exposes the following functions over Tendermint RPC: + +```go + // returns signed header: Header with Commit, for the given height + func Commit(height int64) (SignedHeader, error) + + // returns validator set for the given height + func Validators(height int64) (ValidatorSet, error) +``` + +Furthermore, we assume the following auxiliary functions: + +```go + // returns true if the commit is for the header, ie. 
if it contains + // the correct hash of the header; otherwise false + func matchingCommit(header Header, commit Commit) bool + + // returns the set of validators from the given validator set that + // committed the block (that correctly signed the block) + // it assumes signature verification so it can be computationally expensive + func signers(commit Commit, validatorSet ValidatorSet) []Validator + + // returns the voting power the validators in v1 have according to their voting power in set v2 + // it does not assume signature verification + func votingPowerIn(v1 []Validator, v2 ValidatorSet) int64 + + // returns hash of the given validator set + func hash(v2 ValidatorSet) []byte +``` + +In the functions below we will be using `trustThreshold` as a parameter. For simplicity +we assume that `trustThreshold` is a float between `1/3` and `2/3` and we will not be checking it +in the pseudo-code. + +**VerifySingle.** The function `VerifySingle` attempts to validate given untrusted header and the corresponding validator sets +based on a given trusted state. It ensures that the trusted state is still within its trusted period, +and that the untrusted header is within assumed `clockDrift` bound of the passed time `now`. +Note that this function is not making external (RPC) calls to the full node; the whole logic is +based on the local (given) state. This function is supposed to be used by the IBC handlers. 
+ +```go +func VerifySingle(untrustedSh SignedHeader, + untrustedVs ValidatorSet, + untrustedNextVs ValidatorSet, + trustedState TrustedState, + trustThreshold float, + trustingPeriod Duration, + clockDrift Duration, + now Time) (TrustedState, error) { + + if untrustedSh.Header.Time > now + clockDrift { + return (trustedState, ErrInvalidHeaderTime) + } + + trustedHeader = trustedState.SignedHeader.Header + if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) { + return (trustedState, ErrHeaderNotWithinTrustedPeriod) + } + + // we assume that time it takes to execute verifySingle function + // is several orders of magnitude smaller than trustingPeriod + error = verifySingle( + trustedState, + untrustedSh, + untrustedVs, + untrustedNextVs, + trustThreshold) + + if error != nil return (trustedState, error) + + // the untrusted header is now trusted + newTrustedState = TrustedState(untrustedSh, untrustedNextVs) + return (newTrustedState, nil) +} + +// return true if header is within its light client trusted period; otherwise returns false +func isWithinTrustedPeriod(header Header, + trustingPeriod Duration, + now Time) bool { + + return header.Time + trustingPeriod > now +} +``` + +Note that in case `VerifySingle` returns without an error (untrusted header +is successfully verified) then we have a guarantee that the transition of the trust +from `trustedState` to `newTrustedState` happened during the trusted period of +`trustedState.SignedHeader.Header`. + +TODO: Explain what happens in case `VerifySingle` returns with an error. + +**verifySingle.** The function `verifySingle` verifies a single untrusted header +against a given trusted state. It includes all validations and signature verification. +It is not publicly exposed since it does not check for header expiry (time constraints) +and hence it's possible to use it incorrectly. 
+ +```go +func verifySingle(trustedState TrustedState, + untrustedSh SignedHeader, + untrustedVs ValidatorSet, + untrustedNextVs ValidatorSet, + trustThreshold float) error { + + untrustedHeader = untrustedSh.Header + untrustedCommit = untrustedSh.Commit + + trustedHeader = trustedState.SignedHeader.Header + trustedVs = trustedState.ValidatorSet + + if trustedHeader.Height >= untrustedHeader.Height return ErrNonIncreasingHeight + if trustedHeader.Time >= untrustedHeader.Time return ErrNonIncreasingTime + + // validate the untrusted header against its commit, vals, and next_vals + error = validateSignedHeaderAndVals(untrustedSh, untrustedVs, untrustedNextVs) + if error != nil return error + + // check for adjacent headers + if untrustedHeader.Height == trustedHeader.Height + 1 { + if trustedHeader.NextValidatorsHash != untrustedHeader.ValidatorsHash { + return ErrInvalidAdjacentHeaders + } + } else { + error = verifyCommitTrusting(trustedVs, untrustedCommit, untrustedVs, trustThreshold) + if error != nil return error + } + + // verify the untrusted commit + return verifyCommitFull(untrustedVs, untrustedCommit) +} + +// returns nil if header and validator sets are consistent; otherwise returns error +func validateSignedHeaderAndVals(signedHeader SignedHeader, vs ValidatorSet, nextVs ValidatorSet) error { + header = signedHeader.Header + if hash(vs) != header.ValidatorsHash return ErrInvalidValidatorSet + if hash(nextVs) != header.NextValidatorsHash return ErrInvalidNextValidatorSet + if !matchingCommit(header, signedHeader.Commit) return ErrInvalidCommitValue + return nil +} + +// returns nil if at least a single correct signer signed the commit; otherwise returns error +func verifyCommitTrusting(trustedVs ValidatorSet, + commit Commit, + untrustedVs ValidatorSet, + trustLevel float) error { + + totalPower := trustedVs.TotalVotingPower + signedPower := votingPowerIn(signers(commit, untrustedVs), trustedVs) + + // check that the signers account for more than max(1/3, 
trustLevel) of the voting power + // this ensures that there is at least single correct validator in the set of signers + if signedPower < max(1/3, trustLevel) * totalPower return ErrInsufficientVotingPower + return nil +} + +// returns nil if commit is signed by more than 2/3 of voting power of the given validator set +// return error otherwise +func verifyCommitFull(vs ValidatorSet, commit Commit) error { + totalPower := vs.TotalVotingPower; + signedPower := votingPowerIn(signers(commit, vs), vs) + + // check the signers account for +2/3 of the voting power + if signedPower * 3 <= totalPower * 2 return ErrInvalidCommit + return nil +} +``` + +**VerifyHeaderAtHeight.** The function `VerifyHeaderAtHeight` captures high level +logic, i.e., application call to the light client module to download and verify header +for some height. + +```go +func VerifyHeaderAtHeight(untrustedHeight int64, + trustedState TrustedState, + trustThreshold float, + trustingPeriod Duration, + clockDrift Duration) (TrustedState, error)) { + + trustedHeader := trustedState.SignedHeader.Header + + now := System.Time() + if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) { + return (trustedState, ErrHeaderNotWithinTrustedPeriod) + } + + newTrustedState, err := VerifyBisection(untrustedHeight, + trustedState, + trustThreshold, + trustingPeriod, + clockDrift, + now) + + if err != nil return (trustedState, err) + + now = System.Time() + if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) { + return (trustedState, ErrHeaderNotWithinTrustedPeriod) + } + + return (newTrustedState, err) +} +``` + +Note that in case `VerifyHeaderAtHeight` returns without an error (untrusted header +is successfully verified) then we have a guarantee that the transition of the trust +from `trustedState` to `newTrustedState` happened during the trusted period of +`trustedState.SignedHeader.Header`. 
+ +In case `VerifyHeaderAtHeight` returns with an error, then either (i) the full node we are talking to is faulty +or (ii) the trusted header has expired (it is outside its trusted period). In case (i) the full node is faulty so +light client should disconnect and reinitialise with new peer. In the case (ii) as the trusted header has expired, +we need to reinitialise light client with a new trusted header (that is within its trusted period), +but we don't necessarily need to disconnect from the full node we are talking to (as we haven't observed full node misbehavior in this case). + +**VerifyBisection.** The function `VerifyBisection` implements +recursive logic for checking if it is possible building trust +relationship between `trustedState` and untrusted header at the given height over +finite set of (downloaded and verified) headers. + +```go +func VerifyBisection(untrustedHeight int64, + trustedState TrustedState, + trustThreshold float, + trustingPeriod Duration, + clockDrift Duration, + now Time) (TrustedState, error) { + + untrustedSh, error := Commit(untrustedHeight) + if error != nil return (trustedState, ErrRequestFailed) + + untrustedHeader = untrustedSh.Header + + // note that we pass now during the recursive calls. This is fine as + // all other untrusted headers we download during recursion will be + // for a smaller heights, and therefore should happen before. + if untrustedHeader.Time > now + clockDrift { + return (trustedState, ErrInvalidHeaderTime) + } + + untrustedVs, error := Validators(untrustedHeight) + if error != nil return (trustedState, ErrRequestFailed) + + untrustedNextVs, error := Validators(untrustedHeight + 1) + if error != nil return (trustedState, ErrRequestFailed) + + error = verifySingle( + trustedState, + untrustedSh, + untrustedVs, + untrustedNextVs, + trustThreshold) + + if fatalError(error) return (trustedState, error) + + if error == nil { + // the untrusted header is now trusted. 
+ newTrustedState = TrustedState(untrustedSh, untrustedNextVs) + return (newTrustedState, nil) + } + + // at this point in time we need to do bisection + pivotHeight := ceil((trustedHeader.Height + untrustedHeight) / 2) + + newTrustedState, error = VerifyBisection(pivotHeight, + trustedState, + trustThreshold, + trustingPeriod, + clockDrift, + now) + if error != nil return (newTrustedState, error) + + return VerifyBisection(untrustedHeight, + newTrustedState, + trustThreshold, + trustingPeriod, + clockDrift, + now) +} + +func fatalError(err) bool { + return err == ErrHeaderNotWithinTrustedPeriod OR + err == ErrInvalidAdjacentHeaders OR + err == ErrNonIncreasingHeight OR + err == ErrNonIncreasingTime OR + err == ErrInvalidValidatorSet OR + err == ErrInvalidNextValidatorSet OR + err == ErrInvalidCommitValue OR + err == ErrInvalidCommit +} +``` + +### The case `untrustedHeader.Height < trustedHeader.Height` + +In the use case where someone tells the light client that application data that is relevant for it +can be read in the block of height `k` and the light client trusts a more recent header, we can use the +hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step. + +*Remark.* For the case where the light client trusts two headers `i` and `j` with `i < k < j`, we should +discuss/experiment whether the forward or the backward method is more effective. 
+ +```go +func VerifyHeaderBackwards(trustedHeader Header, + untrustedHeader Header, + trustingPeriod Duration, + clockDrift Duration) error { + + if untrustedHeader.Height >= trustedHeader.Height return ErrNonDecreasingHeight + if untrustedHeader.Time >= trustedHeader.Time return ErrNonDecreasingTime + + now := System.Time() + if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) { + return ErrHeaderNotWithinTrustedPeriod + } + + old := trustedHeader + for i := trustedHeader.Height - 1; i > untrustedHeader.Height; i-- { + untrustedSh, error := Commit(i) + if error != nil return ErrRequestFailed + + if (hash(untrustedSh.Header) != old.LastBlockID.Hash) { + return ErrInvalidAdjacentHeaders + } + + old = untrustedSh.Header + } + + if hash(untrustedHeader) != old.LastBlockID.Hash { + return ErrInvalidAdjacentHeaders + } + + now = System.Time() + if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) { + return ErrHeaderNotWithinTrustedPeriod + } + + return nil + } +``` + +*Assumption*: In the following, we assume that *untrusted_h.Header.height > trusted_h.Header.height*. We will quickly discuss the other case in the next section. + +We consider the following set-up: + +- the light client communicates with one full node +- the light client locally stores all the headers that have passed basic verification and that are within light client trust period. In the pseudo code below we +write *Store.Add(header)* for this. If a header failed to verify, then +the full node we are talking to is faulty and we should disconnect from it and reinitialise with new peer. +- If `CanTrust` returns *error*, then the light client has seen a forged header or the trusted header has expired (it is outside its trusted period). + - In case of forged header, the full node is faulty so light client should disconnect and reinitialise with new peer. 
If the trusted header has expired, + we need to reinitialise light client with new trusted header (that is within its trusted period), but we don't necessarily need to disconnect from the full node + we are talking to (as we haven't observed full node misbehavior in this case). + +## Correctness of the Light Client Protocols + +### Definitions + +- `TRUSTED_PERIOD`: trusted period +- for realtime `t`, the predicate `correct(v,t)` is true if the validator `v` + follows the protocol until time `t` (we will see about recovery later). +- Validator fields. We will write a validator as a tuple `(v,p)` such that + - `v` is the identifier (i.e., validator address; we assume identifiers are unique in each validator set) + - `p` is its voting power +- For each header `h`, we write `trust(h) = true` if the light client trusts `h`. + +### Failure Model + +If a block `b` with a header `h` is generated at time `Time` (i.e. `h.Time = Time`), then a set of validators that +hold more than `2/3` of the voting power in `validators(h.NextValidatorsHash)` is correct until time +`h.Time + TRUSTED_PERIOD`. + +Formally, +\[ +\sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p > +2/3 \sum_{(v,p) \in validators(h.NextValidatorsHash)} p +\] + +The light client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties: + +- *Light Client Completeness*: If a header `h` was correctly generated by an instance of Tendermint consensus (and its age is less than the trusted period), +then the light client should eventually set `trust(h)` to `true`. + +- *Light Client Accuracy*: If a header `h` was *not generated* by an instance of Tendermint consensus, then the light client should never set `trust(h)` to true. 
+ +*Remark*: If in the course of the computation, the light client obtains certainty that some headers were forged by adversaries +(that is were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior. + +*Remark*: In Completeness we use "eventually", while in practice `trust(h)` should be set to true before `h.Time + TRUSTED_PERIOD`. If not, the header +cannot be trusted because it is too old. + +*Remark*: If a header `h` is marked with `trust(h)`, but it is too old at some point in time we denote with `now` (`h.Time + TRUSTED_PERIOD < now`), +then the light client should set `trust(h)` to `false` again at time `now`. + +*Assumption*: Initially, the light client has a header `inithead` that it trusts, that is, `inithead` was correctly generated by the Tendermint consensus. + +To reason about the correctness, we may prove the following invariant. + +*Verification Condition: light Client Invariant.* + For each light client `l` and each header `h`: +if `l` has set `trust(h) = true`, + then validators that are correct until time `h.Time + TRUSTED_PERIOD` have more than two thirds of the voting power in `validators(h.NextValidatorsHash)`. + + Formally, + \[ + \sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p > + 2/3 \sum_{(v,p) \in validators(h.NextValidatorsHash)} p + \] + +*Remark.* To prove the invariant, we will have to prove that the light client only trusts headers that were correctly generated by Tendermint consensus. +Then the formula above follows from the failure model. + +## Details + +**Observation 1.** If `h.Time + TRUSTED_PERIOD > now`, we trust the validator set `validators(h.NextValidatorsHash)`. 
+ +When we say we trust `validators(h.NextValidatorsHash)` we do `not` trust that each individual validator in `validators(h.NextValidatorsHash)` +is correct, but we only trust the fact that less than `1/3` of them are faulty (more precisely, the faulty ones have less than `1/3` of the total voting power). + +*`VerifySingle` correctness arguments* + +Light Client Accuracy: + +- Assume by contradiction that `untrustedHeader` was not generated correctly and the light client sets trust to true because `verifySingle` returns without error. +- `trustedState` is trusted and sufficiently new +- by the Failure Model, less than `1/3` of the voting power held by faulty validators => at least one correct validator `v` has signed `untrustedHeader`. +- as `v` is correct up to now, it followed the Tendermint consensus protocol at least up to signing `untrustedHeader` => `untrustedHeader` was correctly generated. +We arrive at the required contradiction. + +Light Client Completeness: + +- The check is successful if sufficiently many validators of `trustedState` are still validators in the height `untrustedHeader.Height` and signed `untrustedHeader`. +- If `untrustedHeader.Height = trustedHeader.Height + 1`, and both headers were generated correctly, the test passes. + +*Verification Condition:* We may need a Tendermint invariant stating that if `untrustedSignedHeader.Header.Height = trustedHeader.Height + 1` then +`signers(untrustedSignedHeader.Commit) \subseteq validators(trustedHeader.NextValidatorsHash)`. + +*Remark*: The variable `trustThreshold` can be used if the user believes that relying on one correct validator is not sufficient. +However, in case of (frequent) changes in the validator set, the higher the `trustThreshold` is chosen, the more unlikely it becomes that +`verifySingle` returns with an error for non-adjacent headers. 
+ +- `VerifyBisection` correctness arguments (sketch)* + +Light Client Accuracy: + +- Assume by contradiction that the header at `untrustedHeight` obtained from the full node was not generated correctly and +the light client sets trust to true because `VerifyBisection` returns without an error. +- `VerifyBisection` returns without error only if all calls to `verifySingle` in the recursion return without error (return `nil`). +- Thus we have a sequence of headers that all satisfied the `verifySingle` +- again a contradiction + +light Client Completeness: + +This is only ensured if upon `Commit(pivot)` the light client is always provided with a correctly generated header. + +*Stalling* + +With `VerifyBisection`, a faulty full node could stall a light client by creating a long sequence of headers that are queried one-by-one by the light client and look OK, +before the light client eventually detects a problem. There are several ways to address this: + +- Each call to `Commit` could be issued to a different full node +- Instead of querying header by header, the light client tells a full node which header it trusts, and the height of the header it needs. The full node responds with +the header along with a proof consisting of intermediate headers that the light client can use to verify. Roughly, `VerifyBisection` would then be executed at the full node. +- We may set a timeout how long `VerifyBisection` may take. diff --git a/spec/light-client/verification/verification_001_published.md b/spec/light-client/verification/verification_001_published.md new file mode 100644 index 0000000000..59aa0ff206 --- /dev/null +++ b/spec/light-client/verification/verification_001_published.md @@ -0,0 +1,1178 @@ +# Light Client Verification + +The light client implements a read operation of a +[header][TMBC-HEADER-link] from the [blockchain][TMBC-SEQ-link], by +communicating with full nodes. 
As some full nodes may be faulty, this
+functionality must be implemented in a fault-tolerant way.
+
+In the Tendermint blockchain, the validator set may change with every
+new block. The staking and unbonding mechanism induces a [security
+model][TMBC-FM-2THIRDS-link]: starting at time *Time* of the
+[header][TMBC-HEADER-link],
+more than two-thirds of the next validators of a new block are correct
+for the duration of *TrustedPeriod*. The fault-tolerant read
+operation is designed for this security model.
+
+The challenge addressed here is that the light client might have a
+block of height *h1* and needs to read the block of height *h2*
+greater than *h1*. Checking all headers of heights from *h1* to *h2*
+might be too costly (e.g., in terms of energy for mobile devices).
+This specification tries to reduce the number of intermediate blocks
+that need to be checked, by exploiting the guarantees provided by the
+[security model][TMBC-FM-2THIRDS-link].
+
+# Status
+
+This document is thoroughly reviewed, and the protocol has been
+formalized in TLA+ and model checked.
+
+## Issues that need to be addressed
+
+As it is part of the larger light node, its data structures and
+functions interact with the fork detection functionality of the light
+client. As a result of the work on
+[Pull Request 479](https://github.com/informalsystems/tendermint-rs/pull/479) we
+established the need for an update in the data structures in [Issue 499](https://github.com/informalsystems/tendermint-rs/issues/499). This
+will not change the verification logic, but it will record information
+about verification that can be used in fork detection (in particular
+in computing more efficiently the proof of fork).
+
+# Outline
+
+- [Part I](#part-i---tendermint-blockchain): Introduction of
+  relevant terms of the Tendermint
+blockchain.
+
+- [Part II](#part-ii---sequential-definition-of-the-verification-problem): Introduction
+of the problem addressed by the Lightclient Verification protocol. 
+ - [Verification Informal Problem + statement](#Verification-Informal-Problem-statement): For the general + audience, that is, engineers who want to get an overview over what + the component is doing from a bird's eye view. + - [Sequential Problem statement](#Sequential-Problem-statement): + Provides a mathematical definition of the problem statement in + its sequential form, that is, ignoring the distributed aspect of + the implementation of the blockchain. + +- [Part III](#part-iii---light-client-as-distributed-system): Distributed + aspects of the light client, system assumptions and temporal + logic specifications. + + - [Incentives](#incentives): how faulty full nodes may benefit from + misbehaving and how correct full nodes benefit from cooperating. + + - [Computational Model](#Computational-Model): + timing and correctness assumptions. + + - [Distributed Problem Statement](#Distributed-Problem-Statement): + temporal properties that formalize safety and liveness + properties in the distributed setting. + +- [Part IV](#part-iv---light-client-verification-protocol): + Specification of the protocols. + + - [Definitions](#Definitions): Describes inputs, outputs, + variables used by the protocol, auxiliary functions + + - [Core Verification](#core-verification): gives an outline of the solution, + and details of the functions used (with preconditions, + postconditions, error conditions). + + - [Liveness Scenarios](#liveness-scenarios): when the light + client makes progress depends heavily on the changes in the + validator sets of the blockchain. We discuss some typical scenarios. + +- [Part V](#part-v---supporting-the-ibc-relayer): The above parts + focus on a common case where the last verified block has height *h1* + and the + requested height *h2* satisfies *h2 > h1*. For IBC, there are + scenarios where this might not be the case. In this part, we provide + some preliminaries for supporting this. 
As not all details of the + IBC requirements are clear by now, we do not provide a complete + specification at this point. We mark with "Open Question" points + that need to be addressed in order to finalize this specification. + It should be noted that the technically + most challenging case is the one specified in Part IV. + +In this document we quite extensively use tags in order to be able to +reference assumptions, invariants, etc. in future communication. In +these tags we frequently use the following short forms: + +- TMBC: Tendermint blockchain +- SEQ: for sequential specifications +- LCV: Lightclient Verification +- LIVE: liveness +- SAFE: safety +- FUNC: function +- INV: invariant +- A: assumption + +# Part I - Tendermint Blockchain + +## Header Fields necessary for the Light Client + +#### **[TMBC-HEADER.1]** + +A set of blockchain transactions is stored in a data structure called +*block*, which contains a field called *header*. (The data structure +*block* is defined [here][block]). As the header contains hashes to +the relevant fields of the block, for the purpose of this +specification, we will assume that the blockchain is a list of +headers, rather than a list of blocks. + +#### **[TMBC-HASH-UNIQUENESS.1]** + +We assume that every hash in the header identifies the data it hashes. +Therefore, in this specification, we do not distinguish between hashes and the +data they represent. + +#### **[TMBC-HEADER-FIELDS.1]** + +A header contains the following fields: + +- `Height`: non-negative integer +- `Time`: time (integer) +- `LastBlockID`: Hashvalue +- `LastCommit` DomainCommit +- `Validators`: DomainVal +- `NextValidators`: DomainVal +- `Data`: DomainTX +- `AppState`: DomainApp +- `LastResults`: DomainRes + +#### **[TMBC-SEQ.1]** + +The Tendermint blockchain is a list *chain* of headers. 
+
+#### **[TMBC-VALIDATOR-PAIR.1]**
+
+Given a full node, a
+*validator pair* is a pair *(peerID, voting_power)*, where
+
+- *peerID* is the PeerID (public key) of a full node,
+- *voting_power* is an integer (representing the full node's
+  voting power in a certain consensus instance).
+
+> In the Golang implementation the data type for *validator
+pair* is called `Validator`
+
+#### **[TMBC-VALIDATOR-SET.1]**
+
+A *validator set* is a set of validator pairs. For a validator set
+*vs*, we write *TotalVotingPower(vs)* for the sum of the voting powers
+of its validator pairs.
+
+#### **[TMBC-VOTE.1]**
+
+A *vote* contains a `prevote` or `precommit` message sent and signed by
+a validator node during the execution of [consensus][arXiv]. Each
+message contains the following fields
+
+- `Type`: prevote or precommit
+- `Height`: positive integer
+- `Round`: a positive integer
+- `BlockID`: a Hashvalue of a block (not necessarily a block of the chain)
+
+#### **[TMBC-COMMIT.1]**
+
+A commit is a set of `precommit` messages.
+
+## Tendermint Failure Model
+
+#### **[TMBC-AUTH-BYZ.1]**
+
+We assume the authenticated Byzantine fault model in which no node (faulty or
+correct) may break digital signatures, but otherwise, no additional
+assumption is made about the internal behavior of faulty
+nodes. That is, faulty nodes are only limited in that they cannot forge
+messages.
+
+#### **[TMBC-TIME-PARAMS.1]**
+
+A Tendermint blockchain has the following configuration parameters:
+
+- *unbondingPeriod*: a time duration.
+- *trustingPeriod*: a time duration smaller than *unbondingPeriod*.
+
+#### **[TMBC-CORRECT.1]**
+
+We define a predicate *correctUntil(n, t)*, where *n* is a node and *t* is a
+time point.
+The predicate *correctUntil(n, t)* is true if and only if the node *n*
+follows all the protocols (at least) until time *t*. 
+ +#### **[TMBC-FM-2THIRDS.1]** + +If a block *h* is in the chain, +then there exists a subset *CorrV* +of *h.NextValidators*, such that: + +- *TotalVotingPower(CorrV) > 2/3 + TotalVotingPower(h.NextValidators)*; cf. [TMBC-VALIDATOR-SET.1] +- For every validator pair *(n,p)* in *CorrV*, it holds *correctUntil(n, + h.Time + trustingPeriod)*; cf. [TMBC-CORRECT.1] + +> The definition of correct +> [**[TMBC-CORRECT.1]**][TMBC-CORRECT-link] refers to realtime, while it +> is used here with *Time* and *trustingPeriod*, which are "hardware +> times". We do not make a distinction here. + +#### **[TMBC-CORR-FULL.1]** + +Every correct full node locally stores a prefix of the +current list of headers from [**[TMBC-SEQ.1]**][TMBC-SEQ-link]. + +## What the Light Client Checks + +> From [TMBC-FM-2THIRDS.1] we directly derive the following observation: + +#### **[TMBC-VAL-CONTAINS-CORR.1]** + +Given a (trusted) block *tb* of the blockchain, a given set of full nodes +*N* contains a correct node at a real-time *t*, if + +- *t - trustingPeriod < tb.Time < t* +- the voting power in tb.NextValidators of nodes in *N* is more + than 1/3 of *TotalVotingPower(tb.NextValidators)* + +> The following describes how a commit for a given block *b* must look +> like. + +#### **[TMBC-SOUND-DISTR-POSS-COMMIT.1]** + +For a block *b*, each element *pc* of *PossibleCommit(b)* satisfies: + +- *pc* contains only votes (cf. [TMBC-VOTE.1]) + by validators from *b.Validators* +- the sum of the voting powers in *pc* is greater than 2/3 + *TotalVotingPower(b.Validators)* +- and there is an *r* such that each vote *v* in *pc* satisfies + - v.Type = precommit + - v.Height = b.Height + - v.Round = r + - v.blockID = hash(b) + +> The following property comes from the validity of the [consensus][arXiv]: A +> correct validator node only sends `prevote` or `precommit`, if +> `BlockID` of the new (to-be-decided) block is equal to the hash of +> the last block. 
+ +#### **[TMBC-VAL-COMMIT.1]** + +If for a block *b*, a commit *c* + +- contains at least one validator pair *(v,p)* such that *v* is a + **correct** validator node, and +- is contained in *PossibleCommit(b)* + +then the block *b* is on the blockchain. + +## Context of this document + +In this document we specify the light client verification component, +called *Core Verification*. The *Core Verification* communicates with +a full node. As full nodes may be faulty, it cannot trust the +received information, but the light client has to check whether the +header it receives coincides with the one generated by Tendermint +consensus. + +The two + properties [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link] and +[[TMBC-VAL-COMMIT]][TMBC-VAL-COMMIT-link] formalize the checks done + by this specification: +Given a trusted block *tb* and an untrusted block *ub* with a commit *cub*, +one has to check that *cub* is in *PossibleCommit(ub)*, and that *cub* +contains a correct node using *tb*. + +# Part II - Sequential Definition of the Verification Problem + +## Verification Informal Problem statement + +Given a height *targetHeight* as an input, the *Verifier* eventually +stores a header *h* of height *targetHeight* locally. This header *h* +is generated by the Tendermint [blockchain][block]. In +particular, a header that was not generated by the blockchain should +never be stored. + +## Sequential Problem statement + +#### **[LCV-SEQ-LIVE.1]** + +The *Verifier* gets as input a height *targetHeight*, and eventually stores the +header of height *targetHeight* of the blockchain. + +#### **[LCV-SEQ-SAFE.1]** + +The *Verifier* never stores a header which is not in the blockchain. + +# Part III - Light Client as Distributed System + +## Incentives + +Faulty full nodes may benefit from lying to the light client, by making the +light client accept a block that deviates (e.g., contains additional +transactions) from the one generated by Tendermint consensus. 
+
+Users using the light client might be harmed by accepting a forged header.
+
+The [fork detector][fork-detector] of the light client may help the
+correct full nodes to understand whether their header is a good one.
+Hence, in combination with the light client detector, the correct full
+nodes have the incentive to respond. We can thus base liveness
+arguments on the assumption that correct full nodes reliably talk to
+the light client.
+
+## Computational Model
+
+#### **[LCV-A-PEER.1]**
+
+The verifier communicates with a full node called *primary*. No assumption is made about the full node (it may be correct or faulty).
+
+#### **[LCV-A-COMM.1]**
+
+Communication between the light client and a correct full node is
+reliable and bounded in time. Reliable communication means that
+messages are not lost, not duplicated, and eventually delivered. There
+is a (known) end-to-end delay *Delta*, such that if a message is sent
+at time *t* then it is received and processed by time *t + Delta*.
+This implies that we need a timeout of at least *2 Delta* for remote
+procedure calls to ensure that the response of a correct peer arrives
+before the timeout expires.
+
+#### **[LCV-A-TFM.1]**
+
+The Tendermint blockchain satisfies the Tendermint failure model [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link].
+
+#### **[LCV-A-VAL.1]**
+
+The system satisfies [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] and
+[**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]. Thus, there is a
+blockchain that satisfies the soundness requirements (that is, the
+validation rules in [[block]]).
+
+## Distributed Problem Statement
+
+### Two Kinds of Termination
+
+We do not assume that *primary* is correct. Under this assumption no
+protocol can guarantee the combination of the sequential
+properties. 
Thus, in the (unreliable) distributed setting, we consider +two kinds of termination (successful and failure) and we will specify +below under what (favorable) conditions *Core Verification* ensures to +terminate successfully, and satisfy the requirements of the sequential +problem statement: + +#### **[LCV-DIST-TERM.1]** + +*Core Verification* either *terminates +successfully* or it *terminates with failure*. + +### Design choices + +#### **[LCV-DIST-STORE.1]** + +*Core Verification* has a local data structure called *LightStore* that +contains light blocks (that contain a header). For each light block we +record whether it is verified. + +#### **[LCV-DIST-PRIMARY.1]** + +*Core Verification* has a local variable *primary* that contains the PeerID of a full node. + +#### **[LCV-DIST-INIT.1]** + +*LightStore* is initialized with a header *trustedHeader* that was correctly +generated by the Tendermint consensus. We say *trustedHeader* is verified. + +### Temporal Properties + +#### **[LCV-DIST-SAFE.1]** + +It is always the case that every verified header in *LightStore* was +generated by an instance of Tendermint consensus. + +#### **[LCV-DIST-LIVE.1]** + +From time to time, a new instance of *Core Verification* is called with a +height *targetHeight* greater than the height of any header in *LightStore*. +Each instance must eventually terminate. + +- If + - the *primary* is correct (and locally has the block of + *targetHeight*), and + - *LightStore* always contains a verified header whose age is less than the + trusting period, + then *Core Verification* adds a verified header *hd* with height + *targetHeight* to *LightStore* and it **terminates successfully** + +> These definitions imply that if the primary is faulty, a header may or +> may not be added to *LightStore*. In any case, +> [**[LCV-DIST-SAFE.1]**](#lcv-vc-inv) must hold. 
+> The invariant [**[LCV-DIST-SAFE.1]**](#lcv-dist-safe) and the liveness
+> requirement [**[LCV-DIST-LIVE.1]**](#lcv-dist-life)
+> allow that verified headers are added to *LightStore* whose
+> height was not passed
+> to the verifier (e.g., intermediate headers used in bisection; see below).
+> Note that for liveness, initially having a *trustedHeader* within
+> the *trustingPeriod* is not sufficient. However, as this
+> specification will leave some freedom with respect to the strategy
+> in which order to download intermediate headers, we do not give a
+> more precise liveness specification here. After giving the
+> specification of the protocol, we will discuss some liveness
+> scenarios [below](#liveness-scenarios).
+
+### Solving the sequential specification
+
+This specification provides a partial solution to the sequential specification.
+The *Verifier* solves the invariant of the sequential part
+
+[**[LCV-DIST-SAFE.1]**](#lcv-vc-inv) => [**[LCV-SEQ-SAFE.1]**](#lcv-seq-inv)
+
+In the case the primary is correct, and there is a recent header in *LightStore*, the verifier satisfies the liveness requirements.
+
+⋀ *primary is correct*
+⋀ always ∃ verified header in LightStore. *header.Time* > *now* - *trustingPeriod*
+⋀ [**[LCV-A-Comm.1]**](#lcv-a-comm) ⋀ (
+    ( [**[TMBC-CorrFull.1]**][TMBC-CorrFull-link] ⋀
+      [**[LCV-DIST-LIVE.1]**](#lcv-vc-live) )
+    ⟹ [**[LCV-SEQ-LIVE.1]**](#lcv-seq-live)
+)
+
+# Part IV - Light Client Verification Protocol
+
+We provide a specification for Light Client Verification. The local
+code for verification is presented by a sequential function
+`VerifyToTarget` to highlight the control flow of this functionality.
+We note that if a different concurrency model is considered for
+an implementation, the sequential flow of the function may be
+implemented with mutexes, etc. 
However, the light client verification +is partitioned into three blocks that can be implemented and tested +independently: + +- `FetchLightBlock` is called to download a light block (header) of a + given height from a peer. +- `ValidAndVerified` is a local code that checks the header. +- `Schedule` decides which height to try to verify next. We keep this + underspecified as different implementations (currently in Goland and + Rust) may implement different optimizations here. We just provide + necessary conditions on how the height may evolve. + + + + +## Definitions + +### Data Types + +The core data structure of the protocol is the LightBlock. + +#### **[LCV-DATA-LIGHTBLOCK.1]** + +```go +type LightBlock struct { + Header Header + Commit Commit + Validators ValidatorSet +} +``` + +#### **[LCV-DATA-LIGHTSTORE.1]** + +LightBlocks are stored in a structure which stores all LightBlock from +initialization or received from peers. + +```go +type LightStore struct { + ... +} + +``` + +Each LightBlock is in one of the following states: + +```go +type VerifiedState int + +const ( + StateUnverified = iota + 1 + StateVerified + StateFailed + StateTrusted +) +``` + +> Only the detector module sets a lightBlock state to `StateTrusted` +> and only if it was `StateVerified` before. + +The LightStore exposes the following functions to query stored LightBlocks. + +#### **[LCV-FUNC-GET.1]** + +```go +func (ls LightStore) Get(height Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a LightBlock at a given height or false in the second argument if + the LightStore does not contain the specified LightBlock. 
+ +#### **[LCV-FUNC-LATEST-VERIF.1]** + +```go +func (ls LightStore) LatestVerified() LightBlock +``` + +- Expected postcondition + - returns the highest light block whose state is `StateVerified` + or `StateTrusted` + +#### **[LCV-FUNC-UPDATE.2]** + +```go +func (ls LightStore) Update(lightBlock LightBlock, + verfiedState VerifiedState + verifiedBy Height) +``` + +- Expected postcondition + - The state of the LightBlock is set to *verifiedState*. + - verifiedBy of the Lightblock is set to *Height* + +> The following function is used only in the detector specification +> listed here for completeness. + +#### **[LCV-FUNC-LATEST-TRUSTED.1]** + +```go +func (ls LightStore) LatestTrusted() LightBlock +``` + +- Expected postcondition + - returns the highest light block that has been verified and + checked by the detector. + +#### **[LCV-FUNC-FILTER.1]** + +```go +func (ls LightStore) FilterVerified() LightSTore +``` + +- Expected postcondition + - returns only the LightBlocks with state verified. + +### Inputs + +- *lightStore*: stores light blocks that have been downloaded and that + passed verification. Initially it contains a light block with + *trustedHeader*. +- *primary*: peerID +- *targetHeight*: the height of the needed header + +### Configuration Parameters + +- *trustThreshold*: a float. Can be used if correctness should not be based on more voting power and 1/3. +- *trustingPeriod*: a time duration [**[TMBC-TIME_PARAMS.1]**][TMBC-TIME_PARAMS-link]. +- *clockDrift*: a time duration. Correction parameter dealing with only approximately synchronized clocks. 
+ +### Variables + +- *nextHeight*: initially *targetHeight* + > *nextHeight* should be thought of the "height of the next header we need + > to download and verify" + +### Assumptions + +#### **[LCV-A-INIT.1]** + +- *trustedHeader* is from the blockchain + +- *targetHeight > LightStore.LatestVerified.Header.Height* + +### Invariants + +#### **[LCV-INV-TP.1]** + +It is always the case that *LightStore.LatestTrusted.Header.Time > now - trustingPeriod*. + +> If the invariant is violated, the light client does not have a +> header it can trust. A trusted header must be obtained externally, +> its trust can only be based on social consensus. + +### Used Remote Functions + +We use the functions `commit` and `validators` that are provided +by the [RPC client for Tendermint][RPC]. + +```go +func Commit(height int64) (SignedHeader, error) +``` + +- Implementation remark + - RPC to full node *n* + - JSON sent: + +```javascript +// POST /commit +{ + "jsonrpc": "2.0", + "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request + "method": "commit", + "params": { + "height": 1234 + } +} +``` + +- Expected precondition + - header of `height` exists on blockchain +- Expected postcondition + - if *n* is correct: Returns the signed header of height `height` + from the blockchain if communication is timely (no timeout) + - if *n* is faulty: Returns a signed header with arbitrary content +- Error condition + - if *n* is correct: precondition violated or timeout + - if *n* is faulty: arbitrary error + +---- + +```go +func Validators(height int64) (ValidatorSet, error) +``` + +- Implementation remark + - RPC to full node *n* + - JSON sent: + +```javascript +// POST /validators +{ + "jsonrpc": "2.0", + "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request + "method": "validators", + "params": { + "height": 1234 + } +} +``` + +- Expected precondition + - header of `height` exists on blockchain +- Expected postcondition + - if *n* is correct: 
Returns the validator set of height `height` + from the blockchain if communication is timely (no timeout) + - if *n* is faulty: Returns arbitrary validator set +- Error condition + - if *n* is correct: precondition violated or timeout + - if *n* is faulty: arbitrary error + +---- + +### Communicating Function + +#### **[LCV-FUNC-FETCH.1]** + + ```go +func FetchLightBlock(peer PeerID, height Height) LightBlock +``` + +- Implementation remark + - RPC to peer at *PeerID* + - calls `Commit` for *height* and `Validators` for *height* and *height+1* +- Expected precondition + - `height` is less than or equal to height of the peer **[LCV-IO-PRE-HEIGHT.1]** +- Expected postcondition: + - if *node* is correct: + - Returns the LightBlock *lb* of height `height` + that is consistent with the blockchain + - *lb.provider = peer* **[LCV-IO-POST-PROVIDER.1]** + - *lb.Header* is a header consistent with the blockchain + - *lb.Validators* is the validator set of the blockchain at height *nextHeight* + - *lb.NextValidators* is the validator set of the blockchain at height *nextHeight + 1* + - if *node* is faulty: Returns a LightBlock with arbitrary content + [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] +- Error condition + - if *n* is correct: precondition violated + - if *n* is faulty: arbitrary error + - if *lb.provider != peer* + - times out after 2 Delta (by assumption *n* is faulty) + +---- + +## Core Verification + +### Outline + +The `VerifyToTarget` is the main function and uses the following functions. + +- `FetchLightBlock` is called to download the next light block. It is + the only function that communicates with other nodes +- `ValidAndVerified` checks whether header is valid and checks if a + new lightBlock should be trusted + based on a previously verified lightBlock. +- `Schedule` decides which height to try to verify next + +In the following description of `VerifyToTarget` we do not deal with error +handling. 
If any of the above function returns an error, VerifyToTarget just +passes the error on. + +#### **[LCV-FUNC-MAIN.1]** + +```go +func VerifyToTarget(primary PeerID, lightStore LightStore, + targetHeight Height) (LightStore, Result) { + + nextHeight := targetHeight + + for lightStore.LatestVerified.height < targetHeight { + + // Get next LightBlock for verification + current, found := lightStore.Get(nextHeight) + if !found { + current = FetchLightBlock(primary, nextHeight) + lightStore.Update(current, StateUnverified) + } + + // Verify + verdict = ValidAndVerified(lightStore.LatestVerified, current) + + // Decide whether/how to continue + if verdict == SUCCESS { + lightStore.Update(current, StateVerified) + } + else if verdict == NOT_ENOUGH_TRUST { + // do nothing + // the light block current passed validation, but the validator + // set is too different to verify it. We keep the state of + // current at StateUnverified. For a later iteration, Schedule + // might decide to try verification of that light block again. 
+ } + else { + // verdict is some error code + lightStore.Update(current, StateFailed) + // possibly remove all LightBlocks from primary + return (lightStore, ResultFailure) + } + nextHeight = Schedule(lightStore, nextHeight, targetHeight) + } + return (lightStore, ResultSuccess) +} +``` + +- Expected precondition + - *lightStore* contains a LightBlock within the *trustingPeriod* **[LCV-PRE-TP.1]** + - *targetHeight* is greater than the height of all the LightBlocks in *lightStore* +- Expected postcondition: + - returns *lightStore* that contains a LightBlock that corresponds to a block + of the blockchain of height *targetHeight* + (that is, the LightBlock has been added to *lightStore*) **[LCV-POST-LS.1]** +- Error conditions + - if the precondition is violated + - if `ValidAndVerified` or `FetchLightBlock` report an error + - if [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) is violated + +### Details of the Functions + +#### **[LCV-FUNC-VALID.1]** + +```go +func ValidAndVerified(trusted LightBlock, untrusted LightBlock) Result +``` + +- Expected precondition: + - *untrusted* is valid, that is, satisfies the soundness [checks][block] + - *untrusted* is **well-formed**, that is, + - *untrusted.Header.Time < now + clockDrift* + - *untrusted.Validators = hash(untrusted.Header.Validators)* + - *untrusted.NextValidators = hash(untrusted.Header.NextValidators)* + - *trusted.Header.Time > now - trustingPeriod* + - *trusted.Commit* is a commit for the header + *trusted.Header*, i.e., it contains + the correct hash of the header, and +2/3 of signatures + - the `Height` and `Time` of `trusted` are smaller than the Height and + `Time` of `untrusted`, respectively + - the *untrusted.Header* is well-formed (passes the tests from + [[block]]), and in particular + - if the untrusted header `unstrusted.Header` is the immediate + successor of `trusted.Header`, then it holds that + - *trusted.Header.NextValidators = + untrusted.Header.Validators*, and + moreover, + - 
*untrusted.Header.Commit* + - contains signatures by more than two-thirds of the validators + - contains no signature from nodes that are not in *trusted.Header.NextValidators* +- Expected postcondition: + - Returns `SUCCESS`: + - if *untrusted* is the immediate successor of *trusted*, or otherwise, + - if the signatures of a set of validators that have more than + *max(1/3,trustThreshold)* of voting power in + *trusted.Nextvalidators* is contained in + *untrusted.Commit* (that is, header passes the tests + [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] + and [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link]) + - Returns `NOT_ENOUGH_TRUST` if: + - *untrusted* is *not* the immediate successor of + *trusted* + and the *max(1/3,trustThreshold)* threshold is not reached + (that is, if + [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] + fails and header is does not violate the soundness + checks [[block]]). +- Error condition: + - if precondition violated + +---- + +#### **[LCV-FUNC-SCHEDULE.1]** + +```go +func Schedule(lightStore, nextHeight, targetHeight) Height +``` + +- Implementation remark: If picks the next height to be verified. + We keep the precise choice of the next header under-specified. It is + subject to performance optimizations that do not influence the correctness +- Expected postcondition: **[LCV-SCHEDULE-POST.1]** + Return *H* s.t. + 1. if *lightStore.LatestVerified.Height = nextHeight* and + *lightStore.LatestVerified < targetHeight* then + *nextHeight < H <= targetHeight* + 2. if *lightStore.LatestVerified.Height < nextHeight* and + *lightStore.LatestVerified.Height < targetHeight* then + *lightStore.LatestVerified.Height < H < nextHeight* + 3. if *lightStore.LatestVerified.Height = targetHeight* then + *H = targetHeight* + +> Case i. captures the case where the light block at height *nextHeight* +> has been verified, and we can choose a height closer to the *targetHeight*. 
+> As we get the *lightStore* as parameter, the choice of the next height can +> depend on the *lightStore*, e.g., we can pick a height for which we have +> already downloaded a light block. +> In Case ii. the header of *nextHeight* could not be verified, and we need to pick a smaller height. +> In Case iii. is a special case when we have verified the *targetHeight*. + +### Solving the distributed specification + +*trustedStore* is implemented by the light blocks in lightStore that +have the state *StateVerified*. + +#### Argument for [**[LCV-DIST-SAFE.1]**](#lcv-dist-safe) + +- `ValidAndVerified` implements the soundness checks and the checks + [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] and + [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link] under + the assumption [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link] +- Only if `ValidAndVerified` returns with `SUCCESS`, the state of a light block is + set to *StateVerified*. + +#### Argument for [**[LCV-DIST-LIVE.1]**](#lcv-dist-life) + +- If *primary* is correct, + - `FetchLightBlock` will always return a light block consistent + with the blockchain + - `ValidAndVerified` either verifies the header using the trusting + period or falls back to sequential + verification + - If [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) holds, eventually every + header will be verified and core verification **terminates successfully**. + - successful termination depends on the age of *lightStore.LatestVerified* + (for instance, initially on the age of *trustedHeader*) and the + changes of the validator sets on the blockchain. + We will give some examples [below](#liveness-scenarios). +- If *primary* is faulty, + - it either provides headers that pass all the tests, and we + return with the header + - it provides one header that fails a test, core verification + **terminates with failure**. + - it times out and core verification + **terminates with failure**. 
+ +## Liveness Scenarios + +The liveness argument above assumes [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) + +which requires that there is a header that does not expire before the +target height is reached. Here we discuss scenarios to ensure this. + +Let *startHeader* be *LightStore.LatestVerified* when core +verification is called (*trustedHeader*) and *startTime* be the time +core verification is invoked. + +In order to ensure liveness, *LightStore* always needs to contain a +verified (or initially trusted) header whose time is within the +trusting period. To ensure this, core verification needs to add new +headers to *LightStore* and verify them, before all headers in +*LightStore* expire. + +#### Many changes in validator set + + Let's consider `Schedule` implements + bisection, that is, it halves the distance. + Assume the case where the validator set changes completely in each +block. Then the + method in this specification needs to +sequentially verify all headers. That is, for + +- *W = log_2 (targetHeight - startHeader.Height)*, + +*W* headers need to be downloaded and checked before the +header of height *startHeader.Height + 1* is added to *LightStore*. + +- Let *Comp* + be the local computation time needed to check headers and signatures + for one header. +- Then we need in the worst case *Comp + 2 Delta* to download and + check one header. +- Then the first time a verified header could be added to *LightStore* is + startTime + W * (Comp + 2 Delta) +- [TP.1] However, it can only be added if we still have a header in + *LightStore*, + which is not + expired, that is only the case if + - startHeader.Time > startTime + WCG * (Comp + 2 Delta) - + trustingPeriod, + - that is, if core verification is started at + startTime < startHeader.Time + trustingPeriod - WCG * (Comp + 2 Delta) + +- one may then do an inductive argument from this point on, depending + on the implementation of `Schedule`. 
We may have to account for the + headers that are already + downloaded, but they are checked against the new *LightStore.LatestVerified*. + +> We observe that +> the worst case time it needs to verify the header of height +> *targetHeight* depends mainly on how frequent the validator set on the +> blockchain changes. That core verification terminates successfully +> crucially depends on the check [TP.1], that is, that the headers in +> *LightStore* do not expire in the time needed to download more +> headers, which depends on the creation time of the headers in +> *LightStore*. That is, termination of core verification is highly +> depending on the data stored in the blockchain. +> The current light client core verification protocol exploits that, in +> practice, changes in the validator set are rare. For instance, +> consider the following scenario. + +#### No change in validator set + +If on the blockchain the validator set of the block at height +*targetHeight* is equal to *startHeader.NextValidators*: + +- there is one round trip in `FetchLightBlock` to download the light + block + of height + *targetHeight*, and *Comp* to check it. +- as the validator sets are equal, `Verify` returns `SUCCESS`, if + *startHeader.Time > now - trustingPeriod*. +- that is, if *startTime < startHeader.Header.Time + trustingPeriod - + 2 Delta - Comp*, then core verification terminates successfully + +# Part V - Supporting the IBC Relayer + +The above specification focuses on the most common case, which also +constitutes the most challenging task: using the Tendermint [security +model][TMBC-FM-2THIRDS-link] to verify light blocks without +downloading all intermediate blocks. To focus on this challenge, above +we have restricted ourselves to the case where *targetHeight* is +greater than the height of any trusted header. 
This simplified +presentation of the algorithm as initially +`lightStore.LatestVerified()` is less than *targetHeight*, and in the +process of verification `lightStore.LatestVerified()` increases until +*targetHeight* is reached. + +For [IBC][ibc-rs] it might be that some "older" header is +needed, that is, *targetHeight < lightStore.LatestVerified()*. In this section we present a preliminary design, and we mark some +remaining open questions. +If *targetHeight < lightStore.LatestVerified()* our design separates +the following cases: + +- A previous instance of `VerifyToTarget` has already downloaded the + light block of *targetHeight*. There are two cases + - the light block has been verified + - the light block has not been verified yet +- No light block of *targetHeight* had been downloaded before. There + are two cases: + - there exists a verified light block of height less than *targetHeight* + - otherwise. In this case we need to do "backwards verification" + using the hash of the previous block in the `LastBlockID` field + of a header. + +**Open Question:** what are the security assumptions for backward +verification. Should we check that the light block we verify from +(and/or the checked light block) is within the trusting period? + +The design just presents the above case +distinction as a function, and defines some auxiliary functions in the +same way the protocol was presented in +[Part IV](#part-iv---light-client-verification-protocol). + +```go +func (ls LightStore) LatestPrevious(height Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a light block *lb* that satisfies: + - *lb* is in lightStore + - *lb* is verified and not expired + - *lb.Header.Height < height* + - for all *b* in lightStore s.t. *b* is verified and not expired it + holds *lb.Header.Height >= b.Header.Height* + - *false* in the second argument if + the LightStore does not contain such an *lb*. 
+ +```go +func (ls LightStore) MinVerified() (LightBlock, bool) +``` + +- Expected postcondition + - returns a light block *lb* that satisfies: + - *lb* is in lightStore + - *lb* is verified **Open Question:** replace by trusted? + - *lb.Header.Height* is minimal in the lightStore + - **Open Question:** according to this, it might be expired (outside the + trusting period). This approach appears safe. Are there reasons we + should not do that? + - *false* in the second argument if + the LightStore does not contain such an *lb*. + +If a height that is smaller than the smallest height in the lightstore +is required, we check the hashes backwards. This is done with the +following function: + +#### **[LCV-FUNC-BACKWARDS.1]** + +```go +func Backwards (primary PeerID, lightStore LightStore, targetHeight Height) + (LightStore, Result) { + + lb,res = lightStore.MinVerified() + if res = false { + return (lightStore, ResultFailure) + } + + latest := lb.Header + for i := lb.Header.height - 1; i >= targetHeight; i-- { + // here we download height-by-height. We might first download all + // headers down to targetHeight and then check them. + current := FetchLightBlock(primary,i) + if (hash(current) != latest.Header.LastBlockId) { + return (lightStore, ResultFailure) + } + else { + lightStore.Update(current, StateVerified) + // **Open Question:** Do we need a new state type for + // backwards verified light blocks? + } + latest = current + } + return (lightStore, ResultSuccess) +} +``` + +The following function just decided based on the required height which +method should be used. 
+ +#### **[LCV-FUNC-IBCMAIN.1]** + +```go +func Main (primary PeerID, lightStore LightStore, targetHeight Height) + (LightStore, Result) { + + b1, r1 = lightStore.Get(targetHeight) + if r1 = true and b1.State = StateVerified { + // block already there + return (lightStore, ResultSuccess) + } + + if targetHeight > lightStore.LatestVerified.height { + // case of Part IV + return VerifyToTarget(primary, lightStore, targetHeight) + } + else { + b2, r2 = lightStore.LatestPrevious(targetHeight); + if r2 = true { + // make auxiliary lightStore auxLS to call VerifyToTarget. + // VerifyToTarget uses LatestVerified of the given lightStore + // For that we need: + // auxLS.LatestVerified = lightStore.LatestPrevious(targetHeight) + auxLS.Init; + auxLS.Update(b2,StateVerified); + if r1 = true { + // we need to verify a previously downloaded light block. + // we add it to the auxiliary store so that VerifyToTarget + // does not download it again + auxLS.Update(b1,b1.State); + } + auxLS, res2 = VerifyToTarget(primary, auxLS, targetHeight) + // move all lightblocks from auxLS to lightStore, + // maintain state + // we do that whether VerifyToTarget was successful or not + for i, s range auxLS { + lighStore.Update(s,s.State) + } + return (lightStore, res2) + } + else { + return Backwards(primary, lightStore, targetHeight) + } + } +} +``` + + + + + + + + + + + + + + + + + + + +# References + +[[block]] Specification of the block data structure. + +[[RPC]] RPC client for Tendermint + +[[fork-detector]] The specification of the light client fork detector. + +[[fullnode]] Specification of the full node API + +[[ibc-rs]] Rust implementation of IBC modules and relayer. + +[[lightclient]] The light client ADR [77d2651 on Dec 27, 2019]. 
+ +[RPC]: https://docs.tendermint.com/v0.34/rpc/ + +[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md + +[TMBC-HEADER-link]: #tmbc-header1 +[TMBC-SEQ-link]: #tmbc-seq1 +[TMBC-CorrFull-link]: #tmbc-corr-full1 +[TMBC-Auth-Byz-link]: #tmbc-auth-byz1 +[TMBC-TIME_PARAMS-link]: #tmbc-time-params1 +[TMBC-FM-2THIRDS-link]: #tmbc-fm-2thirds1 +[TMBC-VAL-CONTAINS-CORR-link]: #tmbc-val-contains-corr1 +[TMBC-VAL-COMMIT-link]: #tmbc-val-commit1 +[TMBC-SOUND-DISTR-POSS-COMMIT-link]: #tmbc-sound-distr-poss-commit1 + +[lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md +[fork-detector]: https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/detection.md +[fullnode]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md + +[ibc-rs]:https://github.com/informalsystems/ibc-rs + +[FN-LuckyCase-link]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md#fn-luckycase + +[blockchain-validator-set]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md#data-structures +[fullnode-data-structures]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md#data-structures + +[FN-ManifestFaulty-link]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md#fn-manifestfaulty + +[arXiv]: https://arxiv.org/abs/1807.04938 diff --git a/spec/light-client/verification/verification_002_draft.md b/spec/light-client/verification/verification_002_draft.md new file mode 100644 index 0000000000..4a9d777645 --- /dev/null +++ b/spec/light-client/verification/verification_002_draft.md @@ -0,0 +1,1061 @@ +# Light Client Verification + +The light client implements a read operation of a +[header][TMBC-HEADER-link] from the [blockchain][TMBC-SEQ-link], by +communicating with full nodes. 
As some full nodes may be faulty, this +functionality must be implemented in a fault-tolerant way. + +In the Tendermint blockchain, the validator set may change with every +new block. The staking and unbonding mechanism induces a [security +model][TMBC-FM-2THIRDS-link]: starting at time *Time* of the +[header][TMBC-HEADER-link], +more than two-thirds of the next validators of a new block are correct +for the duration of *TrustedPeriod*. The fault-tolerant read +operation is designed for this security model. + +The challenge addressed here is that the light client might have a +block of height *h1* and needs to read the block of height *h2* +greater than *h1*. Checking all headers of heights from *h1* to *h2* +might be too costly (e.g., in terms of energy for mobile devices). +This specification tries to reduce the number of intermediate blocks +that need to be checked, by exploiting the guarantees provided by the +[security model][TMBC-FM-2THIRDS-link]. + +# Status + +## Previous Versions + +- [[001_published]](./verification_001_published.md) + is thoroughly reviewed, and the protocol has been +formalized in TLA+ and model checked. + +## Issues that are addressed in this revision + +As it is part of the larger light node, its data structures and +functions interact with the attack dectection functionality of the light +client. As a result of the work on + +- [attack detection](https://github.com/tendermint/spec/pull/164) for light nodes + +- attack detection for IBC and [relayer requirements](https://github.com/informalsystems/tendermint-rs/issues/497) + +- light client + [supervisor](https://github.com/tendermint/spec/pull/159) (also in + [Rust proposal](https://github.com/informalsystems/tendermint-rs/pull/509)) + +adaptations to the semantics and functions exposed by the LightStore +needed to be made. 
In contrast to [version +001](./verification_001_published.md) we specify the following: + +- `VerifyToTarget` and `Backwards` are called with a single lightblock + as root of trust in contrast to passing the complete lightstore. + +- During verification, we record for each lightblock which other + lightblock can be used to verify it in one step. This is needed to + generate verification traces that are needed for IBC. + +# Outline + +- [Part I](#part-i---tendermint-blockchain): Introduction of + relevant terms of the Tendermint +blockchain. + +- [Part II](#part-ii---sequential-definition-of-the-verification-problem): Introduction +of the problem addressed by the Lightclient Verification protocol. + - [Verification Informal Problem + statement](#Verification-Informal-Problem-statement): For the general + audience, that is, engineers who want to get an overview over what + the component is doing from a bird's eye view. + - [Sequential Problem statement](#Sequential-Problem-statement): + Provides a mathematical definition of the problem statement in + its sequential form, that is, ignoring the distributed aspect of + the implementation of the blockchain. + +- [Part III](#part-iii---light-client-as-distributed-system): Distributed + aspects of the light client, system assumptions and temporal + logic specifications. + + - [Incentives](#incentives): how faulty full nodes may benefit from + misbehaving and how correct full nodes benefit from cooperating. + + - [Computational Model](#Computational-Model): + timing and correctness assumptions. + + - [Distributed Problem Statement](#Distributed-Problem-Statement): + temporal properties that formalize safety and liveness + properties in the distributed setting. + +- [Part IV](#part-iv---light-client-verification-protocol): + Specification of the protocols. 
+ + - [Definitions](#Definitions): Describes inputs, outputs, + variables used by the protocol, auxiliary functions + + - [Core Verification](#core-verification): gives an outline of the solution, + and details of the functions used (with preconditions, + postconditions, error conditions). + + - [Liveness Scenarios](#liveness-scenarios): when the light + client makes progress depends heavily on the changes in the + validator sets of the blockchain. We discuss some typical scenarios. + +- [Part V](#part-v---supporting-the-ibc-relayer): The above parts + focus on a common case where the last verified block has height *h1* + and the + requested height *h2* satisfies *h2 > h1*. For IBC, there are + scenarios where this might not be the case. In this part, we provide + some preliminaries for supporting this. As not all details of the + IBC requirements are clear by now, we do not provide a complete + specification at this point. We mark with "Open Question" points + that need to be addressed in order to finalize this specification. + It should be noted that the technically + most challenging case is the one specified in Part IV. + +In this document we quite extensively use tags in order to be able to +reference assumptions, invariants, etc. in future communication. In +these tags we frequently use the following short forms: + +- TMBC: Tendermint blockchain +- SEQ: for sequential specifications +- LCV: Lightclient Verification +- LIVE: liveness +- SAFE: safety +- FUNC: function +- INV: invariant +- A: assumption + +# Part I - Tendermint Blockchain + +## Header Fields necessary for the Light Client + +#### **[TMBC-HEADER.1]** + +A set of blockchain transactions is stored in a data structure called +*block*, which contains a field called *header*. (The data structure +*block* is defined [here][block]). 
As the header contains hashes to +the relevant fields of the block, for the purpose of this +specification, we will assume that the blockchain is a list of +headers, rather than a list of blocks. + +#### **[TMBC-HASH-UNIQUENESS.1]** + +We assume that every hash in the header identifies the data it hashes. +Therefore, in this specification, we do not distinguish between hashes and the +data they represent. + +#### **[TMBC-HEADER-FIELDS.2]** + +A header contains the following fields: + +- `Height`: non-negative integer +- `Time`: time (non-negative integer) +- `LastBlockID`: Hashvalue +- `LastCommit` DomainCommit +- `Validators`: DomainVal +- `NextValidators`: DomainVal +- `Data`: DomainTX +- `AppState`: DomainApp +- `LastResults`: DomainRes + +#### **[TMBC-SEQ.1]** + +The Tendermint blockchain is a list *chain* of headers. + +#### **[TMBC-VALIDATOR-PAIR.1]** + +Given a full node, a +*validator pair* is a pair *(peerID, voting_power)*, where + +- *peerID* is the PeerID (public key) of a full node, +- *voting_power* is an integer (representing the full node's + voting power in a certain consensus instance). + +> In the Golang implementation the data type for *validator +pair* is called `Validator` + +#### **[TMBC-VALIDATOR-SET.1]** + +A *validator set* is a set of validator pairs. For a validator set +*vs*, we write *TotalVotingPower(vs)* for the sum of the voting powers +of its validator pairs. + +#### **[TMBC-VOTE.1]** + +A *vote* contains a `prevote` or `precommit` message sent and signed by +a validator node during the execution of [consensus][arXiv]. Each +message contains the following fields + +- `Type`: prevote or precommit +- `Height`: positive integer +- `Round` a positive integer +- `BlockID` a Hashvalue of a block (not necessarily a block of the chain) + +#### **[TMBC-COMMIT.1]** + +A commit is a set of `precommit` message. 
+ +## Tendermint Failure Model + +#### **[TMBC-AUTH-BYZ.1]** + +We assume the authenticated Byzantine fault model in which no node (faulty or +correct) may break digital signatures, but otherwise, no additional +assumption is made about the internal behavior of faulty +nodes. That is, faulty nodes are only limited in that they cannot forge +messages. + +#### **[TMBC-TIME-PARAMS.1]** + +A Tendermint blockchain has the following configuration parameters: + +- *unbondingPeriod*: a time duration. +- *trustingPeriod*: a time duration smaller than *unbondingPeriod*. + +#### **[TMBC-CORRECT.1]** + +We define a predicate *correctUntil(n, t)*, where *n* is a node and *t* is a +time point. +The predicate *correctUntil(n, t)* is true if and only if the node *n* +follows all the protocols (at least) until time *t*. + +#### **[TMBC-FM-2THIRDS.1]** + +If a block *h* is in the chain, +then there exists a subset *CorrV* +of *h.NextValidators*, such that: + +- *TotalVotingPower(CorrV) > 2/3 + TotalVotingPower(h.NextValidators)*; cf. [TMBC-VALIDATOR-SET.1] +- For every validator pair *(n,p)* in *CorrV*, it holds *correctUntil(n, + h.Time + trustingPeriod)*; cf. [TMBC-CORRECT.1] + +> The definition of correct +> [**[TMBC-CORRECT.1]**][TMBC-CORRECT-link] refers to realtime, while it +> is used here with *Time* and *trustingPeriod*, which are "hardware +> times". We do not make a distinction here. + +#### **[TMBC-CORR-FULL.1]** + +Every correct full node locally stores a prefix of the +current list of headers from [**[TMBC-SEQ.1]**][TMBC-SEQ-link]. 
+ +## What the Light Client Checks + +> From [TMBC-FM-2THIRDS.1] we directly derive the following observation: + +#### **[TMBC-VAL-CONTAINS-CORR.1]** + +Given a (trusted) block *tb* of the blockchain, a given set of full nodes +*N* contains a correct node at a real-time *t*, if + +- *t - trustingPeriod < tb.Time < t* +- the voting power in tb.NextValidators of nodes in *N* is more + than 1/3 of *TotalVotingPower(tb.NextValidators)* + +> The following describes how a commit for a given block *b* must look +> like. + +#### **[TMBC-SOUND-DISTR-POSS-COMMIT.1]** + +For a block *b*, each element *pc* of *PossibleCommit(b)* satisfies: + +- *pc* contains only votes (cf. [TMBC-VOTE.1]) + by validators from *b.Validators* +- the sum of the voting powers in *pc* is greater than 2/3 + *TotalVotingPower(b.Validators)* +- and there is an *r* such that each vote *v* in *pc* satisfies + - v.Type = precommit + - v.Height = b.Height + - v.Round = r + - v.blockID = hash(b) + +> The following property comes from the validity of the [consensus][arXiv]: A +> correct validator node only sends `prevote` or `precommit`, if +> `BlockID` of the new (to-be-decided) block is equal to the hash of +> the last block. + +#### **[TMBC-VAL-COMMIT.1]** + +If for a block *b*, a commit *c* + +- contains at least one validator pair *(v,p)* such that *v* is a + **correct** validator node, and +- is contained in *PossibleCommit(b)* + +then the block *b* is on the blockchain. + +## Context of this document + +In this document we specify the light client verification component, +called *Core Verification*. The *Core Verification* communicates with +a full node. As full nodes may be faulty, it cannot trust the +received information, but the light client has to check whether the +header it receives coincides with the one generated by Tendermint +consensus. 
+ +The two + properties [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link] and +[[TMBC-VAL-COMMIT]][TMBC-VAL-COMMIT-link] formalize the checks done + by this specification: +Given a trusted block *tb* and an untrusted block *ub* with a commit *cub*, +one has to check that *cub* is in *PossibleCommit(ub)*, and that *cub* +contains a correct node using *tb*. + +# Part II - Sequential Definition of the Verification Problem + +## Verification Informal Problem statement + +Given a height *targetHeight* as an input, the *Verifier* eventually +stores a header *h* of height *targetHeight* locally. This header *h* +is generated by the Tendermint [blockchain][block]. In +particular, a header that was not generated by the blockchain should +never be stored. + +## Sequential Problem statement + +#### **[LCV-SEQ-LIVE.1]** + +The *Verifier* gets as input a height *targetHeight*, and eventually stores the +header of height *targetHeight* of the blockchain. + +#### **[LCV-SEQ-SAFE.1]** + +The *Verifier* never stores a header which is not in the blockchain. + +# Part III - Light Client as Distributed System + +## Incentives + +Faulty full nodes may benefit from lying to the light client, by making the +light client accept a block that deviates (e.g., contains additional +transactions) from the one generated by Tendermint consensus. +Users using the light client might be harmed by accepting a forged header. + +The [attack detector][attack-detector] of the light client may help the +correct full nodes to understand whether their header is a good one. +Hence, in combination with the light client detector, the correct full +nodes have the incentive to respond. We can thus base liveness +arguments on the assumption that correct full nodes reliably talk to +the light client. + +## Computational Model + +#### **[LCV-A-PEER.1]** + +The verifier communicates with a full node called *primary*. No assumption is made about the full node (it may be correct or faulty). 
+ +#### **[LCV-A-COMM.1]** + +Communication between the light client and a correct full node is +reliable and bounded in time. Reliable communication means that +messages are not lost, not duplicated, and eventually delivered. There +is a (known) end-to-end delay *Delta*, such that if a message is sent +at time *t* then it is received and processes by time *t + Delta*. +This implies that we need a timeout of at least *2 Delta* for remote +procedure calls to ensure that the response of a correct peer arrives +before the timeout expires. + +#### **[LCV-A-TFM.1]** + +The Tendermint blockchain satisfies the Tendermint failure model [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]. + +#### **[LCV-A-VAL.1]** + +The system satisfies [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] and +[**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]. Thus, there is a +blockchain that satisfies the soundness requirements (that is, the +validation rules in [[block]]). + +## Distributed Problem Statement + +### Two Kinds of Termination + +We do not assume that *primary* is correct. Under this assumption no +protocol can guarantee the combination of the sequential +properties. Thus, in the (unreliable) distributed setting, we consider +two kinds of termination (successful and failure) and we will specify +below under what (favorable) conditions *Core Verification* ensures to +terminate successfully, and satisfy the requirements of the sequential +problem statement: + +#### **[LCV-DIST-TERM.1]** + +*Core Verification* either *terminates +successfully* or it *terminates with failure*. + +### Design choices + +#### **[LCV-DIST-STORE.2]** + +*Core Verification* returns a data structure called *LightStore* that +contains light blocks (that contain a header). 
+ +#### **[LCV-DIST-INIT.2]** + +*Core Verification* is called with + +- *primary*: the PeerID of a full node (with which *Core Verification* communicates) +- *root*: a light block (the root of trust) +- *targetHeight*: a height (the height of a header that should be obtained) + +### Temporal Properties + +#### **[LCV-DIST-SAFE.2]** + +It is always the case that every header in *LightStore* was +generated by an instance of Tendermint consensus. + +#### **[LCV-DIST-LIVE.2]** + +If a new instance of *Core Verification* is called with a +height *targetHeight* greater than root.Header.Height it +must eventually terminate. + +- If + - the *primary* is correct (and locally has the block of + *targetHeight*), and + - the age of root is always less than the trusting period, + then *Core Verification* adds a verified header *hd* with height + *targetHeight* to *LightStore* and it **terminates successfully** + +> These definitions imply that if the primary is faulty, a header may or +> may not be added to *LightStore*. In any case, +> [**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) must hold. +> The invariant [**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) and the liveness +> requirement [**[LCV-DIST-LIVE.2]**](#lcv-dist-life) +> allow that verified headers are added to *LightStore* whose +> height was not passed +> to the verifier (e.g., intermediate headers used in bisection; see below). +> Note that for liveness, initially having a *root* within +> the *trustingPeriod* is not sufficient. However, as this +> specification will leave some freedom with respect to the strategy +> in which order to download intermediate headers, we do not give a +> more precise liveness specification here. After giving the +> specification of the protocol, we will discuss some liveness +> scenarios [below](#liveness-scenarios). + +### Solving the sequential specification + +This specification provides a partial solution to the sequential specification. 
+The *Verifier* solves the invariant of the sequential part + +[**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) => [**[LCV-SEQ-SAFE.1]**](#lcv-seq-safe1) + +In the case the primary is correct, and *root* is a recent header in *LightStore*, the verifier satisfies the liveness requirements. + +⋀ *primary is correct* +⋀ *root.header.Time* > *now* - *trustingPeriod* +⋀ [**[LCV-A-Comm.1]**](#lcv-a-comm) ⋀ ( + ( [**[TMBC-CorrFull.1]**][TMBC-CorrFull-link] ⋀ + [**[LCV-DIST-LIVE.2]**](#lcv-dist-live2) ) + ⟹ [**[LCV-SEQ-LIVE.1]**](#lcv-seq-live1) +) + +# Part IV - Light Client Verification Protocol + +We provide a specification for Light Client Verification. The local +code for verification is presented by a sequential function +`VerifyToTarget` to highlight the control flow of this functionality. +We note that if a different concurrency model is considered for +an implementation, the sequential flow of the function may be +implemented with mutexes, etc. However, the light client verification +is partitioned into three blocks that can be implemented and tested +independently: + +- `FetchLightBlock` is called to download a light block (header) of a + given height from a peer. +- `ValidAndVerified` is a local code that checks the header. +- `Schedule` decides which height to try to verify next. We keep this + underspecified as different implementations (currently in Goland and + Rust) may implement different optimizations here. We just provide + necessary conditions on how the height may evolve. + + + + +## Definitions + +### Data Types + +The core data structure of the protocol is the LightBlock. + +#### **[LCV-DATA-LIGHTBLOCK.1]** + +```go +type LightBlock struct { + Header Header + Commit Commit + Validators ValidatorSet +} +``` + +#### **[LCV-DATA-LIGHTSTORE.2]** + +LightBlocks are stored in a structure which stores all LightBlock from +initialization or received from peers. + +```go +type LightStore struct { + ... 
+} + +``` + +#### **[LCV-DATA-LS-ROOT.2]** + +For each lightblock in a lightstore we record in a field `verification-root` of +type Height. + +> `verification-root` records the height of a lightblock that can be used to verify +> the lightblock in one step + +#### **[LCV-INV-LS-ROOT.2]** + +At all times, if a lightblock *b* in a lightstore has *b.verification-root = h*, +then + +- the lightstore contains a lightblock with height *h*, or +- *b* has the minimal height of all lightblocks in lightstore, then + b.verification-root should be nil. + +The LightStore exposes the following functions to query stored LightBlocks. + +#### **[LCV-DATA-LS-STATE.1]** + +Each LightBlock is in one of the following states: + +```go +type VerifiedState int + +const ( + StateUnverified = iota + 1 + StateVerified + StateFailed + StateTrusted +) +``` + +#### **[LCV-FUNC-GET.1]** + +```go +func (ls LightStore) Get(height Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a LightBlock at a given height or false in the second argument if + the LightStore does not contain the specified LightBlock. + +#### **[LCV-FUNC-LATEST.1]** + +```go +func (ls LightStore) Latest() LightBlock +``` + +- Expected postcondition + - returns the highest light block + +#### **[LCV-FUNC-ADD.1]** + +```go +func (ls LightStore) Add(newBlock) +``` + +- Expected precondition + - the lightstore is empty +- Expected postcondition + - adds newBlock into light store + +#### **[LCV-FUNC-STORE.1]** + +```go +func (ls LightStore) store_chain(newLS LightStore) +``` + +- Expected postcondition + - adds `newLS` to the lightStore. 
+ +#### **[LCV-FUNC-LATEST-VERIF.2]** + +```go +func (ls LightStore) LatestVerified() LightBlock +``` + +- Expected postcondition + - returns the highest light block whose state is `StateVerified` + +#### **[LCV-FUNC-FILTER.1]** + +```go +func (ls LightStore) FilterVerified() LightStore +``` + +- Expected postcondition + - returns all the lightblocks of the lightstore with state `StateVerified` + +#### **[LCV-FUNC-UPDATE.2]** + +```go +func (ls LightStore) Update(lightBlock LightBlock, verifiedState +VerifiedState, root-height Height) +``` + +- Expected postcondition + - the lightblock is part of the lightstore + - The state of the LightBlock is set to *verifiedState*. + - The verification-root of the LightBlock is set to *root-height* + +```go +func (ls LightStore) TraceTo(lightBlock LightBlock) (LightBlock, LightStore) +``` + +- Expected postcondition + - returns a **trusted** lightblock `root` from the lightstore with a height + less than `lightBlock` + - returns a lightstore that contains lightblocks that constitute a + [verification trace](TODOlinkToDetectorSpecOnceThere) from + `root` to `lightBlock` (including `lightBlock`) + +### Inputs + +- *root*: A light block that is trusted +- *primary*: peerID +- *targetHeight*: the height of the needed header + +### Configuration Parameters + +- *trustThreshold*: a float. Can be used if correctness should not be based on more voting power than 1/3. +- *trustingPeriod*: a time duration [**[TMBC-TIME_PARAMS.1]**][TMBC-TIME_PARAMS-link]. +- *clockDrift*: a time duration. Correction parameter dealing with only approximately synchronized clocks. 
+ +### Variables + +- *nextHeight*: initially *targetHeight* + > *nextHeight* should be thought of the "height of the next header we need + > to download and verify" + +### Assumptions + +#### **[LCV-A-INIT.2]** + +- *root* is from the blockchain + +- *targetHeight > root.Header.Height* + +### Invariants + +#### **[LCV-INV-TP.1]** + +It is always the case that *LightStore.LatestTrusted.Header.Time > now - trustingPeriod*. + +> If the invariant is violated, the light client does not have a +> header it can trust. A trusted header must be obtained externally, +> its trust can only be based on social consensus. +> We use the convention that root is assumed to be verified. + +### Used Remote Functions + +We use the functions `commit` and `validators` that are provided +by the [RPC client for Tendermint][RPC]. + +```go +func Commit(height int64) (SignedHeader, error) +``` + +- Implementation remark + - RPC to full node *n* + - JSON sent: + +```javascript +// POST /commit +{ + "jsonrpc": "2.0", + "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request + "method": "commit", + "params": { + "height": 1234 + } +} +``` + +- Expected precondition + - header of `height` exists on blockchain +- Expected postcondition + - if *n* is correct: Returns the signed header of height `height` + from the blockchain if communication is timely (no timeout) + - if *n* is faulty: Returns a signed header with arbitrary content +- Error condition + - if *n* is correct: precondition violated or timeout + - if *n* is faulty: arbitrary error + +----; + +```go +func Validators(height int64) (ValidatorSet, error) +``` + +- Implementation remark + - RPC to full node *n* + - JSON sent: + +```javascript +// POST /validators +{ + "jsonrpc": "2.0", + "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request + "method": "validators", + "params": { + "height": 1234 + } +} +``` + +- Expected precondition + - header of `height` exists on blockchain +- Expected 
postcondition + - if *n* is correct: Returns the validator set of height `height` + from the blockchain if communication is timely (no timeout) + - if *n* is faulty: Returns arbitrary validator set +- Error condition + - if *n* is correct: precondition violated or timeout + - if *n* is faulty: arbitrary error + +----; + +### Communicating Function + +#### **[LCV-FUNC-FETCH.1]** + + ```go +func FetchLightBlock(peer PeerID, height Height) LightBlock +``` + +- Implementation remark + - RPC to peer at *PeerID* + - calls `Commit` for *height* and `Validators` for *height* and *height+1* +- Expected precondition + - `height` is less than or equal to height of the peer **[LCV-IO-PRE-HEIGHT.1]** +- Expected postcondition: + - if *node* is correct: + - Returns the LightBlock *lb* of height `height` + that is consistent with the blockchain + - *lb.provider = peer* **[LCV-IO-POST-PROVIDER.1]** + - *lb.Header* is a header consistent with the blockchain + - *lb.Validators* is the validator set of the blockchain at height *nextHeight* + - *lb.NextValidators* is the validator set of the blockchain at height *nextHeight + 1* + - if *node* is faulty: Returns a LightBlock with arbitrary content + [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] +- Error condition + - if *n* is correct: precondition violated + - if *n* is faulty: arbitrary error + - if *lb.provider != peer* + - times out after 2 Delta (by assumption *n* is faulty) + +----; + +## Core Verification + +### Outline + +The `VerifyToTarget` is the main function and uses the following functions. + +- `FetchLightBlock` is called to download the next light block. It is + the only function that communicates with other nodes +- `ValidAndVerified` checks whether header is valid and checks if a + new lightBlock should be trusted + based on a previously verified lightBlock. +- `Schedule` decides which height to try to verify next + +In the following description of `VerifyToTarget` we do not deal with error +handling. 
If any of the above function returns an error, VerifyToTarget just +passes the error on. + +#### **[LCV-FUNC-MAIN.2]** + +```go +func VerifyToTarget(primary PeerID, root LightBlock, + targetHeight Height) (LightStore, Result) { + + lightStore = new LightStore; + lightStore.Update(root, StateVerified, root.verifiedBy); + nextHeight := targetHeight; + + for lightStore.LatestVerified.height < targetHeight { + + // Get next LightBlock for verification + current, found := lightStore.Get(nextHeight) + if !found { + current = FetchLightBlock(primary, nextHeight) + lightStore.Update(current, StateUnverified, nil) + } + + // Verify + verdict = ValidAndVerified(lightStore.LatestVerified, current) + + // Decide whether/how to continue + if verdict == SUCCESS { + lightStore.Update(current, StateVerified, lightStore.LatestVerified.Height) + } + else if verdict == NOT_ENOUGH_TRUST { + // do nothing + // the light block current passed validation, but the validator + // set is too different to verify it. We keep the state of + // current at StateUnverified. For a later iteration, Schedule + // might decide to try verification of that light block again. 
+ } + else { + // verdict is some error code + lightStore.Update(current, StateFailed, nil) + return (nil, ResultFailure) + } + nextHeight = Schedule(lightStore, nextHeight, targetHeight) + } + return (lightStore.FilterVerified, ResultSuccess) +} +``` + +- Expected precondition + - *root* is within the *trustingPeriod* **[LCV-PRE-TP.1]** + - *targetHeight* is greater than the height of *root* +- Expected postcondition: + - returns *lightStore* that contains a LightBlock that corresponds to a block + of the blockchain of height *targetHeight* + (that is, the LightBlock has been added to *lightStore*) **[LCV-POST-LS.1]** +- Error conditions + - if the precondition is violated + - if `ValidAndVerified` or `FetchLightBlock` report an error + - if [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) is violated + +### Details of the Functions + +#### **[LCV-FUNC-VALID.2]** + +```go +func ValidAndVerified(trusted LightBlock, untrusted LightBlock) Result +``` + +- Expected precondition: + - *untrusted* is valid, that is, satisfies the soundness [checks][block] + - *untrusted* is **well-formed**, that is, + - *untrusted.Header.Time < now + clockDrift* + - *untrusted.Validators = hash(untrusted.Header.Validators)* + - *untrusted.NextValidators = hash(untrusted.Header.NextValidators)* + - *trusted.Header.Time > now - trustingPeriod* + - the `Height` and `Time` of `trusted` are smaller than the Height and + `Time` of `untrusted`, respectively + - the *untrusted.Header* is well-formed (passes the tests from + [[block]]), and in particular + - if the untrusted header `unstrusted.Header` is the immediate + successor of `trusted.Header`, then it holds that + - *trusted.Header.NextValidators = + untrusted.Header.Validators*, and + moreover, + - *untrusted.Header.Commit* + - contains signatures by more than two-thirds of the validators + - contains no signature from nodes that are not in *trusted.Header.NextValidators* +- Expected postcondition: + - Returns `SUCCESS`: + - if *untrusted* is the 
 immediate successor of *trusted*, or otherwise, + - if the signatures of a set of validators that have more than + *max(1/3,trustThreshold)* of voting power in + *trusted.Nextvalidators* is contained in + *untrusted.Commit* (that is, header passes the tests + [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] + and [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link]) + - Returns `NOT_ENOUGH_TRUST` if: + - *untrusted* is *not* the immediate successor of + *trusted* + and the *max(1/3,trustThreshold)* threshold is not reached + (that is, if + [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] + fails and header does not violate the soundness + checks [[block]]). +- Error condition: + - if precondition violated + +----; + +#### **[LCV-FUNC-SCHEDULE.1]** + +```go +func Schedule(lightStore, nextHeight, targetHeight) Height +``` + +- Implementation remark: It picks the next height to be verified. + We keep the precise choice of the next header under-specified. It is + subject to performance optimizations that do not influence the correctness +- Expected postcondition: **[LCV-SCHEDULE-POST.1]** + Return *H* s.t. + 1. if *lightStore.LatestVerified.Height = nextHeight* and + *lightStore.LatestVerified < targetHeight* then + *nextHeight < H <= targetHeight* + 2. if *lightStore.LatestVerified.Height < nextHeight* and + *lightStore.LatestVerified.Height < targetHeight* then + *lightStore.LatestVerified.Height < H < nextHeight* + 3. if *lightStore.LatestVerified.Height = targetHeight* then + *H = targetHeight* + +> Case i. captures the case where the light block at height *nextHeight* +> has been verified, and we can choose a height closer to the *targetHeight*. +> As we get the *lightStore* as parameter, the choice of the next height can +> depend on the *lightStore*, e.g., we can pick a height for which we have +> already downloaded a light block. +> In Case ii. the header of *nextHeight* could not be verified, and we need to pick a smaller height. 
+> In Case iii. is a special case when we have verified the *targetHeight*. + +### Solving the distributed specification + +Analogous to [[001_published]](./verification_001_published.md#solving-the-distributed-specification) + +## Liveness Scenarios + +Analogous to [[001_published]](./verification_001_published.md#liveness-scenarios) + +# Part V - Supporting the IBC Relayer + +The above specification focuses on the most common case, which also +constitutes the most challenging task: using the Tendermint [security +model][TMBC-FM-2THIRDS-link] to verify light blocks without +downloading all intermediate blocks. To focus on this challenge, above +we have restricted ourselves to the case where *targetHeight* is +greater than the height of any trusted header. This simplified +presentation of the algorithm as initially +`lightStore.LatestVerified()` is less than *targetHeight*, and in the +process of verification `lightStore.LatestVerified()` increases until +*targetHeight* is reached. + +For [IBC][ibc-rs] there are two additional challenges: + +1. it might be that some "older" header is needed, that is, +*targetHeight < lightStore.LatestVerified()*. The +[supervisor](../supervisor/supervisor.md) checks whether it is in this +case by calling `LatestPrevious` and `MinVerified` and if so it calls +`Backwards`. All these functions are specified below. + +2. In order to submit proof of a light client attack, a relayer may + need to submit a verification trace. This it is important to + compute such a trace efficiently. That it can be done is based on + the invariant [[LCV-INV-LS-ROOT.2]](#LCV-INV-LS-ROOT2) that needs + to be maintained by the light client. In particular + `VerifyToTarget` and `Backwards` need to take care of setting + `verification-root`. 
+ +#### **[LCV-FUNC-LATEST-PREV.2]** + +```go +func (ls LightStore) LatestPrevious(height Height) (LightBlock, bool) +``` + +- Expected postcondition + - returns a light block *lb* that satisfies: + - *lb* is in lightStore + - *lb* is in StateTrusted + - *lb* is not expired + - *lb.Header.Height < height* + - for all *b* in lightStore s.t. *b* is trusted and not expired it + holds *lb.Header.Height >= b.Header.Height* + - *false* in the second argument if + the LightStore does not contain such an *lb*. + +----; + +#### **[LCV-FUNC-LOWEST.2]** + +```go +func (ls LightStore) Lowest() (LightBlock) +``` + +- Expected postcondition + - returns the lowest trusted light block within trusting period + +----; + +#### **[LCV-FUNC-MIN.2]** + +```go +func (ls LightStore) MinVerified() (LightBlock, bool) +``` + +- Expected postcondition + - returns a light block *lb* that satisfies: + - *lb* is in lightStore + - *lb.Header.Height* is minimal in the lightStore + - *false* in the second argument if + the LightStore does not contain such an *lb*. + +If a height that is smaller than the smallest height in the lightstore +is required, we check the hashes backwards. This is done with the +following function: + +#### **[LCV-FUNC-BACKWARDS.2]** + +```go +func Backwards (primary PeerID, root LightBlock, targetHeight Height) + (LightStore, Result) { + + lb := root; + lightStore := new LightStore; + lightStore.Update(lb, StateTrusted, lb.verifiedBy) + + latest := lb.Header + for i := lb.Header.height - 1; i >= targetHeight; i-- { + // here we download height-by-height. We might first download all + // headers down to targetHeight and then check them. 
+ current := FetchLightBlock(primary,i) + if (hash(current) != latest.Header.LastBlockId) { + return (nil, ResultFailure) + } + else { + // latest and current are linked together by LastBlockId + // therefore it is not relevant which we verified first + // for consistency, we store latest was verified using + // current so that the verifiedBy is always pointing down + // the chain + lightStore.Update(current, StateTrusted, nil) + lightStore.Update(latest, StateTrusted, current.Header.Height) + } + latest = current + } + return (lightStore, ResultSuccess) +} +``` + +# References + +[[block]] Specification of the block data structure. + +[[RPC]] RPC client for Tendermint + +[[attack-detector]] The specification of the light client attack detector. + +[[fullnode]] Specification of the full node API + +[[ibc-rs]] Rust implementation of IBC modules and relayer. + +[[lightclient]] The light client ADR [77d2651 on Dec 27, 2019]. + +[RPC]: https://docs.tendermint.com/v0.34/rpc/ + +[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md + +[TMBC-HEADER-link]: #tmbc-header1 +[TMBC-SEQ-link]: #tmbc-seq1 +[TMBC-CorrFull-link]: #tmbc-corr-full1 +[TMBC-Auth-Byz-link]: #tmbc-auth-byz1 +[TMBC-TIME_PARAMS-link]: #tmbc-time-params1 +[TMBC-FM-2THIRDS-link]: #tmbc-fm-2thirds1 +[TMBC-VAL-CONTAINS-CORR-link]: #tmbc-val-contains-corr1 +[TMBC-VAL-COMMIT-link]: #tmbc-val-commit1 +[TMBC-SOUND-DISTR-POSS-COMMIT-link]: #tmbc-sound-distr-poss-commit1 + +[lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md +[attack-detector]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_001_reviewed.md +[fullnode]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md + +[ibc-rs]:https://github.com/informalsystems/ibc-rs + +[blockchain-validator-set]: 
https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md#data-structures +[fullnode-data-structures]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md#data-structures + +[FN-ManifestFaulty-link]: https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/fullnode.md#fn-manifestfaulty + +[arXiv]: https://arxiv.org/abs/1807.04938 diff --git a/spec/light-client/verification/verification_003_draft.md b/spec/light-client/verification/verification_003_draft.md new file mode 100644 index 0000000000..cd38e7e967 --- /dev/null +++ b/spec/light-client/verification/verification_003_draft.md @@ -0,0 +1,76 @@ +# Light Client Verificaiton + +#### **[LCV-FUNC-VERIFYCOMMITLIGHT.1]** + +VerifyCommitLight verifies that 2/3+ of the signatures for a validator set were for +a given blockID. The function will finish early and thus may not check all signatures. + +```go +func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID, +height int64, commit *Commit) error { + // run a basic validation of the arguments + if err := verifyBasicValsAndCommit(vals, commit, height, blockID); err != nil { + return err + } + + // calculate voting power needed + votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 + + var ( + val *Validator + valIdx int32 + seenVals = make(map[int32]int, len(commit.Signatures)) + talliedVotingPower int64 = 0 + voteSignBytes []byte + ) + for idx, commitSig := range commit.Signatures { + // ignore all commit signatures that are not for the block + if !commitSig.ForBlock() { + continue + } + + // If the vals and commit have a 1-to-1 correspondance we can retrieve + // them by index else we need to retrieve them by address + if lookUpByIndex { + val = vals.Validators[idx] + } else { + valIdx, val = vals.GetByAddress(commitSig.ValidatorAddress) + + // if the signature doesn't belong to anyone in the validator set + // then we just skip over it + if val == nil { + continue + } + + // 
because we are getting validators by address we need to make sure + // that the same validator doesn't commit twice + if firstIndex, ok := seenVals[valIdx]; ok { + secondIndex := idx + return fmt.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) + } + seenVals[valIdx] = idx + } + + voteSignBytes = commit.VoteSignBytes(chainID, int32(idx)) + + if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + + // Add the voting power of the validator + // to the tally + talliedVotingPower += val.VotingPower + + // check if we have enough signatures and can thus exit early + if talliedVotingPower > votingPowerNeeded { + return nil + } + } + + if got, needed := talliedVotingPower, votingPowerNeeded; got <= needed { + return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} + } + + return nil +} +``` \ No newline at end of file diff --git a/spec/p2p/config.md b/spec/p2p/config.md new file mode 100644 index 0000000000..b63c04f28d --- /dev/null +++ b/spec/p2p/config.md @@ -0,0 +1,49 @@ +# P2P Config + +Here we describe configuration options around the Peer Exchange. +These can be set using flags or via the `$TMHOME/config/config.toml` file. + +## Seed Mode + +`--p2p.seed_mode` + +The node operates in seed mode. In seed mode, a node continuously crawls the network for peers, +and upon incoming connection shares some peers and disconnects. + +## Seeds + +`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` + +Dials these seeds when we need more peers. They should return a list of peers and then disconnect. +If we already have enough peers in the address book, we may never need to dial them. 
+ +## Persistent Peers + +`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` + +Dial these peers and auto-redial them if the connection fails. +These are intended to be trusted persistent peers that can help +anchor us in the p2p network. The auto-redial uses exponential +backoff and will give up after a day of trying to connect. + +But If `persistent_peers_max_dial_period` is set greater than zero, +pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period` +during exponential backoff and we keep trying again without giving up + +**Note:** If `seeds` and `persistent_peers` intersect, +the user will be warned that seeds may auto-close connections +and that the node may not be able to keep the connection persistent. + +## Private Peers + +`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` + +These are IDs of the peers that we do not add to the address book or gossip to +other peers. They stay private to us. + +## Unconditional Peers + +`--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` + +These are IDs of the peers which are allowed to be connected by both inbound or outbound regardless of +`max_num_inbound_peers` or `max_num_outbound_peers` of user's node reached or not. diff --git a/spec/p2p/connection.md b/spec/p2p/connection.md new file mode 100644 index 0000000000..33178f4794 --- /dev/null +++ b/spec/p2p/connection.md @@ -0,0 +1,111 @@ +# P2P Multiplex Connection + +## MConnection + +`MConnection` is a multiplex connection that supports multiple independent streams +with distinct quality of service guarantees atop a single TCP connection. +Each stream is known as a `Channel` and each `Channel` has a globally unique _byte id_. 
+Each `Channel` also has a relative priority that determines the quality of service +of the `Channel` compared to other `Channel`s. +The _byte id_ and the relative priorities of each `Channel` are configured upon +initialization of the connection. + +The `MConnection` supports three packet types: + +- Ping +- Pong +- Msg + +### Ping and Pong + +The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively. + +When we haven't received any messages on an `MConnection` in time `pingTimeout`, we send a ping message. +When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages +to send and the peer has not sent us too many pings (TODO). + +If a pong or message is not received in sufficient time after a ping, the peer is disconnected from. + +### Msg + +Messages in channels are chopped into smaller `msgPacket`s for multiplexing. + +```go +type msgPacket struct { + ChannelID byte + EOF byte // 1 means message ends here. + Bytes []byte +} +``` + +The `msgPacket` is serialized using [Proto3](https://developers.google.com/protocol-buffers/docs/proto3). +The received `Bytes` of a sequential set of packets are appended together +until a packet with `EOF=1` is received, then the complete serialized message +is returned for processing by the `onReceive` function of the corresponding channel. + +### Multiplexing + +Messages are sent from a single `sendRoutine`, which loops over a select statement and results in the sending +of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels. +Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time. +Messages are chosen for a batch one at a time from the channel with the lowest ratio of recently sent bytes to channel priority. 
+ +## Sending Messages + +There are two methods for sending messages: + +```go +func (m MConnection) Send(chID byte, msg interface{}) bool {} +func (m MConnection) TrySend(chID byte, msg interface{}) bool {} +``` + +`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued +for the channel with the given id byte `chID`. The message `msg` is serialized +using the `tendermint/go-amino` submodule's `WriteBinary()` reflection routine. + +`TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel +with the given id byte chID if the queue is not full; otherwise it returns false immediately. + +`Send()` and `TrySend()` are also exposed for each `Peer`. + +## Peer + +Each peer has one `MConnection` instance, and includes other information such as whether the connection +was outbound, whether the connection should be recreated if it closes, various identity information about the node, +and other higher level thread-safe data used by the reactors. + +## Switch/Reactor + +The `Switch` handles peer connections and exposes an API to receive incoming messages +on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one +or more `Channels`. So while sending outgoing messages is typically performed on the peer, +incoming messages are received on the reactor. + +```go +// Declare a MyReactor reactor that handles messages on MyChannelID. +type MyReactor struct{} + +func (reactor MyReactor) GetChannels() []*ChannelDescriptor { + return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}} +} + +func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) { + r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error) + msgString := ReadString(r, n, err) + fmt.Println(msgString) +} + +// Other Reactor methods omitted for brevity +... + +switch := NewSwitch([]Reactor{MyReactor{}}) + +... 
+ +// Send a random message to all outbound connections +for _, peer := range switch.Peers().List() { + if peer.IsOutbound() { + peer.Send(MyChannelID, "Here's a random message") + } +} +``` diff --git a/spec/p2p/messages/README.md b/spec/p2p/messages/README.md new file mode 100644 index 0000000000..1b5f5c60dd --- /dev/null +++ b/spec/p2p/messages/README.md @@ -0,0 +1,19 @@ +--- +order: 1 +parent: + title: Messages + order: 1 +--- + +# Messages + +An implementation of the spec consists of many components. While many parts of these components are implementation specific, the p2p messages are not. In this section we will be covering all the p2p messages of components. + +There are two parts to the P2P messages, the message and the channel. The channel is message specific and messages are specific to components of Tendermint. When a node connect to a peer it will tell the other node which channels are available. This notifies the peer what services the connecting node offers. You can read more on channels in [connection.md](../connection.md#mconnection) + +- [Block Sync](./block-sync.md) +- [Mempool](./mempool.md) +- [Evidence](./evidence.md) +- [State Sync](./state-sync.md) +- [Pex](./pex.md) +- [Consensus](./consensus.md) diff --git a/spec/p2p/messages/block-sync.md b/spec/p2p/messages/block-sync.md new file mode 100644 index 0000000000..48aa6155fd --- /dev/null +++ b/spec/p2p/messages/block-sync.md @@ -0,0 +1,68 @@ +--- +order: 2 +--- + +# Block Sync + +## Channel + +Block sync has one channel. + +| Name | Number | +|-------------------|--------| +| BlockchainChannel | 64 | + +## Message Types + +There are multiple message types for Block Sync + +### BlockRequest + +BlockRequest asks a peer for a block at the height specified. 
+ +| Name | Type | Description | Field Number | +|--------|-------|---------------------------|--------------| +| Height | int64 | Height of requested block | 1 | + +### NoBlockResponse + +NoBlockResponse notifies the peer requesting a block that the node does not contain it. + +| Name | Type | Description | Field Number | +|--------|-------|---------------------------|--------------| +| Height | int64 | Height of requested block | 1 | + +### BlockResponse + +BlockResponse contains the block requested. + +| Name | Type | Description | Field Number | +|-------|----------------------------------------------|-----------------|--------------| +| Block | [Block](../../core/data_structures.md#block) | Requested Block | 1 | + +### StatusRequest + +StatusRequest is an empty message that notifies the peer to respond with the highest and lowest blocks it has stored. + +> Empty message. + +### StatusResponse + +StatusResponse responds to a peer with the highest and lowest block stored. + +| Name | Type | Description | Field Number | +|--------|-------|-------------------------------------------------------------------|--------------| +| Height | int64 | Current Height of a node | 1 | +| base | int64 | First known block, if pruning is enabled it will be higher than 1 | 1 | + +### Message + +Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of five messages. 
+ +| Name              | Type                             | Description                                                  | Field Number | +|-------------------|----------------------------------|--------------------------------------------------------------|--------------| +| block_request     | [BlockRequest](#blockrequest)    | Request a block from a peer                                  | 1            | +| no_block_response | [NoBlockResponse](#noblockresponse) | Response saying it does not have the requested block      | 2            | +| block_response    | [BlockResponse](#blockresponse)  | Response with requested block                                | 3            | +| status_request    | [StatusRequest](#statusrequest)  | Request the highest and lowest block numbers from a peer     | 4            | +| status_response   | [StatusResponse](#statusresponse) | Response with the highest and lowest block numbers the store | 5            | diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/messages/consensus.md new file mode 100644 index 0000000000..48055d9da2 --- /dev/null +++ b/spec/p2p/messages/consensus.md @@ -0,0 +1,149 @@ +--- +order: 7 +--- + +# Consensus + +## Channel + +Consensus has four separate channels. The channel identifiers are listed below. + +| Name               | Number | +|--------------------|--------| +| StateChannel       | 32     | +| DataChannel        | 33     | +| VoteChannel        | 34     | +| VoteSetBitsChannel | 35     | + +## Message Types + +### Proposal + +Proposal is sent when a new block is proposed. It is a suggestion of what the +next block in the blockchain should be. + +| Name     | Type                                               | Description                            | Field Number | +|----------|----------------------------------------------------|----------------------------------------|--------------| +| proposal | [Proposal](../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1            | + +### Vote + +Vote is sent to vote for some block (or to inform others that a process does not vote in the +current round). 
Vote is defined in the +[Blockchain](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/data_structures.md#blockidd) +section and contains validator's +information (validator address and index), height and round for which the vote is sent, vote type, +blockID if process vote for some block (`nil` otherwise) and a timestamp when the vote is sent. The +message is signed by the validator private key. + +| Name | Type | Description | Field Number | +|------|--------------------------------------------|---------------------------|--------------| +| vote | [Vote](../../core/data_structures.md#vote) | Vote for a proposed Block | 1 | + +### BlockPart + +BlockPart is sent when gossiping a piece of the proposed block. It contains height, round +and the block part. + +| Name | Type | Description | Field Number | +|--------|--------------------------------------------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block. | 1 | +| round | int32 | Round of voting to finalize the block. | 2 | +| part | [Part](../../core/data_structures.md#part) | A part of the block. | 3 | + +### NewRoundStep + +NewRoundStep is sent for every step transition during the core consensus algorithm execution. +It is used in the gossip part of the Tendermint protocol to inform peers about a current +height/round/step a process is in. + +| Name | Type | Description | Field Number | +|--------------------------|--------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| round | int32 | Round of voting to finalize the block. | 2 | +| step | uint32 | | 3 | +| seconds_since_start_time | int64 | | 4 | +| last_commit_round | int32 | | 5 | + +### NewValidBlock + +NewValidBlock is sent when a validator observes a valid block B in some round r, +i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. 
+It contains height and round in which valid block is observed, block parts header that describes +the valid block and is used to obtain all +block parts, and a bit array of the block parts a process currently has, so its peers can know what +parts it is missing so they can send them. +In case the block is also committed, then IsCommit flag is set to true. + +| Name | Type | Description | Field Number | +|-----------------------|--------------------------------------------------------------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| round | int32 | Round of voting to finalize the block. | 2 | +| block_part_set_header | [PartSetHeader](../../core/data_structures.md#partsetheader) | | 3 | +| block_parts | int32 | | 4 | +| is_commit | bool | | 5 | + +### ProposalPOL + +ProposalPOL is sent when a previous block is re-proposed. +It is used to inform peers in what round the process learned for this block (ProposalPOLRound), +and what prevotes for the re-proposed block the process has. + +| Name | Type | Description | Field Number | +|--------------------|----------|-------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| proposal_pol_round | int32 | | 2 | +| proposal_pol | bitarray | | 3 | + +### ReceivedVote + +ReceivedVote is sent to indicate that a particular vote has been received. It contains height, +round, vote type and the index of the validator that is the originator of the corresponding vote. + +| Name | Type | Description | Field Number | +|--------|------------------------------------------------------------------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| round | int32 | Round of voting to finalize the block. 
| 2 | +| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| index | int32 | | 4 | + +### VoteSetMaj23 + +VoteSetMaj23 is sent to indicate that a process has seen +2/3 votes for some BlockID. +It contains height, round, vote type and the BlockID. + +| Name | Type | Description | Field Number | +|--------|------------------------------------------------------------------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| round | int32 | Round of voting to finalize the block. | 2 | +| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | + +### VoteSetBits + +VoteSetBits is sent to communicate the bit-array of votes a process has seen for a given +BlockID. It contains height, round, vote type, BlockID and a bit array of +the votes a process has. + +| Name | Type | Description | Field Number | +|----------|------------------------------------------------------------------|----------------------------------------|--------------| +| height | int64 | Height of corresponding block | 1 | +| round | int32 | Round of voting to finalize the block. | 2 | +| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| block_id | [BlockID](../../core/data_structures.md#blockid) | | 4 | +| votes | BitArray | Round of voting to finalize the block. | 5 | + +### Message + +Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). + +| Name | Type | Description | Field Number | +|-----------------|---------------------------------|----------------------------------------|--------------| +| new_round_step | [NewRoundStep](#newroundstep) | Height of corresponding block | 1 | +| new_valid_block | [NewValidBlock](#newvalidblock) | Round of voting to finalize the block. 
| 2 | +| proposal | [Proposal](#proposal) | | 3 | +| proposal_pol | [ProposalPOL](#proposalpol) | | 4 | +| block_part | [BlockPart](#blockpart) | | 5 | +| vote | [Vote](#vote) | | 6 | +| received_vote | [ReceivedVote](#ReceivedVote) | | 7 | +| vote_set_maj23 | [VoteSetMaj23](#votesetmaj23) | | 8 | +| vote_set_bits | [VoteSetBits](#votesetbits) | | 9 | diff --git a/spec/p2p/messages/evidence.md b/spec/p2p/messages/evidence.md new file mode 100644 index 0000000000..34fc40a915 --- /dev/null +++ b/spec/p2p/messages/evidence.md @@ -0,0 +1,23 @@ +--- +order: 3 +--- + +# Evidence + +## Channel + +Evidence has one channel. The channel identifier is listed below. + +| Name | Number | +|-----------------|--------| +| EvidenceChannel | 56 | + +## Message Types + +### EvidenceList + +EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places, as a p2p message and within the block [block](../../core/data_structures.md#block) as well. + +| Name | Type | Description | Field Number | +|----------|-------------------------------------------------------------|------------------------|--------------| +| evidence | repeated [Evidence](../../core/data_structures.md#evidence) | List of valid evidence | 1 | diff --git a/spec/p2p/messages/mempool.md b/spec/p2p/messages/mempool.md new file mode 100644 index 0000000000..8f3925cad5 --- /dev/null +++ b/spec/p2p/messages/mempool.md @@ -0,0 +1,33 @@ +--- +order: 4 +--- +# Mempool + +## Channel + +Mempool has one channel. The channel identifier is listed below. + +| Name | Number | +|----------------|--------| +| MempoolChannel | 48 | + +## Message Types + +There is currently only one message that Mempool broadcasts and receives over +the p2p gossip network (via the reactor): `TxsMessage` + +### Txs + +A list of transactions. These transactions have been checked against the application for validity. 
This does not mean that the transactions are valid, it is up to the application to check this. + +| Name | Type | Description | Field Number | +|------|----------------|----------------------|--------------| +| txs | repeated bytes | List of transactions | 1 | + +### Message + +Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The one of consists of one message [`Txs`](#txs). + +| Name | Type | Description | Field Number | +|------|-------------|-----------------------|--------------| +| txs | [Txs](#txs) | List of transactions | 1 | diff --git a/spec/p2p/messages/pex.md b/spec/p2p/messages/pex.md new file mode 100644 index 0000000000..e12a076e56 --- /dev/null +++ b/spec/p2p/messages/pex.md @@ -0,0 +1,76 @@ +--- +order: 6 +--- + +# Peer Exchange + +## Channels + +Pex has one channel. The channel identifier is listed below. + +| Name | Number | +|------------|--------| +| PexChannel | 0 | + +## Message Types + +The current PEX service has two versions. The first uses IP/port pair but since the p2p stack is moving towards a transport agnostic approach, +node endpoints require a `Protocol` and `Path` hence the V2 version uses a [url](https://golang.org/pkg/net/url/#URL) instead. + +### PexRequest + +PexRequest is an empty message requesting a list of peers. + +> EmptyRequest + +### PexResponse + +PexResponse is an list of net addresses provided to a peer to dial. + +| Name | Type | Description | Field Number | +|-------|------------------------------------|------------------------------------------|--------------| +| addresses | repeated [PexAddress](#PexAddress) | List of peer addresses available to dial | 1 | + +### PexAddress + +PexAddress provides needed information for a node to dial a peer. 
+ +| Name | Type | Description | Field Number | +|------|--------|------------------|--------------| +| id | string | NodeID of a peer | 1 | +| ip | string | The IP of a node | 2 | +| port | uint32 | Port of a peer | 3 | + + +### PexRequestV2 + +PexRequestV2 is an empty message requesting a list of peers. + +> EmptyRequest + +### PexResponseV2 + +PexResponseV2 is a list of net addresses provided to a peer to dial. + +| Name | Type | Description | Field Number | +|-------|------------------------------------|------------------------------------------|--------------| +| addresses | repeated [PexAddressV2](#PexAddressV2) | List of peer addresses available to dial | 1 | + +### PexAddressV2 + +PexAddressV2 provides needed information for a node to dial a peer. + +| Name | Type | Description | Field Number | +|------|--------|------------------|--------------| +| url | string | See [golang url](https://golang.org/pkg/net/url/#URL) | 1 | + +### Message + +Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of four messages. + +| Name | Type | Description | Field Number | +|--------------|---------------------------|------------------------------------------------------|--------------| +| pex_request | [PexRequest](#PexRequest) | Empty request asking for a list of addresses to dial | 1 | +| pex_response | [PexResponse](#PexResponse) | List of addresses to dial | 2 | +| pex_request_v2 | [PexRequestV2](#PexRequestV2) | Empty request asking for a list of addresses to dial | 3 | +| pex_response_v2 | [PexResponseV2](#PexResponseV2) | List of addresses to dial | 4 | diff --git a/spec/p2p/messages/state-sync.md b/spec/p2p/messages/state-sync.md new file mode 100644 index 0000000000..71e3ae71b1 --- /dev/null +++ b/spec/p2p/messages/state-sync.md @@ -0,0 +1,133 @@ +--- +order: 5 +--- + +# State Sync + +## Channels + +State sync has four distinct channels. The channel identifiers are listed below. 
+ +| Name | Number | +|-------------------|--------| +| SnapshotChannel | 96 | +| ChunkChannel | 97 | +| LightBlockChannel | 98 | +| ParamsChannel | 99 | + +## Message Types + +### SnapshotRequest + +When a new node begin state syncing, it will ask all peers it encounters if it has any +available snapshots: + +| Name | Type | Description | Field Number | +|----------|--------|-------------|--------------| + +### SnapShotResponse + +The receiver will query the local ABCI application via `ListSnapshots`, and send a message +containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots: and stored at the application layer. When a peer is starting it will request snapshots. + +| Name | Type | Description | Field Number | +|----------|--------|-----------------------------------------------------------|--------------| +| height | uint64 | Height at which the snapshot was taken | 1 | +| format | uint32 | Format of the snapshot. | 2 | +| chunks | uint32 | How many chunks make up the snapshot | 3 | +| hash | bytes | Arbitrary snapshot hash | 4 | +| metadata | bytes | Arbitrary application data. **May be non-deterministic.** | 5 | + +### ChunkRequest + +The node running state sync will offer these snapshots to the local ABCI application via +`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot +is accepted, the state syncer will request snapshot chunks from appropriate peers: + +| Name | Type | Description | Field Number | +|--------|--------|-------------------------------------------------------------|--------------| +| height | uint64 | Height at which the chunk was created | 1 | +| format | uint32 | Format chosen for the chunk. **May be non-deterministic.** | 2 | +| index | uint32 | Index of the chunk within the snapshot. 
| 3 | + +### ChunkResponse + +The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`, +and respond with it (limited to 16 MB): + +| Name | Type | Description | Field Number | +|---------|--------|-------------------------------------------------------------|--------------| +| height | uint64 | Height at which the chunk was created | 1 | +| format | uint32 | Format chosen for the chunk. **May be non-deterministic.** | 2 | +| index | uint32 | Index of the chunk within the snapshot. | 3 | +| hash | bytes | Arbitrary snapshot hash | 4 | +| missing | bool | Arbitrary application data. **May be non-deterministic.** | 5 | + +Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty +chunk is a valid (although unlikely) response. + +The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot +is restored. If a chunk response is not returned within some time, it will be re-requested, +possibly from a different peer. + +The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol. + +### LightBlockRequest + +To verify state and to provide state relevant information for consensus, the node will ask peers for +light blocks at specified heights. + +| Name | Type | Description | Field Number | +|----------|--------|----------------------------|--------------| +| height | uint64 | Height of the light block | 1 | + +### LightBlockResponse + +The receiver will retrieve and construct the light block from both the block and state stores. The +receiver will verify the data by comparing the hashes and store the header, commit and validator set +if necessary. The light block at the height of the snapshot will be used to verify the `AppHash`. 
+ +| Name | Type | Description | Field Number | +|---------------|---------------------------------------------------------|--------------------------------------|--------------| +| light_block | [LightBlock](../../core/data_structures.md#lightblock) | Light block at the height requested | 1 | + +State sync will use [light client verification](../../light-client/verification/README.md) to verify +the light blocks. + + +If no state sync is in progress (i.e. during normal operation), any unsolicited response messages +are discarded. + +### ParamsRequest + +In order to build tendermint state, the state provider will request the params at the height of the snapshot and use the header to verify it. + +| Name | Type | Description | Field Number | +|----------|--------|----------------------------|--------------| +| height | uint64 | Height of the consensus params | 1 | + + +### ParamsResponse + +A receiver of the request will use the state store to fetch the consensus params at that height and return it to the sender. + +| Name | Type | Description | Field Number | +|----------|--------|---------------------------------|--------------| +| height | uint64 | Height of the consensus params | 1 | +| consensus_params | [ConsensusParams](../../core/data_structures.md#ConsensusParams) | Consensus params at the height requested | 2 | + + +### Message + +Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of eight messages. + +| Name | Type | Description | Field Number | +|----------------------|--------------------------------------------|----------------------------------------------|--------------| +| snapshots_request | [SnapshotRequest](#snapshotrequest) | Request a recent snapshot from a peer | 1 | +| snapshots_response | [SnapshotResponse](#snapshotresponse) | Respond with the most recent snapshot stored | 2 | +| chunk_request | [ChunkRequest](#chunkrequest) | Request chunks of the snapshot. 
| 3 | +| chunk_response | [ChunkResponse](#chunkresponse) | Response of chunks used to recreate state. | 4 | +| light_block_request | [LightBlockRequest](#lightblockrequest) | Request a light block. | 5 | +| light_block_response | [LightBlockResponse](#lightblockresponse) | Respond with a light block | 6 | +| params_request | [ParamsRequest](#paramsrequest) | Request the consensus params at a height. | 7 | +| params_response | [ParamsResponse](#paramsresponse) | Respond with the consensus params | 8 | diff --git a/spec/p2p/node.md b/spec/p2p/node.md new file mode 100644 index 0000000000..45559e97c5 --- /dev/null +++ b/spec/p2p/node.md @@ -0,0 +1,67 @@ +# Peer Discovery + +A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to one another. +This document describes what kind of nodes Tendermint should enable and how they should work. + +## Seeds + +Seeds are the first point of contact for a new node. +They return a list of known active peers and then disconnect. + +Seeds should operate full nodes with the PEX reactor in a "crawler" mode +that continuously explores to validate the availability of peers. + +Seeds should only respond with some top percentile of the best peers they know about. +See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/reactors/pex/pex.md) for + details on peer quality. + +## New Full Node + +A new node needs a few things to connect to the network: + +- a list of seeds, which can be provided to Tendermint via config file or flags, + or hardcoded into the software by in-process apps +- a `ChainID`, also called `Network` at the p2p layer +- a recent block height, H, and hash, HASH, for the blockchain. + +The values `H` and `HASH` must be received and corroborated by means external to Tendermint, and specific to the user - ie. via the user's trusted social consensus. 
+This requirement to validate `H` and `HASH` out-of-band and via social consensus +is the essential difference in security models between Proof-of-Work and Proof-of-Stake blockchains. + +With the above, the node then queries some seeds for peers for its chain, +dials those peers, and runs the Tendermint protocols with those it successfully connects to. + +When the peer catches up to height H, it ensures the block hash matches HASH. +If not, Tendermint will exit, and the user must try again - either they are connected +to bad peers or their social consensus is invalid. + +## Restarted Full Node + +A node checks its address book on startup and attempts to connect to peers from there. +If it can't connect to any peers after some time, it falls back to the seeds to find more. + +Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up +to the latest state of the blockchain from wherever they were last. +In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length +of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again +so they know they have synced the correct chain. + +## Validator Node + +A validator node is a node that interfaces with a validator signing key. +These nodes require the highest security, and should not accept incoming connections. +They should maintain outgoing connections to a controlled set of "Sentry Nodes" that serve +as their proxy shield to the rest of the network. + +Validators that know and trust each other can accept incoming connections from one another and maintain direct private connectivity via VPN. + +## Sentry Node + +Sentry nodes are guardians of a validator node and provide it access to the rest of the network. +They should be well connected to other full nodes on the network. +Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other. 
+They should always expect to have direct incoming connections from the validator node and its backup(s). +They do not report the validator node's address in the PEX and +they may be more strict about the quality of peers they keep. + +Sentry nodes belonging to validators that trust each other may wish to maintain persistent connections via VPN with one another, but only report each other sparingly in the PEX. diff --git a/spec/p2p/peer.md b/spec/p2p/peer.md new file mode 100644 index 0000000000..39410ce12e --- /dev/null +++ b/spec/p2p/peer.md @@ -0,0 +1,130 @@ +# Peers + +This document explains how Tendermint Peers are identified and how they connect to one another. + +For details on peer discovery, see the [peer exchange (PEX) reactor doc](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/reactors/pex/pex.md). + +## Peer Identity + +Tendermint peers are expected to maintain long-term persistent identities in the form of a public key. +Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in `crypto` package. + +A single peer ID can have multiple IP addresses associated with it, but a node +will only ever connect to one at a time. + +When attempting to connect to a peer, we use the PeerURL: `@:`. +We will attempt to connect to the peer at IP:PORT, and verify, +via authenticated encryption, that it is in possession of the private key +corresponding to ``. This prevents man-in-the-middle attacks on the peer layer. + +## Connections + +All p2p connections use TCP. +Upon establishing a successful TCP connection with a peer, +two handshakes are performed: one for authenticated encryption, and one for Tendermint versioning. +Both handshakes have configurable timeouts (they should complete quickly). + +### Authenticated Encryption Handshake + +Tendermint implements the Station-to-Station protocol +using X25519 keys for Diffie-Helman key-exchange and chacha20poly1305 for encryption. 
+ +Previous versions of this protocol (0.32 and below) suffered from malleability attacks whereas an active man +in the middle attacker could compromise confidentiality as described in [Prime, Order Please! +Revisiting Small Subgroup and Invalid Curve Attacks on +Protocols using Diffie-Hellman](https://eprint.iacr.org/2019/526.pdf). + +We have added dependency on the Merlin a keccak based transcript hashing protocol to ensure non-malleability. + +It goes as follows: + +- generate an ephemeral X25519 keypair +- send the ephemeral public key to the peer +- wait to receive the peer's ephemeral public key +- create a new Merlin Transcript with the string "TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH" +- Sort the ephemeral keys and add the high labeled "EPHEMERAL_UPPER_PUBLIC_KEY" and the low keys labeled "EPHEMERAL_LOWER_PUBLIC_KEY" to the Merlin transcript. +- compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key +- add the DH secret to the transcript labeled DH_SECRET. +- generate two keys to use for encryption (sending and receiving) and a challenge for authentication as follows: + - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as + `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN` + - get 64 bytes of output from hkdf-sha256 + - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite. +- use a separate nonce for receiving and sending. Both nonces start at 0, and should support the full 96 bit nonce range +- all communications from now on are encrypted in 1400 byte frames (plus encoding overhead), + using the respective secret and nonce. Each nonce is incremented by one after each use. 
+- we now have an encrypted channel, but still need to authenticate +- extract a 32 bytes challenge from merlin transcript with the label "SECRET_CONNECTION_MAC" +- sign the common challenge obtained from the hkdf with our persistent private key +- send the amino encoded persistent pubkey and signature to the peer +- wait to receive the persistent public key and signature from the peer +- verify the signature on the challenge using the peer's persistent public key + +If this is an outgoing connection (we dialed the peer) and we used a peer ID, +then finally verify that the peer's persistent public key corresponds to the peer ID we dialed, +ie. `peer.PubKey.Address() == `. + +The connection has now been authenticated. All traffic is encrypted. + +Note: only the dialer can authenticate the identity of the peer, +but this is what we care about since when we join the network we wish to +ensure we have reached the intended peer (and are not being MITMd). + +### Peer Filter + +Before continuing, we check if the new peer has the same ID as ourselves or +an existing peer. If so, we disconnect. + +We also check the peer's address and public key against +an optional whitelist which can be managed through the ABCI app - +if the whitelist is enabled and the peer does not qualify, the connection is +terminated. 
+ +### Tendermint Version Handshake + +The Tendermint Version Handshake allows the peers to exchange their NodeInfo: + +```golang +type NodeInfo struct { + Version p2p.Version + ID p2p.ID + ListenAddr string + + Network string + SoftwareVersion string + Channels []int8 + + Moniker string + Other NodeInfoOther +} + +type Version struct { + P2P uint64 + Block uint64 + App uint64 +} + +type NodeInfoOther struct { + TxIndex string + RPCAddress string +} +``` + +The connection is disconnected if: + +- `peer.NodeInfo.ID` is not equal `peerConn.ID` +- `peer.NodeInfo.Version.Block` does not match ours +- `peer.NodeInfo.Network` is not the same as ours +- `peer.Channels` does not intersect with our known Channels. +- `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be + resolved + +At this point, if we have not disconnected, the peer is valid. +It is added to the switch and hence all reactors via the `AddPeer` method. +Note that each reactor may handle multiple channels. + +## Connection Activity + +Once a peer is added, incoming messages for a given reactor are handled through +that reactor's `Receive` method, and output messages are sent directly by the Reactors +on each peer. A typical reactor maintains per-peer go-routine(s) that handle this. diff --git a/spec/p2p/readme.md b/spec/p2p/readme.md new file mode 100644 index 0000000000..96867aad05 --- /dev/null +++ b/spec/p2p/readme.md @@ -0,0 +1,6 @@ +--- +order: 1 +parent: + title: P2P + order: 6 +--- diff --git a/spec/rpc/README.md b/spec/rpc/README.md new file mode 100644 index 0000000000..7cdf417dca --- /dev/null +++ b/spec/rpc/README.md @@ -0,0 +1,1264 @@ +--- +order: 1 +parent: + title: RPC + order: 6 +--- + +# RPC spec + +This file defines the JSON-RPC spec of Tendermint. This is meant to be implemented by all clients. 
+ +## Support + + | | [Tendermint-Go](https://github.com/tendermint/tendermint/) | [endermint-Rs](https://github.com/informalsystems/tendermint-rs) | + |--------------|:----------------------------------------------------------:|:----------------------------------------------------------------:| + | JSON-RPC 2.0 | ✅ | ✅ | + | HTTP | ✅ | ✅ | + | HTTPS | ✅ | ❌ | + | WS | ✅ | ✅ | + + | Routes | [Tendermint-Go](https://github.com/tendermint/tendermint/) | [Tendermint-Rs](https://github.com/informalsystems/tendermint-rs) | + |-----------------------------------------|:----------------------------------------------------------:|:-----------------------------------------------------------------:| + | [Health](#health) | ✅ | ✅ | + | [Status](#status) | ✅ | ✅ | + | [NetInfo](#netinfo) | ✅ | ✅ | + | [Blockchain](#blockchain) | ✅ | ✅ | + | [Block](#block) | ✅ | ✅ | + | [BlockByHash](#blockbyhash) | ✅ | ❌ | + | [BlockResults](#blockresults) | ✅ | ✅ | + | [Commit](#commit) | ✅ | ✅ | + | [Validators](#validators) | ✅ | ✅ | + | [Genesis](#genesis) | ✅ | ✅ | + | [GenesisChunked](#genesischunked) | ✅ | ❌ | + | [ConsensusParams](#consensusparams) | ✅ | ❌ | + | [UnconfirmedTxs](#unconfirmedtxs) | ✅ | ❌ | + | [NumUnconfirmedTxs](#numunconfirmedtxs) | ✅ | ❌ | + | [Tx](#tx) | ✅ | ❌ | + | [BroadCastTxSync](#broadcasttxsync) | ✅ | ✅ | + | [BroadCastTxAsync](#broadcasttxasync) | ✅ | ✅ | + | [ABCIInfo](#abciinfo) | ✅ | ✅ | + | [ABCIQuery](#abciquery) | ✅ | ✅ | + | [BroadcastTxAsync](#broadcasttxasync) | ✅ | ✅ | + | [BroadcastEvidence](#broadcastevidence) | ✅ | ✅ | + +## Timestamps + +Timestamps in the RPC layer of Tendermint follows RFC3339Nano. The RFC3339Nano format removes trailing zeros from the seconds field. + +This means if a block has a timestamp like: `1985-04-12T23:20:50.5200000Z`, the value returned in the RPC will be `1985-04-12T23:20:50.52Z`. 
+ + + +## Info Routes + +### Health + +Node heartbeat + +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/health +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"health\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": -1, + "result": {} +} +``` + +### Status + +Get Tendermint status including node info, pubkey, latest block hash, app hash, block height and time. + +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/status +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"status\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": -1, + "result": { + "node_info": { + "protocol_version": { + "p2p": "8", + "block": "11", + "app": "0" + }, + "id": "b93270b358a72a2db30089f3856475bb1f918d6d", + "listen_addr": "tcp://0.0.0.0:26656", + "network": "cosmoshub-4", + "version": "v0.34.8", + "channels": "40202122233038606100", + "moniker": "aib-hub-node", + "other": { + "tx_index": "on", + "rpc_address": "tcp://0.0.0.0:26657" + } + }, + "sync_info": { + "latest_block_hash": "50F03C0EAACA8BCA7F9C14189ACE9C05A9A1BBB5268DB63DC6A3C848D1ECFD27", + "latest_app_hash": "2316CFF7644219F4F15BEE456435F280E2B38955EEA6D4617CCB6D7ABF781C22", + "latest_block_height": "5622165", + "latest_block_time": "2021-03-25T14:00:43.356134226Z", + "earliest_block_hash": "1455A0C15AC49BB506992EC85A3CD4D32367E53A087689815E01A524231C3ADF", + "earliest_app_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "earliest_block_height": "5200791", + "earliest_block_time": "2019-12-11T16:11:34Z", + "catching_up": false + }, + "validator_info": { + "address": "38FB765D0092470989360ECA1C89CD06C2C1583C", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Z+8kntVegi1sQiWLYwFSVLNWqdAUGEy7lskL78gxLZI=" + }, + "voting_power": "0" + } + 
} +} +``` + +### NetInfo + +Network information + +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/net_info +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"net_info\"}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "listening": true, + "listeners": [ + "Listener(@)" + ], + "n_peers": "1", + "peers": [ + { + "node_id": "5576458aef205977e18fd50b274e9b5d9014525a", + "url": "tcp://5576458aef205977e18fd50b274e9b5d9014525a@95.179.155.35:26656" + } + ] + } +} +``` + +### Blockchain + +Get block headers. Returned in descending order. May be limited in quantity. + +#### Parameters + +- `minHeight (integer)`: The lowest block to be returned in the response +- `maxHeight (integer)`: The highest block to be returned in the response + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/blockchain + +curl http://127.0.0.1:26657/blockchain?minHeight=1&maxHeight=2 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"blockchain\",\"params\":{\"minHeight\":\"1\", \"maxHeight\":\"2\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "last_height": "1276718", + "block_metas": [ + { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block_size": 1000000, + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": 
"21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "num_txs": "54" + } + ] + } +} +``` + +### Block + +Get block at a specified height. + +#### Parameters + +- `height (integer)`: height of the requested block. If no height is specified the latest block will be used. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block + +curl http://127.0.0.1:26657/block?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": 
"D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "data": [ + "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + ], + "evidence": [ + { + "type": "string", + "height": 0, + "time": 0, + "total_voting_power": 0, + "validator": { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + }, + "voting_power": 0, + "address": "string" + } + } + ], + "last_commit": { + "height": 0, + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "type": 2, + "height": "1262085", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "timestamp": "2019-08-01T11:39:38.867269833Z", + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "validator_index": 0, + "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + } + ] + } + } + } +} +``` + +### BlockByHash + +#### Parameters + +- `hash (string)`: Hash of the block to query for. 
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block_by_hash?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_by_hash\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "data": [ + "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + ], + 
"evidence": [ + { + "type": "string", + "height": 0, + "time": 0, + "total_voting_power": 0, + "validator": { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + }, + "voting_power": 0, + "address": "string" + } + } + ], + "last_commit": { + "height": 0, + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "type": 2, + "height": "1262085", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "timestamp": "2019-08-01T11:39:38.867269833Z", + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "validator_index": 0, + "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + } + ] + } + } + } +} +``` + +### BlockResults + +### Parameters + +- `height (integer)`: Height of the block which contains the results. 
If no height is specified, the latest block height will be used + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block_results + + +curl http://127.0.0.1:26657/block_results?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_results\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "height": "12", + "total_gas_used": "100", + "txs_results": [ + { + "code": "0", + "data": "", + "log": "not enough gas", + "info": "", + "gas_wanted": "100", + "gas_used": "100", + "events": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "codespace": "ibc" + } + ], + "begin_block_events": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "end_block": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "validator_updates": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + }, + "power": "300" + } + ], + "consensus_params_updates": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + } + } +} +``` + +### Commit + +#### Parameters + +- `height (integer)`: Height of the block the requested commit pertains to. If no height is set the latest commit will be returned. 
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/commit + + +curl http://127.0.0.1:26657/commit?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"commit\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "signed_header": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "commit": { + "height": "1311801", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "timestamp": "2019-04-22T17:01:58.376629719Z", + "signature": "14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw==" + } + ] + } + }, + "canonical": true + } +} +``` + +### Validators + +#### 
Parameters + +- `height (integer)`: Block height at which the validators were present. If no height is set the validators of the latest block will be returned. +- `page (integer)`: Page number (1-based). +- `per_page (integer)`: Number of validators returned per page (max: 100). + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/validators +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"validators\",\"params\":{\"height\":\"1\", \"page\":\"1\", \"per_page\":\"20\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "block_height": "55", + "validators": [ + { + "address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + }, + "voting_power": "239727", + "proposer_priority": "-11896414" + } + ], + "count": "1", + "total": "25" + } +} +``` + +### Genesis + +Get Genesis of the chain. If the response is large, this operation +will return an error: use `genesis_chunked` instead.
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/genesis +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "genesis": { + "genesis_time": "2019-04-22T17:00:00Z", + "chain_id": "cosmoshub-2", + "initial_height": "2", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + }, + "validators": [ + { + "address": "B00A6323737F321EB0B8D59C6FD497A14B60938A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + }, + "power": "9328525", + "name": "Certus One" + } + ], + "app_hash": "", + "app_state": {} + } + } +} +``` + +### GenesisChunked + +Get the genesis document in chunks to support easily transferring larger documents. + +#### Parameters + +- `chunk` (integer): the index number of the chunk that you wish to + fetch. These IDs are 0-indexed. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/genesis_chunked?chunk=0 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis_chunked\",\"params\":{\"chunk\":0}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "chunk": 0, + "total": 10, + "data": "dGVuZGVybWludAo=" + } +} +``` + +### ConsensusParams + +Get the consensus parameters. + +#### Parameters + +- `height (integer)`: Block height for which the consensus params should be fetched.
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/consensus_params +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"consensus_params\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "block_height": "1", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + } + } +} +``` + +### UnconfirmedTxs + +Get a list of unconfirmed transactions. + +#### Parameters + +- `limit (integer)`: The amount of txs to respond with. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/unconfirmed_txs +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"unconfirmed_txs\", \"params\":{\"limit\":\"20\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "n_txs": "82", + "total": "82", + "total_bytes": "19974", + "txs": [ + "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + ] + } +} +``` + +### NumUnconfirmedTxs + +Get data about unconfirmed transactions.
+ +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/num_unconfirmed_txs +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"num_unconfirmed_txs\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "n_txs": "31", + "total": "82", + "total_bytes": "19974" + } +} +``` + +### Tx + +#### Parameters + +- `hash (string)`: The hash of the transaction. +- `prove (bool)`: If the response should include proof the transaction was included in a block. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/tx?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"tx\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "hash": "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED", + "height": "1000", + "index": 0, + "tx_result": { + "log": "[{\"msg_index\":\"0\",\"success\":true,\"log\":\"\"}]", + "gas_wanted": "200000", + "gas_used": "28596", + "tags": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + }, + "tx": "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + } +} +``` + +## Transaction Routes + +### BroadCastTxSync + +Returns with the response from CheckTx. Does not wait for DeliverTx result.
+ +#### Parameters + +- `tx (string)`: The transaction encoded + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/broadcast_tx_sync?tx=encoded_tx +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_sync\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "code": "0", + "data": "", + "log": "", + "codespace": "ibc", + "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" + }, + "error": "" +} +``` + +### BroadCastTxAsync + +Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results. + +#### Parameters + +- `tx (string)`: The transaction encoded + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/broadcast_tx_async?tx=encoded_tx +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_async\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "code": "0", + "data": "", + "log": "", + "codespace": "ibc", + "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" + }, + "error": "" +} +``` + +### CheckTx + +Checks the transaction without executing it. 
+ +#### Parameters + +- `tx (string)`: String of the encoded transaction + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/check_tx?tx=encoded_tx +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"check_tx\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "error": "", + "result": { + "code": "0", + "data": "", + "log": "", + "info": "", + "gas_wanted": "1", + "gas_used": "0", + "events": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "codespace": "bank" + } +} +``` + +## ABCI Routes + +### ABCIInfo + +Get some info about the application. + +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/abci_info +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_info\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "response": { + "data": "{\"size\":0}", + "version": "0.16.1", + "app_version": "1314126" + } + } +} +``` + +### ABCIQuery + +Query the application for some information. + +#### Parameters + +- `path (string)`: Path to the data. This is defined by the application. +- `data (string)`: The data requested +- `height (integer)`: Height at which the data is being requested for. 
+- `prove (bool)`: Include proofs of the transactions inclusion in the block + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/abci_query?path="a/b/c"=IHAVENOIDEA&height=1&prove=true +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_query\",\"params\":{\"path\":\"a/b/c\", \"height\":\"1\", \"bool\":\"true\"}}" +``` + +#### Response + +```json +{ + "error": "", + "result": { + "response": { + "log": "exists", + "height": "0", + "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", + "value": "61626364", + "key": "61626364", + "index": "-1", + "code": "0" + } + }, + "id": 0, + "jsonrpc": "2.0" +} +``` + +## Evidence Routes + +### BroadcastEvidence + +Broadcast evidence of the misbehavior. + +#### Parameters + +- `evidence (string)`: + +#### Request + +##### HTTP + +```sh +curl http://localhost:26657/broadcast_evidence?evidence=JSON_EVIDENCE_encoded +``` + +#### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_evidence\",\"params\":{\"evidence\":\"JSON_EVIDENCE_encoded\"}}" +``` + +#### Response + +```json +{ + "error": "", + "result": "", + "id": 0, + "jsonrpc": "2.0" +} +``` diff --git a/state/errors.go b/state/errors.go index 6e0cdfa479..38c581f7db 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,6 +1,9 @@ package state -import "fmt" +import ( + "errors" + "fmt" +) type ( ErrInvalidBlock error @@ -99,3 +102,5 @@ func (e ErrNoConsensusParamsForHeight) Error() string { func (e ErrNoABCIResponsesForHeight) Error() string { return fmt.Sprintf("could not find results for height #%d", e.Height) } + +var ErrABCIResponsesNotPersisted = errors.New("node is not persisting abci responses") diff --git a/state/execution_test.go b/state/execution_test.go index 4b273580e3..e64a4335bb 100644 --- a/state/execution_test.go 
+++ b/state/execution_test.go @@ -52,7 +52,9 @@ func TestApplyBlock(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}) @@ -78,7 +80,9 @@ func TestBeginBlockValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} @@ -147,7 +151,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) privVal := privVals[state.Validators.Validators[0].Address.String()] @@ -242,7 +248,9 @@ func TestProcessProposal(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, height) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}) @@ -322,7 +330,9 @@ func makeBlockExec(t *testing.T, testName string, block *types.Block, stateDB db }() return sm.NewBlockExecutor( - sm.NewStore(stateDB), + sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }), log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, @@ -487,7 +497,9 @@ func 
TestEndBlockValidatorUpdates(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, @@ -558,7 +570,9 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), diff --git a/state/export_test.go b/state/export_test.go index 56c3d764c5..f6ca0e9cd2 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -43,6 +43,6 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. 
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { - stateStore := dbStore{db} + stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}} return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index c709fd8b62..588de1fe90 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -122,7 +122,9 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida }) stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) if err := stateStore.Save(s); err != nil { panic(err) } diff --git a/state/indexer/block.go b/state/indexer/block.go index b12c1f6713..9c4bb0e54a 100644 --- a/state/indexer/block.go +++ b/state/indexer/block.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" ) -//go:generate mockery --case underscore --name BlockIndexer +//go:generate ../../scripts/mockery_generate.sh BlockIndexer // BlockIndexer defines an interface contract for indexing block events. type BlockIndexer interface { diff --git a/state/indexer/mocks/block_indexer.go b/state/indexer/mocks/block_indexer.go index ca14025f1a..2c0f0ecb07 100644 --- a/state/indexer/mocks/block_indexer.go +++ b/state/indexer/mocks/block_indexer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.12.3. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -75,13 +75,13 @@ func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, er return r0, r1 } -type NewBlockIndexerT interface { +type mockConstructorTestingTNewBlockIndexer interface { mock.TestingT Cleanup(func()) } // NewBlockIndexer creates a new instance of BlockIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockIndexer(t NewBlockIndexerT) *BlockIndexer { +func NewBlockIndexer(t mockConstructorTestingTNewBlockIndexer) *BlockIndexer { mock := &BlockIndexer{} mock.Mock.Test(t) diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index b40ba395ef..e0a29b7b19 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -5,7 +5,6 @@ import ( "database/sql" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -20,6 +19,7 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" // Register the Postgres database driver. @@ -197,6 +197,55 @@ func TestIndexing(t *testing.T) { err = indexer.IndexTxEvents([]*abci.TxResult{txResult}) require.NoError(t, err) }) + + t.Run("IndexerService", func(t *testing.T) { + indexer := &EventSink{store: testDB(), chainID: chainID} + + // event bus + eventBus := types.NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + + service := txindex.NewIndexerService(indexer.TxIndexer(), indexer.BlockIndexer(), eventBus, true) + err = service.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := service.Stop(); err != nil { + t.Error(err) + } + }) + + // publish block with txs + err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + Header: types.Header{Height: 1}, + NumTxs: int64(2), + }) + require.NoError(t, err) + txResult1 := &abci.TxResult{ + Height: 1, + Index: uint32(0), + Tx: types.Tx("foo"), + Result: abci.ResponseDeliverTx{Code: 0}, + } + err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) + require.NoError(t, err) + txResult2 := &abci.TxResult{ + Height: 1, + Index: uint32(1), + Tx: types.Tx("bar"), + Result: abci.ResponseDeliverTx{Code: 1}, + } + err = 
eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) + require.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + require.True(t, service.IsRunning()) + }) } func TestStop(t *testing.T) { @@ -227,7 +276,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { // readSchema loads the indexing database schema file func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index 70ee538397..4493a6e3f2 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index 6f41e120fe..7279d36f71 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/state/mocks/store.go b/state/mocks/store.go index 4308d4ead8..8cbe490800 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -153,6 +153,29 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { return r0, r1 } +// LoadLastABCIResponse provides a mock function with given fields: _a0 +func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses, error) { + ret := _m.Called(_a0) + + var r0 *tendermintstate.ABCIResponses + if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*tendermintstate.ABCIResponses) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // LoadValidators provides a mock function with given fields: _a0 func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) { ret := _m.Called(_a0) diff --git a/state/rollback_test.go b/state/rollback_test.go index 3428b0ef8a..4cd2038100 100644 --- a/state/rollback_test.go +++ b/state/rollback_test.go @@ -82,7 +82,10 @@ func TestRollback(t *testing.T) { } func TestRollbackNoState(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(dbm.NewMemDB(), + state.StoreOptions{ + DiscardABCIResponses: false, + }) blockStore := &mocks.BlockStore{} _, _, err := state.Rollback(blockStore, stateStore) @@ -115,7 +118,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) { } func setupStateStore(t *testing.T, height int64) state.Store { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false}) valSet, _ := types.RandValidatorSet(5, 10) params := types.DefaultConsensusParams() diff --git a/state/services.go b/state/services.go index 2cc376e85f..2b6c16fed2 100644 --- a/state/services.go +++ b/state/services.go @@ -12,7 +12,7 @@ import ( //------------------------------------------------------ // blockstore -//go:generate mockery --case underscore --name 
BlockStore +//go:generate ../scripts/mockery_generate.sh BlockStore // BlockStore defines the interface used by the ConsensusState. type BlockStore interface { @@ -38,7 +38,7 @@ type BlockStore interface { //----------------------------------------------------------------------------- // evidence pool -//go:generate mockery --case underscore --name EvidencePool +//go:generate ../scripts/mockery_generate.sh EvidencePool // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { diff --git a/state/state.go b/state/state.go index eb686c0324..b3e7f3d389 100644 --- a/state/state.go +++ b/state/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -16,7 +16,7 @@ import ( "github.com/tendermint/tendermint/version" ) -// database keys +// database key var ( stateKey = []byte("stateKey") ) @@ -81,7 +81,6 @@ type State struct { // Copy makes a copy of the State for mutating. func (state State) Copy() State { - return State{ Version: state.Version, ChainID: state.ChainID, @@ -238,7 +237,6 @@ func (state State) MakeBlock( evidence []types.Evidence, proposerAddress []byte, ) (*types.Block, *types.PartSet) { - // Build base block with block data. block := types.MakeBlock(height, data, commit, evidence) @@ -302,7 +300,7 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) + genDocJSON, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) } diff --git a/state/state_test.go b/state/state_test.go index b4e33626ac..cec4dfd9ac 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -29,7 +29,9 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) require.NoError(t, err) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") @@ -76,7 +78,9 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) assert := assert.New(t) state.LastBlockHeight++ @@ -95,7 +99,9 @@ func TestStateSaveLoad(t *testing.T) { func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) assert := assert.New(t) state.LastBlockHeight++ @@ -128,7 +134,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { defer tearDown(t) assert := assert.New(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) cases := [...]struct { // Height is implied to equal index+2, @@ -216,7 +224,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { defer 
tearDown(t) assert := assert.New(t) - statestore := sm.NewStore(stateDB) + statestore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Can't load anything for height 0. _, err := statestore.LoadValidators(0) @@ -249,7 +259,9 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} @@ -901,7 +913,9 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { const valSetSize = 2 tearDown, stateDB, state := setupTestCase(t) t.Cleanup(func() { tearDown(t) }) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) err := stateStore.Save(state) @@ -926,7 +940,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) @@ -990,7 +1006,9 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) // Change vals at these heights. 
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} diff --git a/state/store.go b/state/store.go index b46fe56b23..7c80f358f9 100644 --- a/state/store.go +++ b/state/store.go @@ -39,7 +39,11 @@ func calcABCIResponsesKey(height int64) []byte { //---------------------- -//go:generate mockery --case underscore --name Store +var ( + lastABCIResponseKey = []byte("lastABCIResponseKey") +) + +//go:generate ../scripts/mockery_generate.sh Store // Store defines the state store interface // @@ -58,6 +62,8 @@ type Store interface { LoadValidators(int64) (*types.ValidatorSet, error) // LoadABCIResponses loads the abciResponse for a given height LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) + // LoadLastABCIResponse loads the last abciResponse for a given height + LoadLastABCIResponse(int64) (*tmstate.ABCIResponses, error) // LoadConsensusParams loads the consensus params for a given height LoadConsensusParams(int64) (tmproto.ConsensusParams, error) // Save overwrites the previous state with the updated one @@ -75,13 +81,24 @@ type Store interface { // dbStore wraps a db (github.com/tendermint/tm-db) type dbStore struct { db dbm.DB + + StoreOptions +} + +type StoreOptions struct { + + // DiscardABCIResponses determines whether or not the store + // retains all ABCIResponses. If DiscardABCiResponses is enabled, + // the store will maintain only the response object from the latest + // height. + DiscardABCIResponses bool } var _ Store = (*dbStore)(nil) // NewStore creates the dbStore of the state pkg. -func NewStore(db dbm.DB) Store { - return dbStore{db} +func NewStore(db dbm.DB, options StoreOptions) Store { + return dbStore{db, options} } // LoadStateFromDBOrGenesisFile loads the most recent state from the database, @@ -358,12 +375,13 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { } // LoadABCIResponses loads the ABCIResponses for the given height from the -// database. If not found, ErrNoABCIResponsesForHeight is returned. 
-// -// This is useful for recovering from crashes where we called app.Commit and -// before we called s.Save(). It can also be used to produce Merkle proofs of -// the result of txs. +// database. If the node has DiscardABCIResponses set to true, ErrABCIResponsesNotPersisted +// is persisted. If not found, ErrNoABCIResponsesForHeight is returned. func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) { + if store.DiscardABCIResponses { + return nil, ErrABCIResponsesNotPersisted + } + buf, err := store.db.Get(calcABCIResponsesKey(height)) if err != nil { return nil, err @@ -385,12 +403,43 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er return abciResponses, nil } +// LoadLastABCIResponses loads the ABCIResponses from the most recent height. +// The height parameter is used to ensure that the response corresponds to the latest height. +// If not, an error is returned. +// +// This method is used for recovering in the case that we called the Commit ABCI +// method on the application but crashed before persisting the results. +func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses, error) { + bz, err := store.db.Get(lastABCIResponseKey) + if err != nil { + return nil, err + } + + if len(bz) == 0 { + return nil, errors.New("no last ABCI response has been persisted") + } + + abciResponse := new(tmstate.ABCIResponsesInfo) + err = abciResponse.Unmarshal(bz) + if err != nil { + tmos.Exit(fmt.Sprintf(`LoadLastABCIResponses: Data has been corrupted or its spec has + changed: %v\n`, err)) + } + + // Here we validate the result by comparing its height to the expected height. + if height != abciResponse.GetHeight() { + return nil, errors.New("expected height %d but last stored abci responses was at height %d") + } + + return abciResponse.AbciResponses, nil +} + // SaveABCIResponses persists the ABCIResponses to the database. 
// This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce // Merkle proofs. // -// Exposed for testing. +// CONTRACT: height must be monotonically increasing every time this is called. func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { var dtxs []*abci.ResponseDeliverTx // strip nil values, @@ -401,17 +450,30 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI } abciResponses.DeliverTxs = dtxs - bz, err := abciResponses.Marshal() - if err != nil { - return err + // If the flag is false then we save the ABCIResponse. This can be used for the /BlockResults + // query or to reindex an event using the command line. + if !store.DiscardABCIResponses { + bz, err := abciResponses.Marshal() + if err != nil { + return err + } + if err := store.db.Set(calcABCIResponsesKey(height), bz); err != nil { + return err + } } - err = store.db.SetSync(calcABCIResponsesKey(height), bz) + // We always save the last ABCI response for crash recovery. + // This overwrites the previous saved ABCI Response. + response := &tmstate.ABCIResponsesInfo{ + AbciResponses: abciResponses, + Height: height, + } + bz, err := response.Marshal() if err != nil { return err } - return nil + return store.db.SetSync(lastABCIResponseKey, bz) } //----------------------------------------------------------------------------- @@ -479,7 +541,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) + %v\n`, err)) } // TODO: ensure that buf is completely read. 
diff --git a/state/store_test.go b/state/store_test.go index e439215190..b91eeb578e 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -23,7 +23,9 @@ import ( func TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) @@ -54,7 +56,9 @@ func BenchmarkLoadValidators(b *testing.B) { dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) require.NoError(b, err) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { b.Fatal(err) @@ -107,7 +111,9 @@ func TestPruneStates(t *testing.T) { tc := tc t.Run(name, func(t *testing.T) { db := dbm.NewMemDB() - stateStore := sm.NewStore(db) + stateStore := sm.NewStore(db, sm.StoreOptions{ + DiscardABCIResponses: false, + }) pk := ed25519.GenPrivKey().PubKey() // Generate a bunch of state data. Validators change for heights ending with 3, and @@ -229,3 +235,72 @@ func sliceToMap(s []int64) map[int64]bool { } return m } + +func TestLastABCIResponses(t *testing.T) { + // create an empty state store. + t.Run("Not persisting responses", func(t *testing.T) { + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + responses, err := stateStore.LoadABCIResponses(1) + require.Error(t, err) + require.Nil(t, responses) + // stub the abciresponses. + response1 := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + DeliverTxs: []*abci.ResponseDeliverTx{ + {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, + }, + EndBlock: &abci.ResponseEndBlock{}, + } + // create new db and state store and set discard abciresponses to false. 
+ stateDB = dbm.NewMemDB() + stateStore = sm.NewStore(stateDB, sm.StoreOptions{DiscardABCIResponses: false}) + height := int64(10) + // save the last abci response. + err = stateStore.SaveABCIResponses(height, response1) + require.NoError(t, err) + // search for the last abciresponse and check if it has saved. + lastResponse, err := stateStore.LoadLastABCIResponse(height) + require.NoError(t, err) + // check to see if the saved response height is the same as the loaded height. + assert.Equal(t, lastResponse, response1) + // use an incorret height to make sure the state store errors. + _, err = stateStore.LoadLastABCIResponse(height + 1) + assert.Error(t, err) + // check if the abci response didnt save in the abciresponses. + responses, err = stateStore.LoadABCIResponses(height) + require.NoError(t, err, responses) + require.Equal(t, response1, responses) + }) + + t.Run("persisting responses", func(t *testing.T) { + stateDB := dbm.NewMemDB() + height := int64(10) + // stub the second abciresponse. + response2 := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + DeliverTxs: []*abci.ResponseDeliverTx{ + {Code: 44, Data: []byte("Hello again"), Log: "????"}, + }, + EndBlock: &abci.ResponseEndBlock{}, + } + // create a new statestore with the responses on. + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: true, + }) + // save an additional response. + err := stateStore.SaveABCIResponses(height+1, response2) + require.NoError(t, err) + // check to see if the response saved by calling the last response. + lastResponse2, err := stateStore.LoadLastABCIResponse(height + 1) + require.NoError(t, err) + // check to see if the saved response height is the same as the loaded height. + assert.Equal(t, response2, lastResponse2) + // should error as we are no longer saving the response. 
+ _, err = stateStore.LoadABCIResponses(height + 1) + assert.Equal(t, sm.ErrABCIResponsesNotPersisted, err) + }) + +} diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 7936d94c75..d5ab761ac6 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -33,7 +33,9 @@ func TestTxFilter(t *testing.T) { for i, tc := range testCases { stateDB, err := dbm.NewDB("state", "memdb", os.TempDir()) require.NoError(t, err) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) require.NoError(t, err) diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index 03474f43d5..5fa50ac2fd 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -10,7 +10,7 @@ import ( // XXX/TODO: These types should be moved to the indexer package. -//go:generate mockery --case underscore --name TxIndexer +//go:generate ../../scripts/mockery_generate.sh TxIndexer // TxIndexer interface defines methods to index and search transactions. type TxIndexer interface { diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 828a63c8bb..0e8fbb9c91 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -3,7 +3,6 @@ package txindex import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" @@ -20,9 +19,10 @@ const ( type IndexerService struct { service.BaseService - txIdxr TxIndexer - blockIdxr indexer.BlockIndexer - eventBus *types.EventBus + txIdxr TxIndexer + blockIdxr indexer.BlockIndexer + eventBus *types.EventBus + terminateOnError bool } // NewIndexerService returns a new service instance. 
@@ -30,9 +30,10 @@ func NewIndexerService( txIdxr TxIndexer, blockIdxr indexer.BlockIndexer, eventBus *types.EventBus, + terminateOnError bool, ) *IndexerService { - is := &IndexerService{txIdxr: txIdxr, blockIdxr: blockIdxr, eventBus: eventBus} + is := &IndexerService{txIdxr: txIdxr, blockIdxr: blockIdxr, eventBus: eventBus, terminateOnError: terminateOnError} is.BaseService = *service.NewBaseService(nil, "IndexerService", is) return is } @@ -74,24 +75,38 @@ func (is *IndexerService) OnStart() error { "index", txResult.Index, "err", err, ) + + if is.terminateOnError { + if err := is.Stop(); err != nil { + is.Logger.Error("failed to stop", "err", err) + } + return + } } } if err := is.blockIdxr.Index(eventDataHeader); err != nil { is.Logger.Error("failed to index block", "height", height, "err", err) + if is.terminateOnError { + if err := is.Stop(); err != nil { + is.Logger.Error("failed to stop", "err", err) + } + return + } } else { - is.Logger.Info("indexed block", "height", height) - } - - batch.Ops, err = DeduplicateBatch(batch.Ops, is.txIdxr) - if err != nil { - is.Logger.Error("deduplicate batch", "height", height) + is.Logger.Info("indexed block exents", "height", height) } if err = is.txIdxr.AddBatch(batch); err != nil { is.Logger.Error("failed to index block txs", "height", height, "err", err) + if is.terminateOnError { + if err := is.Stop(); err != nil { + is.Logger.Error("failed to stop", "err", err) + } + return + } } else { - is.Logger.Debug("indexed block txs", "height", height, "num_txs", eventDataHeader.NumTxs) + is.Logger.Debug("indexed transactions", "height", height, "num_txs", eventDataHeader.NumTxs) } } }() @@ -104,45 +119,3 @@ func (is *IndexerService) OnStop() { _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) } } - -// DeduplicateBatch consider the case of duplicate txs. -// if the current one under investigation is NOT OK, then we need to check -// whether there's a previously indexed tx. 
-// SKIP the current tx if the previously indexed record is found and successful. -func DeduplicateBatch(ops []*abci.TxResult, txIdxr TxIndexer) ([]*abci.TxResult, error) { - result := make([]*abci.TxResult, 0, len(ops)) - - // keep track of successful txs in this block in order to suppress latter ones being indexed. - var successfulTxsInThisBlock = make(map[string]struct{}) - - for _, txResult := range ops { - hash := types.Tx(txResult.Tx).Hash() - - if txResult.Result.IsOK() { - successfulTxsInThisBlock[string(hash)] = struct{}{} - } else { - // if it already appeared in current block and was successful, skip. - if _, found := successfulTxsInThisBlock[string(hash)]; found { - continue - } - - // check if this tx hash is already indexed - old, err := txIdxr.Get(hash) - - // if db op errored - // Not found is not an error - if err != nil { - return nil, err - } - - // if it's already indexed in an older block and was successful, skip. - if old != nil && old.Result.Code == abci.CodeTypeOK { - continue - } - } - - result = append(result, txResult) - } - - return result, nil -} diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index f7070f119f..8c7dca2ac8 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -32,7 +32,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { txIndexer := kv.NewTxIndex(store) blockIndexer := blockidxkv.New(db.NewPrefixDB(store, []byte("block_events"))) - service := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus) + service := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) service.SetLogger(log.TestingLogger()) err = service.Start() require.NoError(t, err) @@ -79,164 +79,3 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { require.NoError(t, err) require.Equal(t, txResult2, res) } - -func TestTxIndexDuplicatePreviouslySuccessful(t *testing.T) { - var mockTx = types.Tx("MOCK_TX_HASH") - - testCases := []struct { - 
name string - tx1 abci.TxResult - tx2 abci.TxResult - expSkip bool // do we expect the second tx to be skipped by tx indexer - }{ - {"skip, previously successful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK, - }, - }, - abci.TxResult{ - Height: 2, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - true, - }, - {"not skip, previously unsuccessful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - abci.TxResult{ - Height: 2, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - false, - }, - {"not skip, both successful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK, - }, - }, - abci.TxResult{ - Height: 2, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK, - }, - }, - false, - }, - {"not skip, both unsuccessful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - abci.TxResult{ - Height: 2, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - false, - }, - {"skip, same block, previously successful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK, - }, - }, - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - true, - }, - {"not skip, same block, previously unsuccessful", - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK + 1, - }, - }, - abci.TxResult{ - Height: 1, - Index: 0, - Tx: mockTx, - Result: abci.ResponseDeliverTx{ - Code: abci.CodeTypeOK, - }, - }, - false, - }, - } - - for _, tc := 
range testCases { - t.Run(tc.name, func(t *testing.T) { - indexer := kv.NewTxIndex(db.NewMemDB()) - - if tc.tx1.Height != tc.tx2.Height { - // index the first tx - err := indexer.AddBatch(&txindex.Batch{ - Ops: []*abci.TxResult{&tc.tx1}, - }) - require.NoError(t, err) - - // check if the second one should be skipped. - ops, err := txindex.DeduplicateBatch([]*abci.TxResult{&tc.tx2}, indexer) - require.NoError(t, err) - - if tc.expSkip { - require.Empty(t, ops) - } else { - require.Equal(t, []*abci.TxResult{&tc.tx2}, ops) - } - } else { - // same block - ops := []*abci.TxResult{&tc.tx1, &tc.tx2} - ops, err := txindex.DeduplicateBatch(ops, indexer) - require.NoError(t, err) - if tc.expSkip { - // the second one is skipped - require.Equal(t, []*abci.TxResult{&tc.tx1}, ops) - } else { - require.Equal(t, []*abci.TxResult{&tc.tx1, &tc.tx2}, ops) - } - } - }) - } -} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 22d9c60239..1d8e3a8eaf 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -82,6 +82,11 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { // that indexed from the tx's events is a composite of the event type and the // respective attribute's key delimited by a "." (eg. "account.number"). // Any event with an empty type is not indexed. +// +// If a transaction is indexed with the same hash as a previous transaction, it will +// be overwritten unless the tx result was NOT OK and the prior result was OK i.e. +// more transactions that successfully executed overwrite transactions that failed +// or successful yet older transactions. 
func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Close() @@ -128,6 +133,19 @@ func (txi *TxIndex) indexResult(batch dbm.Batch, result *abci.TxResult) error { return err } + if !result.Result.IsOK() { + oldResult, err := txi.Get(hash) + if err != nil { + return err + } + + // if the new transaction failed and it's already indexed in an older block and was successful + // we skip it as we want users to get the older successful transaction when they query. + if oldResult != nil && oldResult.Result.Code == abci.CodeTypeOK { + return nil + } + } + // index tx by events err = txi.indexEvents(result, hash, batch) if err != nil { diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index fdfe550f39..85491aad53 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rand" "fmt" - "io/ioutil" + "os" "testing" dbm "github.com/tendermint/tm-db" @@ -15,7 +15,7 @@ import ( ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") + dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") if err != nil { b.Errorf("failed to create temporary directory: %s", err) } diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 03925f56d0..36985ffd12 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -3,7 +3,6 @@ package kv import ( "context" "fmt" - "io/ioutil" "os" "testing" @@ -288,6 +287,103 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } } +func TestTxIndexDuplicatePreviouslySuccessful(t *testing.T) { + mockTx := types.Tx("MOCK_TX_HASH") + + testCases := []struct { + name string + tx1 *abci.TxResult + tx2 *abci.TxResult + expOverwrite bool // do we expect the second tx to overwrite the first tx + }{ + { + "don't overwrite as a non-zero code was returned and the previous tx was successful", + &abci.TxResult{ + 
Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK, + }, + }, + &abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK + 1, + }, + }, + false, + }, + { + "overwrite as the previous tx was also unsuccessful", + &abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK + 1, + }, + }, + &abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK + 1, + }, + }, + true, + }, + { + "overwrite as the most recent tx was successful", + &abci.TxResult{ + Height: 1, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK, + }, + }, + &abci.TxResult{ + Height: 2, + Index: 0, + Tx: mockTx, + Result: abci.ResponseDeliverTx{ + Code: abci.CodeTypeOK, + }, + }, + true, + }, + } + + hash := mockTx.Hash() + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + // index the first tx + err := indexer.Index(tc.tx1) + require.NoError(t, err) + + // index the same tx with different results + err = indexer.Index(tc.tx2) + require.NoError(t, err) + + res, err := indexer.Get(hash) + require.NoError(t, err) + + if tc.expOverwrite { + require.Equal(t, tc.tx2, res) + } else { + require.Equal(t, tc.tx1, res) + } + }) + } +} + func TestTxSearchMultipleTxs(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) @@ -358,7 +454,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := ioutil.TempDir("", "tx_index_db") + dir, err := os.MkdirTemp("", "tx_index_db") require.NoError(b, err) defer os.RemoveAll(dir) diff --git a/state/txindex/mocks/tx_indexer.go b/state/txindex/mocks/tx_indexer.go index 3d02009804..93d0eb9c25 100644 --- a/state/txindex/mocks/tx_indexer.go +++ b/state/txindex/mocks/tx_indexer.go @@ -1,4 +1,4 @@ -// Code 
generated by mockery v2.12.3. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -92,13 +92,13 @@ func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxRes return r0, r1 } -type NewTxIndexerT interface { +type mockConstructorTestingTNewTxIndexer interface { mock.TestingT Cleanup(func()) } // NewTxIndexer creates a new instance of TxIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTxIndexer(t NewTxIndexerT) *TxIndexer { +func NewTxIndexer(t mockConstructorTestingTNewTxIndexer) *TxIndexer { mock := &TxIndexer{} mock.Mock.Test(t) diff --git a/state/validation_test.go b/state/validation_test.go index 5041b376fa..2635196c6e 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -29,7 +29,9 @@ func TestValidateBlockHeader(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(3, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), @@ -112,7 +114,9 @@ func TestValidateBlockCommit(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) blockExec := sm.NewBlockExecutor( stateStore, log.TestingLogger(), @@ -239,7 +243,9 @@ func TestValidateBlockEvidence(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(4, 1) - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evpool := &mocks.EvidencePool{} diff --git a/statesync/chunks.go b/statesync/chunks.go index 
028c863b9a..ec95419fe0 100644 --- a/statesync/chunks.go +++ b/statesync/chunks.go @@ -3,7 +3,6 @@ package statesync import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -42,7 +41,7 @@ type chunkQueue struct { // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { - dir, err := ioutil.TempDir(tempDir, "tm-statesync") + dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } @@ -84,7 +83,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := ioutil.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0o600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -209,7 +208,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { if !ok { return nil, nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } diff --git a/statesync/chunks_test.go b/statesync/chunks_test.go index 2b9a5d7513..3bc42e6aad 100644 --- a/statesync/chunks_test.go +++ b/statesync/chunks_test.go @@ -1,7 +1,6 @@ package statesync import ( - "io/ioutil" "os" "testing" @@ -36,20 +35,20 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := ioutil.TempDir("", "newchunkqueue") + dir, err := os.MkdirTemp("", "newchunkqueue") require.NoError(t, err) defer os.RemoveAll(dir) queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 1) err = queue.Close() require.NoError(t, err) - files, err = 
ioutil.ReadDir(dir) + files, err = os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 0) } diff --git a/statesync/messages.go b/statesync/messages.go index b07227bbf7..8eba205101 100644 --- a/statesync/messages.go +++ b/statesync/messages.go @@ -16,49 +16,6 @@ const ( chunkMsgSize = int(16e6) ) -// mustEncodeMsg encodes a Protobuf message, panicing on error. -func mustEncodeMsg(pb proto.Message) []byte { - msg := ssproto.Message{} - switch pb := pb.(type) { - case *ssproto.ChunkRequest: - msg.Sum = &ssproto.Message_ChunkRequest{ChunkRequest: pb} - case *ssproto.ChunkResponse: - msg.Sum = &ssproto.Message_ChunkResponse{ChunkResponse: pb} - case *ssproto.SnapshotsRequest: - msg.Sum = &ssproto.Message_SnapshotsRequest{SnapshotsRequest: pb} - case *ssproto.SnapshotsResponse: - msg.Sum = &ssproto.Message_SnapshotsResponse{SnapshotsResponse: pb} - default: - panic(fmt.Errorf("unknown message type %T", pb)) - } - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -// decodeMsg decodes a Protobuf message. -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &ssproto.Message{} - err := proto.Unmarshal(bz, pb) - if err != nil { - return nil, err - } - switch msg := pb.Sum.(type) { - case *ssproto.Message_ChunkRequest: - return msg.ChunkRequest, nil - case *ssproto.Message_ChunkResponse: - return msg.ChunkResponse, nil - case *ssproto.Message_SnapshotsRequest: - return msg.SnapshotsRequest, nil - case *ssproto.Message_SnapshotsResponse: - return msg.SnapshotsResponse, nil - default: - return nil, fmt.Errorf("unknown message type %T", msg) - } -} - // validateMsg validates a message. 
func validateMsg(pb proto.Message) error { if pb == nil { diff --git a/statesync/messages_test.go b/statesync/messages_test.go index 2a05f8d79e..c7497b6de1 100644 --- a/statesync/messages_test.go +++ b/statesync/messages_test.go @@ -7,6 +7,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -99,8 +100,9 @@ func TestStateSyncVectors(t *testing.T) { for _, tc := range testCases { tc := tc - - bz := mustEncodeMsg(tc.msg) + w := tc.msg.(p2p.Wrapper).Wrap() + bz, err := proto.Marshal(w) + require.NoError(t, err) require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go index 7ff78561f5..f52b9e33d2 100644 --- a/statesync/mocks/state_provider.go +++ b/statesync/mocks/state_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks diff --git a/statesync/reactor.go b/statesync/reactor.go index 8434b6adf0..835b6313b2 100644 --- a/statesync/reactor.go +++ b/statesync/reactor.go @@ -5,6 +5,8 @@ import ( "sort" "time" + "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/libs/sync" @@ -66,12 +68,14 @@ func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: snapshotMsgSize, + MessageType: &ssproto.Message{}, }, { ID: ChunkChannel, Priority: 3, SendQueueCapacity: 10, RecvMessageCapacity: chunkMsgSize, + MessageType: &ssproto.Message{}, }, } } @@ -100,27 +104,21 @@ func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { } // Receive implements p2p.Reactor. 
-func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { +func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) { if !r.IsRunning() { return } - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - err = validateMsg(msg) + err := validateMsg(e.Message) if err != nil { - r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err) - r.Switch.StopPeerForError(src, err) + r.Logger.Error("Invalid message", "peer", e.Src, "msg", e.Message, "err", err) + r.Switch.StopPeerForError(e.Src, err) return } - switch chID { + switch e.ChannelID { case SnapshotChannel: - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *ssproto.SnapshotsRequest: snapshots, err := r.recentSnapshots(recentSnapshots) if err != nil { @@ -129,14 +127,17 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } for _, snapshot := range snapshots { r.Logger.Debug("Advertising snapshot", "height", snapshot.Height, - "format", snapshot.Format, "peer", src.ID()) - src.Send(chID, mustEncodeMsg(&ssproto.SnapshotsResponse{ - Height: snapshot.Height, - Format: snapshot.Format, - Chunks: snapshot.Chunks, - Hash: snapshot.Hash, - Metadata: snapshot.Metadata, - })) + "format", snapshot.Format, "peer", e.Src.ID()) + p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: e.ChannelID, + Message: &ssproto.SnapshotsResponse{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunks: snapshot.Chunks, + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + }, + }, r.Logger) } case *ssproto.SnapshotsResponse: @@ -146,8 +147,8 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { r.Logger.Debug("Received unexpected snapshot, no state sync in progress") return } - r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID()) - _, err := 
r.syncer.AddSnapshot(src, &snapshot{ + r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", e.Src.ID()) + _, err := r.syncer.AddSnapshot(e.Src, &snapshot{ Height: msg.Height, Format: msg.Format, Chunks: msg.Chunks, @@ -157,7 +158,7 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // TODO: We may want to consider punishing the peer for certain errors if err != nil { r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format, - "peer", src.ID(), "err", err) + "peer", e.Src.ID(), "err", err) return } @@ -166,10 +167,10 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case ChunkChannel: - switch msg := msg.(type) { + switch msg := e.Message.(type) { case *ssproto.ChunkRequest: r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) + "chunk", msg.Index, "peer", e.Src.ID()) resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, @@ -181,30 +182,33 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { return } r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) - src.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkResponse{ - Height: msg.Height, - Format: msg.Format, - Index: msg.Index, - Chunk: resp.Chunk, - Missing: resp.Chunk == nil, - })) + "chunk", msg.Index, "peer", e.Src.ID()) + p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: ChunkChannel, + Message: &ssproto.ChunkResponse{ + Height: msg.Height, + Format: msg.Format, + Index: msg.Index, + Chunk: resp.Chunk, + Missing: resp.Chunk == nil, + }, + }, r.Logger) case *ssproto.ChunkResponse: r.mtx.RLock() defer r.mtx.RUnlock() if r.syncer == nil { - r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID()) + r.Logger.Debug("Received unexpected chunk, 
no state sync in progress", "peer", e.Src.ID()) return } r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) + "chunk", msg.Index, "peer", e.Src.ID()) _, err := r.syncer.AddChunk(&chunk{ Height: msg.Height, Format: msg.Format, Index: msg.Index, Chunk: msg.Chunk, - Sender: src.ID(), + Sender: e.Src.ID(), }) if err != nil { r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format, @@ -217,8 +221,26 @@ func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } default: - r.Logger.Error("Received message on invalid channel %x", chID) + r.Logger.Error("Received message on invalid channel %x", e.ChannelID) + } +} + +func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := &ssproto.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + um, err := msg.Unwrap() + if err != nil { + panic(err) } + + r.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: um, + }) } // recentSnapshots fetches the n most recent snapshots from the app @@ -269,7 +291,11 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) hook := func() { r.Logger.Debug("Requesting snapshots from known peers") // Request snapshots from all currently connected peers - r.Switch.Broadcast(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + + r.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: SnapshotChannel, + Message: &ssproto.SnapshotsRequest{}, + }) } hook() diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go index 053b47ef52..3dc089fcec 100644 --- a/statesync/reactor_test.go +++ b/statesync/reactor_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -53,10 +54,18 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { 
peer.On("ID").Return(p2p.ID("id")) var response *ssproto.ChunkResponse if tc.expectResponse != nil { - peer.On("Send", ChunkChannel, mock.Anything).Run(func(args mock.Arguments) { - msg, err := decodeMsg(args[1].([]byte)) + peer.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == ChunkChannel + })).Run(func(args mock.Arguments) { + e := args[0].(p2p.Envelope) + + // Marshal to simulate a wire roundtrip. + bz, err := proto.Marshal(e.Message) + require.NoError(t, err) + err = proto.Unmarshal(bz, e.Message) require.NoError(t, err) - response = msg.(*ssproto.ChunkResponse) + response = e.Message.(*ssproto.ChunkResponse) }).Return(true) } @@ -71,7 +80,11 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { } }) - r.Receive(ChunkChannel, peer, mustEncodeMsg(tc.request)) + r.ReceiveEnvelope(p2p.Envelope{ + ChannelID: ChunkChannel, + Src: peer, + Message: tc.request, + }) time.Sleep(100 * time.Millisecond) assert.Equal(t, tc.expectResponse, response) @@ -131,10 +144,18 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { peer := &p2pmocks.Peer{} if len(tc.expectResponses) > 0 { peer.On("ID").Return(p2p.ID("id")) - peer.On("Send", SnapshotChannel, mock.Anything).Run(func(args mock.Arguments) { - msg, err := decodeMsg(args[1].([]byte)) + peer.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == SnapshotChannel + })).Run(func(args mock.Arguments) { + e := args[0].(p2p.Envelope) + + // Marshal to simulate a wire roundtrip. 
+ bz, err := proto.Marshal(e.Message) + require.NoError(t, err) + err = proto.Unmarshal(bz, e.Message) require.NoError(t, err) - responses = append(responses, msg.(*ssproto.SnapshotsResponse)) + responses = append(responses, e.Message.(*ssproto.SnapshotsResponse)) }).Return(true) } @@ -149,7 +170,11 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { } }) - r.Receive(SnapshotChannel, peer, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + r.ReceiveEnvelope(p2p.Envelope{ + ChannelID: SnapshotChannel, + Src: peer, + Message: &ssproto.SnapshotsRequest{}, + }) time.Sleep(100 * time.Millisecond) assert.Equal(t, tc.expectResponses, responses) @@ -158,3 +183,21 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { }) } } + +func TestLegacyReactorReceiveBasic(t *testing.T) { + cfg := config.DefaultStateSyncConfig() + conn := &proxymocks.AppConnSnapshot{} + reactor := NewReactor(*cfg, conn, nil, "") + peer := p2p.CreateRandomPeer(false) + + reactor.InitPeer(peer) + reactor.AddPeer(peer) + m := &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1} + wm := m.Wrap() + msg, err := proto.Marshal(wm) + assert.NoError(t, err) + + assert.NotPanics(t, func() { + reactor.Receive(ChunkChannel, peer, msg) + }) +} diff --git a/statesync/snapshots.go b/statesync/snapshots.go index e4936c91a2..917c049475 100644 --- a/statesync/snapshots.go +++ b/statesync/snapshots.go @@ -132,7 +132,7 @@ func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.Peer { if len(peers) == 0 { return nil } - return peers[rand.Intn(len(peers))] // nolint:gosec // G404: Use of weak random number generator + return peers[rand.Intn(len(peers))] //nolint:gosec // G404: Use of weak random number generator } // GetPeers returns the peers for a snapshot. 
diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go index ad60364681..2ed7b93a20 100644 --- a/statesync/stateprovider.go +++ b/statesync/stateprovider.go @@ -22,7 +22,7 @@ import ( "github.com/tendermint/tendermint/version" ) -//go:generate mockery --case underscore --name StateProvider +//go:generate ../scripts/mockery_generate.sh StateProvider // StateProvider is a provider of trusted state data for bootstrapping a node. This refers // to the state.State object, not the state machine. diff --git a/statesync/syncer.go b/statesync/syncer.go index e4c02988bc..54c4aa26ac 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -126,7 +126,11 @@ func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) { // to discover snapshots, later we may want to do retries and stuff. func (s *syncer) AddPeer(peer p2p.Peer) { s.logger.Debug("Requesting snapshots from peer", "peer", peer.ID()) - peer.Send(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + e := p2p.Envelope{ + ChannelID: SnapshotChannel, + Message: &ssproto.SnapshotsRequest{}, + } + p2p.SendEnvelopeShim(peer, e, s.logger) //nolint: staticcheck } // RemovePeer removes a peer from the pool. 
@@ -467,11 +471,14 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { } s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, "format", snapshot.Format, "chunk", chunk, "peer", peer.ID()) - peer.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkRequest{ - Height: snapshot.Height, - Format: snapshot.Format, - Index: chunk, - })) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: ChunkChannel, + Message: &ssproto.ChunkRequest{ + Height: snapshot.Height, + Format: snapshot.Format, + Index: chunk, + }, + }, s.logger) } // verifyApp verifies the sync, checking the app hash, last block height and app version diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index 4dabe72889..838508120e 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -98,13 +98,27 @@ func TestSyncer_SyncAny(t *testing.T) { // Adding a couple of peers should trigger snapshot discovery messages peerA := &p2pmocks.Peer{} peerA.On("ID").Return(p2p.ID("a")) - peerA.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) + peerA.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + if !ok { + return false + } + req, ok := e.Message.(*ssproto.SnapshotsRequest) + return ok && e.ChannelID == SnapshotChannel && req != nil + })).Return(true) syncer.AddPeer(peerA) peerA.AssertExpectations(t) peerB := &p2pmocks.Peer{} peerB.On("ID").Return(p2p.ID("b")) - peerB.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) + peerB.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + if !ok { + return false + } + req, ok := e.Message.(*ssproto.SnapshotsRequest) + return ok && e.ChannelID == SnapshotChannel && req != nil + })).Return(true) syncer.AddPeer(peerB) peerB.AssertExpectations(t) @@ -147,9 +161,9 @@ func TestSyncer_SyncAny(t *testing.T) { chunkRequests := make(map[uint32]int) chunkRequestsMtx 
:= tmsync.Mutex{} onChunkRequest := func(args mock.Arguments) { - pb, err := decodeMsg(args[1].([]byte)) - require.NoError(t, err) - msg := pb.(*ssproto.ChunkRequest) + e, ok := args[0].(p2p.Envelope) + require.True(t, ok) + msg := e.Message.(*ssproto.ChunkRequest) require.EqualValues(t, 1, msg.Height) require.EqualValues(t, 1, msg.Format) require.LessOrEqual(t, msg.Index, uint32(len(chunks))) @@ -162,8 +176,14 @@ func TestSyncer_SyncAny(t *testing.T) { chunkRequests[msg.Index]++ chunkRequestsMtx.Unlock() } - peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) - peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + peerA.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == ChunkChannel + })).Maybe().Run(onChunkRequest).Return(true) + peerB.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool { + e, ok := i.(p2p.Envelope) + return ok && e.ChannelID == ChunkChannel + })).Maybe().Run(onChunkRequest).Return(true) // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from diff --git a/store/store.go b/store/store.go index 6f27d27d1e..48fd1c97e4 100644 --- a/store/store.go +++ b/store/store.go @@ -17,9 +17,9 @@ import ( BlockStore is a simple low level store for blocks. There are three types of information stored: - - BlockMeta: Meta information about each block - - Block part: Parts of each block, aggregated w/ PartSet - - Commit: The commit part of each block, for gossiping precommit votes + - BlockMeta: Meta information about each block + - Block part: Parts of each block, aggregated w/ PartSet + - Commit: The commit part of each block, for gossiping precommit votes Currently the precommit signatures are duplicated in the Block parts as well as the Commit. 
In the future this may change, perhaps by moving @@ -325,9 +325,10 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { // SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. // blockParts: Must be parts of the block // seenCommit: The +2/3 precommits that were seen which committed at height. -// If all the nodes restart after committing a block, -// we need this to reload the precommits to catch-up nodes to the -// most recent height. Otherwise they'd stall at H-1. +// +// If all the nodes restart after committing a block, +// we need this to reload the precommits to catch-up nodes to the +// most recent height. Otherwise they'd stall at H-1. func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { if block == nil { panic("BlockStore can only save a non-nil block") diff --git a/store/store_test.go b/store/store_test.go index 912bf0361b..7125821bf3 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -67,7 +67,9 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) @@ -376,7 +378,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { func TestLoadBaseMeta(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) @@ -432,7 +436,9 
@@ func TestLoadBlockPart(t *testing.T) { func TestPruneBlocks(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 6d472db4cc..d539354836 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17 +FROM golang:1.18 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ef20375ff..c460fde271 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -1,11 +1,9 @@ -// nolint: gosec package app import ( "encoding/json" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -30,7 +28,7 @@ type SnapshotStore struct { // NewSnapshotStore creates a new snapshot store. func NewSnapshotStore(dir string) (*SnapshotStore, error) { store := &SnapshotStore{dir: dir} - if err := os.MkdirAll(dir, 0755); err != nil { + if err := os.MkdirAll(dir, 0o755); err != nil { return nil, err } if err := store.loadMetadata(); err != nil { @@ -45,7 +43,7 @@ func (s *SnapshotStore) loadMetadata() error { file := filepath.Join(s.dir, "metadata.json") metadata := []abci.Snapshot{} - bz, err := ioutil.ReadFile(file) + bz, err := os.ReadFile(file) switch { case errors.Is(err, os.ErrNotExist): case err != nil: @@ -72,7 +70,7 @@ func (s *SnapshotStore) saveMetadata() error { // save the file to a new file and move it to make saving atomic. 
newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0o644) //nolint: gosec if err != nil { return err } @@ -93,7 +91,8 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { Hash: hashItems(state.Values), Chunks: byteChunks(bz), } - err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0o644) if err != nil { return abci.Snapshot{}, err } @@ -122,7 +121,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([ defer s.RUnlock() for _, snapshot := range s.metadata { if snapshot.Height == height && snapshot.Format == format { - bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) if err != nil { return nil, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 1ede6fb4c3..c5ba2ba522 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -1,4 +1,3 @@ -//nolint: gosec package app import ( @@ -6,15 +5,16 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sort" "sync" ) -const stateFileName = "app_state.json" -const prevStateFileName = "prev_app_state.json" +const ( + stateFileName = "app_state.json" + prevStateFileName = "prev_app_state.json" +) // State is the application state. type State struct { @@ -52,11 +52,11 @@ func NewState(dir string, persistInterval uint64) (*State, error) { // load loads state from disk. It does not take out a lock, since it is called // during construction. 
func (s *State) load() error { - bz, err := ioutil.ReadFile(s.currentFile) + bz, err := os.ReadFile(s.currentFile) if err != nil { // if the current state doesn't exist then we try recover from the previous state if errors.Is(err, os.ErrNotExist) { - bz, err = ioutil.ReadFile(s.previousFile) + bz, err = os.ReadFile(s.previousFile) if err != nil { return fmt.Errorf("failed to read both current and previous state (%q): %w", s.previousFile, err) @@ -82,7 +82,8 @@ func (s *State) save() error { // We write the state to a separate file and move it to the destination, to // make it atomic. newFile := fmt.Sprintf("%v.new", s.currentFile) - err = ioutil.WriteFile(newFile, bz, 0644) + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(newFile, bz, 0o644) if err != nil { return fmt.Errorf("failed to write state to %q: %w", s.currentFile, err) } @@ -160,7 +161,7 @@ func (s *State) Commit() (uint64, []byte, error) { } func (s *State) Rollback() error { - bz, err := ioutil.ReadFile(s.previousFile) + bz, err := os.ReadFile(s.previousFile) if err != nil { return fmt.Errorf("failed to read state from %q: %w", s.previousFile, err) } diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 64a8e8de95..a25b6e8b60 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. 
-FROM golang:1.17 +FROM golang:1.18 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index f17b4f3f4f..57c8f4a4c5 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -1,4 +1,3 @@ -//nolint: gosec package main import ( @@ -58,11 +57,12 @@ func NewCLI() *CLI { // generate generates manifests in a directory. func (cli *CLI) generate(dir string, groups int) error { - err := os.MkdirAll(dir, 0755) + err := os.MkdirAll(dir, 0o755) if err != nil { return err } + //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) manifests, err := Generate(rand.New(rand.NewSource(randomSeed))) if err != nil { return err diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index 7efb4e822c..6941d0a1c9 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -1,4 +1,3 @@ -//nolint: goconst package main import ( @@ -56,6 +55,8 @@ func LoadConfig(file string) (*Config, error) { // Validate validates the configuration. We don't do exhaustive config // validation here, instead relying on Testnet.Validate() to handle it. +// +//nolint:goconst func (cfg Config) Validate() error { switch { case cfg.ChainID == "": diff --git a/test/e2e/pkg/infra/docker/docker.go b/test/e2e/pkg/infra/docker/docker.go new file mode 100644 index 0000000000..19db876091 --- /dev/null +++ b/test/e2e/pkg/infra/docker/docker.go @@ -0,0 +1,103 @@ +package docker + +import ( + "bytes" + "os" + "path/filepath" + "strconv" + "text/template" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" +) + +var _ infra.Provider = &Provider{} + +// Provider implements a docker-compose backed infrastructure provider. 
+type Provider struct { + Testnet *e2e.Testnet +} + +// Setup generates the docker-compose file and write it to disk, erroring if +// any of these operations fail. +func (p *Provider) Setup() error { + compose, err := dockerComposeBytes(p.Testnet) + if err != nil { + return err + } + //nolint: gosec + // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(p.Testnet.Dir, "docker-compose.yml"), compose, 0644) + if err != nil { + return err + } + return nil +} + +// dockerComposeBytes generates a Docker Compose config file for a testnet and returns the +// file as bytes to be written out to disk. +func dockerComposeBytes(testnet *e2e.Testnet) ([]byte, error) { + // Must use version 2 Docker Compose format, to support IPv6. + tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ + "misbehaviorsToString": func(misbehaviors map[int64]string) string { + str := "" + for height, misbehavior := range misbehaviors { + // after the first behavior set, a comma must be prepended + if str != "" { + str += "," + } + heightString := strconv.Itoa(int(height)) + str += misbehavior + "," + heightString + } + return str + }, + }).Parse(`version: '2.4' + +networks: + {{ .Name }}: + labels: + e2e: true + driver: bridge +{{- if .IPv6 }} + enable_ipv6: true +{{- end }} + ipam: + driver: default + config: + - subnet: {{ .IP }} + +services: +{{- range .Nodes }} + {{ .Name }}: + labels: + e2e: true + container_name: {{ .Name }} + image: tendermint/e2e-node +{{- if eq .ABCIProtocol "builtin" }} + entrypoint: /usr/bin/entrypoint-builtin +{{- else if .Misbehaviors }} + entrypoint: /usr/bin/entrypoint-maverick + command: ["node", "--misbehaviors", "{{ misbehaviorsToString .Misbehaviors }}"] +{{- end }} + init: true + ports: + - 26656 + - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 + - 6060 + volumes: + - ./{{ .Name }}:/tendermint + networks: + {{ $.Name }}: + ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} + +{{end}}`) + if err 
!= nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, testnet) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/test/e2e/pkg/infra/provider.go b/test/e2e/pkg/infra/provider.go new file mode 100644 index 0000000000..03b821de38 --- /dev/null +++ b/test/e2e/pkg/infra/provider.go @@ -0,0 +1,20 @@ +package infra + +// Provider defines an API for manipulating the infrastructure of a +// specific set of testnet infrastructure. +type Provider interface { + + // Setup generates any necessary configuration for the infrastructure + // provider during testnet setup. + Setup() error +} + +// NoopProvider implements the provider interface by performing noops for every +// interface method. This may be useful if the infrastructure is managed by a +// separate process. +type NoopProvider struct { +} + +func (NoopProvider) Setup() error { return nil } + +var _ Provider = NoopProvider{} diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go new file mode 100644 index 0000000000..2fc0e4bac6 --- /dev/null +++ b/test/e2e/pkg/infrastructure.go @@ -0,0 +1,80 @@ +package e2e + +import ( + "encoding/json" + "fmt" + "net" + "os" +) + +const ( + dockerIPv4CIDR = "10.186.73.0/24" + dockerIPv6CIDR = "fd80:b10c::/48" + + globalIPv4CIDR = "0.0.0.0/0" +) + +// InfrastructureData contains the relevant information for a set of existing +// infrastructure that is to be used for running a testnet. +type InfrastructureData struct { + + // Provider is the name of infrastructure provider backing the testnet. + // For example, 'docker' if it is running locally in a docker network or + // 'digital-ocean', 'aws', 'google', etc. if it is from a cloud provider. + Provider string `json:"provider"` + + // Instances is a map of all of the machine instances on which to run + // processes for a testnet. 
+ // The key of the map is the name of the instance, which each must correspond + // to the names of one of the testnet nodes defined in the testnet manifest. + Instances map[string]InstanceData `json:"instances"` + + // Network is the CIDR notation range of IP addresses that all of the instances' + // IP addresses are expected to be within. + Network string `json:"network"` +} + +// InstanceData contains the relevant information for a machine instance backing +// one of the nodes in the testnet. +type InstanceData struct { + IPAddress net.IP `json:"ip_address"` +} + +func NewDockerInfrastructureData(m Manifest) (InfrastructureData, error) { + netAddress := dockerIPv4CIDR + if m.IPv6 { + netAddress = dockerIPv6CIDR + } + _, ipNet, err := net.ParseCIDR(netAddress) + if err != nil { + return InfrastructureData{}, fmt.Errorf("invalid IP network address %q: %w", netAddress, err) + } + ipGen := newIPGenerator(ipNet) + ifd := InfrastructureData{ + Provider: "docker", + Instances: make(map[string]InstanceData), + Network: netAddress, + } + for name := range m.Nodes { + ifd.Instances[name] = InstanceData{ + IPAddress: ipGen.Next(), + } + } + return ifd, nil +} + +func InfrastructureDataFromFile(p string) (InfrastructureData, error) { + ifd := InfrastructureData{} + b, err := os.ReadFile(p) + if err != nil { + return InfrastructureData{}, err + } + err = json.Unmarshal(b, &ifd) + if err != nil { + return InfrastructureData{}, err + } + if ifd.Network == "" { + ifd.Network = globalIPv4CIDR + } + return ifd, nil +} diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 4d46de9224..0729bafca9 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,4 +1,3 @@ -//nolint: gosec package e2e import ( @@ -22,13 +21,13 @@ import ( const ( randomSeed int64 = 2308084734268 proxyPortFirst uint32 = 5701 - networkIPv4 = "10.186.73.0/24" - networkIPv6 = "fd80:b10c::/48" ) -type Mode string -type Protocol string -type Perturbation string +type ( + Mode string + 
Protocol string + Perturbation string +) const ( ModeValidator Mode = "validator" @@ -93,32 +92,20 @@ type Node struct { // The testnet generation must be deterministic, since it is generated // separately by the runner and the test cases. For this reason, testnets use a // random seed to generate e.g. keys. -func LoadTestnet(file string) (*Testnet, error) { - manifest, err := LoadManifest(file) - if err != nil { - return nil, err - } - dir := strings.TrimSuffix(file, filepath.Ext(file)) - - // Set up resource generators. These must be deterministic. - netAddress := networkIPv4 - if manifest.IPv6 { - netAddress = networkIPv6 - } - _, ipNet, err := net.ParseCIDR(netAddress) - if err != nil { - return nil, fmt.Errorf("invalid IP network address %q: %w", netAddress, err) - } - - ipGen := newIPGenerator(ipNet) +func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Testnet, error) { + dir := strings.TrimSuffix(fname, filepath.Ext(fname)) keyGen := newKeyGenerator(randomSeed) proxyPortGen := newPortGenerator(proxyPortFirst) + _, ipNet, err := net.ParseCIDR(ifd.Network) + if err != nil { + return nil, fmt.Errorf("invalid IP network address %q: %w", ifd.Network, err) + } testnet := &Testnet{ Name: filepath.Base(dir), - File: file, + File: fname, Dir: dir, - IP: ipGen.Network(), + IP: ipNet, InitialHeight: 1, InitialState: manifest.InitialState, Validators: map[*Node]int64{}, @@ -145,12 +132,16 @@ func LoadTestnet(file string) (*Testnet, error) { for _, name := range nodeNames { nodeManifest := manifest.Nodes[name] + ind, ok := ifd.Instances[name] + if !ok { + return nil, fmt.Errorf("information for node '%s' missing from infrastucture data", name) + } node := &Node{ Name: name, Testnet: testnet, PrivvalKey: keyGen.Generate(manifest.KeyType), NodeKey: keyGen.Generate("ed25519"), - IP: ipGen.Next(), + IP: ind.IPAddress, ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", @@ -415,6 +406,7 @@ func (t Testnet) ArchiveNodes() []*Node 
{ // RandomNode returns a random non-seed node. func (t Testnet) RandomNode() *Node { for { + //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) node := t.Nodes[rand.Intn(len(t.Nodes))] if node.Mode != ModeSeed { return node @@ -491,7 +483,7 @@ type keyGenerator struct { func newKeyGenerator(seed int64) *keyGenerator { return &keyGenerator{ - random: rand.New(rand.NewSource(seed)), + random: rand.New(rand.NewSource(seed)), //nolint:gosec } } diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go index f790f7fc15..0fd997f478 100644 --- a/test/e2e/runner/exec.go +++ b/test/e2e/runner/exec.go @@ -1,4 +1,3 @@ -//nolint: gosec package main import ( @@ -10,6 +9,7 @@ import ( // execute executes a shell command. func exec(args ...string) error { + //nolint:gosec // G204: Subprocess launched with a potential tainted input or cmd arguments cmd := osexec.Command(args[0], args[1:]...) out, err := cmd.CombinedOutput() switch err := err.(type) { @@ -24,6 +24,7 @@ func exec(args ...string) error { // execVerbose executes a shell command while displaying its output. func execVerbose(args ...string) error { + //nolint:gosec // G204: Subprocess launched with a potential tainted input or cmd arguments cmd := osexec.Command(args[0], args[1:]...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 51b68a0226..20c6a2a6c7 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -2,6 +2,7 @@ package main import ( "context" + "errors" "fmt" "os" "strconv" @@ -10,6 +11,8 @@ import ( "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" + "github.com/tendermint/tendermint/test/e2e/pkg/infra/docker" ) var ( @@ -25,6 +28,7 @@ type CLI struct { root *cobra.Command testnet *e2e.Testnet preserve bool + infp infra.Provider } // NewCLI sets up the CLI. 
@@ -40,19 +44,57 @@ func NewCLI() *CLI { if err != nil { return err } - testnet, err := e2e.LoadTestnet(file) + m, err := e2e.LoadManifest(file) if err != nil { return err } + inft, err := cmd.Flags().GetString("infrastructure-type") + if err != nil { + return err + } + + var ifd e2e.InfrastructureData + switch inft { + case "docker": + var err error + ifd, err = e2e.NewDockerInfrastructureData(m) + if err != nil { + return err + } + case "digital-ocean": + p, err := cmd.Flags().GetString("infrastructure-data") + if err != nil { + return err + } + if p == "" { + return errors.New("'--infrastructure-data' must be set when using the 'digital-ocean' infrastructure-type") + } + ifd, err = e2e.InfrastructureDataFromFile(p) + if err != nil { + return fmt.Errorf("parsing infrastructure data: %s", err) + } + default: + return fmt.Errorf("unknown infrastructure type '%s'", inft) + } + + testnet, err := e2e.LoadTestnet(m, file, ifd) + if err != nil { + return fmt.Errorf("loading testnet: %s", err) + } + cli.testnet = testnet + cli.infp = &infra.NoopProvider{} + if inft == "docker" { + cli.infp = &docker.Provider{Testnet: testnet} + } return nil }, RunE: func(cmd *cobra.Command, args []string) error { if err := Cleanup(cli.testnet); err != nil { return err } - if err := Setup(cli.testnet); err != nil { + if err := Setup(cli.testnet, cli.infp); err != nil { return err } @@ -114,6 +156,10 @@ func NewCLI() *CLI { cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") _ = cli.root.MarkPersistentFlagRequired("file") + cli.root.PersistentFlags().StringP("infrastructure-type", "", "docker", "Backing infrastructure used to run the testnet. Either 'digital-ocean' or 'docker'") + + cli.root.PersistentFlags().StringP("infrastructure-data", "", "", "path to the json file containing the infrastructure data. 
Only used if the 'infrastructure-type' is set to a value other than 'docker'") + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, "Preserves the running of the test net after tests are completed") @@ -121,7 +167,7 @@ func NewCLI() *CLI { Use: "setup", Short: "Generates the testnet directory and configuration", RunE: func(cmd *cobra.Command, args []string) error { - return Setup(cli.testnet) + return Setup(cli.testnet, cli.infp) }, }) @@ -131,7 +177,7 @@ func NewCLI() *CLI { RunE: func(cmd *cobra.Command, args []string) error { _, err := os.Stat(cli.testnet.Dir) if os.IsNotExist(err) { - err = Setup(cli.testnet) + err = Setup(cli.testnet, cli.infp) } if err != nil { return err @@ -231,7 +277,7 @@ Does not run any perbutations. if err := Cleanup(cli.testnet); err != nil { return err } - if err := Setup(cli.testnet); err != nil { + if err := Setup(cli.testnet, cli.infp); err != nil { return err } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 919e4542fc..5493324f6b 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -1,4 +1,3 @@ -// nolint: gosec package main import ( @@ -7,14 +6,12 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" "sort" "strconv" "strings" - "text/template" "time" "github.com/BurntSushi/toml" @@ -25,6 +22,7 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" "github.com/tendermint/tendermint/types" ) @@ -41,7 +39,7 @@ const ( ) // Setup sets up the testnet configuration. 
-func Setup(testnet *e2e.Testnet) error { +func Setup(testnet *e2e.Testnet, infp infra.Provider) error { logger.Info("setup", "msg", log.NewLazySprintf("Generating testnet files in %q", testnet.Dir)) err := os.MkdirAll(testnet.Dir, os.ModePerm) @@ -49,11 +47,7 @@ func Setup(testnet *e2e.Testnet) error { return err } - compose, err := MakeDockerCompose(testnet) - if err != nil { - return err - } - err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) + err = infp.Setup() if err != nil { return err } @@ -76,7 +70,7 @@ func Setup(testnet *e2e.Testnet) error { if node.Mode == e2e.ModeLight && strings.Contains(dir, "app") { continue } - err := os.MkdirAll(dir, 0755) + err := os.MkdirAll(dir, 0o755) if err != nil { return err } @@ -92,7 +86,8 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0o644) if err != nil { return err } @@ -128,73 +123,6 @@ func Setup(testnet *e2e.Testnet) error { return nil } -// MakeDockerCompose generates a Docker Compose config for a testnet. -func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { - // Must use version 2 Docker Compose format, to support IPv6. 
- tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ - "misbehaviorsToString": func(misbehaviors map[int64]string) string { - str := "" - for height, misbehavior := range misbehaviors { - // after the first behavior set, a comma must be prepended - if str != "" { - str += "," - } - heightString := strconv.Itoa(int(height)) - str += misbehavior + "," + heightString - } - return str - }, - }).Parse(`version: '2.4' - -networks: - {{ .Name }}: - labels: - e2e: true - driver: bridge -{{- if .IPv6 }} - enable_ipv6: true -{{- end }} - ipam: - driver: default - config: - - subnet: {{ .IP }} - -services: -{{- range .Nodes }} - {{ .Name }}: - labels: - e2e: true - container_name: {{ .Name }} - image: tendermint/e2e-node -{{- if eq .ABCIProtocol "builtin" }} - entrypoint: /usr/bin/entrypoint-builtin -{{- else if .Misbehaviors }} - entrypoint: /usr/bin/entrypoint-maverick - command: ["node", "--misbehaviors", "{{ misbehaviorsToString .Misbehaviors }}"] -{{- end }} - init: true - ports: - - 26656 - - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 - - 6060 - volumes: - - ./{{ .Name }}:/tendermint - networks: - {{ $.Name }}: - ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} - -{{end}}`) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = tmpl.Execute(&buf, testnet) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - // MakeGenesis generates a genesis document. func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { genesis := types.GenesisDoc{ @@ -401,11 +329,12 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { // FIXME Apparently there's no function to simply load a config file without // involving the entire Viper apparatus, so we'll just resort to regexps. 
- bz, err := ioutil.ReadFile(cfgPath) + bz, err := os.ReadFile(cfgPath) if err != nil { return err } bz = regexp.MustCompile(`(?m)^trust_height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust_hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_hash = "%X"`, hash))) - return ioutil.WriteFile(cfgPath, bz, 0644) + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + return os.WriteFile(cfgPath, bz, 0o644) } diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index 763b99ea38..5df3309d41 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -66,23 +66,27 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { func loadTestnet(t *testing.T) e2e.Testnet { t.Helper() - manifest := os.Getenv("E2E_MANIFEST") - if manifest == "" { + manifestFile := os.Getenv("E2E_MANIFEST") + if manifestFile == "" { t.Skip("E2E_MANIFEST not set, not an end-to-end test run") } - if !filepath.IsAbs(manifest) { - manifest = filepath.Join("..", manifest) + if !filepath.IsAbs(manifestFile) { + manifestFile = filepath.Join("..", manifestFile) } testnetCacheMtx.Lock() defer testnetCacheMtx.Unlock() - if testnet, ok := testnetCache[manifest]; ok { + if testnet, ok := testnetCache[manifestFile]; ok { return testnet } + m, err := e2e.LoadManifest(manifestFile) + require.NoError(t, err) + ifd, err := e2e.NewDockerInfrastructureData(m) + require.NoError(t, err) - testnet, err := e2e.LoadTestnet(manifest) + testnet, err := e2e.LoadTestnet(m, manifestFile, ifd) require.NoError(t, err) - testnetCache[manifest] = *testnet + testnetCache[manifestFile] = *testnet return *testnet } diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/v0/fuzz_test.go index 4f8f1e9c8e..d371ee3ff0 100644 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ b/test/fuzz/mempool/v0/fuzz_test.go @@ -1,12 +1,13 @@ package v0_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" 
"github.com/stretchr/testify/require" + mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" ) @@ -25,7 +26,7 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) mempoolv0.Fuzz(input) }) diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go index 863697a0af..000f6df3cb 100644 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ b/test/fuzz/mempool/v1/fuzz_test.go @@ -1,12 +1,13 @@ package v1_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" + mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1" ) @@ -25,7 +26,7 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) mempoolv1.Fuzz(input) }) diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go index f2799ef04d..a7751c2246 100644 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ b/test/fuzz/p2p/addrbook/fuzz.go @@ -1,4 +1,3 @@ -// nolint: gosec package addr import ( @@ -25,6 +24,7 @@ func Fuzz(data []byte) int { } // Also, make sure PickAddress always returns a non-nil address. 
+ //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) bias := rand.Intn(100) if p := addrBook.PickAddress(bias); p == nil { panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go index d5cc3a9a91..b0623b298f 100644 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ b/test/fuzz/p2p/addrbook/init-corpus/main.go @@ -1,11 +1,9 @@ -// nolint: gosec package main import ( "encoding/json" "flag" "fmt" - "io/ioutil" "log" "net" "os" @@ -27,7 +25,7 @@ func initCorpus(baseDir string) { // create "corpus" directory corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { + if err := os.MkdirAll(corpusDir, 0o755); err != nil { log.Fatalf("Creating %q err: %v", corpusDir, err) } @@ -49,7 +47,8 @@ func initCorpus(baseDir string) { log.Fatalf("can't marshal %v: %v", addr, err) } - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + if err := os.WriteFile(filename, bz, 0o644); err != nil { log.Fatalf("can't write %v to %q: %v", addr, filename, err) } diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go index 2fe09c0dce..dfa18363ef 100644 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ b/test/fuzz/p2p/pex/init-corpus/main.go @@ -1,10 +1,8 @@ -// nolint: gosec package main import ( "flag" "fmt" - "io/ioutil" "log" "math/rand" "os" @@ -22,11 +20,12 @@ func main() { initCorpus(*baseDir) } +//nolint:gosec func initCorpus(rootDir string) { log.SetFlags(0) corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { + if err := os.MkdirAll(corpusDir, 0o755); err != nil { log.Fatalf("Creating %q err: %v", corpusDir, err) } sizes := []int{0, 1, 2, 17, 5, 31} @@ -73,7 +72,7 @@ func initCorpus(rootDir string) { filename := 
filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { + if err := os.WriteFile(filename, bz, 0o644); err != nil { log.Fatalf("can't write %X to %q: %v", bz, filename, err) } diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go index 4ac06c8927..be9c6bba0f 100644 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ b/test/fuzz/p2p/pex/reactor_receive.go @@ -74,13 +74,17 @@ func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(0, 0, 0, 0) } func (fp *fuzzPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: fp.RemoteIP(), Port: 98991, Zone: ""} } -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { return p2p.NewNetAddress(fp.ID(), fp.RemoteAddr()) } -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } +func (fp *fuzzPeer) IsOutbound() bool { return false } +func (fp *fuzzPeer) IsPersistent() bool { return false } +func (fp *fuzzPeer) CloseConn() error { return nil } +func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo } +func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } +func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { return p2p.NewNetAddress(fp.ID(), fp.RemoteAddr()) } +func (fp *fuzzPeer) SendEnvelope(e p2p.Envelope) bool { return true } +func (fp *fuzzPeer) TrySendEnvelope(e p2p.Envelope) bool { return true } +func (fp *fuzzPeer) Send(byte, []byte) bool { return 
true } +func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } +func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } +func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } +func (fp *fuzzPeer) SetRemovalFailed() {} +func (fp *fuzzPeer) GetRemovalFailed() bool { return false } diff --git a/test/fuzz/p2p/secret_connection/init-corpus/main.go b/test/fuzz/p2p/secret_connection/init-corpus/main.go index 635f2d99f9..f3e397f3a3 100644 --- a/test/fuzz/p2p/secret_connection/init-corpus/main.go +++ b/test/fuzz/p2p/secret_connection/init-corpus/main.go @@ -1,10 +1,8 @@ -// nolint: gosec package main import ( "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -21,7 +19,7 @@ func initCorpus(baseDir string) { log.SetFlags(0) corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { + if err := os.MkdirAll(corpusDir, 0o755); err != nil { log.Fatal(err) } @@ -39,7 +37,8 @@ func initCorpus(baseDir string) { for i, datum := range data { filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) - if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less + if err := os.WriteFile(filename, []byte(datum), 0o644); err != nil { log.Fatalf("can't write %v to %q: %v", datum, filename, err) } diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go index 98c75d5113..8189bcccc6 100644 --- a/test/fuzz/rpc/jsonrpc/server/handler.go +++ b/test/fuzz/rpc/jsonrpc/server/handler.go @@ -3,7 +3,7 @@ package handler import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" @@ -29,7 +29,7 @@ func Fuzz(data []byte) int { rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { panic(err) } diff --git a/test/loadtime/Makefile b/test/loadtime/Makefile 
new file mode 100644 index 0000000000..bab69e28eb --- /dev/null +++ b/test/loadtime/Makefile @@ -0,0 +1,33 @@ +GOMOD="github.com/tendermint/tendermint/test/loadtime" +OUTPUT?=build/ + +build: + go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)load ./cmd/load/ + go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)report ./cmd/report/ +.PHONY: build + +check-proto-gen-deps: +ifeq (,$(shell which protoc)) + $(error "protoc is required for Protobuf generation. See instructions for your platform on how to install it.") +endif +ifeq (,$(shell which protoc-gen-go)) + $(error "protoc-gen-go is required for Protobuf generation. See instructions for your platform on how to install it.") +endif +.PHONY: check-proto-gen-deps + +check-proto-format-deps: +ifeq (,$(shell which clang-format)) + $(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.") +endif +.PHONY: check-proto-format-deps + +proto-format: check-proto-format-deps + @echo "Formatting Protobuf files" + @find . -name '*.proto' -exec clang-format -i {} \; +.PHONY: proto-format + +proto-gen: check-proto-gen-deps + @echo "Generating Protobuf files" + @find . -name '*.proto' -exec protoc \ + --go_out=paths=source_relative:. {} \; +.PHONY: proto-gen diff --git a/test/loadtime/README.md b/test/loadtime/README.md new file mode 100644 index 0000000000..8043d52973 --- /dev/null +++ b/test/loadtime/README.md @@ -0,0 +1,69 @@ +# loadtime + +This directory contains the `loadtime` tools, a set of tools for generating +transaction load against Tendermint and measuring their resulting latency. +`loadtime` generates transactions that contain the timestamp of when they were +generated as well as additional metadata to track the variables used when +generating the load. + + +## Building the tool set + +The `Makefile` contains a target for building the `loadtime` tools. + +The following command will build the tool and place the resulting binaries in `./build/`. 
+ +```bash +make build +``` + +## `load` + +The `load` binary is built when `make build` is invoked. The `load` tool generates +transactions and broadcasts them to Tendermint. + +`load` leverages the [tm-load-test](https://github.com/informalsystems/tm-load-test) +framework. As a result, all flags and options specified on the `tm-load-test` apply to +`load`. + +Below is a basic invocation for generating load against a Tendermint websocket running +on `localhost:25567` + +```bash +./build/load \ + -c 1 -T 10 -r 1000 -s 1024 \ + --broadcast-tx-method sync \ + --endpoints ws://localhost:26657/websocket +``` + +## `report` + +The `report` binary is built when `make build` is invoked. The `report` tool +reads all of the blocks from the specified blockstore database and calculates +transaction latency metrics. `report` reads transactions generated by `load` +and uses the difference between the timestamp contained in the transaction and +the timestamp of the block the transaction was executed in to determine transaction latency. +`report` outputs a set of metrics calculated on the list of latencies, including +minimum, maximum, and average latency as well as the standard deviation. + +Below is a basic invocation of the report tool with a data directory under `/home/test/.tendermint/data/` +where the data was saved in a `goleveldb` database. + + +```bash +./build/report --database-type goleveldb --data-dir ~/.tendermint/data +``` + +The `report` tool also supports outputting the raw data as `csv`. This can be +useful if you want to use a more powerful tool to aggregate and analyze the data. + +Below is an invocation of the report tool that outputs the data to a `csv` file +in `out.csv` + +```bash +./build/report --database-type goleveldb --data-dir ~/.tendermint/data --csv out.csv +``` + +The `report` tool outputs the data for each experiment separately, identified +by the UUID generated by the `load` tool at the start of the experiment. 
It also +outputs the experimental values used for the run. diff --git a/test/loadtime/basic.sh b/test/loadtime/basic.sh new file mode 100755 index 0000000000..b135232b8d --- /dev/null +++ b/test/loadtime/basic.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +set -euo pipefail + +# A basic invocation of the loadtime tool. + +./build/load \ + -c 1 -T 10 -r 1000 -s 1024 \ + --broadcast-tx-method sync \ + --endpoints ws://localhost:26657/websocket + diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go new file mode 100644 index 0000000000..0230d1dd12 --- /dev/null +++ b/test/loadtime/cmd/load/main.go @@ -0,0 +1,73 @@ +package main + +import ( + "fmt" + + "github.com/google/uuid" + "github.com/informalsystems/tm-load-test/pkg/loadtest" + + "github.com/tendermint/tendermint/test/loadtime/payload" +) + +// Ensure all of the interfaces are correctly satisfied. +var ( + _ loadtest.ClientFactory = (*ClientFactory)(nil) + _ loadtest.Client = (*TxGenerator)(nil) +) + +// ClientFactory implements the loadtest.ClientFactory interface. +type ClientFactory struct { + ID []byte +} + +// TxGenerator is responsible for generating transactions. +// TxGenerator holds the set of information that will be used to generate +// each transaction. 
+type TxGenerator struct { + id []byte + conns uint64 + rate uint64 + size uint64 +} + +func main() { + u := [16]byte(uuid.New()) // generate run ID on startup + if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{ID: u[:]}); err != nil { + panic(err) + } + loadtest.Run(&loadtest.CLIConfig{ + AppName: "loadtime", + AppShortDesc: "Generate timestamped transaction load.", + AppLongDesc: "loadtime generates transaction load for the purpose of measuring the end-to-end latency of a transaction from submission to execution in a Tendermint network.", //nolint:lll + DefaultClientFactory: "loadtime-client", + }) +} + +func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { + psb, err := payload.MaxUnpaddedSize() + if err != nil { + return err + } + if psb > cfg.Size { + return fmt.Errorf("payload size exceeds configured size") + } + return nil +} + +func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { + return &TxGenerator{ + id: f.ID, + conns: uint64(cfg.Connections), + rate: uint64(cfg.Rate), + size: uint64(cfg.Size), + }, nil +} + +func (c *TxGenerator) GenerateTx() ([]byte, error) { + return payload.NewBytes(&payload.Payload{ + Connections: c.conns, + Rate: c.rate, + Size: c.size, + Id: c.id, + }) +} diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go new file mode 100644 index 0000000000..9fbf6e8413 --- /dev/null +++ b/test/loadtime/cmd/report/main.go @@ -0,0 +1,104 @@ +package main + +import ( + "encoding/csv" + "flag" + "fmt" + "log" + "os" + "strconv" + "strings" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/test/loadtime/report" +) + +var ( + db = flag.String("database-type", "goleveldb", "the type of database holding the blockstore") + dir = flag.String("data-dir", "", "path to the directory containing the tendermint databases") + csvOut = flag.String("csv", "", "dump the extracted latencies as raw 
csv for use in additional tooling") +) + +func main() { + flag.Parse() + if *db == "" { + log.Fatalf("must specify a database-type") + } + if *dir == "" { + log.Fatalf("must specify a data-dir") + } + d := strings.TrimPrefix(*dir, "~/") + if d != *dir { + h, err := os.UserHomeDir() + if err != nil { + panic(err) + } + d = h + "/" + d + } + _, err := os.Stat(d) + if err != nil { + panic(err) + } + dbType := dbm.BackendType(*db) + db, err := dbm.NewDB("blockstore", dbType, d) + if err != nil { + panic(err) + } + s := store.NewBlockStore(db) + defer s.Close() + rs, err := report.GenerateFromBlockStore(s) + if err != nil { + panic(err) + } + if *csvOut != "" { + cf, err := os.Create(*csvOut) + if err != nil { + panic(err) + } + w := csv.NewWriter(cf) + err = w.WriteAll(toCSVRecords(rs.List())) + if err != nil { + panic(err) + } + return + } + for _, r := range rs.List() { + fmt.Printf(""+ + "Experiment ID: %s\n\n"+ + "\tConnections: %d\n"+ + "\tRate: %d\n"+ + "\tSize: %d\n\n"+ + "\tTotal Valid Tx: %d\n"+ + "\tTotal Negative Latencies: %d\n"+ + "\tMinimum Latency: %s\n"+ + "\tMaximum Latency: %s\n"+ + "\tAverage Latency: %s\n"+ + "\tStandard Deviation: %s\n\n", r.ID, r.Connections, r.Rate, r.Size, len(r.All), r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev) //nolint:lll + + } + fmt.Printf("Total Invalid Tx: %d\n", rs.ErrorCount()) +} + +func toCSVRecords(rs []report.Report) [][]string { + total := 0 + for _, v := range rs { + total += len(v.All) + } + res := make([][]string, total+1) + + res[0] = []string{"experiment_id", "block_time", "duration_ns", "tx_hash", "connections", "rate", "size"} + offset := 1 + for _, r := range rs { + idStr := r.ID.String() + connStr := strconv.FormatInt(int64(r.Connections), 10) + rateStr := strconv.FormatInt(int64(r.Rate), 10) + sizeStr := strconv.FormatInt(int64(r.Size), 10) + for i, v := range r.All { + res[offset+i] = []string{idStr, strconv.FormatInt(v.BlockTime.UnixNano(), 10), strconv.FormatInt(int64(v.Duration), 10), 
fmt.Sprintf("%X", v.Hash), connStr, rateStr, sizeStr} //nolint: lll + } + offset += len(r.All) + } + return res +} diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go new file mode 100644 index 0000000000..778729f8b9 --- /dev/null +++ b/test/loadtime/payload/payload.go @@ -0,0 +1,101 @@ +package payload + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "fmt" + "math" + + "google.golang.org/protobuf/proto" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const keyPrefix = "a=" +const maxPayloadSize = 4 * 1024 * 1024 + +// NewBytes generates a new payload and returns the encoded representation of +// the payload as a slice of bytes. NewBytes uses the fields on the Options +// to create the payload. +func NewBytes(p *Payload) ([]byte, error) { + p.Padding = make([]byte, 1) + if p.Time == nil { + p.Time = timestamppb.Now() + } + us, err := CalculateUnpaddedSize(p) + if err != nil { + return nil, err + } + if p.Size > maxPayloadSize { + return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size, maxPayloadSize) + } + pSize := int(p.Size) // #nosec -- The "if" above makes this cast safe + if pSize < us { + return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) + } + + // We halve the padding size because we transform the TX to hex + p.Padding = make([]byte, (pSize-us)/2) + _, err = rand.Read(p.Padding) + if err != nil { + return nil, err + } + b, err := proto.Marshal(p) + if err != nil { + return nil, err + } + h := []byte(hex.EncodeToString(b)) + + // prepend a single key so that the kv store only ever stores a single + // transaction instead of storing all tx and ballooning in size. + return append([]byte(keyPrefix), h...), nil +} + +// FromBytes extracts a paylod from the byte representation of the payload. +// FromBytes leaves the padding untouched, returning it to the caller to handle +// or discard per their preference. 
+func FromBytes(b []byte) (*Payload, error) { + trH := bytes.TrimPrefix(b, []byte(keyPrefix)) + if bytes.Equal(b, trH) { + return nil, fmt.Errorf("payload bytes missing key prefix '%s'", keyPrefix) + } + trB, err := hex.DecodeString(string(trH)) + if err != nil { + return nil, err + } + + p := &Payload{} + err = proto.Unmarshal(trB, p) + if err != nil { + return nil, err + } + return p, nil +} + +// MaxUnpaddedSize returns the maximum size that a payload may be if no padding +// is included. +func MaxUnpaddedSize() (int, error) { + p := &Payload{ + Time: timestamppb.Now(), + Connections: math.MaxUint64, + Rate: math.MaxUint64, + Size: math.MaxUint64, + Padding: make([]byte, 1), + } + return CalculateUnpaddedSize(p) +} + +// CalculateUnpaddedSize calculates the size of the passed in payload for the +// purpose of determining how much padding to add to add to reach the target size. +// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1. +func CalculateUnpaddedSize(p *Payload) (int, error) { + if len(p.Padding) != 1 { + return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) + } + b, err := proto.Marshal(p) + if err != nil { + return 0, err + } + h := []byte(hex.EncodeToString(b)) + return len(h) + len(keyPrefix), nil +} diff --git a/test/loadtime/payload/payload.pb.go b/test/loadtime/payload/payload.pb.go new file mode 100644 index 0000000000..765c81d3da --- /dev/null +++ b/test/loadtime/payload/payload.pb.go @@ -0,0 +1,202 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.20.1 +// source: payload/payload.proto + +package payload + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Payload is the structure of the loadtime transaction. Proto has a compact +// encoded representation, making it ideal for the loadtime usecase which aims to +// keep the generated transactions small. +type Payload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Connections uint64 `protobuf:"varint,1,opt,name=connections,proto3" json:"connections,omitempty"` + Rate uint64 `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"` + Id []byte `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` + Padding []byte `protobuf:"bytes,6,opt,name=padding,proto3" json:"padding,omitempty"` +} + +func (x *Payload) Reset() { + *x = Payload{} + if protoimpl.UnsafeEnabled { + mi := &file_payload_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Payload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Payload) ProtoMessage() {} + +func (x *Payload) ProtoReflect() protoreflect.Message { + mi := &file_payload_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Payload.ProtoReflect.Descriptor instead. 
+func (*Payload) Descriptor() ([]byte, []int) { + return file_payload_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *Payload) GetConnections() uint64 { + if x != nil { + return x.Connections + } + return 0 +} + +func (x *Payload) GetRate() uint64 { + if x != nil { + return x.Rate + } + return 0 +} + +func (x *Payload) GetSize() uint64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Payload) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *Payload) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + +func (x *Payload) GetPadding() []byte { + if x != nil { + return x.Padding + } + return nil +} + +var File_payload_payload_proto protoreflect.FileDescriptor + +var file_payload_payload_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x07, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x74, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2f, 0x74, + 0x65, 0x73, 0x74, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_payload_payload_proto_rawDescOnce sync.Once + file_payload_payload_proto_rawDescData = file_payload_payload_proto_rawDesc +) + +func file_payload_payload_proto_rawDescGZIP() []byte { + file_payload_payload_proto_rawDescOnce.Do(func() { + file_payload_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_payload_payload_proto_rawDescData) + }) + return file_payload_payload_proto_rawDescData +} + +var file_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_payload_payload_proto_goTypes = []interface{}{ + (*Payload)(nil), // 0: loadtime.payload.Payload + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_payload_payload_proto_depIdxs = []int32{ + 1, // 0: loadtime.payload.Payload.time:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_payload_payload_proto_init() } +func file_payload_payload_proto_init() { + if File_payload_payload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_payload_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Payload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_payload_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_payload_payload_proto_goTypes, + DependencyIndexes: file_payload_payload_proto_depIdxs, + MessageInfos: file_payload_payload_proto_msgTypes, + }.Build() + File_payload_payload_proto = out.File + file_payload_payload_proto_rawDesc = nil + file_payload_payload_proto_goTypes = nil + file_payload_payload_proto_depIdxs = nil +} diff --git a/test/loadtime/payload/payload.proto b/test/loadtime/payload/payload.proto new file mode 100644 index 0000000000..19075ba512 --- /dev/null +++ b/test/loadtime/payload/payload.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +package loadtime.payload; + +option go_package = "github.com/tendermint/tendermint/test/loadtime/payload"; + +import "google/protobuf/timestamp.proto"; + +// Payload is the structure of the loadtime transaction. Proto has a compact +// encoded representation, making it ideal for the loadtime usecase which aims to +// keep the generated transactions small. 
+message Payload { + uint64 connections = 1; + uint64 rate = 2; + uint64 size = 3; + google.protobuf.Timestamp time = 4; + bytes id = 5; + bytes padding = 6; +} diff --git a/test/loadtime/payload/payload_test.go b/test/loadtime/payload/payload_test.go new file mode 100644 index 0000000000..404144e9ff --- /dev/null +++ b/test/loadtime/payload/payload_test.go @@ -0,0 +1,58 @@ +package payload_test + +import ( + "bytes" + "testing" + + "github.com/google/uuid" + + "github.com/tendermint/tendermint/test/loadtime/payload" +) + +const payloadSizeTarget = 1024 // 1kb + +func TestSize(t *testing.T) { + s, err := payload.MaxUnpaddedSize() + if err != nil { + t.Fatalf("calculating max unpadded size %s", err) + } + if s > payloadSizeTarget { + t.Fatalf("unpadded payload size %d exceeds target %d", s, payloadSizeTarget) + } +} + +func TestRoundTrip(t *testing.T) { + const ( + testConns = 512 + testRate = 4 + ) + testID := [16]byte(uuid.New()) + b, err := payload.NewBytes(&payload.Payload{ + Size: payloadSizeTarget, + Connections: testConns, + Rate: testRate, + Id: testID[:], + }) + if err != nil { + t.Fatalf("generating payload %s", err) + } + if len(b) < payloadSizeTarget { + t.Fatalf("payload size in bytes %d less than expected %d", len(b), payloadSizeTarget) + } + p, err := payload.FromBytes(b) + if err != nil { + t.Fatalf("reading payload %s", err) + } + if p.Size != payloadSizeTarget { + t.Fatalf("payload size value %d does not match expected %d", p.Size, payloadSizeTarget) + } + if p.Connections != testConns { + t.Fatalf("payload connections value %d does not match expected %d", p.Connections, testConns) + } + if p.Rate != testRate { + t.Fatalf("payload rate value %d does not match expected %d", p.Rate, testRate) + } + if !bytes.Equal(p.Id, testID[:]) { + t.Fatalf("payload ID value %d does not match expected %d", p.Id, testID) + } +} diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go new file mode 100644 index 0000000000..787926bc1a --- 
/dev/null +++ b/test/loadtime/report/report.go @@ -0,0 +1,221 @@ +package report + +import ( + "math" + "sync" + "time" + + "github.com/gofrs/uuid" + "gonum.org/v1/gonum/stat" + + "github.com/tendermint/tendermint/test/loadtime/payload" + "github.com/tendermint/tendermint/types" +) + +// BlockStore defines the set of methods needed by the report generator from +// Tendermint's store.Blockstore type. Using an interface allows for tests to +// more easily simulate the required behavior without having to use the more +// complex real API. +type BlockStore interface { + Height() int64 + Base() int64 + LoadBlock(int64) *types.Block +} + +// DataPoint contains the set of data collected for each transaction. +type DataPoint struct { + Duration time.Duration + BlockTime time.Time + Hash []byte +} + +// Report contains the data calculated from reading the timestamped transactions +// of each block found in the blockstore. +type Report struct { + ID uuid.UUID + Rate, Connections, Size uint64 + Max, Min, Avg, StdDev time.Duration + + // NegativeCount is the number of negative durations encountered while + // reading the transaction data. A negative duration means that + // a transaction timestamp was greater than the timestamp of the block it + // was included in and likely indicates an issue with the experimental + // setup. + NegativeCount int + + // All contains all data points gathered from all valid transactions. + // The order of the contents of All is not guaranteed to be match the order of transactions + // in the chain. + All []DataPoint + + // used for calculating average during report creation. + sum int64 +} + +type Reports struct { + s map[uuid.UUID]Report + l []Report + + // errorCount is the number of parsing errors encountered while reading the + // transaction data. Parsing errors may occur if a transaction not generated + // by the payload package is submitted to the chain. 
+ errorCount int +} + +func (rs *Reports) List() []Report { + return rs.l +} + +func (rs *Reports) ErrorCount() int { + return rs.errorCount +} + +func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) { + r, ok := rs.s[id] + if !ok { + r = Report{ + Max: 0, + Min: math.MaxInt64, + ID: id, + Connections: conns, + Rate: rate, + Size: size, + } + rs.s[id] = r + } + r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash}) + if l > r.Max { + r.Max = l + } + if l < r.Min { + r.Min = l + } + if int64(l) < 0 { + r.NegativeCount++ + } + // Using an int64 here makes an assumption about the scale and quantity of the data we are processing. + // If all latencies were 2 seconds, we would need around 4 billion records to overflow this. + // We are therefore assuming that the data does not exceed these bounds. + r.sum += int64(l) + rs.s[id] = r +} + +func (rs *Reports) calculateAll() { + rs.l = make([]Report, 0, len(rs.s)) + for _, r := range rs.s { + if len(r.All) == 0 { + r.Min = 0 + rs.l = append(rs.l, r) + continue + } + r.Avg = time.Duration(r.sum / int64(len(r.All))) + r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil))) + rs.l = append(rs.l, r) + } +} + +func (rs *Reports) addError() { + rs.errorCount++ +} + +// GenerateFromBlockStore creates a Report using the data in the provided +// BlockStore. +func GenerateFromBlockStore(s BlockStore) (*Reports, error) { + type payloadData struct { + id uuid.UUID + l time.Duration + bt time.Time + hash []byte + connections, rate, size uint64 + err error + } + type txData struct { + tx types.Tx + bt time.Time + } + reports := &Reports{ + s: make(map[uuid.UUID]Report), + } + + // Deserializing to proto can be slow but does not depend on other data + // and can therefore be done in parallel. + // Deserializing in parallel does mean that the resulting data is + // not guaranteed to be delivered in the same order it was given to the + // worker pool. 
+ const poolSize = 16 + + txc := make(chan txData) + pdc := make(chan payloadData, poolSize) + + wg := &sync.WaitGroup{} + wg.Add(poolSize) + for i := 0; i < poolSize; i++ { + go func() { + defer wg.Done() + for b := range txc { + p, err := payload.FromBytes(b.tx) + if err != nil { + pdc <- payloadData{err: err} + continue + } + + l := b.bt.Sub(p.Time.AsTime()) + idb := (*[16]byte)(p.Id) + pdc <- payloadData{ + l: l, + bt: b.bt, + hash: b.tx.Hash(), + id: uuid.UUID(*idb), + connections: p.Connections, + rate: p.Rate, + size: p.Size, + } + } + }() + } + go func() { + wg.Wait() + close(pdc) + }() + + go func() { + base, height := s.Base(), s.Height() + prev := s.LoadBlock(base) + for i := base + 1; i < height; i++ { + // Data from two adjacent block are used here simultaneously, + // blocks of height H and H+1. The transactions of the block of + // height H are used with the timestamp from the block of height + // H+1. This is done because the timestamp from H+1 is calculated + // by using the precommits submitted at height H. The timestamp in + // block H+1 represents the time at which block H was committed. + // + // In the (very unlikely) event that the very last block of the + // chain contains payload transactions, those transactions will not + // be used in the latency calculations because the last block whose + // transactions are used is the block one before the last. 
+ cur := s.LoadBlock(i) + for _, tx := range prev.Data.Txs { + txc <- txData{tx: tx, bt: cur.Time} + } + prev = cur + } + close(txc) + }() + for pd := range pdc { + if pd.err != nil { + reports.addError() + continue + } + reports.addDataPoint(pd.id, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size) + } + reports.calculateAll() + return reports, nil +} + +func toFloat(in []DataPoint) []float64 { + r := make([]float64, len(in)) + for i, v := range in { + r[i] = float64(int64(v.Duration)) + } + return r +} diff --git a/test/loadtime/report/report_test.go b/test/loadtime/report/report_test.go new file mode 100644 index 0000000000..ed21c63fba --- /dev/null +++ b/test/loadtime/report/report_test.go @@ -0,0 +1,125 @@ +package report_test + +import ( + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/tendermint/tendermint/test/loadtime/payload" + "github.com/tendermint/tendermint/test/loadtime/report" + "github.com/tendermint/tendermint/types" +) + +type mockBlockStore struct { + base int64 + blocks []*types.Block +} + +func (m *mockBlockStore) Height() int64 { + return m.base + int64(len(m.blocks)) +} + +func (m *mockBlockStore) Base() int64 { + return m.base +} + +func (m *mockBlockStore) LoadBlock(i int64) *types.Block { + return m.blocks[i-m.base] +} + +func TestGenerateReport(t *testing.T) { + t1 := time.Now() + u := [16]byte(uuid.New()) + b1, err := payload.NewBytes(&payload.Payload{ + Id: u[:], + Time: timestamppb.New(t1.Add(-10 * time.Second)), + Size: 1024, + }) + if err != nil { + t.Fatalf("generating payload %s", err) + } + b2, err := payload.NewBytes(&payload.Payload{ + Id: u[:], + Time: timestamppb.New(t1.Add(-4 * time.Second)), + Size: 1024, + }) + if err != nil { + t.Fatalf("generating payload %s", err) + } + b3, err := payload.NewBytes(&payload.Payload{ + Id: u[:], + Time: timestamppb.New(t1.Add(2 * time.Second)), + Size: 1024, + }) + t2 := t1.Add(time.Second) + if err != nil { + 
t.Fatalf("generating payload %s", err) + } + s := &mockBlockStore{ + blocks: []*types.Block{ + { + Data: types.Data{ + Txs: []types.Tx{b1, b2}, + }, + }, + { + // The timestamp from block H+1 is used to calculate the + // latency for the transactions in block H. + Header: types.Header{ + Time: t1, + }, + Data: types.Data{ + Txs: []types.Tx{[]byte("error")}, + }, + }, + { + Data: types.Data{ + Txs: []types.Tx{b3, b3}, + }, + }, + { + Header: types.Header{ + Time: t2, + }, + Data: types.Data{ + Txs: []types.Tx{}, + }, + }, + }, + } + rs, err := report.GenerateFromBlockStore(s) + if err != nil { + t.Fatalf("generating report %s", err) + } + if rs.ErrorCount() != 1 { + t.Fatalf("ErrorCount did not match expected. Expected %d but contained %d", 1, rs.ErrorCount()) + } + rl := rs.List() + if len(rl) != 1 { + t.Fatalf("number of reports did not match expected. Expected %d but contained %d", 1, len(rl)) + } + r := rl[0] + if len(r.All) != 4 { + t.Fatalf("report contained different number of data points from expected. Expected %d but contained %d", 4, len(r.All)) //nolint:lll + } + if r.NegativeCount != 2 { + t.Fatalf("NegativeCount did not match expected. Expected %d but contained %d", 2, r.NegativeCount) + } + if r.Avg != 3*time.Second { + t.Fatalf("Avg did not match expected. Expected %s but contained %s", 3*time.Second, r.Avg) + } + if r.Min != -time.Second { + t.Fatalf("Min did not match expected. Expected %s but contained %s", time.Second, r.Min) + } + if r.Max != 10*time.Second { + t.Fatalf("Max did not match expected. Expected %s but contained %s", 10*time.Second, r.Max) + } + // Verified using online standard deviation calculator: + // https://www.calculator.net/standard-deviation-calculator.html?numberinputs=10%2C+4%2C+-1%2C+-1&ctype=s&x=45&y=12 + expectedStdDev := 5228129047 * time.Nanosecond + if r.StdDev != expectedStdDev { + t.Fatalf("StdDev did not match expected. 
Expected %s but contained %s", expectedStdDev, r.StdDev) + } +} diff --git a/test/maverick/consensus/misbehavior.go b/test/maverick/consensus/misbehavior.go index 2ccfd68f37..b3ce2e0089 100644 --- a/test/maverick/consensus/misbehavior.go +++ b/test/maverick/consensus/misbehavior.go @@ -6,6 +6,8 @@ import ( tmcon "github.com/tendermint/tendermint/consensus" cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -98,9 +100,19 @@ func DoublePrevoteMisbehavior() Misbehavior { // there has to be at least two other peers connected else this behavior works normally for idx, peer := range peers { if idx%2 == 0 { // sign the proposal block - peer.Send(VoteChannel, tmcon.MustEncode(&tmcon.VoteMessage{Vote: prevote})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{ + Vote: prevote.ToProto(), + }, + }, cs.Logger) } else { // sign a nil block - peer.Send(VoteChannel, tmcon.MustEncode(&tmcon.VoteMessage{Vote: nilPrevote})) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{ + Vote: nilPrevote.ToProto(), + }, + }, cs.Logger) } } } diff --git a/test/maverick/consensus/reactor.go b/test/maverick/consensus/reactor.go index bd303a2eef..31c6db16ee 100644 --- a/test/maverick/consensus/reactor.go +++ b/test/maverick/consensus/reactor.go @@ -148,6 +148,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 6, SendQueueCapacity: 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: DataChannel, // maybe split between gossiping current block and catchup stuff @@ -156,6 +157,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { 
SendQueueCapacity: 100, RecvBufferCapacity: 50 * 4096, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteChannel, @@ -163,6 +165,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 100, RecvBufferCapacity: 100 * 100, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, { ID: VoteSetBitsChannel, @@ -170,6 +173,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { SendQueueCapacity: 2, RecvBufferCapacity: 1024, RecvMessageCapacity: maxMsgSize, + MessageType: &tmcons.Message{}, }, } } @@ -223,34 +227,37 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Peer state updates can happen in parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine // NOTE: blocks on consensus state for proposals, block parts, and votes -func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { +func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { if !conR.IsRunning() { - conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID) return } - - msg, err := decodeMsg(msgBytes) + m := e.Message + if wm, ok := m.(p2p.Wrapper); ok { + m = wm.Wrap() + } + msg, err := tmcon.MsgFromProto(m.(*tmcons.Message)) if err != nil { - conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } - if err = msg.ValidateBasic(); err != nil { - conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + if err := msg.ValidateBasic(); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } - 
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) // Get peer states - ps, ok := src.Get(types.PeerStateKey).(*PeerState) + ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState) if !ok { - panic(fmt.Sprintf("Peer %v has no state", src)) + panic(fmt.Sprintf("Peer %v has no state", e.Src)) } - switch chID { + switch e.ChannelID { case StateChannel: switch msg := msg.(type) { case *tmcon.NewRoundStepMessage: @@ -258,8 +265,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { initialHeight := conR.conS.state.InitialHeight conR.conS.mtx.Unlock() if err = msg.ValidateHeight(initialHeight); err != nil { - conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) - conR.Switch.StopPeerForError(src, err) + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(e.Src, err) return } ps.ApplyNewRoundStepMessage(msg) @@ -278,7 +285,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // Peer claims to have a maj23 for some BlockID at H,R,S, err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) if err != nil { - conR.Switch.StopPeerForError(src, err) + conR.Switch.StopPeerForError(e.Src, err) return } // Respond with a VoteSetBitsMessage showing which votes we have. @@ -292,13 +299,21 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { default: panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") } - src.TrySend(VoteSetBitsChannel, tmcon.MustEncode(&tmcon.VoteSetBitsMessage{ + m := &tmcons.VoteSetBits{ Height: msg.Height, Round: msg.Round, Type: msg.Type, - BlockID: msg.BlockID, - Votes: ourVotes, - })) + BlockID: msg.BlockID.ToProto(), + } + v := ourVotes.ToProto() + if v != nil { + m.Votes = *v + } + + p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteSetBitsChannel, + Message: m, + }, conR.Logger) default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -311,13 +326,13 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { switch msg := msg.(type) { case *tmcon.ProposalMessage: ps.SetHasProposal(msg.Proposal) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} case *tmcon.ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) case *tmcon.BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) - conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -337,7 +352,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(msg.Vote) - cs.peerMsgQueue <- msgInfo{msg, src.ID()} + cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: // don't punish (leave room for soft upgrades) @@ -376,10 +391,27 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } default: - conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID)) } } +func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { + msg := 
&tmcons.Message{} + err := proto.Unmarshal(msgBytes, msg) + if err != nil { + panic(err) + } + um, err := msg.Unwrap() + if err != nil { + panic(err) + } + conR.ReceiveEnvelope(p2p.Envelope{ + ChannelID: chID, + Src: peer, + Message: um, + }) +} + // SetEventBus sets event bus. func (conR *Reactor) SetEventBus(b *types.EventBus) { conR.eventBus = b @@ -429,30 +461,43 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() { } func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { - nrsMsg := makeRoundStepMessage(rs) - conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(nrsMsg)) + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.NewRoundStep{ + Height: rs.Height, + Round: rs.Round, + Step: uint32(rs.Step), + SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), + LastCommitRound: rs.LastCommit.GetRound(), + }, + }) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { - csMsg := &tmcon.NewValidBlockMessage{ - Height: rs.Height, - Round: rs.Round, - BlockPartSetHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), - IsCommit: rs.Step == cstypes.RoundStepCommit, - } - conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(csMsg)) + psh := rs.ProposalBlockParts.Header() + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.NewValidBlock{ + Height: rs.Height, + Round: rs.Round, + BlockPartSetHeader: psh.ToProto(), + BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), + IsCommit: rs.Step == cstypes.RoundStepCommit, + }, + }) } // Broadcasts HasVoteMessage to peers that care. 
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { - msg := &tmcon.HasVoteMessage{ - Height: vote.Height, - Round: vote.Round, - Type: vote.Type, - Index: vote.ValidatorIndex, - } - conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(msg)) + conR.Switch.BroadcastEnvelope(p2p.Envelope{ + ChannelID: StateChannel, + Message: &tmcons.HasVote{ + Height: vote.Height, + Round: vote.Round, + Type: vote.Type, + Index: vote.ValidatorIndex, + }, + }) /* // TODO: Make this broadcast more selective. for _, peer := range conR.Switch.Peers().List() { @@ -473,21 +518,18 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { */ } -func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcon.NewRoundStepMessage) { - nrsMsg = &tmcon.NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, - SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), - LastCommitRound: rs.LastCommit.GetRound(), - } - return -} - func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.conS.GetRoundState() - nrsMsg := makeRoundStepMessage(rs) - peer.Send(StateChannel, tmcon.MustEncode(nrsMsg)) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.NewRoundStep{ + Height: rs.Height, + Round: rs.Round, + Step: uint32(rs.Step), + SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), + LastCommitRound: rs.LastCommit.GetRound(), + }, + }, conR.Logger) } func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { @@ -507,13 +549,19 @@ OUTER_LOOP: if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { part := rs.ProposalBlockParts.GetPart(index) - msg := &tmcon.BlockPartMessage{ - Height: rs.Height, // This tells peer that this part applies to us. - Round: rs.Round, // This tells peer that this part applies to us. 
- Part: part, - } logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, tmcon.MustEncode(msg)) { + p, err := part.ToProto() + if err != nil { + panic(err) + } + if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: rs.Height, // This tells peer that this part applies to us. + Round: rs.Round, // This tells peer that this part applies to us. + Part: *p, + }, + }, logger) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -556,9 +604,12 @@ OUTER_LOOP: if rs.Proposal != nil && !prs.Proposal { // Proposal: share the proposal metadata with peer. { - msg := &tmcon.ProposalMessage{Proposal: rs.Proposal} + msg := &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()} logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, tmcon.MustEncode(msg)) { + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: msg, + }, logger) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) } @@ -568,13 +619,16 @@ OUTER_LOOP: // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). 
if 0 <= rs.Proposal.POLRound { - msg := &tmcon.ProposalPOLMessage{ + msg := &tmcons.ProposalPOL{ Height: rs.Height, - ProposalPOLRound: rs.Proposal.POLRound, - ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), + ProposalPolRound: rs.Proposal.POLRound, + ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), } logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(DataChannel, tmcon.MustEncode(msg)) + p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: msg, + }, logger) } continue OUTER_LOOP } @@ -611,13 +665,21 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt return } // Send the part - msg := &tmcon.BlockPartMessage{ - Height: prs.Height, // Not our height, so it doesn't matter. - Round: prs.Round, // Not our height, so it doesn't matter. - Part: part, + + pp, err := part.ToProto() + if err != nil { + logger.Error("Could not convert part to proto", "index", index, "error", err) + return } logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - if peer.Send(DataChannel, tmcon.MustEncode(msg)) { + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &tmcons.BlockPart{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. 
+ Part: *pp, + }, + }, logger) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } else { logger.Debug("Sending block part for catchup failed") @@ -774,12 +836,16 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }}, logger) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -791,12 +857,14 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrecommitType, - BlockID: maj23, - })) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23.ToProto(), + }}, logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -808,12 +876,14 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.ProposalPOLRound, - Type: tmproto.PrevoteType, - BlockID: maj23, - })) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.ProposalPOLRound, + Type: 
tmproto.PrevoteType, + BlockID: maj23.ToProto(), + }}, logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -828,12 +898,14 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round, - Type: tmproto.PrecommitType, - BlockID: commit.BlockID, - })) + p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: commit.Round, + Type: tmproto.PrecommitType, + BlockID: commit.BlockID.ToProto(), + }}, logger) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -1047,9 +1119,11 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in // Returns true if vote was sent. 
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { if vote, ok := ps.PickVoteToSend(votes); ok { - msg := &tmcon.VoteMessage{Vote: vote} ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - if ps.peer.Send(VoteChannel, tmcon.MustEncode(msg)) { + if p2p.TrySendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteChannel, + Message: &tmcons.Vote{Vote: vote.ToProto()}, + }, ps.logger) { ps.SetHasVote(vote) return true } @@ -1408,12 +1482,3 @@ func (ps *PeerState) StringIndented(indent string) string { // tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") // tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") // } - -func decodeMsg(bz []byte) (msg tmcon.Message, err error) { - pb := &tmcons.Message{} - if err = proto.Unmarshal(bz, pb); err != nil { - return msg, err - } - - return tmcon.MsgFromProto(pb) -} diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go index 2dbc5cf37a..9e4f2e17b6 100644 --- a/test/maverick/consensus/replay_file.go +++ b/test/maverick/consensus/replay_file.go @@ -298,7 +298,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo if err != nil { tmos.Exit(err.Error()) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go index 99bee4b474..c7bdbbd3f6 100644 --- a/test/maverick/consensus/state.go +++ b/test/maverick/consensus/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "reflect" "runtime/debug" @@ -236,7 +236,9 @@ func (cs *State) handleMsg(mi msgInfo) { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): -// after 
enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool func (cs *State) enterPropose(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) @@ -348,12 +350,12 @@ func (cs *State) enterPrecommit(height int64, round int32) { } else { defaultEnterPrecommit(cs, height, round) } - } func (cs *State) addVote( vote *types.Vote, - peerID p2p.ID) (added bool, err error) { + peerID p2p.ID, +) (added bool, err error) { cs.Logger.Debug( "addVote", "voteHeight", @@ -446,9 +448,7 @@ var ( //----------------------------------------------------------------------------- -var ( - msgQueueSize = 1000 -) +var msgQueueSize = 1000 // msgs from the reactor which may update the state type msgInfo struct { @@ -730,7 +730,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // SetProposal inputs a proposal. func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&tmcon.ProposalMessage{Proposal: proposal}, ""} } else { @@ -743,7 +742,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { // AddProposalBlockPart inputs a part of the proposal block. 
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&tmcon.BlockPartMessage{Height: height, Round: round, Part: part}, ""} } else { @@ -1071,7 +1069,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { default: panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step)) } - } func (cs *State) handleTxsAvailable() { @@ -1103,7 +1100,9 @@ func (cs *State) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) @@ -1249,7 +1248,6 @@ func (cs *State) isProposalComplete() bool { } // if this is false the proposer is lying or we haven't received the POL yet return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() - } // Create the next block to propose and return it. Returns nil block upon error. 
@@ -1712,12 +1710,12 @@ func (cs *State) addProposalBlockPart(msg *tmcon.BlockPartMessage, peerID p2p.ID ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } - var pbb = new(tmproto.Block) + pbb := new(tmproto.Block) err = proto.Unmarshal(bz, pbb) if err != nil { return added, err @@ -1775,7 +1773,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. // If it's otherwise invalid, punish peer. - // nolint: gocritic + //nolint: gocritic if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { if cs.privValidatorPubKey == nil { return false, errPubKeyIsNotSet @@ -1849,10 +1847,11 @@ func (cs *State) voteTime() time.Time { now := tmtime.Now() minVoteTime := now // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, - // even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/. + // even if cs.LockedBlock != nil. See https://github.com/tendermint/tendermint/tree/v0.34.x/spec/. 
timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond if cs.LockedBlock != nil { - // See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html + // See the BFT time spec + // https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md minVoteTime = cs.LockedBlock.Time.Add(timeIota) } else if cs.ProposalBlock != nil { minVoteTime = cs.ProposalBlock.Time.Add(timeIota) diff --git a/test/maverick/consensus/wal_generator.go b/test/maverick/consensus/wal_generator.go index 6997db14e1..4e3cc5924e 100644 --- a/test/maverick/consensus/wal_generator.go +++ b/test/maverick/consensus/wal_generator.go @@ -49,7 +49,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := sm.MakeGenesisState(genDoc) if err != nil { return fmt.Errorf("failed to make genesis state: %w", err) diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 919554c6d8..1e178adb0a 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -7,7 +7,7 @@ import ( "fmt" "net" "net/http" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port + _ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port "strconv" "strings" "time" @@ -62,7 +62,7 @@ import ( // a map of misbehaviors to be executed by the maverick node func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) { // check if string is empty in which case we run a normal node - var misbehaviors = make(map[int64]cs.Misbehavior) + misbehaviors := make(map[int64]cs.Misbehavior) if str == "" { return misbehaviors, nil } @@ -97,6 +97,8 @@ type DBContext struct { // DBProvider takes a DBContext and returns an instantiated DB. 
type DBProvider func(*DBContext) (dbm.DB, error) +const readHeaderTimeout = 10 * time.Second + // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { @@ -139,7 +141,6 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int6 logger, misbehaviors, ) - } // MetricsProvider returns a consensus, p2p and mempool Metrics. @@ -174,12 +175,12 @@ type fastSyncReactor interface { // WARNING: using any name from the below list of the existing reactors will // result in replacing it with the custom one. // -// - MEMPOOL -// - BLOCKCHAIN -// - CONSENSUS -// - EVIDENCE -// - PEX -// - STATESYNC +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -298,7 +299,6 @@ func createAndStartIndexerService( eventBus *types.EventBus, logger log.Logger, ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { - var ( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer @@ -318,7 +318,7 @@ func createAndStartIndexerService( blockIndexer = &blockidxnull.BlockerIndexer{} } - indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus) + indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false) indexerService.SetLogger(logger.With("module", "txindex")) if err := indexerService.Start(); err != nil { @@ -335,8 +335,8 @@ func doHandshake( genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, - consensusLogger log.Logger) error { - + consensusLogger log.Logger, +) error { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) @@ -380,8 +380,8 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { } func 
createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, - state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (p2p.Reactor, mempl.Mempool) { - + state sm.State, memplMetrics *mempl.Metrics, logger log.Logger, +) (p2p.Reactor, mempl.Mempool) { switch config.Mempool.Version { case cfg.MempoolV1: mp := mempoolv1.NewTxMempool( @@ -433,14 +433,17 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, } func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { - + stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger, +) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { return nil, nil, err } + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }) evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) if err != nil { return nil, nil, err } @@ -454,8 +457,8 @@ func createBlockchainReactor(config *cfg.Config, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, fastSync bool, - logger log.Logger) (bcReactor p2p.Reactor, err error) { - + logger log.Logger, +) (bcReactor p2p.Reactor, err error) { switch config.FastSync.Version { case "v0": bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) @@ -482,8 +485,8 @@ func createConsensusReactor(config *cfg.Config, waitSync bool, eventBus *types.EventBus, consensusLogger log.Logger, - misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) { - + misbehaviors map[int64]cs.Misbehavior, +) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, state.Copy(), @@ -586,8 +589,8 @@ func 
createSwitch(config *cfg.Config, evidenceReactor *evidence.Reactor, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, - p2pLogger log.Logger) *p2p.Switch { - + p2pLogger log.Logger, +) *p2p.Switch { sw := p2p.NewSwitch( config.P2P, transport, @@ -609,8 +612,8 @@ func createSwitch(config *cfg.Config, } func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) { - + p2pLogger log.Logger, nodeKey *p2p.NodeKey, +) (pex.AddrBook, error) { addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) @@ -636,8 +639,8 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, } func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - + sw *p2p.Switch, logger log.Logger, +) *pex.Reactor { // TODO persistent peers ? so we can have their DNS addrs saved pexReactor := pex.NewReactor(addrBook, &pex.ReactorConfig{ @@ -659,7 +662,8 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, // startStateSync starts an asynchronous state sync process, then switches to fast sync mode. 
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, - stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { + stateStore sm.Store, blockStore *store.BlockStore, state sm.State, +) error { ssR.Logger.Info("Starting state sync") if stateProvider == nil { @@ -722,14 +726,16 @@ func NewNode(config *cfg.Config, metricsProvider MetricsProvider, logger log.Logger, misbehaviors map[int64]cs.Misbehavior, - options ...Option) (*Node, error) { - + options ...Option, +) (*Node, error) { blockStore, stateDB, err := initDBs(config, dbProvider) if err != nil { return nil, err } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { @@ -901,6 +907,7 @@ func NewNode(config *cfg.Config, if config.RPC.PprofListenAddress != "" { go func() { logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + //nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) }() } @@ -1215,6 +1222,7 @@ func (n *Node) startPrometheusServer(addr string) *http.Server { promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, ), ), + ReadHeaderTimeout: readHeaderTimeout, } go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { @@ -1373,9 +1381,7 @@ func makeNodeInfo( //------------------------------------------------------------------------------ -var ( - genesisDocKey = []byte("genesisDoc") -) +var genesisDocKey = []byte("genesisDoc") // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the // database, or creates one using the given genesisDocProvider and persists the @@ 
-1396,7 +1402,9 @@ func LoadStateFromDBOrGenesisDocProvider( // was changed, accidentally or not). Also good for audit trail. saveGenesisDoc(stateDB, genDoc) } - stateStore := sm.NewStore(stateDB) + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err diff --git a/test/maverick/node/privval.go b/test/maverick/node/privval.go index 441b6ca9da..1879e76c73 100644 --- a/test/maverick/node/privval.go +++ b/test/maverick/node/privval.go @@ -3,7 +3,7 @@ package node import ( "errors" "fmt" - "io/ioutil" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -62,11 +62,10 @@ func (pvKey FilePVKey) Save() { if err != nil { panic(err) } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600) if err != nil { panic(err) } - } //------------------------------------------------------------------------------- @@ -90,7 +89,6 @@ type FilePVLastSignState struct { // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { - if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) } @@ -133,7 +131,7 @@ func (lss *FilePVLastSignState) Save() { if err != nil { panic(err) } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0o600) if err != nil { panic(err) } @@ -185,7 +183,7 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV { // If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. 
func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { tmos.Exit(err.Error()) } @@ -203,7 +201,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { tmos.Exit(err.Error()) } @@ -347,8 +345,8 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error // Persist height/round/step and signature func (pv *FilePV) saveSigned(height int64, round int32, step int8, - signBytes []byte, sig []byte) { - + signBytes []byte, sig []byte, +) { pv.LastSignState.Height = height pv.LastSignState.Round = round pv.LastSignState.Step = step diff --git a/tools/README.md b/tools/README.md index 44bd0691d7..0acd88682f 100644 --- a/tools/README.md +++ b/tools/README.md @@ -2,4 +2,4 @@ Tools for working with Tendermint and associated technologies. Documentation for these tools can be found online in the [Tendermint tools -documentation](https://docs.tendermint.com/master/tools/). +documentation](https://docs.tendermint.com/v0.34/tools/). 
diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index cf22bc8362..85a589185f 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -2,7 +2,6 @@ package internal import ( "fmt" - "io/ioutil" "os" "testing" "time" @@ -187,7 +186,7 @@ func cleanup(cfg TestHarnessConfig) { } func makeTempFile(name, content string) string { - tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name)) + tempFile, err := os.CreateTemp("", fmt.Sprintf("%s-*", name)) if err != nil { panic(err) } diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go index d624234ae0..3bb8ef3b15 100644 --- a/tools/tm-signer-harness/main.go +++ b/tools/tm-signer-harness/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -136,7 +135,7 @@ func extractKey(tmhome, outputPath string) { stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json") fpv := privval.LoadFilePV(keyFile, stateFile) pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey)) - if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { + if err := os.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0o600); err != nil { logger.Info("Failed to write private key", "output", outputPath, "err", err) os.Exit(1) } diff --git a/types/block.go b/types/block.go index f65bc1a25c..6ad0aa8e36 100644 --- a/types/block.go +++ b/types/block.go @@ -338,7 +338,7 @@ func MakeBlock( // NOTE: changes to the Header should be duplicated in: // - header.Hash() // - abci.Header -// - https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md +// - https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md type Header struct { // basic block info Version tmversion.Consensus `json:"version"` diff --git a/types/block_test.go 
b/types/block_test.go index a7f7e1571b..f819ea2242 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -195,6 +195,11 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var nilBytes []byte +// This follows RFC-6962, i.e. `echo -n ” | sha256sum` +var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, + 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55} + func TestNilHeaderHashDoesntCrash(t *testing.T) { assert.Equal(t, nilBytes, []byte((*Header)(nil).Hash())) assert.Equal(t, nilBytes, []byte((new(Header)).Hash())) diff --git a/types/event_bus.go b/types/event_bus.go index 2506efa83b..ed9dd02fc7 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -23,7 +23,7 @@ type EventBusSubscriber interface { type Subscription interface { Out() <-chan tmpubsub.Message - Cancelled() <-chan struct{} // nolint: misspell + Cancelled() <-chan struct{} Err() error } diff --git a/types/genesis.go b/types/genesis.go index 20fc79721f..76873014c9 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -5,7 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/tendermint/tendermint/crypto" @@ -52,7 +52,7 @@ func (genDoc *GenesisDoc) SaveAs(file string) error { if err != nil { return err } - return tmos.WriteFile(file, genDocBytes, 0644) + return tmos.WriteFile(file, genDocBytes, 0o644) } // ValidatorHash returns the hash of the validator set contained in the GenesisDoc @@ -126,7 +126,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { // GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc. 
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { - jsonBlob, err := ioutil.ReadFile(genDocFile) + jsonBlob, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } diff --git a/types/genesis_test.go b/types/genesis_test.go index bfb122e646..213b1330f2 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -1,7 +1,6 @@ package types import ( - "io/ioutil" "os" "testing" @@ -122,7 +121,7 @@ func TestGenesisGood(t *testing.T) { } func TestGenesisSaveAs(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "genesis") + tmpfile, err := os.CreateTemp("", "genesis") require.NoError(t, err) defer os.Remove(tmpfile.Name()) diff --git a/types/part_set_test.go b/types/part_set_test.go index c6ea0f4525..c33ac0c7af 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/assert" @@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) { // Reconstruct data, assert that they are equal. 
data2Reader := partSet2.GetReader() - data2, err := ioutil.ReadAll(data2Reader) + data2, err := io.ReadAll(data2Reader) require.NoError(t, err) assert.Equal(t, data, data2) @@ -145,8 +145,10 @@ func TestParSetHeaderProtoBuf(t *testing.T) { expPass bool }{ {"success empty", &PartSetHeader{}, true}, - {"success", - &PartSetHeader{Total: 1, Hash: []byte("hash")}, true}, + { + "success", + &PartSetHeader{Total: 1, Hash: []byte("hash")}, true, + }, } for _, tc := range testCases { @@ -162,7 +164,6 @@ func TestParSetHeaderProtoBuf(t *testing.T) { } func TestPartProtoBuf(t *testing.T) { - proof := merkle.Proof{ Total: 1, Index: 1, @@ -175,8 +176,10 @@ func TestPartProtoBuf(t *testing.T) { }{ {"failure empty", &Part{}, false}, {"failure nil", nil, false}, - {"success", - &Part{Index: 1, Bytes: tmrand.Bytes(32), Proof: proof}, true}, + { + "success", + &Part{Index: 1, Bytes: tmrand.Bytes(32), Proof: proof}, true, + }, } for _, tc := range testCases { diff --git a/types/utils.go b/types/utils.go index cec47e2028..60e82fe3fd 100644 --- a/types/utils.go +++ b/types/utils.go @@ -4,9 +4,9 @@ import "reflect" // Go lacks a simple and safe way to see if something is a typed nil. // See: -// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 -// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion -// - https://github.com/golang/go/issues/21538 +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 func isTypedNil(o interface{}) bool { rv := reflect.ValueOf(o) switch rv.Kind() { diff --git a/types/validator_set.go b/types/validator_set.go index 5b2ec85a50..39a004b0b6 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -411,14 +411,17 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e // // Inputs: // updates - a list of proper validator changes, i.e. 
they have been verified by processChanges for duplicates -// and invalid values. +// +// and invalid values. +// // vals - the original validator set. Note that vals is NOT modified by this function. // removedPower - the total voting power that will be removed after the updates are verified and applied. // // Returns: // tvpAfterUpdatesBeforeRemovals - the new total voting power if these updates would be applied without the removals. -// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and -// validators are added/ updated with high power values. +// +// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and +// validators are added/ updated with high power values. // // err - non-nil if the maximum allowed total voting power would be exceeded func verifyUpdates( @@ -467,8 +470,9 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int { // 'updates' parameter must be a list of unique validators to be added or updated. // // 'updatedTotalVotingPower' is the total voting power of a set where all updates would be applied but -// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to -// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8. +// +// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to +// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8. // // No changes are made to the validator set 'vals'. func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { @@ -638,14 +642,15 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // UpdateWithChangeSet attempts to update the validator set with 'changes'. 
// It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities +// across old and newly added validators are fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// // If an error is detected during verification steps, it is returned and the validator set // is not changed. 
func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 67a9a96ea1..d9283001cc 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1641,7 +1641,7 @@ func TestValidatorSetProtoBuf(t *testing.T) { } } -//--------------------- +// --------------------- // Sort validators by priority and address type validatorsByPriority []*Validator @@ -1663,8 +1663,6 @@ func (valz validatorsByPriority) Swap(i, j int) { valz[i], valz[j] = valz[j], valz[i] } -//------------------------------------- - type testValsByVotingPower []testVal func (tvals testValsByVotingPower) Len() int { @@ -1682,9 +1680,8 @@ func (tvals testValsByVotingPower) Swap(i, j int) { tvals[i], tvals[j] = tvals[j], tvals[i] } -//------------------------------------- +// ------------------------------------- // Benchmark tests -// func BenchmarkUpdates(b *testing.B) { const ( n = 100 diff --git a/types/vote_set.go b/types/vote_set.go index abdc18e61f..9686a580ea 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -25,38 +25,38 @@ const ( type P2PID string /* - VoteSet helps collect signatures from validators at each height+round for a - predefined vote type. +VoteSet helps collect signatures from validators at each height+round for a +predefined vote type. - We need VoteSet to be able to keep track of conflicting votes when validators - double-sign. Yet, we can't keep track of *all* the votes seen, as that could - be a DoS attack vector. +We need VoteSet to be able to keep track of conflicting votes when validators +double-sign. Yet, we can't keep track of *all* the votes seen, as that could +be a DoS attack vector. - There are two storage areas for votes. - 1. voteSet.votes - 2. voteSet.votesByBlock +There are two storage areas for votes. +1. voteSet.votes +2. voteSet.votesByBlock - `.votes` is the "canonical" list of votes. 
It always has at least one vote, - if a vote from a validator had been seen at all. Usually it keeps track of - the first vote seen, but when a 2/3 majority is found, votes for that get - priority and are copied over from `.votesByBlock`. +`.votes` is the "canonical" list of votes. It always has at least one vote, +if a vote from a validator had been seen at all. Usually it keeps track of +the first vote seen, but when a 2/3 majority is found, votes for that get +priority and are copied over from `.votesByBlock`. - `.votesByBlock` keeps track of a list of votes for a particular block. There - are two ways a &blockVotes{} gets created in `.votesByBlock`. - 1. the first vote seen by a validator was for the particular block. - 2. a peer claims to have seen 2/3 majority for the particular block. +`.votesByBlock` keeps track of a list of votes for a particular block. There +are two ways a &blockVotes{} gets created in `.votesByBlock`. +1. the first vote seen by a validator was for the particular block. +2. a peer claims to have seen 2/3 majority for the particular block. - Since the first vote from a validator will always get added in `.votesByBlock` - , all votes in `.votes` will have a corresponding entry in `.votesByBlock`. +Since the first vote from a validator will always get added in `.votesByBlock` +, all votes in `.votes` will have a corresponding entry in `.votesByBlock`. - When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its - votes are copied into `.votes`. +When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its +votes are copied into `.votes`. - All this is memory bounded because conflicting votes only get added if a peer - told us to track that block, each peer only gets to tell us 1 such block, and, - there's only a limited number of peers. 
+All this is memory bounded because conflicting votes only get added if a peer +told us to track that block, each peer only gets to tell us 1 such block, and, +there's only a limited number of peers. - NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. +NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. */ type VoteSet struct { chainID string @@ -133,8 +133,10 @@ func (voteSet *VoteSet) Size() int { // Returns added=true if vote is valid and new. // Otherwise returns err=ErrVote[ -// UnexpectedStep | InvalidIndex | InvalidAddress | -// InvalidSignature | InvalidBlockHash | ConflictingVotes ] +// +// UnexpectedStep | InvalidIndex | InvalidAddress | +// InvalidSignature | InvalidBlockHash | ConflictingVotes ] +// // Duplicate votes return added=false, err=nil. // Conflicting votes return added=*, err=ErrVoteConflictingVotes. // NOTE: vote should not be mutated after adding. @@ -636,10 +638,10 @@ func (voteSet *VoteSet) MakeCommit() *Commit { //-------------------------------------------------------------------------------- /* - Votes for a particular block - There are two ways a *blockVotes gets created for a blockKey. - 1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) - 2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) +Votes for a particular block +There are two ways a *blockVotes gets created for a blockKey. +1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) +2. 
A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) */ type blockVotes struct { peerMaj23 bool // peer claims to have maj23 diff --git a/version/version.go b/version/version.go index 79295b5a70..cf0fb61eef 100644 --- a/version/version.go +++ b/version/version.go @@ -1,13 +1,11 @@ package version -var ( - TMCoreSemVer = TMVersionDefault -) +var TMCoreSemVer = TMVersionDefault const ( // TMVersionDefault is the used as the fallback version of Tendermint Core // when not using git describe. It is formatted with semantic versioning. - TMVersionDefault = "0.34.20" + TMVersionDefault = "0.34.23" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.17.0"