diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 4a3b89074d..e8f5a600bc 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -12,7 +12,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Create a file with all the pkgs run: go list ./... > pkgs.txt - name: Split pkgs into 4 files @@ -47,7 +47,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -70,7 +70,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -99,7 +99,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a9ead295e2..a82ee0d8b5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Prepare id: prep run: | diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index d43bff12f2..4b6fc8c935 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 with: ref: 'v0.34.x' diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index b3acdf62be..14662bbea7 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 with: ref: 'v0.35.x' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index da8b07d70e..0d5dbb830c 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 134ae979c9..e061afc5e1 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 38ca6896d6..9e67aed530 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -17,7 +17,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: Install go-fuzz working-directory: test/fuzz diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 0e358af6e4..2266e5ceba 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 with: 
repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index c97b22cd19..14998c1368 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3e257e47c5..948a10951b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 953170c59f..afb04132dc 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 - name: Lint Code Base uses: docker://github/super-linter:v3 env: diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index 43a1972ecf..ed73f07d7b 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -11,13 +11,13 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: lint run: make proto-lint proto-breakage: runs-on: ubuntu-latest timeout-minutes: 4 steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - name: check-breakage run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 567a607cab..dcf4f957a8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.3.5 with: fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index abb1e848eb..176a686aae 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -50,7 +50,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | @@ -81,7 +81,7 @@ jobs: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.3.5 - uses: technote-space/get-diff-action@v5 with: PATTERNS: | diff --git a/.gitignore b/.gitignore index 7f412d4612..38a280c64b 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,10 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +proto/tendermint/blocksync/types.proto +proto/tendermint/consensus/types.proto +proto/tendermint/mempool/*.proto +proto/tendermint/p2p/*.proto +proto/tendermint/statesync/*.proto +proto/tendermint/types/*.proto +proto/tendermint/version/*.proto diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9d8148783c..33d2c89c20 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -15,6 +15,7 @@ Special thanks to external contributors 
on this release: - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair). - Apps + - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository. - P2P Protocol diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16bef07ccf..e4613f84e2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,7 +109,7 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. -We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. +We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`. ### Visual Studio Code diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index f8f36e2fdc..b7b890af8a 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1908,7 +1908,7 @@ type ResponseCheckTx struct { Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } @@ -8093,7 +8093,10 @@ func (m *Request) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8175,7 +8178,10 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8225,7 +8231,10 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8377,7 +8386,10 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8615,7 +8627,10 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8770,7 +8785,10 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -8954,7 +8972,10 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9057,7 +9078,10 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9141,7 +9165,10 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9210,7 +9237,10 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9260,7 +9290,10 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9310,7 +9343,10 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if 
(iNdEx + skippy) > l { @@ -9430,7 +9466,10 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9537,7 +9576,10 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -9672,7 +9714,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10364,7 +10409,10 @@ func (m *Response) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10446,7 +10494,10 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10528,7 +10579,10 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10578,7 +10632,10 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10764,7 +10821,10 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -10918,7 +10978,10 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11225,7 +11288,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11309,7 +11375,10 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11663,7 +11732,10 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -11934,7 +12006,10 @@ func 
(m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12088,7 +12163,10 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12191,7 +12269,10 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12275,7 +12356,10 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12344,7 +12428,10 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12428,7 +12515,10 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12605,7 +12695,10 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12826,7 +12919,10 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -12942,7 +13038,10 @@ func (m *Event) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13076,7 +13175,10 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13231,7 +13333,10 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13334,7 +13439,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13436,7 +13544,10 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { 
if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13539,7 +13650,10 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13712,7 +13826,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -13887,7 +14004,10 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/config/config.go b/config/config.go index a9b2576fd5..da1f56c345 100644 --- a/config/config.go +++ b/config/config.go @@ -639,6 +639,18 @@ type P2PConfig struct { //nolint: maligned // Toggle to disable guard against peers connecting from the same ip. AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` + + // Maximum size of a message packet payload, in bytes + MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` + + // Rate at which packets can be sent, in bytes/second + SendRate int64 `mapstructure:"send-rate"` + + // Rate at which packets can be received, in bytes/second + RecvRate int64 `mapstructure:"recv-rate"` + // Peer connection configuration. HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` @@ -661,13 +673,40 @@ func DefaultP2PConfig() *P2PConfig { UPNP: false, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PexReactor: true, - AllowDuplicateIP: false, - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + FlushThrottleTimeout: 100 * time.Millisecond, + // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. + // The IP header and the TCP header take up 20 bytes each at least (unless + // optional header fields are used) and thus the max for (non-Jumbo frame) + // Ethernet is 1500 - 20 -20 = 1460 + // Source: https://stackoverflow.com/a/3074427/820520 + MaxPacketMsgPayloadSize: 1400, + SendRate: 5120000, // 5 mB/s + RecvRate: 5120000, // 5 mB/s + PexReactor: true, + AllowDuplicateIP: false, + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + QueueType: "priority", + } +} + +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *P2PConfig) ValidateBasic() error { + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush-throttle-timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max-packet-msg-payload-size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send-rate can't be negative") } + if cfg.RecvRate < 0 { + return errors.New("recv-rate can't be negative") + } + return nil } // TestP2PConfig returns a configuration for testing the peer-to-peer layer @@ -675,6 +714,8 @@ func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" cfg.AllowDuplicateIP = true + cfg.FlushThrottleTimeout = 10 * time.Millisecond + return cfg } diff --git a/config/config_test.go b/config/config_test.go index 1813144924..08a77d0328 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -159,3 +159,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} diff --git a/config/toml.go b/config/toml.go index 3be385060c..9c7d012f0b 100644 --- a/config/toml.go +++ b/config/toml.go @@ -300,6 +300,23 @@ allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" +# Time to wait before flushing messages out on the connection +# TODO: Remove once MConnConnection is removed. +flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once MConnConnection is removed. +max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once MConnConnection is removed. +send-rate = {{ .P2P.SendRate }} + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once MConnConnection is removed. +recv-rate = {{ .P2P.RecvRate }} + + ####################################################### ### Mempool Configuration Option ### ####################################################### diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index f83349db28..c4c54a514f 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -5,8 +5,6 @@ parent: order: 5 --- -# Overview - This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) @@ -17,5 +15,8 @@ This section dives into the internals of Go-Tendermint. - [State Sync](./state-sync.md) - [Mempool](./mempool.md) - [Light Client](./light-client.md) +- [Consensus](./consensus/README.md) +- [Pex](./pex/README.md) +- [Evidence](./evidence/README.md) -For full specifications refer to the [spec repo](https://github.com/tendermint/spec). \ No newline at end of file +For full specifications refer to the [spec repo](https://github.com/tendermint/spec). 
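Below is a minimal sketch of how the new `P2PConfig` fields and the `ValidateBasic` method introduced in `config/config.go` above might be exercised. It assumes the standard `github.com/tendermint/tendermint/config` import path; the defaults and error messages follow the hunk shown earlier, and the snippet is illustrative rather than part of the patch.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Defaults introduced above: 100ms flush throttle, 1400-byte packet
	// payloads, and 5120000 bytes/s send and receive rates.
	cfg := config.DefaultP2PConfig()
	if err := cfg.ValidateBasic(); err != nil {
		fmt.Println("unexpected error:", err)
	}

	// Negative values are rejected by the new bounds checks.
	cfg.SendRate = -1
	fmt.Println(cfg.ValidateBasic()) // send-rate can't be negative
}
```

As the diff shows, the new check only bounds-checks the four fields added in this change.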
diff --git a/docs/tendermint-core/block-sync.md b/docs/tendermint-core/block-sync/README.md similarity index 93% rename from docs/tendermint-core/block-sync.md rename to docs/tendermint-core/block-sync/README.md index 43e849fcc0..3ffb0953d2 100644 --- a/docs/tendermint-core/block-sync.md +++ b/docs/tendermint-core/block-sync/README.md @@ -1,7 +1,11 @@ --- -order: 10 +order: 1 +parent: + title: Block Sync + order: 6 --- + # Block Sync *Formerly known as Fast Sync* @@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height The user can query the events by subscribing `EventQueryBlockSyncStatus` Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. + +## Implementation + +To read more about the implementation, please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md) diff --git a/docs/tendermint-core/block-sync/img/bc-reactor-routines.png b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png new file mode 100644 index 0000000000..3f574a79b1 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png differ diff --git a/docs/tendermint-core/block-sync/img/bc-reactor.png b/docs/tendermint-core/block-sync/img/bc-reactor.png new file mode 100644 index 0000000000..f7fe0f8193 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor.png differ diff --git a/docs/tendermint-core/block-sync/implementation.md b/docs/tendermint-core/block-sync/implementation.md new file mode 100644 index 0000000000..59274782cd --- /dev/null +++ b/docs/tendermint-core/block-sync/implementation.md @@ -0,0 +1,47 @@ +--- +order: 3 +--- + +# Implementation + +## Blocksync Reactor + +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates the playing of blocks towards the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation + +### poolRoutine + +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends + a &bcBlockRequestMessage for a specific height + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores + them on disk +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer + +## Block Pool + +- responsible for downloading blocks from peers +- makeRequestersRoutine() + - removes timed out peers + - starts new requesters by calling makeNextRequester() +- requestRoutine(): + - picks a peer and sends the request, then blocks until: + - pool is stopped by listening to pool.Quit + - requester is stopped by listening to Quit + - request is redone + - we receive a block + - gotBlockCh is strange + +## Go Routines in Blocksync Reactor + +![Go Routines Diagram](img/bc-reactor-routines.png) diff --git a/docs/tendermint-core/block-sync/reactor.md b/docs/tendermint-core/block-sync/reactor.md new file mode 100644 index 0000000000..3e28753403 --- /dev/null +++ 
b/docs/tendermint-core/block-sync/reactor.md @@ -0,0 +1,278 @@ +--- +order: 2 +--- +# Reactor + +The Blocksync Reactor's high level responsibility is to enable peers who are +far behind the current state of the consensus to quickly catch up by downloading +many blocks in parallel, verifying their commits, and executing them against the +ABCI application. + +Tendermint full nodes run the Blocksync Reactor as a service to provide blocks +to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode, +where they actively make requests for more blocks until they sync up. +Once caught up, "fast_sync" mode is disabled and the node switches to +using (and turns on) the Consensus Reactor. + +## Architecture and algorithm + +The Blocksync reactor is organised as a set of concurrent tasks: + +- Receive routine of Blocksync Reactor +- Task for creating Requesters +- Set of Requester tasks and the Controller task. + +![Blocksync Reactor Architecture Diagram](img/bc-reactor.png) + +### Data structures + +These are the core data structures necessary to provide the Blocksync Reactor logic. + +The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer with id equal to `peerID`. + +```go +type Requester { + mtx Mutex + block Block + height int64 + peerID p2p.ID + redoChannel chan p2p.ID //redo may send multi-time; peerId is used to identify repeat +} +``` + +Pool is a core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc. + +```go +type Pool { + mtx Mutex + requesters map[int64]*Requester + height int64 + peers map[p2p.ID]*Peer + maxPeerHeight int64 + numPending int32 + store BlockStore + requestsChannel chan<- BlockRequest + errorsChannel chan<- peerError +} +``` + +The Peer data structure stores for each peer the current `height`, the number of pending requests sent to the peer (`numPending`), etc. + +```go +type Peer struct { + id p2p.ID + height int64 + numPending int32 + timeout *time.Timer + didTimeout bool +} +``` + +BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`). + +```go +type BlockRequest { + Height int64 + PeerID p2p.ID +} +``` + +### Receive routine of Blocksync Reactor + +It is executed upon message reception on the BlocksyncChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a receive routine of the Blocksync Reactor) executed for each peer. Note that `try to send` does not block (it returns immediately) if the outgoing buffer is full. + +```go +handleMsg(pool, m): + upon receiving bcBlockRequestMessage m from peer p: + block = load block for height m.Height from pool.store + if block != nil then + try to send BlockResponseMessage(block) to p + else + try to send bcNoBlockResponseMessage(m.Height) to p + + upon receiving bcBlockResponseMessage m from peer p: + pool.mtx.Lock() + requester = pool.requesters[m.Height] + if requester == nil then + error("peer sent us a block we didn't expect") + continue + + if requester.block == nil and requester.peerID == p then + requester.block = m + pool.numPending -= 1 // atomic decrement + peer = pool.peers[p] + if peer != nil then + peer.numPending-- + if peer.numPending == 0 then + peer.timeout.Stop() + // NOTE: we don't send Quit signal to the corresponding requester task!
+ else + trigger peer timeout to expire after peerTimeout + pool.mtx.Unlock() + + + upon receiving bcStatusRequestMessage m from peer p: + try to send bcStatusResponseMessage(pool.store.Height) + + upon receiving bcStatusResponseMessage m from peer p: + pool.mtx.Lock() + peer = pool.peers[p] + if peer != nil then + peer.height = m.height + else + peer = create new Peer data structure with id = p and height = m.Height + pool.peers[p] = peer + + if m.Height > pool.maxPeerHeight then + pool.maxPeerHeight = m.Height + pool.mtx.Unlock() + +onTimeout(p): + send error message to pool error channel + peer = pool.peers[p] + peer.didTimeout = true +``` + +### Requester tasks + +Requester task is responsible for fetching a single block at position `height`. + +```go +fetchBlock(height, pool): + while true do { + peerID = nil + block = nil + peer = pickAvailablePeer(height) + peerID = peer.id + + enqueue BlockRequest(height, peerID) to pool.requestsChannel + redo = false + while !redo do + select { + upon receiving Quit message do + return + upon receiving redo message with id on redoChannel do + if peerID == id { + mtx.Lock() + pool.numPending++ + redo = true + mtx.UnLock() + } + } + } + +pickAvailablePeer(height): + selectedPeer = nil + while selectedPeer = nil do + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then + peer.numPending++ + selectedPeer = peer + break + pool.mtx.Unlock() + + if selectedPeer = nil then + sleep requestIntervalMS + + return selectedPeer +``` + +sleep for requestIntervalMS + +### Task for creating Requesters + +This task is responsible for continuously creating and starting Requester tasks. + +```go +createRequesters(pool): + while true do + if !pool.isRunning then break + if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then + pool.mtx.Lock() + nextHeight = pool.height + size(pool.requesters) + requester = create new requester for height nextHeight + pool.requesters[nextHeight] = requester + pool.numPending += 1 // atomic increment + start requester task + pool.mtx.Unlock() + else + sleep requestIntervalMS + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then + send error on pool error channel + peer.didTimeout = true + if peer.didTimeout then + for each requester in pool.requesters do + if requester.getPeerID() == peer then + enqueue msg on requestor's redoChannel + delete(pool.peers, peerID) + pool.mtx.Unlock() +``` + +### Main blocksync reactor controller task + +```go +main(pool): + create trySyncTicker with interval trySyncIntervalMS + create statusUpdateTicker with interval statusUpdateIntervalSeconds + create switchToConsensusTicker with interval switchToConsensusIntervalSeconds + + while true do + select { + upon receiving BlockRequest(Height, Peer) on pool.requestsChannel: + try to send bcBlockRequestMessage(Height) to Peer + + upon receiving error(peer) on errorsChannel: + stop peer for error + + upon receiving message on statusUpdateTickerChannel: + broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine + + upon receiving message on switchToConsensusTickerChannel: + pool.mtx.Lock() + receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds + ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight + haveSomePeers = size of pool.peers > 0 + 
pool.mtx.Unlock() + if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then + switch to consensus mode + + upon receiving message on trySyncTickerChannel: + for i = 0; i < 10; i++ do + pool.mtx.Lock() + firstBlock = pool.requesters[pool.height].block + secondBlock = pool.requesters[pool.height+1].block + if firstBlock == nil or secondBlock == nil then continue + pool.mtx.Unlock() + verify firstBlock using LastCommit from secondBlock + if verification failed + pool.mtx.Lock() + peerID = pool.requesters[pool.height].peerID + redoRequestsForPeer(peerID) + delete(pool.peers, peerID) + stop peer peerID for error + pool.mtx.Unlock() + else + delete(pool.requesters, pool.height) + save firstBlock to store + pool.height++ + execute firstBlock + } + +redoRequestsForPeer(pool, peerID): + for each requester in pool.requesters do + if requester.getPeerID() == peerID + enqueue msg on redoChannel for requester +``` + +## Channels + +Defines `maxMsgSize` for the maximum size of incoming messages, +`SendQueueCapacity` and `RecvBufferCapacity` for maximum sending and +receiving buffers respectively. These are supposed to prevent amplification +attacks by setting up the upper limit on how much data we can receive & send to +a peer. + +Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md new file mode 100644 index 0000000000..78e63ee220 --- /dev/null +++ b/docs/tendermint-core/consensus/README.md @@ -0,0 +1,41 @@ +--- +order: 1 +parent: + title: Consensus + order: 6 +--- + + +Tendermint Consensus is a distributed protocol executed by validator processes to agree on +the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where +each round is an attempt to reach agreement on the next block. A round starts by having a dedicated +process (called proposer) suggesting to other processes what should be the next block with +the `ProposalMessage`. +The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote +messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the +next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some +round, enough processes vote for the same block, then this block is committed and later +added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the +validator. The internals of the protocol and how it ensures safety and liveness properties are +explained in a forthcoming document. + +For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the +block as the block size is big, i.e., they don't embed the block inside `Proposal` and +`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the +[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section) +that uniquely identifies each block. The block itself is +disseminated to validator processes using a peer-to-peer gossiping protocol. It starts by having a +proposer first splitting a block into a number of block parts, that are then gossiped between +processes using `BlockPartMessage`. + +Validators in Tendermint communicate via a peer-to-peer gossiping protocol. Each validator is connected +only to a subset of processes called peers.
By the gossiping protocol, a validator sends to its peers +all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can +reach agreement on some block, and also obtain the content of the chosen block (block parts). As +part of the gossiping protocol, processes also send auxiliary messages that inform peers about the +executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and +also messages that inform peers what votes the process has seen (`HasVoteMessage`, +`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping +protocol to determine what messages a process should send to its peers. + +We now describe the content of each message exchanged during the Tendermint consensus protocol. diff --git a/docs/tendermint-core/consensus/reactor.md b/docs/tendermint-core/consensus/reactor.md new file mode 100644 index 0000000000..ee43846ece --- /dev/null +++ b/docs/tendermint-core/consensus/reactor.md @@ -0,0 +1,370 @@ +--- +order: 2 +--- + +# Reactor + +Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that +manages the state of the Tendermint consensus internal state machine. +When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service. +Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state +(that is used extensively in gossip routines) and starts the following three routines for the peer p: +Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible +for decoding messages received from a peer and for adequate processing of the message depending on its type and content. +The processing normally consists of updating the known peer state and for some messages +(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding the message to the ConsensusState module +for further processing. In the following text we specify the core functionality of those separate units of execution +that are part of the Consensus Reactor. + +## ConsensusState service + +Consensus State handles execution of the Tendermint BFT consensus algorithm. It processes votes and proposals, +and upon reaching agreement, commits blocks to the chain and executes them against the application. +The internal state machine receives input from peers, the internal validator and from a timer. + +Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine. +Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed +by the Receive Routine. + +### Receive Routine of the ConsensusState service + +Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions. +It is the only routine that updates RoundState that contains internal consensus state. +Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. +It receives messages from peers, internal validators and from Timeout Ticker +and invokes the corresponding handlers, potentially updating the RoundState. +The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are +discussed in a separate document.
For understanding of this document +it is sufficient to understand that the Receive Routine manages and updates RoundState data structure that is +then extensively used by the gossip routines to determine what information should be sent to peer processes. + +## Round State + +RoundState defines the internal consensus state. It contains height, round, round step, a current validator set, +a proposal and proposal block for the current round, locked round and block (if some block is being locked), set of +received votes and last commit and last validators set. + +```go +type RoundState struct { + Height int64 + Round int + Step RoundStepType + Validators ValidatorSet + Proposal Proposal + ProposalBlock Block + ProposalBlockParts PartSet + LockedRound int + LockedBlock Block + LockedBlockParts PartSet + Votes HeightVoteSet + LastCommit VoteSet + LastValidators ValidatorSet +} +``` + +Internally, consensus will run as a state machine with the following states: + +- RoundStepNewHeight +- RoundStepNewRound +- RoundStepPropose +- RoundStepProposeWait +- RoundStepPrevote +- RoundStepPrevoteWait +- RoundStepPrecommit +- RoundStepPrecommitWait +- RoundStepCommit + +## Peer Round State + +Peer round state contains the known state of a peer. It is being updated by the Receive routine of +Consensus Reactor and by the gossip routines upon sending a message to the peer. + +```golang +type PeerRoundState struct { + Height int64 // Height peer is at + Round int // Round peer is at, -1 if unknown. + Step RoundStepType // Step peer is at + Proposal bool // True if peer has proposal for this round + ProposalBlockPartsHeader PartSetHeader + ProposalBlockParts BitArray + ProposalPOLRound int // Proposal's POL round. -1 if none. + ProposalPOL BitArray // nil until ProposalPOLMessage received. + Prevotes BitArray // All votes peer has for this round + Precommits BitArray // All precommits peer has for this round + LastCommitRound int // Round of commit for last height. -1 if none. + LastCommit BitArray // All commit precommits of commit for last height. + CatchupCommitRound int // Round that we have commit for. Not necessarily unique. -1 if none. + CatchupCommit BitArray // All commit precommits peer has for this height & CatchupCommitRound +} +``` + +## Receive method of Consensus reactor + +The entry point of the Consensus reactor is a receive method. When a message is +received from a peer p, normally the peer round state is updated +correspondingly, and some messages are passed for further processing, for +example to ConsensusState service. We now specify the processing of messages in +the receive method of Consensus reactor for each message type. In the following +message handler, `rs` and `prs` denote `RoundState` and `PeerRoundState`, +respectively. + +### NewRoundStepMessage handler + +```go +handleMessage(msg): + if msg is from smaller height/round/step then return + // Just remember these values. 
+ prsHeight = prs.Height + prsRound = prs.Round + prsCatchupCommitRound = prs.CatchupCommitRound + prsCatchupCommit = prs.CatchupCommit + + Update prs with values from msg + if prs.Height or prs.Round has been updated then + reset Proposal related fields of the peer state + if prs.Round has been updated and msg.Round == prsCatchupCommitRound then + prs.Precommits = prsCatchupCommit + if prs.Height has been updated then + if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then + prs.LastCommitRound = msg.LastCommitRound + prs.LastCommit = prs.Precommits + } else { + prs.LastCommitRound = msg.LastCommitRound + prs.LastCommit = nil + } + Reset prs.CatchupCommitRound and prs.CatchupCommit +``` + +### NewValidBlockMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height then return + + if prs.Round != msg.Round && !msg.IsCommit then return + + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader + prs.ProposalBlockParts = msg.BlockParts +``` + +The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to +protect the node against DOS attacks. + +### HasVoteMessage handler + +```go +handleMessage(msg): + if prs.Height == msg.Height then + prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) +``` + +### VoteSetMaj23Message handler + +```go +handleMessage(msg): + if prs.Height == msg.Height then + Record in rs that a peer claims to have ⅔ majority for msg.BlockID + Send VoteSetBitsMessage showing votes node has for that BlockId +``` + +### ProposalMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return + prs.Proposal = true + if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader + prs.ProposalPOLRound = msg.POLRound + prs.ProposalPOL = nil + Send msg through internal peerMsgQueue to ConsensusState service +``` + +### ProposalPOLMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return + prs.ProposalPOL = msg.ProposalPOL +``` + +The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the +node against DOS attacks. + +### BlockPartMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height || prs.Round != msg.Round then return + Record in prs that peer has block part msg.Part.Index + Send msg through internal peerMsgQueue to ConsensusState service +``` + +### VoteMessage handler + +```go +handleMessage(msg): + Record in prs that a peer knows vote with index msg.vote.ValidatorIndex for particular height and round + Send msg through internal peerMsgQueue to ConsensusState service +``` + +### VoteSetBitsMessage handler + +```go +handleMessage(msg): + Update prs for the bit-array of votes peer claims to have for the msg.BlockID +``` + +The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the +node against DOS attacks. + +## Gossip Data Routine + +It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and +`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`) +and the known PeerRoundState (`prs`).
The routine repeats forever the logic shown below: + +```go +1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then + Part = pick a random proposal block part the peer does not have + Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel + if send returns true, record that the peer knows the corresponding block Part + Continue + +1b) if (0 < prs.Height) and (prs.Height < rs.Height) then + help peer catch up using gossipDataForCatchup function + Continue + +1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then + Sleep PeerGossipSleepDuration + Continue + +// at this point rs.Height == prs.Height and rs.Round == prs.Round +1d) if (rs.Proposal != nil and !prs.Proposal) then + Send ProposalMessage(rs.Proposal) to the peer + if send returns true, record that the peer knows Proposal + if 0 <= rs.Proposal.POLRound then + polRound = rs.Proposal.POLRound + prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray() + Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray) + Continue + +2) Sleep PeerGossipSleepDuration +``` + +### Gossip Data For Catchup + +This function is responsible for helping peer catch up if it is at the smaller height (prs.Height < rs.Height). +The function executes the following logic: + +```go + if peer does not have all block parts for prs.ProposalBlockPart then + blockMeta = Load Block Metadata for height prs.Height from blockStore + if (!blockMeta.BlockID.PartsHeader == prs.ProposalBlockPartsHeader) then + Sleep PeerGossipSleepDuration + return + Part = pick a random proposal block part the peer does not have + Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel + if send returns true, record that the peer knows the corresponding block Part + return + else Sleep PeerGossipSleepDuration +``` + +## Gossip Votes Routine + +It is used to send the following message: `VoteMessage` on the VoteChannel. +The gossip votes routine is based on the local RoundState (`rs`) +and the known PeerRoundState (`prs`). 
The routine repeats forever the logic shown below: + +```go +1a) if rs.Height == prs.Height then + if prs.Step == RoundStepNewHeight then + vote = random vote from rs.LastCommit the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then + Prevotes = rs.Votes.Prevotes(prs.Round) + vote = random vote from Prevotes the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then + Precommits = rs.Votes.Precommits(prs.Round) + vote = random vote from Precommits the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.ProposalPOLRound != -1 then + PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound) + vote = random vote from PolPrevotes the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + +1b) if prs.Height != 0 and rs.Height == prs.Height+1 then + vote = random vote from rs.LastCommit peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + +1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then + Commit = get commit from BlockStore for prs.Height + vote = random vote from Commit the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + +2) Sleep PeerGossipSleepDuration +``` + +## QueryMaj23Routine + +It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given +BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState +(`prs`). The routine repeats forever the logic shown below. + +```go +1a) if rs.Height == prs.Height then + Prevotes = rs.Votes.Prevotes(prs.Round) + if there is a ⅔ majority for some blockId in Prevotes then + m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1b) if rs.Height == prs.Height then + Precommits = rs.Votes.Precommits(prs.Round) + if there is a ⅔ majority for some blockId in Precommits then + m = VoteSetMaj23Message(prs.Height,prs.Round,Precommit,blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then + Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound) + if there is a ⅔ majority for some blockId in Prevotes then + m = VoteSetMaj23Message(prs.Height,prs.ProposalPOLRound,Prevotes,blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and + prs.Height <= blockStore.Height() then + Commit = LoadCommit(prs.Height) + m = VoteSetMaj23Message(prs.Height,Commit.Round,Precommit,Commit.BlockID) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +2) Sleep PeerQueryMaj23SleepDuration +``` + +## Broadcast routine + +The Broadcast routine subscribes to an internal event bus to receive new round steps and votes messages, and broadcasts messages to peers upon receiving those +events. +It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state event. Note that +broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel. +Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel. + +## Channels + +Defines 4 channels: state, data, vote and vote_set_bits. 
Each channel +has `SendQueueCapacity` and `RecvBufferCapacity` and +`RecvMessageCapacity` set to `maxMsgSize`. + +Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/tendermint-core/evidence/README.md b/docs/tendermint-core/evidence/README.md new file mode 100644 index 0000000000..2070c48c03 --- /dev/null +++ b/docs/tendermint-core/evidence/README.md @@ -0,0 +1,13 @@ +--- +order: 1 +parent: + title: Evidence + order: 3 +--- + +Evidence is used to identify validators who have acted or are acting maliciously. There are multiple types of evidence; to read more about them, please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence). + +The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is sent to all the peers in a repetitive manner. This ensures evidence reaches as many peers as possible to avoid censorship. After evidence is received by peers and committed in a block, it is pruned from the evidence module. + +Sending incorrectly encoded data or data exceeding `maxMsgSize` will result +in stopping the peer. diff --git a/docs/tendermint-core/mempool.md b/docs/tendermint-core/mempool.md deleted file mode 100644 index 8dd9687819..0000000000 --- a/docs/tendermint-core/mempool.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -order: 12 ---- - -# Mempool - -## Transaction ordering - -Currently, there's no ordering of transactions other than the order they've -arrived (via RPC or from other nodes). - -So the only way to specify the order is to send them to a single node. - -valA: - -- `tx1` -- `tx2` -- `tx3` - -If the transactions are split up across different nodes, there's no way to -ensure they are processed in the expected order. - -valA: - -- `tx1` -- `tx2` - -valB: - -- `tx3` - -If valB is the proposer, the order might be: - -- `tx3` -- `tx1` -- `tx2` - -If valA is the proposer, the order might be: - -- `tx1` -- `tx2` -- `tx3` - -That said, if the transactions contain some internal value, like an -order/nonce/sequence number, the application can reject transactions that are -out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then -accept `tx1`. The sender can then retry sending `tx3`, which should probably be -rejected until the node has seen `tx2`. diff --git a/docs/tendermint-core/mempool/README.md b/docs/tendermint-core/mempool/README.md new file mode 100644 index 0000000000..1821cf8492 --- /dev/null +++ b/docs/tendermint-core/mempool/README.md @@ -0,0 +1,71 @@ +--- +order: 1 +parent: + title: Mempool + order: 2 +--- + +The mempool is an in-memory pool of potentially valid transactions, +both to broadcast to other nodes and to provide to the +consensus reactor when the node is selected as the block proposer. + +There are two sides to the mempool state: + +- External: get, check, and broadcast new transactions +- Internal: return valid transactions, update the list after a block commit + +## External functionality + +External functionality is exposed via network interfaces +to potentially untrusted actors. + +- CheckTx - triggered via RPC or P2P +- Broadcast - gossip messages after a successful check + +## Internal functionality + +Internal functionality is exposed via method calls to other +code compiled into the tendermint binary. + +- ReapMaxBytesMaxGas - get txs to propose in the next block.
Guarantees that the + size of the txs is less than MaxBytes, and gas is less than MaxGas +- Update - remove txs that were included in the last block +- ABCI.CheckTx - call ABCI app to validate the tx + +What does it provide the consensus reactor? +What guarantees does it need from the ABCI app? +(talk about interleaving processes in concurrency) + +## Optimizations + +The implementation within this library also includes a tx cache. +This is so that signatures don't have to be reverified if the tx has +already been seen before. +However, we only store valid txs in the cache, not invalid ones. +This is because invalid txs could become valid later. +Txs that are included in a block aren't removed from the cache, +as they still may be getting received over the p2p network. +These txs are stored in the cache by their hash, to mitigate memory concerns. + +Applications should implement replay protection; read [Replay +Protection](https://github.com/tendermint/tendermint/blob/8cdaa7f515a9d366bbc9f0aff2a263a1a6392ead/docs/app-dev/app-development.md#replay-protection) for more information. + +## Configuration + +The mempool has various configurable parameters. + +Sending incorrectly encoded data or data exceeding `maxMsgSize` will result +in stopping the peer. + +`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead). +`MaxBatchBytes` is a mempool config parameter -> defined locally. The reactor +sends transactions to the connected peers in batches. The maximum size of one +batch is `MaxBatchBytes`. + +The mempool will not send a tx back to the peer from which it received it. + +The reactor assigns a `uint16` number to each peer and maintains a map from +p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders +(`[]uint16`). The list is updated every time the mempool receives a transaction it +has already seen. Using `uint16` assumes that a node will never have over 65535 active +peers (0 is reserved for an unknown source - e.g. RPC). diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md new file mode 100644 index 0000000000..4e8a9ec73d --- /dev/null +++ b/docs/tendermint-core/mempool/config.md @@ -0,0 +1,105 @@ +--- +order: 2 +--- + +# Configuration + +Here we describe the configuration options for the mempool. +For the purposes of this document, they are described +in a toml file, but some of them can also be passed in as +environment variables. + +Config: + +```toml +[mempool] + +recheck = true +broadcast = true +wal-dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max-txs-bytes=5MB, mempool will only accept 5 transactions). +max-txs-bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache-size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. +max-tx-bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction).
+# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max-batch-bytes = 0 +``` + + + +## Recheck + +Recheck determines if the mempool rechecks all pending +transactions after a block was committed. Once a block +is committed, the mempool removes all valid transactions +that were successfully included in the block. + +If `recheck` is true, then it will rerun CheckTx on +all remaining transactions with the new block state. + +## Broadcast + +Determines whether this node gossips any valid transactions +that arrive in the mempool. Default is to gossip anything that +passes checktx. If this is disabled, transactions are not +gossiped, but instead stored locally and added to the next +block for which this node is the proposer. + +## WalDir + +This defines the directory where the mempool writes the write-ahead +logs. These files can be used to reload unbroadcasted +transactions if the node crashes. + +If the directory passed in is an absolute path, the wal file is +created there. If the directory is a relative path, the path is +appended to the home directory of the tendermint process to +generate an absolute path to the wal directory +(default `$HOME/.tendermint`, or set via `TM_HOME` or `--home`). + +## Size + +Size defines the total number of transactions stored in the mempool. Default is `5_000` but can be adjusted to any number you would like. The higher the size, the more strain on the node. + +## Max Transactions Bytes + +Max transactions bytes defines the total size of all the transactions in the mempool. Default is 1 GB. + +## Cache size + +Cache size determines the size of the cache holding transactions we have already seen. The cache exists to avoid running `checktx` each time we receive a transaction. + +## Keep Invalid Transactions In Cache + +Keep invalid transactions in cache determines whether an invalid transaction in the cache should be evicted. An invalid transaction here may simply rely on a different tx that has not yet been included in a block. + +## Max Transaction Bytes + +Max transaction bytes defines the max size a transaction can be for your node. If you would like your node to only keep track of smaller transactions, this field would need to be changed. Default is 1MB. + +## Max Batch Bytes + +Max batch bytes defines the number of bytes the node will send to a peer in one batch. Default is 0. + +> Note: Unused due to https://github.com/tendermint/tendermint/issues/5796 diff --git a/docs/tendermint-core/pex/README.md b/docs/tendermint-core/pex/README.md new file mode 100644 index 0000000000..5f5c3ed42b --- /dev/null +++ b/docs/tendermint-core/pex/README.md @@ -0,0 +1,177 @@ +--- +order: 1 +parent: + title: Peer Exchange + order: 5 +--- + +# Peer Strategy and Exchange + +Here we outline the design of the PeerStore +and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected +to good peers and to gossip peers to others. + +## Peer Types + +Certain peers are special in that they are specified by the user as `persistent`, +which means we auto-redial them if the connection fails, or if we fail to dial +them. +Some peers can be marked as `private`, which means +we will not put them in the peer store or gossip them to others. + +All peers except private peers and peers coming from them are tracked using the +peer store. + +The rest of our peers are only distinguished by being either +inbound (they dialed our public address) or outbound (we dialed them). + +## Discovery + +Peer discovery begins with a list of seeds.
+ +When we don't have enough peers, we + +1. ask existing peers +2. dial seeds if we're not dialing anyone currently + +On startup, we will also immediately dial the given list of `persistent_peers`, +and will attempt to maintain persistent connections with them. If the +connections die, or we fail to dial, we will redial every 5s for a few minutes, +then switch to an exponential backoff schedule, and after about a day of +trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period` is configured to zero. + +But if `persistent_peers_max_dial_period` is set greater than zero, the interval between dials to each persistent peer +will not exceed `persistent_peers_max_dial_period` during exponential backoff. +Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`) +and we keep trying regardless of `maxAttemptsToDial`. + +As long as we have fewer than `MaxNumOutboundPeers`, we periodically request +additional peers from each of our own peers and try the seeds. + +## Listening + +Peers listen on a configurable ListenAddr that they self-report in their +NodeInfo during handshakes with other peers. Peers accept up to +`MaxNumInboundPeers` incoming peers. + +## Address Book + +Peers are tracked via their ID (their PubKey.Address()). +Peers are added to the peer store from the PEX when they first connect to us or +when we hear about them from other peers. + +The peer store is arranged in sets of buckets, and distinguishes between +vetted (old) and unvetted (new) peers. It keeps different sets of buckets for +vetted and unvetted peers. Buckets provide randomization over peer selection. +Peers are put in buckets according to their IP groups. + +An IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`) or `local` for +local addresses or `unroutable` for unroutable addresses. The mask corresponding +to the `/16` subnet is used for IPv4, and the `/32` subnet for IPv6. +Each group has a limited number of buckets to prevent DoS attacks coming from +that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS +attack). + +[highwayhash](https://arxiv.org/abs/1612.06257) is used as a hashing function +when calculating a bucket. + +When placing a peer into a new bucket: + +```md +hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets +``` + +When placing a peer into an old bucket: + +```md +hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets +``` + +where `key` is a random 24 HEX string, `group` is the IP group of the peer (e.g. `1.2.0.0`), +`sourcegroup` is the IP group of the sender (the peer who sent us this address) (e.g. `174.11.0.0`), +and `addr` is the string representation of the peer's address (e.g. `174.11.10.2:26656`). + +A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and +each instance of the peer can have a different IP:PORT. + +If we're trying to add a new peer but there's no space in its bucket, we'll +remove the worst peer from that bucket to make room. + +## Vetting + +When a peer is first added, it is unvetted. +Marking a peer as vetted is outside the scope of the `p2p` package. +For Tendermint, a Peer becomes vetted once it has contributed sufficiently +at the consensus layer; i.e. once it has sent us valid and not-yet-known +votes and/or block parts for `NumBlocksForVetted` blocks. +Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
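+As a concrete example of that last point, here is a minimal, hedged sketch of a vetting policy a user of the p2p package might plug in. The `vetter` type and its `numBlocksForVetted` field are illustrative assumptions; the real `NumBlocksForVetted` constant and vetting hooks live elsewhere in the codebase.
+
+```go
+package main
+
+import "fmt"
+
+// peerRecord tracks how many blocks a peer has usefully contributed to
+// (valid, not-yet-known votes or block parts), per the rule described above.
+type peerRecord struct {
+	contributedBlocks int
+	vetted            bool
+}
+
+// vetter is an illustrative vetting policy: a peer becomes vetted once it has
+// contributed for numBlocksForVetted blocks.
+type vetter struct {
+	numBlocksForVetted int
+	peers              map[string]*peerRecord // keyed by peer ID
+}
+
+// RecordContribution notes a peer's contribution for one block and reports
+// whether the peer just crossed the vetting threshold.
+func (v *vetter) RecordContribution(peerID string) bool {
+	rec, ok := v.peers[peerID]
+	if !ok {
+		rec = &peerRecord{}
+		v.peers[peerID] = rec
+	}
+	rec.contributedBlocks++
+	if !rec.vetted && rec.contributedBlocks >= v.numBlocksForVetted {
+		rec.vetted = true
+		return true
+	}
+	return false
+}
+
+func main() {
+	v := &vetter{numBlocksForVetted: 3, peers: map[string]*peerRecord{}}
+	for i := 0; i < 3; i++ {
+		fmt.Println(v.RecordContribution("peer-1")) // false, false, true
+	}
+}
+```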
+ +If a peer becomes vetted but there are already too many vetted peers, +a randomly selected one of the vetted peers becomes unvetted. + +If a peer becomes unvetted (either a new peer, or one that was previously vetted), +a randomly selected one of the unvetted peers is removed from the peer store. + +More fine-grained tracking of peer behaviour can be done using +a trust metric (see below), but it's best to start with something simple. + +## Select Peers to Dial + +When we need more peers, we pick addresses randomly from the addrbook with some +configurable bias for unvetted peers. The bias should be lower when we have +fewer peers and can increase as we obtain more, ensuring that our first peers +are more trustworthy, but always giving us the chance to discover new good +peers. + +We track the last time we dialed a peer and the number of unsuccessful attempts +we've made. If too many attempts are made, we mark the peer as bad. + +Connection attempts are made with exponential backoff (plus jitter). Because +the selection process happens every `ensurePeersPeriod`, we might not end up +dialing a peer for much longer than the backoff duration. + +If we fail to connect to the peer after 16 tries (with exponential backoff), we +remove it from the peer store completely. But persistent peers are redialed indefinitely, +unless `persistent_peers_max_dial_period` is configured to zero. + +## Select Peers to Exchange + +When we’re asked for peers, we select them as follows: + +- select at most `maxGetSelection` peers +- try to select at least `minGetSelection` peers - if we have fewer than that, select them all. +- select a random, unbiased `getSelectionPercent` of the peers + +Send the selected peers. Note we select peers for sending without bias for vetted/unvetted. + +## Preventing Spam + +There are various cases where we decide a peer has misbehaved and we disconnect from it. +When this happens, the peer is removed from the peer store and blacklisted for +some amount of time. We call this "Disconnect and Mark". +Note that the bad behaviour may be detected outside the PEX reactor itself +(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor +so it can remove and mark the peer. + +In the PEX, if a peer sends us an unsolicited list of peers, +or if the peer sends a request too soon after another one, +we Disconnect and MarkBad. + +## Trust Metric + +The quality of peers can be tracked in more fine-grained detail using a +Proportional-Integral-Derivative (PID) controller that incorporates +current, past, and rate-of-change data to inform peer quality. + +While a PID trust metric has been implemented, it remains for future work +to use it in the PEX. + +See the [trust metric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md) +and [trust metric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md) +architecture docs for more details. + + + + + + diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md deleted file mode 100644 index 52286e6c7b..0000000000 --- a/docs/tendermint-core/state-sync.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -order: 11 ---- - -# State Sync - -With block sync a node is downloading all of the data of an application from genesis and verifying it. -With state sync your node will download data related to the head or near the head of the chain and verify the data.
-This leads to drastically shorter times for joining a network. - -Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md) - -## Events - -When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion. - -The user can query the events by subscribing `EventQueryStateSyncStatus` -Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. \ No newline at end of file diff --git a/docs/tendermint-core/state-sync/README.md b/docs/tendermint-core/state-sync/README.md new file mode 100644 index 0000000000..39e76ce39d --- /dev/null +++ b/docs/tendermint-core/state-sync/README.md @@ -0,0 +1,85 @@ +--- +order: 1 +parent: + title: State Sync + order: 4 +--- + + +State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching, +and restoring state machine snapshots. For more information, see the [state sync ABCI section](https://docs.tendermint.com/master/spec/abci/abci.html#state-sync). + +The state sync reactor has two main responsibilities: + +* Serving state machine snapshots taken by the local ABCI application to new nodes joining the + network. + +* Discovering existing snapshots and fetching snapshot chunks for an empty local application + being bootstrapped. + +The state sync process for bootstrapping a new node is described in detail in the section linked +above. While the syncing logic is technically part of the reactor (see `statesync/syncer.go` and related components), +this document will only cover the P2P reactor component. + +For details on the ABCI methods and data types, see the [ABCI documentation](https://docs.tendermint.com/master/spec/abci/). + +Information on how to configure state sync is located in the [nodes section](../../nodes/state-sync.md). + +## State Sync P2P Protocol + +When a new node begins state syncing, it will ask all peers it encounters if they have any +available snapshots: + +```go +type snapshotsRequestMessage struct{} +``` + +The receiver will query the local ABCI application via `ListSnapshots`, and send a message +containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots: + +```go +type snapshotsResponseMessage struct { + Height uint64 + Format uint32 + Chunks uint32 + Hash []byte + Metadata []byte +} +``` + +The node running state sync will offer these snapshots to the local ABCI application via +`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot +is accepted, the state syncer will request snapshot chunks from appropriate peers: + +```go +type chunkRequestMessage struct { + Height uint64 + Format uint32 + Index uint32 +} +``` + +The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`, +and respond with it (limited to 16 MB): + +```go +type chunkResponseMessage struct { + Height uint64 + Format uint32 + Index uint32 + Chunk []byte + Missing bool +} +``` + +Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty +chunk is a valid (although unlikely) response. + +Each returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot +is restored. If a chunk response is not returned within some time, it will be re-requested, +possibly from a different peer. + +The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
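+To make the chunk exchange concrete, the sketch below shows how a serving node might answer a chunk request by consulting its local application. The `snapshotter` interface and `handleChunkRequest` helper are illustrative assumptions standing in for the reactor's wiring around the `LoadSnapshotChunk` ABCI call; only the message shapes are taken from the protocol described above.
+
+```go
+package statesyncexample
+
+// chunkRequestMessage and chunkResponseMessage mirror the wire messages
+// described above.
+type chunkRequestMessage struct {
+	Height uint64
+	Format uint32
+	Index  uint32
+}
+
+type chunkResponseMessage struct {
+	Height  uint64
+	Format  uint32
+	Index   uint32
+	Chunk   []byte
+	Missing bool
+}
+
+// snapshotter is an illustrative stand-in for the local ABCI connection.
+type snapshotter interface {
+	LoadSnapshotChunk(height uint64, format, index uint32) ([]byte, error)
+}
+
+// handleChunkRequest loads the requested chunk from the local application and
+// builds the response, flagging Missing when the chunk does not exist (an
+// empty chunk is still a valid response).
+func handleChunkRequest(app snapshotter, req chunkRequestMessage) chunkResponseMessage {
+	resp := chunkResponseMessage{Height: req.Height, Format: req.Format, Index: req.Index}
+	chunk, err := app.LoadSnapshotChunk(req.Height, req.Format, req.Index)
+	if err != nil || chunk == nil {
+		resp.Missing = true
+		return resp
+	}
+	resp.Chunk = chunk
+	return resp
+}
+```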
+ +If no state sync is in progress (i.e. during normal operation), any unsolicited response messages +are discarded. diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 2d71c54371..4e52525bf5 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "sync" "sync/atomic" @@ -450,12 +449,35 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { case *abci.Response_CheckTx: tx := req.GetCheckTx().Tx memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, memTx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break + } + + mem.logger.Error( + "re-CheckTx transaction mismatch", + "got", types.Tx(tx), + "expected", memTx.tx, + ) + + if mem.recheckCursor == mem.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. + mem.recheckCursor = nil + return + } + + mem.recheckCursor = mem.recheckCursor.Next() + memTx = mem.recheckCursor.Value.(*mempoolTx) } + var postCheckErr error if mem.postCheck != nil { postCheckErr = mem.postCheck(tx, r.CheckTx) diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 131e500c03..f150727d71 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -13,9 +13,11 @@ import ( "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abciclient "github.com/tendermint/tendermint/abci/client" + abciclimocks "github.com/tendermint/tendermint/abci/client/mocks" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" @@ -214,6 +216,57 @@ func TestMempoolUpdate(t *testing.T) { } } +func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { + var callback abciclient.Callback + mockClient := new(abciclimocks.Client) + mockClient.On("Start").Return(nil) + mockClient.On("SetLogger", mock.Anything) + + mockClient.On("Error").Return(nil).Times(4) + mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil) + mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) + + cc := func() (abciclient.Client, error) { + return mockClient, nil + } + + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + // Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them. + txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}} + for _, tx := range txs { + reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx})) + reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK}) + // SetDone allows the ReqRes to process its callback synchronously. 
+ // This simulates the Response being ready for the client immediately. + reqRes.SetDone() + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil) + err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + require.NoError(t, err) + } + + // Calling update to remove the first transaction from the mempool. + // This call also triggers the mempool to recheck its remaining transactions. + err := mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.Nil(t, err) + + // The mempool has now sent its requests off to the client to be rechecked + // and is waiting for the corresponding callbacks to be called. + // We now call the mempool-supplied callback on the first and third transaction. + // This simulates the client dropping the second request. + // Previous versions of this code panicked when the ABCI application missed + // a recheck-tx request. + resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK} + req := abci.RequestCheckTx{Tx: txs[1]} + callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) + + req = abci.RequestCheckTx{Tx: txs[3]} + callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) + mockClient.AssertExpectations(t) +} + func TestMempool_KeepInvalidTxsInCache(t *testing.T) { app := kvstore.NewApplication() cc := abciclient.NewLocalCreator(app) diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index a12fbc51ba..d78d9d2877 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "reflect" "sync/atomic" "time" @@ -639,58 +640,88 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.RecheckTimes.Add(1) checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if ok { - tx := req.GetCheckTx().Tx - wtx := txmp.recheckCursor.Value.(*WrappedTx) - if !bytes.Equal(tx, wtx.tx) { - panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key())) + if !ok { + txmp.logger.Error("received incorrect type in mempool callback", + "expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(), + "got", reflect.TypeOf(res.Value).Name(), + ) + return + } + tx := req.GetCheckTx().Tx + wtx := txmp.recheckCursor.Value.(*WrappedTx) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, wtx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break } - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. 
- if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, checkTxRes.CheckTx) - } - - if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { - wtx.priority = checkTxRes.CheckTx.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", checkTxRes.CheckTx.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } - } + txmp.logger.Error( + "re-CheckTx transaction mismatch", + "got", wtx.tx.Hash(), + "expected", types.Tx(tx).Key(), + ) - // move reCheckTx cursor to next element if txmp.recheckCursor == txmp.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() + return } - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") + txmp.recheckCursor = txmp.recheckCursor.Next() + wtx = txmp.recheckCursor.Value.(*WrappedTx) + } + + // Only evaluate transactions that have not been removed. This can happen + // if an existing transaction is evicted during CheckTx and while this + // callback is being executed for the same evicted transaction. + if !txmp.txStore.IsTxRemoved(wtx.hash) { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, checkTxRes.CheckTx) + } - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() + if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { + wtx.priority = checkTxRes.CheckTx.Priority + } else { + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err, + "code", checkTxRes.CheckTx.Code, + ) + + if wtx.gossipEl != txmp.recheckCursor { + panic("corrupted reCheckTx cursor") } + + txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) } + } - txmp.metrics.Size.Set(float64(txmp.Size())) + // move reCheckTx cursor to next element + if txmp.recheckCursor == txmp.recheckEnd { + txmp.recheckCursor = nil + } else { + txmp.recheckCursor = txmp.recheckCursor.Next() } + + if txmp.recheckCursor == nil { + txmp.logger.Debug("finished rechecking transactions") + + if txmp.Size() > 0 { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) } // updateReCheckTxs updates the recheck cursors by using the gossipIndex. For diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 645cc19e13..9a5535a91c 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -1,7 +1,6 @@ package pex import ( - "context" "fmt" "runtime/debug" "sync" @@ -21,8 +20,6 @@ var ( _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) -// TODO: Consolidate with params file. 
-// See https://github.com/tendermint/tendermint/issues/6371 const ( // PexChannel is a channel for PEX messages PexChannel = 0x00 @@ -46,9 +43,6 @@ const ( // the maximum amount of addresses that can be included in a response maxAddresses uint16 = 100 - // allocated time to resolve a node address into a set of endpoints - resolveTimeout = 3 * time.Second - // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -217,55 +211,7 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *protop2p.PexRequest: - // Check if the peer hasn't sent a prior request too close to this one - // in time. - if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // parse and send the legacy PEX addresses - pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses)) - r.pexCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &protop2p.PexResponse{Addresses: pexAddresses}, - } - - case *protop2p.PexResponse: - // check if the response matches a request that was made to that peer - if err := r.markPeerResponse(envelope.From); err != nil { - return err - } - - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) - } - - for _, pexAddress := range msg.Addresses { - // no protocol is prefixed so we assume the default (mconn) - peerAddress, err := p2p.ParseNodeAddress( - fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) - if err != nil { - continue - } - added, err := r.peerManager.Add(peerAddress) - if err != nil { - logger.Error("failed to add PEX address", "address", peerAddress, "err", err) - } - if added { - r.newPeers++ - logger.Debug("added PEX address", "address", peerAddress) - } - r.totalPeers++ - } - - // V2 PEX MESSAGES - case *protop2p.PexRequestV2: // check if the peer hasn't sent a prior request too close to this one // in time if err := r.markPeerRequest(envelope.From); err != nil { @@ -275,18 +221,18 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { // request peers from the peer manager and parse the NodeAddresses into // URL strings nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) - pexAddressesV2 := make([]protop2p.PexAddressV2, len(nodeAddresses)) + pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) for idx, addr := range nodeAddresses { - pexAddressesV2[idx] = protop2p.PexAddressV2{ + pexAddresses[idx] = protop2p.PexAddress{ URL: addr.String(), } } r.pexCh.Out <- p2p.Envelope{ To: envelope.From, - Message: &protop2p.PexResponseV2{Addresses: pexAddressesV2}, + Message: &protop2p.PexResponse{Addresses: pexAddresses}, } - case *protop2p.PexResponseV2: + case *protop2p.PexResponse: // check if the response matches a request that was made to that peer if err := r.markPeerResponse(envelope.From); err != nil { return err @@ -307,11 +253,11 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { } added, err := r.peerManager.Add(peerAddress) if err != nil { - logger.Error("failed to add V2 PEX address", "address", peerAddress, "err", err) + logger.Error("failed to add PEX address", "address", peerAddress, "err", err) } if added { r.newPeers++ - logger.Debug("added V2 PEX address", "address", peerAddress) + logger.Debug("added PEX address", "address", 
peerAddress) } r.totalPeers++ } @@ -323,55 +269,6 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { return nil } -// resolve resolves a set of peer addresses into PEX addresses. -// -// FIXME: This is necessary because the current PEX protocol only supports -// IP/port pairs, while the P2P stack uses NodeAddress URLs. The PEX protocol -// should really use URLs too, to exchange DNS names instead of IPs and allow -// different transport protocols (e.g. QUIC and MemoryTransport). -// -// FIXME: We may want to cache and parallelize this, but for now we'll just rely -// on the operating system to cache it for us. -func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { - limit := len(addresses) - pexAddresses := make([]protop2p.PexAddress, 0, limit) - - for _, address := range addresses { - ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) - endpoints, err := address.Resolve(ctx) - r.Logger.Debug("resolved node address", "endpoints", endpoints) - cancel() - - if err != nil { - r.Logger.Debug("failed to resolve address", "address", address, "err", err) - continue - } - - for _, endpoint := range endpoints { - r.Logger.Debug("checking endpint", "IP", endpoint.IP, "Port", endpoint.Port) - if len(pexAddresses) >= limit { - return pexAddresses - - } else if endpoint.IP != nil { - r.Logger.Debug("appending pex address") - // PEX currently only supports IP-networked transports (as - // opposed to e.g. p2p.MemoryTransport). - // - // FIXME: as the PEX address contains no information about the - // protocol, we jam this into the ID. We won't need to this once - // we support URLs - pexAddresses = append(pexAddresses, protop2p.PexAddress{ - ID: string(address.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - } - - return pexAddresses -} - // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
@@ -444,17 +341,10 @@ func (r *Reactor) sendRequestForPeers() { break } - // The node accommodates for both pex systems - if r.isLegacyPeer(peerID) { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequest{}, - } - } else { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequestV2{}, - } + // send out the pex request + r.pexCh.Out <- p2p.Envelope{ + To: peerID, + Message: &protop2p.PexRequest{}, } // remove the peer from the abvailable peers list and mark it in the requestsSent map @@ -538,14 +428,3 @@ func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.availablePeers[peer] = struct{}{} return nil } - -// all addresses must use a MCONN protocol for the peer to be considered part of the -// legacy p2p pex system -func (r *Reactor) isLegacyPeer(peer types.NodeID) bool { - for _, addr := range r.peerManager.Addresses(peer) { - if addr.Protocol != p2p.MConnProtocol { - return false - } - } - return true -} diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index b7e1a01c3d..04b347cb50 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,7 +1,6 @@ package pex_test import ( - "context" "strings" "testing" "time" @@ -27,7 +26,6 @@ const ( firstNode = 0 secondNode = 1 thirdNode = 2 - fourthNode = 3 ) func TestReactorBasic(t *testing.T) { @@ -44,8 +42,8 @@ func TestReactorBasic(t *testing.T) { // assert that when a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil)) + testNet.sendRequest(t, firstNode, secondNode) + testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { @@ -71,17 +69,17 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*p2pproto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponse) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } peerErr := <-r.pexErrCh @@ -102,8 +100,8 @@ func TestReactorSendsResponseWithoutRequest(t *testing.T) { // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two responses to counter that. 
- testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) @@ -136,10 +134,10 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { require.NoError(t, err) require.True(t, added) - addresses := make([]p2pproto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddress, 101) for i := 0; i < len(addresses); i++ { nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = p2pproto.PexAddressV2{ + addresses[i] = p2pproto.PexAddress{ URL: nodeAddress.String(), } } @@ -152,12 +150,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequest); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &p2pproto.PexResponseV2{ + Message: &p2pproto.PexResponse{ Addresses: addresses, }, } @@ -239,38 +237,6 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } } -func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 3, - }) - testNet.connectAll(t) - testNet.start(t) - t.Log(testNet.nodes) - - // mock node sends a V1 Pex message to the second node - testNet.sendRequest(t, firstNode, secondNode, false) - addrs := testNet.getAddressesFor(t, []int{thirdNode}) - testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs) -} - -func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 4, - BufferSize: 4, - }) - testNet.connectPeers(t, firstNode, secondNode) - testNet.connectPeers(t, firstNode, thirdNode) - testNet.connectPeers(t, firstNode, fourthNode) - testNet.start(t) - - testNet.listenForRequest(t, secondNode, firstNode, shortWait) - // send a v1 response instead - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false) - testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait) -} - type singleTestReactor struct { reactor *pex.Reactor pexInCh chan p2p.Envelope @@ -484,11 +450,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, r.logger.Info("Listening for request", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexRequestV2) + _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message) + require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } r.listenFor(t, to, conditional, assertion, waitPeriod) @@ -503,11 +469,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) + _, ok := msg.Message.(*p2pproto.PexResponse) return ok 
&& msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*p2pproto.PexResponseV2) + m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") return true @@ -519,34 +485,14 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(t, toNode, fromNode) r.listenFor(t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForResponse( - t *testing.T, - fromNode, toNode int, - waitPeriod time.Duration, - addresses []p2pproto.PexAddressV2, -) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) - to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) - r.logger.Info("message", msg, "ok", ok) - return ok && msg.From == from - } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message) - return true - } - r.listenFor(t, to, conditional, assertion, waitPeriod) -} - -func (r *reactorTestSuite) listenForLegacyResponse( t *testing.T, fromNode, toNode int, waitPeriod time.Duration, @@ -556,6 +502,7 @@ func (r *reactorTestSuite) listenForLegacyResponse( to, from := r.checkNodePair(t, toNode, fromNode) conditional := func(msg p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) + r.logger.Info("message", msg, "ok", ok) return ok && msg.From == from } assertion := func(t *testing.T, msg p2p.Envelope) bool { @@ -591,46 +538,22 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 { - addresses := make([]p2pproto.PexAddressV2, len(nodes)) - for idx, node := range nodes { - nodeID := r.nodes[node] - addresses[idx] = p2pproto.PexAddressV2{ - URL: r.network.Nodes[nodeID].NodeAddress.String(), - } - } - return addresses -} - -func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress { +func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - nodeAddrs := r.network.Nodes[nodeID].NodeAddress - endpoints, err := nodeAddrs.Resolve(context.Background()) - require.NoError(t, err) - require.Len(t, endpoints, 1) addresses[idx] = p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoints[0].IP.String(), - Port: uint32(endpoints[0].Port), + URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) { +func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) { to, from := r.checkNodePair(t, toNode, fromNode) - if v2 { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequestV2{}, - } - } else { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequest{}, - } + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexRequest{}, } } @@ -638,25 +561,14 @@ func (r *reactorTestSuite) sendResponse( t *testing.T, fromNode, toNode int, withNodes []int, - v2 bool, ) { from, to := r.checkNodePair(t, fromNode, toNode) - if 
v2 { - addrs := r.getV2AddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponseV2{ - Addresses: addrs, - }, - } - } else { - addrs := r.getAddressesFor(t, withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponse{ - Addresses: addrs, - }, - } + addrs := r.getAddressesFor(withNodes) + r.pexChannels[from].Out <- p2p.Envelope{ + To: to, + Message: &p2pproto.PexResponse{ + Addresses: addrs, + }, } } @@ -759,32 +671,6 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int require.True(t, added) } -// nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress { - var addresses []p2pproto.PexAddress - for _, i := range nodeIndices { - if i < len(r.nodes) { - require.Fail(t, "index for pex address is greater than number of nodes") - } - nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - endpoints, err := nodeAddrs.Resolve(ctx) - cancel() - require.NoError(t, err) - for _, endpoint := range endpoints { - if endpoint.IP != nil { - addresses = append(addresses, p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - - } - return addresses -} - func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 6c46946244..402e2f0ed6 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,8 +21,6 @@ import ( const queueBufferDefault = 32 -const dialRandomizerIntervalMillisecond = 3000 - // Envelope contains a message with sender/receiver routing info. type Envelope struct { From types.NodeID // sender (empty if outbound) @@ -536,8 +534,15 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { + const ( + maxDialerInterval = 3000 + minDialerInterval = 250 + ) + // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond) + dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) + + timer := time.NewTimer(dur * time.Millisecond) defer timer.Stop() select { diff --git a/internal/state/rollback.go b/internal/state/rollback.go index 6e13da0e29..e78957b028 100644 --- a/internal/state/rollback.go +++ b/internal/state/rollback.go @@ -19,6 +19,23 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { return -1, nil, errors.New("no state found") } + height := bs.Height() + + // NOTE: persistence of state and blocks don't happen atomically. Therefore it is possible that + // when the user stopped the node the state wasn't updated but the blockstore was. 
In this situation + // we don't need to rollback any state and can just return early + if height == invalidState.LastBlockHeight+1 { + return invalidState.LastBlockHeight, invalidState.AppHash, nil + } + + // If the state store isn't one below nor equal to the blockstore height than this violates the + // invariant + if height != invalidState.LastBlockHeight { + return -1, nil, fmt.Errorf("statestore height (%d) is not one below or equal to blockstore height (%d)", + invalidState.LastBlockHeight, height) + } + + // state store height is equal to blockstore height. We're good to proceed with rolling back state rollbackHeight := invalidState.LastBlockHeight rollbackBlock := bs.LoadBlockMeta(rollbackHeight) if rollbackBlock == nil { diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index ae5c8ee84a..e782b4d899 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -14,42 +14,14 @@ import ( ) func TestRollback(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) - blockStore := &mocks.BlockStore{} var ( height int64 = 100 appVersion uint64 = 10 ) - - valSet, _ := factory.RandValidatorSet(5, 10) - - params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion - newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 - - initialState := state.State{ - Version: state.Version{ - Consensus: version.Consensus{ - Block: version.BlockProtocol, - App: 10, - }, - Software: version.TMVersion, - }, - ChainID: factory.DefaultTestChainID, - InitialHeight: 10, - LastBlockID: factory.MakeBlockID(), - AppHash: factory.RandomHash(), - LastResultsHash: factory.RandomHash(), - LastBlockHeight: height, - LastValidators: valSet, - Validators: valSet.CopyIncrementProposerPriority(1), - NextValidators: valSet.CopyIncrementProposerPriority(2), - LastHeightValidatorsChanged: height + 1, - ConsensusParams: *params, - LastHeightConsensusParamsChanged: height + 1, - } - require.NoError(t, stateStore.Bootstrap(initialState)) + blockStore := &mocks.BlockStore{} + stateStore := setupStateStore(t, height) + initialState, err := stateStore.Load() + require.NoError(t, err) height++ block := &types.BlockMeta{ @@ -61,9 +33,13 @@ func TestRollback(t *testing.T) { }, } blockStore.On("LoadBlockMeta", height).Return(block) + blockStore.On("Height").Return(height) + // perform the rollback over a version bump appVersion++ + newParams := types.DefaultConsensusParams() newParams.Version.AppVersion = appVersion + newParams.Block.MaxBytes = 1000 nextState := initialState.Copy() nextState.LastBlockHeight = height nextState.Version.Consensus.App = appVersion @@ -102,19 +78,34 @@ func TestRollbackNoState(t *testing.T) { } func TestRollbackNoBlocks(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + const height = int64(100) + stateStore := setupStateStore(t, height) blockStore := &mocks.BlockStore{} - var ( - height int64 = 100 - appVersion uint64 = 10 - ) + blockStore.On("Height").Return(height) + blockStore.On("LoadBlockMeta", height).Return(nil) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "block at height 100 not found") +} +func TestRollbackDifferentStateHeight(t *testing.T) { + const height = int64(100) + stateStore := setupStateStore(t, height) + blockStore := &mocks.BlockStore{} + blockStore.On("Height").Return(height + 2) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Equal(t, err.Error(), 
"statestore height (100) is not one below or equal to blockstore height (102)") +} + +func setupStateStore(t *testing.T, height int64) state.Store { + stateStore := state.NewStore(dbm.NewMemDB()) valSet, _ := factory.RandValidatorSet(5, 10) params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion - newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 + params.Version.AppVersion = 10 initialState := state.State{ Version: state.Version{ @@ -137,10 +128,6 @@ func TestRollbackNoBlocks(t *testing.T) { ConsensusParams: *params, LastHeightConsensusParamsChanged: height + 1, } - require.NoError(t, stateStore.Save(initialState)) - blockStore.On("LoadBlockMeta", height).Return(nil) - - _, _, err := state.Rollback(blockStore, stateStore) - require.Error(t, err) - require.Contains(t, err.Error(), "block at height 100 not found") + require.NoError(t, stateStore.Bootstrap(initialState)) + return stateStore } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 68d1ec9412..1ae111f63f 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -1,14 +1,12 @@ -// Package pubsub implements a pub-sub model with a single publisher (Server) -// and multiple subscribers (clients). +// Package pubsub implements an event dispatching server with a single publisher +// and multiple subscriber clients. Multiple goroutines can safely publish to a +// single Server instance. // -// Though you can have multiple publishers by sharing a pointer to a server or -// by giving the same channel to each publisher and publishing messages from -// that channel (fan-in). -// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. +// Clients register subscriptions with a query to select which messages they +// wish to receive. When messages are published, they are broadcast to all +// clients whose subscription query matches that message. Queries are +// constructed using the github.com/tendermint/tendermint/libs/pubsub/query +// package. // // Example: // @@ -16,7 +14,7 @@ // if err != nil { // return err // } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) +// ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) // defer cancel() // subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) // if err != nil { @@ -38,22 +36,12 @@ import ( "context" "errors" "fmt" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/service" ) -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - var ( // ErrSubscriptionNotFound is returned when a client tries to unsubscribe // from not existing subscription. @@ -62,6 +50,10 @@ var ( // ErrAlreadySubscribed is returned when a client tries to subscribe twice or // more using the same query. ErrAlreadySubscribed = errors.New("already subscribed") + + // ErrServerStopped is returned when attempting to publish or subscribe to a + // server that has been stopped. + ErrServerStopped = errors.New("pubsub server is stopped") ) // Query defines an interface for a query to be used for subscribing. 
A query @@ -75,17 +67,21 @@ type Query interface { String() string } +// UnsubscribeArgs are the parameters to remove a subscription. +// The subscriber ID must be populated, and at least one of the client ID or +// the registered query. type UnsubscribeArgs struct { - ID string - Subscriber string - Query Query + Subscriber string // subscriber ID chosen by the client (required) + ID string // subscription ID (assigned by the server) + Query Query // the query registered with the subscription } +// Validate returns nil if args are valid to identify a subscription to remove. +// Otherwise, it reports an error. func (args UnsubscribeArgs) Validate() error { if args.Subscriber == "" { return errors.New("must specify a subscriber") } - if args.ID == "" && args.Query == nil { return fmt.Errorf("subscription is not fully defined [subscriber=%q]", args.Subscriber) } @@ -93,35 +89,28 @@ func (args UnsubscribeArgs) Validate() error { return nil } -type cmd struct { - op operation - - // subscribe, unsubscribe - query Query - subscription *Subscription - clientID string - - // publish - msg interface{} - events []types.Event -} - // Server allows clients to subscribe/unsubscribe for messages, publishing // messages with or without events, and manages internal state. type Server struct { service.BaseService - cmds chan cmd - cmdsCap int - - // check if we have subscription before - // subscribing or unsubscribing - mtx tmsync.RWMutex + queue chan item + done <-chan struct{} // closed when server should exit + stop func() // signal the server to exit + pubs sync.RWMutex // excl: shutdown; shared: active publisher + exited chan struct{} // server exited + + // All subscriptions currently known. + // Lock exclusive to add, remove, or cancel subscriptions. + // Lock shared to look up or publish to subscriptions. + subs struct { + sync.RWMutex + index *subIndex + } - // subscriber -> [query->id (string) OR id->query (string))], - // track connections both by ID (new) and query (legacy) to - // avoid breaking the interface. - subscriptions map[string]map[string]string + // TODO(creachadair): Rework the options so that this does not need to live + // as a field. It is not otherwise needed. + queueCap int } // Option sets a parameter for the server. @@ -131,37 +120,34 @@ type Option func(*Server) // for a detailed description of how to configure buffering. If no options are // provided, the resulting server's queue is unbuffered. func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]string), + s := new(Server) + for _, opt := range options { + opt(s) } s.BaseService = *service.NewBaseService(nil, "PubSub", s) - for _, option := range options { - option(s) - } + // The queue receives items to be published. + s.queue = make(chan item, s.queueCap) - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) + // The index tracks subscriptions by ID and query terms. + s.subs.index = newSubIndex() return s } -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). +// BufferCapacity allows you to specify capacity for publisher's queue. This +// is the number of messages that can be published without blocking. If no +// buffer is specified, publishing is synchronous with delivery. 
This function +// will panic if cap < 0. func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } + if cap < 0 { + panic("negative buffer capacity") } + return func(s *Server) { s.queueCap = cap } } -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} +// BufferCapacity returns capacity of the publication queue. +func (s *Server) BufferCapacity() int { return cap(s.queue) } // Subscribe creates a subscription for the given client. // @@ -195,331 +181,223 @@ func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query } func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { + s.subs.Lock() + defer s.subs.Unlock() + + if s.subs.index == nil { + return nil, ErrServerStopped + } else if s.subs.index.contains(clientID, query.String()) { return nil, ErrAlreadySubscribed } - subscription := NewSubscription(outCapacity) - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]string) - } - s.subscriptions[clientID][query.String()] = subscription.id - s.subscriptions[clientID][subscription.id] = query.String() - s.mtx.Unlock() - return subscription, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.Quit(): - return nil, nil - } + sub := NewSubscription(outCapacity) + s.subs.index.add(&subInfo{ + clientID: clientID, + query: query, + subID: sub.id, + sub: sub, + }) + return sub, nil } -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. +// Unsubscribe removes the subscription for the given client and/or query. It +// returns ErrSubscriptionNotFound if no such subscription exists. func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if err := args.Validate(); err != nil { return err } - var qs string - - if args.Query != nil { - qs = args.Query.String() + s.subs.Lock() + defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped } - clientSubscriptions, err := func() (map[string]string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] - - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return nil, err - } - } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - - if !ok { - return nil, ErrSubscriptionNotFound + // TODO(creachadair): Do we need to support unsubscription for an "empty" + // query? I believe that case is not possible by the Query grammar, but we + // should make sure. + // + // Revisit this logic once we are able to remove indexing by query. 
+ + var evict subInfoSet + if args.Subscriber != "" { + evict = s.subs.index.findClientID(args.Subscriber) + if args.Query != nil { + evict = evict.withQuery(args.Query.String()) } - - return clientSubscriptions, nil - }() - - if err != nil { - return err + } else { + evict = s.subs.index.findQuery(args.Query.String()) } - select { - case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(clientSubscriptions, args.ID) - delete(clientSubscriptions, qs) - - if len(clientSubscriptions) == 0 { - delete(s.subscriptions, args.Subscriber) - } - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil + if len(evict) == 0 { + return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil } -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. +// UnsubscribeAll removes all subscriptions for the given client ID. +// It returns ErrSubscriptionNotFound if no subscriptions exist for that client. func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } + s.subs.Lock() + defer s.subs.Unlock() - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(s.subscriptions, clientID) - - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil + evict := s.subs.index.findClientID(clientID) + if len(evict) == 0 { + return ErrSubscriptionNotFound } + s.removeSubs(evict, ErrUnsubscribed) + return nil } // NumClients returns the number of clients. func (s *Server) NumClients() int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions) + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.byClient) } // NumClientSubscriptions returns the number of subscriptions the client has. func (s *Server) NumClientSubscriptions(clientID string) int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions[clientID]) / 2 + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.findClientID(clientID)) } // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) + return s.publish(ctx, msg, []types.Event{}) } // PublishWithEvents publishes the given message with the set of events. The set // is matched with clients queries. If there is a match, the message is sent to // the client. func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, events: events}: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } + return s.publish(ctx, msg, events) } // OnStop implements Service.OnStop by shutting down the server. 
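For illustration, an end-to-end sketch of the publish path described above: events are matched against each subscription's query, and matching messages are delivered on the subscription's Out channel. The server setup, client ID, query text, and event values are assumptions for the example, not part of the patch.

// Assumed imports:
//   "context"
//   abci "github.com/tendermint/tendermint/abci/types"
//   "github.com/tendermint/tendermint/libs/pubsub"
//   "github.com/tendermint/tendermint/libs/pubsub/query"
func publishSketch(ctx context.Context) error {
	s := pubsub.NewServer(pubsub.BufferCapacity(8)) // up to 8 pending publishes
	if err := s.Start(); err != nil {
		return err
	}
	defer s.Stop() //nolint:errcheck

	q, err := query.New("tm.event = 'Tx'") // query text illustrative
	if err != nil {
		return err
	}
	sub, err := s.SubscribeUnbuffered(ctx, "client-1", q)
	if err != nil {
		return err
	}

	// The event below matches the query, so the message is delivered.
	if err := s.PublishWithEvents(ctx, "tx-payload", []abci.Event{{
		Type:       "tm",
		Attributes: []abci.EventAttribute{{Key: "event", Value: "Tx"}},
	}}); err != nil {
		return err
	}
	msg := <-sub.Out()
	_ = msg
	return nil
}
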
-func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} - -// NOTE: not goroutine safe -type state struct { - // query string -> client -> subscription - subscriptions map[string]map[string]*Subscription - // query string -> queryPlusRefCount - queries map[string]*queryPlusRefCount -} +func (s *Server) OnStop() { s.stop() } -// queryPlusRefCount holds a pointer to a query and reference counter. When -// refCount is zero, query will be removed. -type queryPlusRefCount struct { - q Query - refCount int -} +// Wait implements Service.Wait by blocking until the server has exited, then +// yielding to the base service wait. +func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() } // OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { - go s.loop(state{ - subscriptions: make(map[string]map[string]*Subscription), - queries: make(map[string]*queryPlusRefCount), - }) - return nil -} +func (s *Server) OnStart() error { s.run(); return nil } -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} +// OnReset implements Service.OnReset. It has no effect for this service. +func (s *Server) OnReset() error { return nil } -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query.String(), cmd.subscription.id, ErrUnsubscribed) - } else { - state.removeClient(cmd.clientID, ErrUnsubscribed) - } - case shutdown: - state.removeAll(nil) - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.subscription) - case pub: - if err := state.send(cmd.msg, cmd.events); err != nil { - s.Logger.Error("Error querying for events", "err", err) - } - } - } -} - -func (state *state) add(clientID string, q Query, subscription *Subscription) { - qStr := q.String() +func (s *Server) publish(ctx context.Context, data interface{}, events []types.Event) error { + s.pubs.RLock() + defer s.pubs.RUnlock() - // initialize subscription for this client per query if needed - if _, ok := state.subscriptions[qStr]; !ok { - state.subscriptions[qStr] = make(map[string]*Subscription) - } - - if _, ok := state.subscriptions[subscription.id]; !ok { - state.subscriptions[subscription.id] = make(map[string]*Subscription) - } - - // create subscription - state.subscriptions[qStr][clientID] = subscription - state.subscriptions[subscription.id][clientID] = subscription - - // initialize query if needed - if _, ok := state.queries[qStr]; !ok { - state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} + select { + case <-s.done: + return ErrServerStopped + case <-ctx.Done(): + return ctx.Err() + case s.queue <- item{ + Data: data, + Events: events, + }: + return nil } - // increment reference counter - state.queries[qStr].refCount++ } -func (state *state) remove(clientID string, qStr, id string, reason error) { - clientSubscriptions, ok := state.subscriptions[qStr] - if !ok { - return - } - - subscription, ok := clientSubscriptions[clientID] - if !ok { - return - } +func (s *Server) run() { + // The server runs until ctx is canceled. + ctx, cancel := context.WithCancel(context.Background()) + s.done = ctx.Done() + s.stop = cancel + + // Shutdown monitor: When the context ends, wait for any active publish + // calls to exit, then close the queue to signal the sender to exit. 
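The shutdown ordering described in the comment above is easy to get wrong in isolation, so here is a self-contained sketch of the same pattern with invented names (assumed imports: "errors", "sync"): writers hold the read side of an RWMutex for the duration of a send, and the closer takes the write side before closing the channel, so a close can never race with an in-flight send.

type closableQueue struct {
	mu    sync.RWMutex
	done  chan struct{}
	queue chan string
}

func (q *closableQueue) publish(s string) error {
	q.mu.RLock()
	defer q.mu.RUnlock()
	select {
	case <-q.done:
		return errors.New("queue stopped")
	case q.queue <- s:
		return nil
	}
}

func (q *closableQueue) stop() {
	close(q.done) // publishers blocked in the select observe shutdown
	q.mu.Lock()   // wait for publishers already holding the read lock
	defer q.mu.Unlock()
	close(q.queue) // safe: nobody can be sending on the channel now
}
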
+ go func() { + <-ctx.Done() + s.pubs.Lock() + defer s.pubs.Unlock() + close(s.queue) + }() - subscription.cancel(reason) + s.exited = make(chan struct{}) + go func() { + defer close(s.exited) - // remove client from query map. - // if query has no other clients subscribed, remove it. - delete(state.subscriptions[qStr], clientID) - delete(state.subscriptions[id], clientID) - if len(state.subscriptions[qStr]) == 0 { - delete(state.subscriptions, qStr) - } - - // decrease ref counter in queries - if ref, ok := state.queries[qStr]; ok { - ref.refCount-- - if ref.refCount == 0 { - // remove the query if nobody else is using it - delete(state.queries, qStr) + // Sender: Service the queue and forward messages to subscribers. + for it := range s.queue { + if err := s.send(it.Data, it.Events); err != nil { + s.Logger.Error("Error sending event", "err", err) + } } - } + // Terminate all subscribers without error before exit. + s.subs.Lock() + defer s.subs.Unlock() + for si := range s.subs.index.all { + si.sub.cancel(nil) + } + s.subs.index = nil + }() } -func (state *state) removeClient(clientID string, reason error) { - seen := map[string]struct{}{} - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[clientID]; ok { - if _, ok = seen[sub.id]; ok { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - state.remove(clientID, qStr, sub.id, reason) - seen[sub.id] = struct{}{} - } +// removeSubs cancels and removes all the subscriptions in evict with the given +// error. The caller must hold the s.subs lock. +func (s *Server) removeSubs(evict subInfoSet, reason error) { + for si := range evict { + si.sub.cancel(reason) } + s.subs.index.removeAll(evict) } -func (state *state) removeAll(reason error) { - for qStr, clientSubscriptions := range state.subscriptions { - sub, ok := clientSubscriptions[qStr] - if !ok || ok && sub.id == qStr { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue +// send delivers the given message to all matching subscribers. An error in +// query matching stops transmission and is returned. +func (s *Server) send(data interface{}, events []types.Event) error { + // At exit, evict any subscriptions that were too slow. + evict := make(subInfoSet) + defer func() { + if len(evict) != 0 { + s.subs.Lock() + defer s.subs.Unlock() + s.removeSubs(evict, ErrOutOfCapacity) } + }() - for clientID := range clientSubscriptions { - state.remove(clientID, qStr, sub.id, reason) - } - } -} + // N.B. Order is important here. We must acquire and defer the lock release + // AFTER deferring the eviction cleanup: The cleanup must happen after the + // reader lock has released, or it will deadlock. + s.subs.RLock() + defer s.subs.RUnlock() -func (state *state) send(msg interface{}, events []types.Event) error { - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { - continue - } - var q Query - if qi, ok := state.queries[qStr]; ok { - q = qi.q - } else { + for si := range s.subs.index.all { + match, err := si.query.Matches(events) + if err != nil { + return fmt.Errorf("match failed against query: %w", err) + // TODO(creachadair): Should we evict this subscription? + } else if !match { continue } - match, err := q.Matches(events) - if err != nil { - return fmt.Errorf("failed to match against query %s: %w", q.String(), err) + // Subscriptions may be buffered or unbuffered. 
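A stripped-down sketch of the defer ordering called out in the N.B. above (helper names invented; assumed import: "sync"): the cleanup closure is deferred before the read lock, so on return the read lock is released first and the cleanup can take the write lock without deadlocking.

func readThenEvict(mu *sync.RWMutex, evict func()) {
	defer func() { // registered first, runs last: after RUnlock below
		mu.Lock()
		defer mu.Unlock()
		evict()
	}()
	mu.RLock()
	defer mu.RUnlock() // registered second, runs first on return
	// ... read-locked matching and delivery ...
}
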
Unbuffered subscriptions + // are intended for internal use such as indexing, where we don't want to + // penalize a slow reader. Buffered subscribers must keep up with their + // queue, or they will be terminated. + // + // TODO(creachadair): Unbuffered subscriptions used by the event indexer + // to avoid losing events if it happens to be slow. Rework this so that + // use case doesn't require this affordance, and then remove unbuffered + // subscriptions. + msg := NewMessage(si.sub.id, data, events) + if cap(si.sub.out) == 0 { + si.sub.out <- msg + continue } - - if match { - for clientID, subscription := range clientSubscriptions { - if cap(subscription.out) == 0 { - // block on unbuffered channel - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - case <-subscription.canceled: - } - } else { - // don't block on buffered channels - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - default: - state.remove(clientID, qStr, subscription.id, ErrOutOfCapacity) - } - } - } + select { + case si.sub.out <- msg: + // ok, delivered + default: + // slow subscriber, cancel them + evict.add(si) } } diff --git a/libs/pubsub/subindex.go b/libs/pubsub/subindex.go new file mode 100644 index 0000000000..48dccf72d8 --- /dev/null +++ b/libs/pubsub/subindex.go @@ -0,0 +1,113 @@ +package pubsub + +import "github.com/tendermint/tendermint/abci/types" + +// An item to be published to subscribers. +type item struct { + Data interface{} + Events []types.Event +} + +// A subInfo value records a single subscription. +type subInfo struct { + clientID string // chosen by the client + query Query // chosen by the client + subID string // assigned at registration + sub *Subscription // receives published events +} + +// A subInfoSet is an unordered set of subscription info records. +type subInfoSet map[*subInfo]struct{} + +func (s subInfoSet) contains(si *subInfo) bool { _, ok := s[si]; return ok } +func (s subInfoSet) add(si *subInfo) { s[si] = struct{}{} } +func (s subInfoSet) remove(si *subInfo) { delete(s, si) } + +// withQuery returns the subset of s whose query string matches qs. +func (s subInfoSet) withQuery(qs string) subInfoSet { + out := make(subInfoSet) + for si := range s { + if si.query.String() == qs { + out.add(si) + } + } + return out +} + +// A subIndex is an indexed collection of subscription info records. +// The index is not safe for concurrent use without external synchronization. +type subIndex struct { + all subInfoSet // all subscriptions + byClient map[string]subInfoSet // per-client subscriptions + byQuery map[string]subInfoSet // per-query subscriptions + + // TODO(creachadair): We allow indexing by query to support existing use by + // the RPC service methods for event streaming. Fix up those methods not to + // require this, and then remove indexing by query. +} + +// newSubIndex constructs a new, empty subscription index. +func newSubIndex() *subIndex { + return &subIndex{ + all: make(subInfoSet), + byClient: make(map[string]subInfoSet), + byQuery: make(map[string]subInfoSet), + } +} + +// findClients returns the set of subscriptions for the given client ID, or nil. +func (idx *subIndex) findClientID(id string) subInfoSet { return idx.byClient[id] } + +// findQuery returns the set of subscriptions on the given query string, or nil. +func (idx *subIndex) findQuery(qs string) subInfoSet { return idx.byQuery[qs] } + +// contains reports whether idx contains any subscription matching the given +// client ID and query pair. 
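For illustration, a package-internal sketch (as it might appear in a test for package pubsub) of how the double index resolves an unsubscribe request; the client ID, subscription ID, and query text are assumptions. It mirrors what Server.Unsubscribe and removeSubs do above.

// Assumed import: "github.com/tendermint/tendermint/libs/pubsub/query"
func evictSketch() error {
	q, err := query.New("tm.event = 'Tx'") // query text illustrative
	if err != nil {
		return err
	}
	idx := newSubIndex()
	idx.add(&subInfo{clientID: "client-1", query: q, subID: "sub-1", sub: NewSubscription(1)})

	// All of client-1's subscriptions, optionally narrowed to one query,
	// then cancelled and removed from every index.
	evict := idx.findClientID("client-1").withQuery(q.String())
	for si := range evict {
		si.sub.cancel(ErrUnsubscribed)
	}
	idx.removeAll(evict)
	return nil
}
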
+func (idx *subIndex) contains(clientID, query string) bool { + csubs, qsubs := idx.byClient[clientID], idx.byQuery[query] + if len(csubs) == 0 || len(qsubs) == 0 { + return false + } + for si := range csubs { + if qsubs.contains(si) { + return true + } + } + return false +} + +// add adds si to the index, replacing any previous entry with the same terms. +// It is the caller's responsibility to check for duplicates before adding. +// See also the contains method. +func (idx *subIndex) add(si *subInfo) { + idx.all.add(si) + if m := idx.byClient[si.clientID]; m == nil { + idx.byClient[si.clientID] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } + qs := si.query.String() + if m := idx.byQuery[qs]; m == nil { + idx.byQuery[qs] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } +} + +// removeAll removes all the elements of s from the index. +func (idx *subIndex) removeAll(s subInfoSet) { + for si := range s { + idx.all.remove(si) + idx.byClient[si.clientID].remove(si) + if len(idx.byClient[si.clientID]) == 0 { + delete(idx.byClient, si.clientID) + } + if si.query != nil { + qs := si.query.String() + idx.byQuery[qs].remove(si) + if len(idx.byQuery[qs]) == 0 { + delete(idx.byQuery, qs) + } + } + } +} diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 40b84711e8..4210416b6d 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -44,9 +44,7 @@ func NewSubscription(outCapacity int) *Subscription { // Out returns a channel onto which messages and events are published. // Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from // receiving a nil message. -func (s *Subscription) Out() <-chan Message { - return s.out -} +func (s *Subscription) Out() <-chan Message { return s.out } func (s *Subscription) ID() string { return s.id } diff --git a/node/node_test.go b/node/node_test.go index 6ffc4d96c9..c2a7e7a90c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -132,7 +132,7 @@ func TestNodeSetAppVersion(t *testing.T) { n := getTestNode(t, cfg, log.TestingLogger()) // default config uses the kvstore app - var appVersion uint64 = kvstore.ProtocolVersion + appVersion := kvstore.ProtocolVersion // check version is set in state state, err := n.stateStore.Load() diff --git a/node/setup.go b/node/setup.go index 40ced410cc..2fddceac14 100644 --- a/node/setup.go +++ b/node/setup.go @@ -402,8 +402,14 @@ func createConsensusReactor( } func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { + conf := conn.DefaultMConnConfig() + conf.FlushThrottle = cfg.P2P.FlushThrottleTimeout + conf.SendRate = cfg.P2P.SendRate + conf.RecvRate = cfg.P2P.RecvRate + conf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize + return p2p.NewMConnTransport( - logger, conn.DefaultMConnConfig(), []*p2p.ChannelDescriptor{}, + logger, conf, []*p2p.ChannelDescriptor{}, p2p.MConnTransportOptions{ MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), }, diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto deleted file mode 100644 index 9c66b1e434..0000000000 --- a/proto/tendermint/abci/types.proto +++ /dev/null @@ -1,381 +0,0 @@ -syntax = "proto3"; -package tendermint.abci; - -option go_package = "github.com/tendermint/tendermint/abci/types"; - -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -import "tendermint/crypto/proof.proto"; -import "tendermint/types/types.proto"; -import "tendermint/crypto/keys.proto"; -import 
"tendermint/types/params.proto"; -import "google/protobuf/timestamp.proto"; -import "gogoproto/gogo.proto"; - -// This file is copied from http://github.com/tendermint/abci -// NOTE: When using custom types, mind the warnings. -// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues - -//---------------------------------------- -// Request types - -message Request { - oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8; - RequestEndBlock end_block = 9; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; - RequestPreprocessTxs preprocess_txs = 15; - } -} - -message RequestEcho { - string message = 1; -} - -message RequestFlush {} - -message RequestInfo { - string version = 1; - uint64 block_version = 2; - uint64 p2p_version = 3; - string abci_version = 4; -} - -message RequestInitChain { - google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - string chain_id = 2; - tendermint.types.ConsensusParams consensus_params = 3; - repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; - bytes app_state_bytes = 5; - int64 initial_height = 6; -} - -message RequestQuery { - bytes data = 1; - string path = 2; - int64 height = 3; - bool prove = 4; -} - -message RequestBeginBlock { - bytes hash = 1; - tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - -enum CheckTxType { - NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; - RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; -} - -message RequestCheckTx { - bytes tx = 1; - CheckTxType type = 2; -} - -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - -message RequestCommit {} - -// lists available snapshots -message RequestListSnapshots {} - -// offers a snapshot to the application -message RequestOfferSnapshot { - Snapshot snapshot = 1; // snapshot offered by peers - bytes app_hash = 2; // light client-verified app hash for snapshot height -} - -// loads a snapshot chunk -message RequestLoadSnapshotChunk { - uint64 height = 1; - uint32 format = 2; - uint32 chunk = 3; -} - -// Applies a snapshot chunk -message RequestApplySnapshotChunk { - uint32 index = 1; - bytes chunk = 2; - string sender = 3; -} - -message RequestPreprocessTxs { - repeated bytes txs = 1; -} - -//---------------------------------------- -// Response types - -message Response { - oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9; - ResponseEndBlock end_block = 10; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; - ResponsePreprocessTxs preprocess_txs = 16; - } -} - -// nondeterministic -message 
ResponseException { - string error = 1; -} - -message ResponseEcho { - string message = 1; -} - -message ResponseFlush {} - -message ResponseInfo { - string data = 1; - - // this is the software version of the application. TODO: remove? - string version = 2; - uint64 app_version = 3; - - int64 last_block_height = 4; - bytes last_block_app_hash = 5; -} - -message ResponseInitChain { - tendermint.types.ConsensusParams consensus_params = 1; - repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; - bytes app_hash = 3; -} - -message ResponseQuery { - uint32 code = 1; - // bytes data = 2; // use "value" instead. - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 index = 5; - bytes key = 6; - bytes value = 7; - tendermint.crypto.ProofOps proof_ops = 8; - int64 height = 9; - string codespace = 10; -} - -message ResponseBeginBlock { - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCheckTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; - string sender = 9; - int64 priority = 10; - - // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. - string mempool_error = 11; -} - -message ResponseDeliverTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; - repeated Event events = 7 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic - string codespace = 8; -} - -message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; - tendermint.types.ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCommit { - // reserve 1 - bytes data = 2; - int64 retain_height = 3; -} - -message ResponseListSnapshots { - repeated Snapshot snapshots = 1; -} - -message ResponseOfferSnapshot { - Result result = 1; - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Snapshot accepted, apply chunks - ABORT = 2; // Abort all snapshot restoration - REJECT = 3; // Reject this specific snapshot, try others - REJECT_FORMAT = 4; // Reject all snapshots of this format, try others - REJECT_SENDER = 5; // Reject all snapshots from the sender(s), try others - } -} - -message ResponseLoadSnapshotChunk { - bytes chunk = 1; -} - -message ResponseApplySnapshotChunk { - Result result = 1; - repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply - repeated string reject_senders = 3; // Chunk senders to reject and ban - - enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Chunk successfully accepted - ABORT = 2; // Abort all snapshot restoration - RETRY = 3; // Retry chunk (combine with refetch and reject) - RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) - REJECT_SNAPSHOT = 5; // Reject this snapshot, try others - } -} - -message ResponsePreprocessTxs { - repeated bytes txs = 1; - 
tendermint.types.Messages messages = 2; -} - -//---------------------------------------- -// Misc. - -message LastCommitInfo { - int32 round = 1; - repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; -} - -// Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. -// Later, transactions may be queried using these events. -message Event { - string type = 1; - repeated EventAttribute attributes = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes,omitempty"]; -} - -// EventAttribute is a single key-value pair, associated with an event. -message EventAttribute { - string key = 1; - string value = 2; - bool index = 3; // nondeterministic -} - -// TxResult contains results of executing the transaction. -// -// One usage is indexing transaction results. -message TxResult { - int64 height = 1; - uint32 index = 2; - bytes tx = 3; - ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; -} - -//---------------------------------------- -// Blockchain Types - -// Validator -message Validator { - bytes address = 1; // The first 20 bytes of SHA256(public key) - // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - int64 power = 3; // The voting power -} - -// ValidatorUpdate -message ValidatorUpdate { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - int64 power = 2; -} - -// VoteInfo -message VoteInfo { - Validator validator = 1 [(gogoproto.nullable) = false]; - bool signed_last_block = 2; -} - -enum EvidenceType { - UNKNOWN = 0; - DUPLICATE_VOTE = 1; - LIGHT_CLIENT_ATTACK = 2; -} - -message Evidence { - EvidenceType type = 1; - // The offending validator - Validator validator = 2 [(gogoproto.nullable) = false]; - // The height when the offense occurred - int64 height = 3; - // The corresponding time where the offense occurred - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - // Total voting power of the validator set in case the ABCI application does - // not store historical validators. 
- // https://github.com/tendermint/tendermint/issues/4581 - int64 total_voting_power = 5; -} - -//---------------------------------------- -// State Sync Types - -message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // Number of chunks in the snapshot - bytes hash = 4; // Arbitrary snapshot hash, equal only if identical - bytes metadata = 5; // Arbitrary application metadata -} - -//---------------------------------------- -// Service Definition - -service ABCIApplication { - rpc Echo(RequestEcho) returns (ResponseEcho); - rpc Flush(RequestFlush) returns (ResponseFlush); - rpc Info(RequestInfo) returns (ResponseInfo); - rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); - rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); - rpc Query(RequestQuery) returns (ResponseQuery); - rpc Commit(RequestCommit) returns (ResponseCommit); - rpc InitChain(RequestInitChain) returns (ResponseInitChain); - rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); - rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); - rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); - rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); - rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); - rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); - rpc PreprocessTxs(RequestPreprocessTxs) returns (ResponsePreprocessTxs); -} diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index fcbef7107a..43ad139ecf 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -899,7 +899,10 @@ func (m *BlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -968,7 +971,10 @@ func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1054,7 +1060,10 @@ func (m *BlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1104,7 +1113,10 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1192,7 +1204,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1417,7 +1432,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/blocksync/types.proto 
b/proto/tendermint/blocksync/types.proto deleted file mode 100644 index 8c187c793e..0000000000 --- a/proto/tendermint/blocksync/types.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; -package tendermint.blocksync; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/blocksync"; - -import "tendermint/types/block.proto"; - -// BlockRequest requests a block for a specific height -message BlockRequest { - int64 height = 1; -} - -// NoBlockResponse informs the node that the peer does not have block at the requested height -message NoBlockResponse { - int64 height = 1; -} - -// BlockResponse returns block to the requested -message BlockResponse { - tendermint.types.Block block = 1; -} - -// StatusRequest requests the status of a peer. -message StatusRequest { -} - -// StatusResponse is a peer response to inform their status. -message StatusResponse { - int64 height = 1; - int64 base = 2; -} - -message Message { - oneof sum { - BlockRequest block_request = 1; - NoBlockResponse no_block_response = 2; - BlockResponse block_response = 3; - StatusRequest status_request = 4; - StatusResponse status_response = 5; - } -} diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 6372a88d40..67efd1c2c2 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -1932,7 +1932,10 @@ func (m *NewRoundStep) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2109,7 +2112,10 @@ func (m *NewValidBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2192,7 +2198,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2313,7 +2322,10 @@ func (m *ProposalPOL) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2434,7 +2446,10 @@ func (m *BlockPart) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2520,7 +2535,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2646,7 +2664,10 @@ func (m *HasVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2786,7 +2807,10 @@ func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + 
} + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2959,7 +2983,10 @@ func (m *VoteSetBits) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -3324,7 +3351,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto deleted file mode 100644 index 6e1f413711..0000000000 --- a/proto/tendermint/consensus/types.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; -package tendermint.consensus; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/libs/bits/types.proto"; - -// NewRoundStep is sent for every step taken in the ConsensusState. -// For every height/round/step transition -message NewRoundStep { - int64 height = 1; - int32 round = 2; - uint32 step = 3; - int64 seconds_since_start_time = 4; - int32 last_commit_round = 5; -} - -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. -// In case the block is also committed, then IsCommit flag is set to true. -message NewValidBlock { - int64 height = 1; - int32 round = 2; - tendermint.types.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray block_parts = 4; - bool is_commit = 5; -} - -// Proposal is sent when a new block is proposed. -message Proposal { - tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false]; -} - -// ProposalPOL is sent when a previous proposal is re-proposed. -message ProposalPOL { - int64 height = 1; - int32 proposal_pol_round = 2; - tendermint.libs.bits.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; -} - -// BlockPart is sent when gossipping a piece of the proposed block. -message BlockPart { - int64 height = 1; - int32 round = 2; - tendermint.types.Part part = 3 [(gogoproto.nullable) = false]; -} - -// Vote is sent when voting for a proposal (or lack thereof). -message Vote { - tendermint.types.Vote vote = 1; -} - -// HasVote is sent to indicate that a particular vote has been received. -message HasVote { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - int32 index = 4; -} - -// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. -message VoteSetMaj23 { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; -} - -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. 
-message VoteSetBits { - int64 height = 1; - int32 round = 2; - tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false]; -} - -message Message { - oneof sum { - NewRoundStep new_round_step = 1; - NewValidBlock new_valid_block = 2; - Proposal proposal = 3; - ProposalPOL proposal_pol = 4; - BlockPart block_part = 5; - Vote vote = 6; - HasVote has_vote = 7; - VoteSetMaj23 vote_set_maj23 = 8; - VoteSetBits vote_set_bits = 9; - } -} diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go index fd80819cd0..86ff1be01f 100644 --- a/proto/tendermint/consensus/wal.pb.go +++ b/proto/tendermint/consensus/wal.pb.go @@ -921,7 +921,10 @@ func (m *MsgInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1061,7 +1064,10 @@ func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1130,7 +1136,10 @@ func (m *EndHeight) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1320,7 +1329,10 @@ func (m *WALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { @@ -1439,7 +1451,10 @@ func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthWal } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go index 24c6c1b1ba..8ff4c4a4fe 100644 --- a/proto/tendermint/crypto/keys.pb.go +++ b/proto/tendermint/crypto/keys.pb.go @@ -687,7 +687,10 @@ func (m *PublicKey) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthKeys } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 82fb943fcd..97350c64c7 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -820,7 +820,10 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -940,7 +943,10 @@ func (m *ValueOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1086,7 +1092,10 @@ func (m *DominoOp) Unmarshal(dAtA []byte) error { if err != nil { return err 
} - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1236,7 +1245,10 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { @@ -1320,7 +1332,10 @@ func (m *ProofOps) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthProof } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go index c0ebcb9760..ad87f854f4 100644 --- a/proto/tendermint/libs/bits/types.pb.go +++ b/proto/tendermint/libs/bits/types.pb.go @@ -307,7 +307,10 @@ func (m *BitArray) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 11e259551d..3487652bc8 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -370,7 +370,10 @@ func (m *Txs) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -455,7 +458,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto deleted file mode 100644 index b55d9717b1..0000000000 --- a/proto/tendermint/mempool/types.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; -package tendermint.mempool; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool"; - -message Txs { - repeated bytes txs = 1; -} - -message Message { - oneof sum { - Txs txs = 1; - } -} diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go index 47a3bb0cd8..7c26d3fcd4 100644 --- a/proto/tendermint/p2p/conn.pb.go +++ b/proto/tendermint/p2p/conn.pb.go @@ -723,7 +723,10 @@ func (m *PacketPing) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -773,7 +776,10 @@ func (m *PacketPong) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -896,7 +902,10 @@ func (m *PacketMsg) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1051,7 +1060,10 @@ func (m *Packet) Unmarshal(dAtA []byte) 
error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { @@ -1168,7 +1180,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthConn } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto deleted file mode 100644 index b12de6c827..0000000000 --- a/proto/tendermint/p2p/conn.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "tendermint/crypto/keys.proto"; - -message PacketPing {} - -message PacketPong {} - -message PacketMsg { - int32 channel_id = 1 [(gogoproto.customname) = "ChannelID"]; - bool eof = 2 [(gogoproto.customname) = "EOF"]; - bytes data = 3; -} - -message Packet { - oneof sum { - PacketPing packet_ping = 1; - PacketPong packet_pong = 2; - PacketMsg packet_msg = 3; - } -} - -message AuthSigMessage { - tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; - bytes sig = 2; -} diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 38c8239dde..61036142fb 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -13,10 +13,6 @@ func (m *PexMessage) Wrap(pb proto.Message) error { m.Sum = &PexMessage_PexRequest{PexRequest: msg} case *PexResponse: m.Sum = &PexMessage_PexResponse{PexResponse: msg} - case *PexRequestV2: - m.Sum = &PexMessage_PexRequestV2{PexRequestV2: msg} - case *PexResponseV2: - m.Sum = &PexMessage_PexResponseV2{PexResponseV2: msg} default: return fmt.Errorf("unknown pex message: %T", msg) } @@ -31,10 +27,6 @@ func (m *PexMessage) Unwrap() (proto.Message, error) { return msg.PexRequest, nil case *PexMessage_PexResponse: return msg.PexResponse, nil - case *PexMessage_PexRequestV2: - return msg.PexRequestV2, nil - case *PexMessage_PexResponseV2: - return msg.PexResponseV2, nil default: return nil, fmt.Errorf("unknown pex message: %T", msg) } diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 63882c3643..25d636e43d 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PexAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (m *PexAddress) Reset() { *m = PexAddress{} } @@ -62,27 +60,13 @@ func (m *PexAddress) XXX_DiscardUnknown() { var xxx_messageInfo_PexAddress proto.InternalMessageInfo -func (m *PexAddress) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *PexAddress) GetIP() string { +func (m *PexAddress) GetURL() string { if m != nil { - return m.IP + return m.URL } return "" } -func (m *PexAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type PexRequest struct { } @@ -163,136 +147,10 @@ func (m *PexResponse) GetAddresses() []PexAddress { return nil } -type PexAddressV2 struct { - 
URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` -} - -func (m *PexAddressV2) Reset() { *m = PexAddressV2{} } -func (m *PexAddressV2) String() string { return proto.CompactTextString(m) } -func (*PexAddressV2) ProtoMessage() {} -func (*PexAddressV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{3} -} -func (m *PexAddressV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexAddressV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexAddressV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexAddressV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddressV2.Merge(m, src) -} -func (m *PexAddressV2) XXX_Size() int { - return m.Size() -} -func (m *PexAddressV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddressV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexAddressV2 proto.InternalMessageInfo - -func (m *PexAddressV2) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -type PexRequestV2 struct { -} - -func (m *PexRequestV2) Reset() { *m = PexRequestV2{} } -func (m *PexRequestV2) String() string { return proto.CompactTextString(m) } -func (*PexRequestV2) ProtoMessage() {} -func (*PexRequestV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{4} -} -func (m *PexRequestV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexRequestV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexRequestV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexRequestV2.Merge(m, src) -} -func (m *PexRequestV2) XXX_Size() int { - return m.Size() -} -func (m *PexRequestV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexRequestV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexRequestV2 proto.InternalMessageInfo - -type PexResponseV2 struct { - Addresses []PexAddressV2 `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` -} - -func (m *PexResponseV2) Reset() { *m = PexResponseV2{} } -func (m *PexResponseV2) String() string { return proto.CompactTextString(m) } -func (*PexResponseV2) ProtoMessage() {} -func (*PexResponseV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{5} -} -func (m *PexResponseV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexResponseV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexResponseV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexResponseV2.Merge(m, src) -} -func (m *PexResponseV2) XXX_Size() int { - return m.Size() -} -func (m *PexResponseV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexResponseV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexResponseV2 proto.InternalMessageInfo - -func (m *PexResponseV2) GetAddresses() []PexAddressV2 { - if m != nil { - return m.Addresses - } - return nil -} - type PexMessage struct { // Types that are valid to be assigned to Sum: // *PexMessage_PexRequest // 
*PexMessage_PexResponse - // *PexMessage_PexRequestV2 - // *PexMessage_PexResponseV2 Sum isPexMessage_Sum `protobuf_oneof:"sum"` } @@ -300,7 +158,7 @@ func (m *PexMessage) Reset() { *m = PexMessage{} } func (m *PexMessage) String() string { return proto.CompactTextString(m) } func (*PexMessage) ProtoMessage() {} func (*PexMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{6} + return fileDescriptor_81c2f011fd13be57, []int{3} } func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,22 +194,14 @@ type isPexMessage_Sum interface { } type PexMessage_PexRequest struct { - PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` + PexRequest *PexRequest `protobuf:"bytes,3,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } type PexMessage_PexResponse struct { - PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` -} -type PexMessage_PexRequestV2 struct { - PexRequestV2 *PexRequestV2 `protobuf:"bytes,3,opt,name=pex_request_v2,json=pexRequestV2,proto3,oneof" json:"pex_request_v2,omitempty"` -} -type PexMessage_PexResponseV2 struct { - PexResponseV2 *PexResponseV2 `protobuf:"bytes,4,opt,name=pex_response_v2,json=pexResponseV2,proto3,oneof" json:"pex_response_v2,omitempty"` + PexResponse *PexResponse `protobuf:"bytes,4,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` } -func (*PexMessage_PexRequest) isPexMessage_Sum() {} -func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (*PexMessage_PexRequestV2) isPexMessage_Sum() {} -func (*PexMessage_PexResponseV2) isPexMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { @@ -374,27 +224,11 @@ func (m *PexMessage) GetPexResponse() *PexResponse { return nil } -func (m *PexMessage) GetPexRequestV2() *PexRequestV2 { - if x, ok := m.GetSum().(*PexMessage_PexRequestV2); ok { - return x.PexRequestV2 - } - return nil -} - -func (m *PexMessage) GetPexResponseV2() *PexResponseV2 { - if x, ok := m.GetSum().(*PexMessage_PexResponseV2); ok { - return x.PexResponseV2 - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. 
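For illustration, a caller-side sketch of the consolidated PEX wire types after this change: the V2 variants are gone and an address is just a URL. The URL text and function name are assumptions (assumed imports: "fmt", p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p").

func pexRoundTrip() error {
	var msg p2pproto.PexMessage
	if err := msg.Wrap(&p2pproto.PexResponse{
		Addresses: []p2pproto.PexAddress{
			{URL: "mconn://nodeid@203.0.113.1:26656"}, // address form illustrative
		},
	}); err != nil {
		return err
	}
	inner, err := msg.Unwrap()
	if err != nil {
		return err
	}
	fmt.Printf("unwrapped %T\n", inner) // prints *p2p.PexResponse
	return nil
}
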
func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PexMessage_PexRequest)(nil), (*PexMessage_PexResponse)(nil), - (*PexMessage_PexRequestV2)(nil), - (*PexMessage_PexResponseV2)(nil), } } @@ -402,42 +236,33 @@ func init() { proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") - proto.RegisterType((*PexAddressV2)(nil), "tendermint.p2p.PexAddressV2") - proto.RegisterType((*PexRequestV2)(nil), "tendermint.p2p.PexRequestV2") - proto.RegisterType((*PexResponseV2)(nil), "tendermint.p2p.PexResponseV2") proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x8a, 0xda, 0x40, - 0x14, 0xc7, 0xf3, 0x61, 0x2d, 0x9e, 0x44, 0x0b, 0x43, 0x29, 0xa9, 0x6d, 0xa3, 0xe4, 0xca, 0xde, - 0x24, 0x30, 0xa5, 0x97, 0x2d, 0x36, 0x08, 0xb5, 0x50, 0xa9, 0x1d, 0xd8, 0x5c, 0xec, 0x8d, 0xe8, - 0x66, 0xc8, 0x06, 0x56, 0x33, 0x9b, 0x49, 0x16, 0x1f, 0x63, 0xdf, 0x61, 0x5f, 0xc6, 0x4b, 0x2f, - 0xf7, 0x4a, 0x96, 0xf8, 0x22, 0x8b, 0x13, 0x31, 0x23, 0xba, 0x7b, 0x37, 0xe7, 0x7f, 0xbe, 0x7e, - 0xe7, 0xcc, 0x01, 0x2b, 0xa3, 0x8b, 0x90, 0xa6, 0xf3, 0x78, 0x91, 0x79, 0x0c, 0x33, 0x8f, 0xd1, - 0xa5, 0xcb, 0xd2, 0x24, 0x4b, 0x50, 0xab, 0xf2, 0xb8, 0x0c, 0xb3, 0xf6, 0xfb, 0x28, 0x89, 0x12, - 0xe1, 0xf2, 0x76, 0xaf, 0x32, 0xca, 0x19, 0x03, 0x8c, 0xe9, 0xf2, 0x57, 0x18, 0xa6, 0x94, 0x73, - 0xf4, 0x01, 0xb4, 0x38, 0xb4, 0xd4, 0xae, 0xda, 0x6b, 0xf8, 0xf5, 0x62, 0xd3, 0xd1, 0xfe, 0x0c, - 0x88, 0x16, 0x87, 0x42, 0x67, 0x96, 0x26, 0xe9, 0x63, 0xa2, 0xc5, 0x0c, 0x21, 0xa8, 0xb1, 0x24, - 0xcd, 0x2c, 0xbd, 0xab, 0xf6, 0x9a, 0x44, 0xbc, 0x1d, 0x53, 0x54, 0x24, 0xf4, 0x36, 0xa7, 0x3c, - 0x73, 0x46, 0x60, 0x08, 0x8b, 0xb3, 0x64, 0xc1, 0x29, 0xfa, 0x09, 0x8d, 0x69, 0xd9, 0x8b, 0x72, - 0x4b, 0xed, 0xea, 0x3d, 0x03, 0xb7, 0xdd, 0x63, 0x50, 0xb7, 0xe2, 0xf1, 0x6b, 0xab, 0x4d, 0x47, - 0x21, 0x55, 0x8a, 0xf3, 0x15, 0xcc, 0xca, 0x1d, 0x60, 0xf4, 0x11, 0xf4, 0x3c, 0xbd, 0xd9, 0x13, - 0xbf, 0x2d, 0x36, 0x1d, 0xfd, 0x82, 0xfc, 0x25, 0x3b, 0xcd, 0x69, 0x89, 0xd0, 0x3d, 0x47, 0x80, - 0x9d, 0xff, 0xd0, 0x94, 0x48, 0x02, 0x8c, 0xfa, 0xa7, 0x2c, 0x9f, 0x5f, 0x66, 0x09, 0xf0, 0x29, - 0xcd, 0x83, 0x26, 0x66, 0x1d, 0x51, 0xce, 0xa7, 0x11, 0x45, 0x3f, 0xc0, 0x60, 0x74, 0x39, 0x49, - 0xcb, 0x96, 0x02, 0xea, 0xfc, 0x78, 0x7b, 0xa8, 0xa1, 0x42, 0x80, 0x1d, 0x2c, 0xd4, 0x07, 0xb3, - 0x4c, 0x2f, 0x09, 0xc5, 0xba, 0x0d, 0xfc, 0xe9, 0x6c, 0x7e, 0x19, 0x32, 0x54, 0x88, 0xc1, 0xa4, - 0xed, 0x0e, 0xa0, 0x25, 0x01, 0x4c, 0xee, 0xb0, 0xf8, 0x98, 0xf3, 0x63, 0x1d, 0x16, 0x33, 0x54, - 0x88, 0xc9, 0x24, 0x1b, 0xfd, 0x86, 0x77, 0x32, 0xc7, 0xae, 0x4c, 0x4d, 0x94, 0xf9, 0xf2, 0x0a, - 0x8a, 0xa8, 0xd3, 0x64, 0xb2, 0xe0, 0xbf, 0x01, 0x9d, 0xe7, 0x73, 0xff, 0xdf, 0xaa, 0xb0, 0xd5, - 0x75, 0x61, 0xab, 0x4f, 0x85, 0xad, 0xde, 0x6f, 0x6d, 0x65, 0xbd, 0xb5, 0x95, 0xc7, 0xad, 0xad, - 0x5c, 0x7e, 0x8f, 0xe2, 0xec, 0x3a, 0x9f, 0xb9, 0x57, 0xc9, 0xdc, 0x93, 0xee, 0x58, 0x3e, 0x69, - 0x71, 0xaf, 0xc7, 0x37, 0x3e, 0xab, 0x0b, 0xf5, 0xdb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x9b, 0xfd, 0x75, 0xfc, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 
0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x89, 0xa4, 0xe7, + 0xa7, 0xe7, 0x83, 0xa5, 0xf4, 0x41, 0x2c, 0x88, 0x2a, 0x25, 0x63, 0x2e, 0xae, 0x80, 0xd4, 0x0a, + 0xc7, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x21, 0x49, 0x2e, 0xe6, 0xd2, 0xa2, 0x1c, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xf6, 0x47, 0xf7, 0xe4, 0x99, 0x43, 0x83, 0x7c, 0x82, 0x40, 0x62, + 0x5e, 0x2c, 0x1c, 0x4c, 0x02, 0xcc, 0x5e, 0x2c, 0x1c, 0xcc, 0x02, 0x2c, 0x4a, 0x3c, 0x60, 0x4d, + 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xbe, 0x5c, 0xdc, 0x60, 0x5e, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x90, 0x1d, 0x17, 0x67, 0x22, 0xc4, 0xb8, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, + 0x0d, 0x6e, 0x23, 0x29, 0x3d, 0x54, 0xb7, 0xe8, 0x21, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xa1, 0x45, 0x69, 0x01, 0x23, 0xd8, 0x74, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, + 0x21, 0x5b, 0x2e, 0xee, 0x82, 0xd4, 0x8a, 0xf8, 0x22, 0x88, 0x65, 0x12, 0xcc, 0x0a, 0x8c, 0x38, + 0x0c, 0x84, 0x3a, 0xc7, 0x83, 0x21, 0x88, 0xab, 0x00, 0xce, 0x13, 0x72, 0xe0, 0xe2, 0x81, 0x68, + 0x87, 0xb8, 0x4e, 0x82, 0x05, 0xac, 0x5f, 0x1a, 0xab, 0x7e, 0x88, 0x12, 0x0f, 0x86, 0x20, 0xee, + 0x02, 0x04, 0xd7, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0x8b, 0x85, 0x83, 0x51, 0x80, 0x09, + 0x12, 0x0a, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, + 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, + 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x33, 0xc8, 0x91, 0x04, + 0x8e, 0x01, 0xd4, 0x58, 0x4b, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa7, + 0x1d, 0xdd, 0x6f, 0xce, 0x01, 0x00, 0x00, } func (m *PexAddress) Marshal() (dAtA []byte, err error) { @@ -460,22 +285,10 @@ func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Port != 0 { - i = encodeVarintPex(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + if len(m.URL) > 0 { + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa } @@ -542,96 +355,6 @@ func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddressV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexAddressV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexAddressV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.URL) > 0 { - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PexRequestV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PexResponseV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -681,7 +404,7 @@ func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } @@ -702,48 +425,6 @@ func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexRequestV2 != nil { - { - size, err := m.PexRequestV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexResponseV2 != nil { - { - size, err := m.PexResponseV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil @@ -765,17 +446,10 @@ func (m *PexAddress) Size() (n int) { } var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - l = len(m.IP) + l = len(m.URL) if l > 0 { n += 1 + l + sovPex(uint64(l)) } - if m.Port != 0 { - n += 1 + sovPex(uint64(m.Port)) - } return n } @@ -803,100 +477,39 @@ func (m *PexResponse) Size() (n int) { return n } -func (m *PexAddressV2) Size() (n int) { +func (m *PexMessage) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.URL) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) + if m.Sum != nil { + n += m.Sum.Size() } return n } -func (m *PexRequestV2) Size() (n int) { +func (m *PexMessage_PexRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.PexRequest != nil { + l = m.PexRequest.Size() + n += 1 + l + sovPex(uint64(l)) + } return n } - -func (m *PexResponseV2) Size() (n int) { +func (m *PexMessage_PexResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 
1 + l + sovPex(uint64(l)) - } - } - return n -} - -func (m *PexMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - return n -} - -func (m *PexMessage_PexRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequest != nil { - l = m.PexRequest.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponse != nil { - l = m.PexResponse.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequestV2 != nil { - l = m.PexRequestV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponseV2 != nil { - l = m.PexResponseV2.Size() - n += 1 + l + sovPex(uint64(l)) + if m.PexResponse != nil { + l = m.PexResponse.Size() + n += 1 + l + sovPex(uint64(l)) } return n } @@ -938,39 +551,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -998,34 +579,18 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1075,7 +640,10 @@ func (m *PexRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1159,223 +727,10 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - 
} - return nil -} -func (m *PexAddressV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexAddressV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddressV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexRequestV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexRequestV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { return ErrInvalidLengthPex } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexResponseV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexResponseV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, PexAddressV2{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { @@ -1419,7 +774,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) } @@ -1454,7 +809,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } @@ -1489,83 +844,16 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexRequestV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexRequestV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexRequestV2{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexResponseV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexResponseV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexResponseV2{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthPex } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto deleted file mode 100644 index 1f78c98643..0000000000 --- a/proto/tendermint/p2p/pex.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; - -message 
PexAddress { - string id = 1 [(gogoproto.customname) = "ID"]; - string ip = 2 [(gogoproto.customname) = "IP"]; - uint32 port = 3; -} - -message PexRequest {} - -message PexResponse { - repeated PexAddress addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexAddressV2 { - string url = 1 [(gogoproto.customname) = "URL"]; -} - -message PexRequestV2 {} - -message PexResponseV2 { - repeated PexAddressV2 addresses = 1 [(gogoproto.nullable) = false]; -} - -message PexMessage { - oneof sum { - PexRequest pex_request = 1; - PexResponse pex_response = 2; - PexRequestV2 pex_request_v2 = 3; - PexResponseV2 pex_response_v2 = 4; - } -} diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index bffa6884fe..a0e647ee7b 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -917,7 +917,10 @@ func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1227,7 +1230,10 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1341,7 +1347,10 @@ func (m *NodeInfoOther) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1493,7 +1502,10 @@ func (m *PeerInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1666,7 +1678,10 @@ func (m *PeerAddressInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto deleted file mode 100644 index 216a6d8d06..0000000000 --- a/proto/tendermint/p2p/types.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; -package tendermint.p2p; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; - -message ProtocolVersion { - uint64 p2p = 1 [(gogoproto.customname) = "P2P"]; - uint64 block = 2; - uint64 app = 3; -} - -message NodeInfo { - ProtocolVersion protocol_version = 1 [(gogoproto.nullable) = false]; - string node_id = 2 [(gogoproto.customname) = "NodeID"]; - string listen_addr = 3; - string network = 4; - string version = 5; - bytes channels = 6; - string moniker = 7; - NodeInfoOther other = 8 [(gogoproto.nullable) = false]; -} - -message NodeInfoOther { - string tx_index = 1; - string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"]; -} - -message PeerInfo { - string id = 1 [(gogoproto.customname) = "ID"]; - repeated PeerAddressInfo address_info = 2; - google.protobuf.Timestamp last_connected = 3 [(gogoproto.stdtime) = true]; -} - -message PeerAddressInfo { - string address = 1; - google.protobuf.Timestamp last_dial_success = 2 [(gogoproto.stdtime) = 
true]; - google.protobuf.Timestamp last_dial_failure = 3 [(gogoproto.stdtime) = true]; - uint32 dial_failures = 4; -} diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index 56b35e7271..da30f75270 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -1708,7 +1708,10 @@ func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1909,7 +1915,10 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2027,7 +2036,10 @@ func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2146,7 +2158,10 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2264,7 +2279,10 @@ func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2383,7 +2401,10 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2433,7 +2454,10 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2483,7 +2507,10 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2813,7 +2840,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2930,7 +2960,10 @@ func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/state/types.pb.go 
b/proto/tendermint/state/types.pb.go index 85f38cada4..d94724fff2 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -1069,7 +1069,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1174,7 +1177,10 @@ func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1276,7 +1282,10 @@ func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1391,7 +1400,10 @@ func (m *Version) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1857,7 +1869,10 @@ func (m *State) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 5541c28037..93e844730a 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -1740,7 +1740,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1790,7 +1793,10 @@ func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -1965,7 +1971,10 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2072,7 +2081,10 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2233,7 +2245,10 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2302,7 +2317,10 @@ func (m *LightBlockRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2388,7 +2406,10 @@ 
func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2457,7 +2478,10 @@ func (m *ParamsRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -2559,7 +2583,10 @@ func (m *ParamsResponse) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto deleted file mode 100644 index fcfd05f687..0000000000 --- a/proto/tendermint/statesync/types.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; -package tendermint.statesync; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/params.proto"; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; - -message Message { - oneof sum { - SnapshotsRequest snapshots_request = 1; - SnapshotsResponse snapshots_response = 2; - ChunkRequest chunk_request = 3; - ChunkResponse chunk_response = 4; - LightBlockRequest light_block_request = 5; - LightBlockResponse light_block_response = 6; - ParamsRequest params_request = 7; - ParamsResponse params_response = 8; - } -} - -message SnapshotsRequest {} - -message SnapshotsResponse { - uint64 height = 1; - uint32 format = 2; - uint32 chunks = 3; - bytes hash = 4; - bytes metadata = 5; -} - -message ChunkRequest { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; -} - -message ChunkResponse { - uint64 height = 1; - uint32 format = 2; - uint32 index = 3; - bytes chunk = 4; - bool missing = 5; -} - -message LightBlockRequest { - uint64 height = 1; -} - -message LightBlockResponse { - tendermint.types.LightBlock light_block = 1; -} - -message ParamsRequest { - uint64 height = 1; -} - -message ParamsResponse { - uint64 height = 1; - tendermint.types.ConsensusParams consensus_params = 2 [(gogoproto.nullable) = false]; -} \ No newline at end of file diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index 6dd30be50a..08101585e2 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -334,7 +334,10 @@ func (m *Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthBlock } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto deleted file mode 100644 index bf4b35664f..0000000000 --- a/proto/tendermint/types/block.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "tendermint/types/types.proto"; - -message Block { - Header header = 1 [(gogoproto.nullable) = false]; - Data data = 2 [(gogoproto.nullable) = false]; - Commit last_commit = 4; -} diff --git a/proto/tendermint/types/canonical.pb.go 
b/proto/tendermint/types/canonical.pb.go index 7098310430..38b17ddb16 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -775,7 +775,10 @@ func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -878,7 +881,10 @@ func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1087,7 +1093,10 @@ func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { @@ -1277,7 +1286,10 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthCanonical } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go index a9aa26a799..1c49aef647 100644 --- a/proto/tendermint/types/events.pb.go +++ b/proto/tendermint/types/events.pb.go @@ -285,7 +285,10 @@ func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthEvents } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 5a9f103a9c..a295bca9e8 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -1123,7 +1123,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1211,7 +1214,10 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1332,7 +1338,10 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1414,7 +1423,10 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1483,7 +1495,10 @@ func (m *VersionParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { @@ -1571,7 +1586,10 @@ func (m 
*HashedParams) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto deleted file mode 100644 index cc926b64e0..0000000000 --- a/proto/tendermint/types/params.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; - -option (gogoproto.equal_all) = true; - -// ConsensusParams contains consensus critical parameters that determine the -// validity of blocks. -message ConsensusParams { - BlockParams block = 1; - EvidenceParams evidence = 2; - ValidatorParams validator = 3; - VersionParams version = 4; -} - -// BlockParams contains limits on the block size. -message BlockParams { - // Max block size, in bytes. - // Note: must be greater than 0 - int64 max_bytes = 1; - // Max gas per block. - // Note: must be greater or equal to -1 - int64 max_gas = 2; -} - -// EvidenceParams determine how we handle evidence of malfeasance. -message EvidenceParams { - // Max age of evidence, in blocks. - // - // The basic formula for calculating this is: MaxAgeDuration / {average block - // time}. - int64 max_age_num_blocks = 1; - - // Max age of evidence, in time. - // - // It should correspond with an app's "unbonding period" or other similar - // mechanism for handling [Nothing-At-Stake - // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). - google.protobuf.Duration max_age_duration = 2 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; - - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. - // Default is 1048576 or 1MB - int64 max_bytes = 3; -} - -// ValidatorParams restrict the public key types validators can use. -// NOTE: uses ABCI pubkey naming, not Amino names. -message ValidatorParams { - repeated string pub_key_types = 1; -} - -// VersionParams contains the ABCI application version. -message VersionParams { - uint64 app_version = 1; -} - -// HashedParams is a subset of ConsensusParams. -// -// It is hashed into the Header.ConsensusHash. 
-message HashedParams { - int64 block_max_bytes = 1; - int64 block_max_gas = 2; -} diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index d15c6f2163..b8e59375bc 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -4172,7 +4172,10 @@ func (m *Header) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4353,7 +4356,10 @@ func (m *Data) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -4546,7 +4552,10 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5225,7 +5234,10 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5339,7 +5351,10 @@ func (m *DataAvailabilityHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5599,7 +5614,10 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5754,7 +5772,10 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -5924,7 +5945,10 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -6150,7 +6174,10 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -6272,7 +6299,10 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -6394,7 +6424,10 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -6548,7 +6581,10 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { @@ -6702,7 +6738,10 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto deleted file mode 100644 index c24c91af83..0000000000 --- a/proto/tendermint/types/types.proto +++ /dev/null @@ -1,221 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/crypto/proof.proto"; -import "tendermint/version/types.proto"; -import "tendermint/types/validator.proto"; - -// BlockIdFlag indicates which BlcokID the signature is for -enum BlockIDFlag { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; -} - -// SignedMsgType is a type of signed message in the consensus. -enum SignedMsgType { - option (gogoproto.goproto_enum_stringer) = true; - option (gogoproto.goproto_enum_prefix) = false; - - SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; - // Votes - SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; - SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; - - // Proposals - SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; -} - -// PartsetHeader -message PartSetHeader { - uint32 total = 1; - bytes hash = 2; -} - -message Part { - uint32 index = 1; - bytes bytes = 2; - tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; -} - -// BlockID -message BlockID { - bytes hash = 1; - PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; -} - -// -------------------------------- - -// Header defines the structure of a Tendermint block header. 
-message Header { - // basic block info - tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; - string chain_id = 2 [(gogoproto.customname) = "ChainID"]; - int64 height = 3; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - - // prev block info - BlockID last_block_id = 5 [(gogoproto.nullable) = false]; - - // hashes of block data - bytes last_commit_hash = 6; // commit from validators from the last block - bytes data_hash = 7; // transactions - - // hashes from the app output from the prev block - bytes validators_hash = 8; // validators for the current block - bytes next_validators_hash = 9; // validators for the next block - bytes consensus_hash = 10; // consensus params for current block - bytes app_hash = 11; // state after txs from the previous block - bytes last_results_hash = 12; // root hash of all results from the txs from the previous block - - // consensus info - bytes evidence_hash = 13; // evidence included in the block - bytes proposer_address = 14; // original proposer of the block -} - -// Data contains the set of transactions included in the block -message Data { - // Txs that will be applied by state @ block.Height+1. - // NOTE: not all txs here are valid. We're just agreeing on the order first. - // This means that block.AppHash does not include these txs. - repeated bytes txs = 1; - - IntermediateStateRoots intermediate_state_roots = 2 [(gogoproto.nullable) = false]; - EvidenceList evidence = 3 [(gogoproto.nullable) = false]; - Messages messages = 4 [(gogoproto.nullable) = false]; -} - -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. -message DuplicateVoteEvidence { - tendermint.types.Vote vote_a = 1; - tendermint.types.Vote vote_b = 2; - int64 total_voting_power = 3; - int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. -message LightClientAttackEvidence { - tendermint.types.LightBlock conflicting_block = 1; - int64 common_height = 2; - repeated tendermint.types.Validator byzantine_validators = 3; - int64 total_voting_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -message Evidence { - oneof sum { - DuplicateVoteEvidence duplicate_vote_evidence = 1; - LightClientAttackEvidence light_client_attack_evidence = 2; - } -} - -// EvidenceData contains any evidence of malicious wrong-doing by validators -message EvidenceList { - repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; -} - -message IntermediateStateRoots { - repeated bytes raw_roots_list = 1; -} - -message Messages { - repeated Message messages_list = 1; -} - -message Message { - bytes namespace_id = 1; - bytes data = 2; -} - -// DataAvailabilityHeader contains the row and column roots of the erasure -// coded version of the data in Block.Data. -// Therefor the original Block.Data is arranged in a -// k × k matrix, which is then "extended" to a -// 2k × 2k matrix applying multiple times Reed-Solomon encoding. -// For details see Section 5.2: https://arxiv.org/abs/1809.09044 -// or the Celestia specification: -// https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#availabledataheader -// Note that currently we list row and column roots in separate fields -// (different from the spec). 
-message DataAvailabilityHeader { - // RowRoot_j = root((M_{j,1} || M_{j,2} || ... || M_{j,2k} )) - repeated bytes row_roots = 1; - // ColumnRoot_j = root((M_{1,j} || M_{2,j} || ... || M_{2k,j} )) - repeated bytes column_roots = 2; -} - -// Vote represents a prevote, precommit, or commit vote from validators for -// consensus. -message Vote { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - BlockID block_id = 4 - [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. - google.protobuf.Timestamp timestamp = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes validator_address = 6; - int32 validator_index = 7; - bytes signature = 8; -} - -// Commit contains the evidence that a block was committed by a set of validators. -message Commit { - int64 height = 1; - int32 round = 2; - BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; - repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; -} - -// CommitSig is a part of the Vote included in a Commit. -message CommitSig { - BlockIDFlag block_id_flag = 1; - bytes validator_address = 2; - google.protobuf.Timestamp timestamp = 3 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 4; -} - -message Proposal { - SignedMsgType type = 1; - int64 height = 2; - int32 round = 3; - int32 pol_round = 4; - BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - google.protobuf.Timestamp timestamp = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - bytes signature = 7; -} - -message SignedHeader { - Header header = 1; - Commit commit = 2; -} - -message LightBlock { - SignedHeader signed_header = 1; - tendermint.types.ValidatorSet validator_set = 2; -} - -message BlockMeta { - BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - int64 block_size = 2; - Header header = 3 [(gogoproto.nullable) = false]; - int64 num_txs = 4; -} - -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
-message TxProof { - bytes root_hash = 1; - bytes data = 2; - tendermint.crypto.Proof proof = 3; -} diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go index 23b30ed3cb..2c3468b83f 100644 --- a/proto/tendermint/types/validator.pb.go +++ b/proto/tendermint/types/validator.pb.go @@ -583,7 +583,10 @@ func (m *ValidatorSet) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -738,7 +741,10 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { @@ -843,7 +849,10 @@ func (m *SimpleValidator) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthValidator } if (iNdEx + skippy) > l { diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto deleted file mode 100644 index 49860b96d6..0000000000 --- a/proto/tendermint/types/validator.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "tendermint/crypto/keys.proto"; - -message ValidatorSet { - repeated Validator validators = 1; - Validator proposer = 2; - int64 total_voting_power = 3; -} - -message Validator { - bytes address = 1; - tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false]; - int64 voting_power = 3; - int64 proposer_priority = 4; -} - -message SimpleValidator { - tendermint.crypto.PublicKey pub_key = 1; - int64 voting_power = 2; -} diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 6e224392e8..9aeb3ae1a9 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -265,7 +265,10 @@ func (m *Consensus) Unmarshal(dAtA []byte) error { if err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } if (iNdEx + skippy) > l { diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 1b84c2c568..a0757bcf5b 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -2,6 +2,37 @@ set -eo pipefail +: ${VERS:=master} +URL_PATH=archive/ +if [[ VERS -ne master ]]; then + URL_PATH=archive/refs/tags/v +fi + +# Edit this line to clone your branch, if you are modifying protobuf files +curl -qL "https://github.com/celestiaorg/spec/${URL_PATH}${VERS}.tar.gz" | tar -xjf - spec-"$VERS"/proto/ + +cp -r ./spec-"$VERS"/proto/tendermint/** ./proto/tendermint + buf generate --path proto/tendermint mv ./proto/tendermint/abci/types.pb.go ./abci/types + +echo "proto files have been generated" + +echo "removing copied files" + +rm -rf ./proto/tendermint/abci +rm -rf ./proto/tendermint/blocksync/types.proto +rm -rf ./proto/tendermint/consensus/types.proto +rm -rf ./proto/tendermint/mempool/types.proto +rm -rf ./proto/tendermint/p2p/types.proto +rm -rf ./proto/tendermint/p2p/conn.proto +rm -rf ./proto/tendermint/p2p/pex.proto +rm -rf 
./proto/tendermint/statesync/types.proto +rm -rf ./proto/tendermint/types/block.proto +rm -rf ./proto/tendermint/types/params.proto +rm -rf ./proto/tendermint/types/types.proto +rm -rf ./proto/tendermint/types/validator.proto +rm -rf ./proto/tendermint/version/types.proto + +rm -rf ./spec-"$VERS" diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 61b4bf7d38..10a180f6ad 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -278,6 +278,7 @@ func generateNode( Database: nodeDatabases.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), Mempool: nodeMempools.Choose(r), + BlockSync: "v0", StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -285,6 +286,13 @@ func generateNode( Perturb: nodePerturbations.Choose(r), } + if node.Mempool == "" { + node.Mempool = "v1" + } + if node.PrivvalProtocol == "" { + node.PrivvalProtocol = "file" + } + if startAt > 0 { node.StateSync = nodeStateSyncs.Choose(r) if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled { diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index 79a20f27e8..3894830270 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -26,15 +26,27 @@ func TestGenerator(t *testing.T) { numStateSyncs++ } t.Run(name, func(t *testing.T) { - if node.StartAt > m.InitialHeight+5 && !node.Stateless() { - require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + t.Run("StateSync", func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, + "peers: %v", node.PersistentPeers) + } + }) + if e2e.Mode(node.Mode) != e2e.ModeLight { + t.Run("Mempool", func(t *testing.T) { + require.NotZero(t, node.Mempool) + }) + t.Run("PrivvalProtocol", func(t *testing.T) { + require.NotZero(t, node.PrivvalProtocol) + }) + t.Run("BlockSync", func(t *testing.T) { + require.NotZero(t, node.BlockSync) + }) } - if node.StateSync != e2e.StateSyncDisabled { - require.Zero(t, node.Seeds, node.StateSync) - require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, - "peers: %v", node.PersistentPeers) - } - }) } require.True(t, numStateSyncs <= 2) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 25c4a1cc44..720357fae8 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -33,14 +33,10 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo var targetNode *e2e.Node for _, idx := range r.Perm(len(testnet.Nodes)) { - targetNode = testnet.Nodes[idx] - - if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { - targetNode = nil - continue + if !testnet.Nodes[idx].Stateless() { + targetNode = testnet.Nodes[idx] + break } - - break } if targetNode == nil { @@ -55,15 +51,14 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } // request the latest block and validator set from the node - blockRes, err := client.Block(context.Background(), nil) + blockRes, err := client.Block(ctx, nil) if err != nil { return err } - evidenceHeight := blockRes.Block.Height - waitHeight := blockRes.Block.Height 
+ 3 + evidenceHeight := blockRes.Block.Height - 3 nValidators := 100 - valRes, err := client.Validators(context.Background(), &evidenceHeight, nil, &nValidators) + valRes, err := client.Validators(ctx, &evidenceHeight, nil, &nValidators) if err != nil { return err } @@ -79,12 +74,8 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - wctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, waitHeight) + // request the latest block and validator set from the node + blockRes, err = client.Block(ctx, &evidenceHeight) if err != nil { return err } @@ -104,24 +95,28 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - _, err := client.BroadcastEvidence(context.Background(), ev) + _, err := client.BroadcastEvidence(ctx, ev) if err != nil { return err } } - wctx, cancel = context.WithTimeout(ctx, 30*time.Second) + logger.Info("Finished sending evidence", + "node", testnet.Name, + "amount", amount, + "height", evidenceHeight, + ) + + wctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2) + // wait for the node to make progress after submitting + // evidence (3 (forged height) + 1 (progress)) + _, err = waitForNode(wctx, targetNode, evidenceHeight+4) if err != nil { return err } - logger.Info(fmt.Sprintf("Finished sending evidence (height %d)", blockRes.Block.Height+2)) - return nil } diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index ad5fa7a64d..f6a32b114a 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -70,9 +70,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients[node.Name] = client } - wctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - result, err := client.Status(wctx) + result, err := client.Status(ctx) if err != nil { continue }
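
A minimal usage sketch of the regenerated PEX types, to make the shape of the pex.pb.go change above concrete. It assumes the regenerated package is still importable under the go_package shown in the deleted proto (github.com/tendermint/tendermint/proto/tendermint/p2p); the peer URL is an illustrative placeholder, not a value taken from this diff. As the generated code shows, PexAddress now carries a single url string (the old id/ip/port fields and the *V2 message variants are removed), and the PexMessage oneof carries PexRequest and PexResponse on field numbers 3 and 4, the slots previously occupied by the V2 variants.

package main

import (
	"fmt"

	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	// Wrap a PexResponse in the PexMessage oneof. PexAddress now holds only a URL;
	// the address below is a hypothetical placeholder.
	msg := &p2pproto.PexMessage{
		Sum: &p2pproto.PexMessage_PexResponse{
			PexResponse: &p2pproto.PexResponse{
				Addresses: []p2pproto.PexAddress{
					{URL: "mconn://nodeid@203.0.113.1:26656"},
				},
			},
		},
	}

	// Marshal and Unmarshal are the gogoproto-generated methods visible in the diff.
	// On the wire the response is written under oneof field 4 (tag 0x22); a request
	// would be written under field 3 (tag 0x1a).
	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded p2pproto.PexMessage
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetPexResponse().Addresses[0].URL)
}

A peer that still speaks the old ID/IP/port encoding will not round-trip through these types, which is consistent with the removal of the V1 fields and the renumbered oneof in this diff.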