diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 79097af3..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2020 Coinbase, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 2.1 -executors: - default: - docker: - - image: circleci/golang:1.13 - user: root # go directory is owned by root - working_directory: /go/src/github.com/coinbase/rosetta-sdk-go - environment: - - GO111MODULE: "on" - -fast-checkout: &fast-checkout - attach_workspace: - at: /go - -jobs: - setup: - executor: - name: default - steps: - - checkout - - run: make deps - - persist_to_workspace: - root: /go - paths: - - src - - bin - - pkg/mod/cache - test: - executor: - name: default - steps: - - *fast-checkout - - run: make test - lint: - executor: - name: default - steps: - - *fast-checkout - - run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.24.0 - - run: make lint - check-license: - executor: - name: default - steps: - - *fast-checkout - - run: make check-license - check-format: - executor: - name: default - steps: - - *fast-checkout - - run: make check-format - coverage: - executor: - name: default - steps: - - *fast-checkout - - run: make test-cover - salus: - machine: true - steps: - - checkout - - run: docker run -t -v $(pwd):/home/repo coinbase/salus - -workflows: - version: 2 - build: - jobs: - - setup - - test: - requires: - - setup - - lint: - requires: - - setup - - check-license: - requires: - - setup - - check-format: - requires: - - setup - - coverage: - requires: - - setup - - salus - -notify: - webhooks: - - url: https://coveralls.io/webhook?repo_token=$COVERALLS_TOKEN diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 4e098f8a..5b07f85f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -8,12 +8,13 @@ assignees: '' --- **Describe the bug** -A clear and concise description of what the bug is. + **To Reproduce** -Steps to reproduce the behavior: + **Expected behavior** -A clear and concise description of what you expected to happen. + **Additional context** + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 36014cde..735569fe 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -8,13 +8,14 @@ assignees: '' --- **Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + **Describe the solution you'd like** -A clear and concise description of what you want to happen. + **Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. + **Additional context** -Add any other context or screenshots about the feature request here. 
+ + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..8fb10535 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,80 @@ +name: CI + +on: + push: + branches: + - master + pull_request: + +env: + go_version: 1.16 + GO111MODULE: on + COVERALLS_TOKEN: ${{ secrets.COVERALLS_TOKEN }} + +jobs: + Build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.go_version }} + - run: make build + Test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.go_version }} + - run: make test + Lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.go_version }} + - uses: golangci/golangci-lint-action@v3 + with: + version: latest + args: --timeout 3m + + Check-License: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + version: latest + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.go_version }} + - run: make check-license + + Check-format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + version: latest + - run: make check-format + + Coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + version: latest + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.go_version }} + - run: make test-cover COVERALLS_TOKEN="$COVERALLS_TOKEN" + + # Salus: + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v3 + # with: + # version: latest + # - run: make salus + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 70312d15..23a0341a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,52 +2,87 @@ ## Code of Conduct -All interactions with this project follow our [Code of Conduct][code-of-conduct]. -By participating, you are expected to honor this code. Violators can be banned -from further participation in this project, or potentially all Coinbase projects. +All interactions with this project follow our [Code of Conduct](https://github.com/coinbase/code-of-conduct). By participating, you are expected to honor this code. Violators can be banned from further participation in this project, or potentially all Coinbase projects. -[code-of-conduct]: https://github.com/coinbase/code-of-conduct +## How to Contribute -## Bug Reports +You can contribute to this repository by asking questions, providing feedback, and reporting issues. -* Ensure your issue [has not already been reported][1]. It may already be fixed! -* Include the steps you carried out to produce the problem. -* Include the behavior you observed along with the behavior you expected, and - why you expected it. -* Include any relevant stack traces or debugging output. +### Asking Questions -## Feature Requests +Submit your questions via the [Rosetta Community boards][13]. -We welcome feedback with or without pull requests. If you have an idea for how -to improve the project, great! All we ask is that you take the time to write a -clear and concise explanation of what need you are trying to solve. If you have -thoughts on _how_ it can be solved, include those too! +### Providing Feedback + +You can also use the [Rosetta Community boards][13] to provide feedback. + +### Reporting Issues + +You can report issues by submitting bug reports, feature requests, or pull requests via GitHub. 
You **must** submit [security issues](#security-issues) and [support requests](#support-requests) through the links provided. + +#### Bug Reports + +Before filing a bug report, ensure that your issue [has not already been reported][1]. It may already be fixed! + +If your bug hasn’t been fixed, follow these steps to file a bug report: + +1. [Open an issue in GitHub][10]. +2. Add a title for your bug report. It should briefly describe the problem. +3. Follow the template that appears in the Write text box. This is the best way to describe the bug. +4. Click _Submit new issue_ to finish filing the bug report. + +#### Feature Requests + +We welcome feedback with or without pull requests. If you have an idea for how to improve the project, great! All we ask is that you take the time to write a clear and concise explanation of the need you are trying to solve. If you have thoughts on _how_ it can be solved, include those too! + +To submit a feature request, follow these steps: + +1. [Open an issue in GitHub][10]. +2. Add a title for your feature request. It should briefly describe your requested feature. +3. Follow the template that appears in the Write text box. This is the best way to explain your request. Be clear and concise in your responses. +4. Click _Submit new issue_ to submit the feature request. The best way to see a feature added, however, is to submit a pull request. -## Pull Requests +#### Pull Requests + +Before creating your pull request, it's usually worth asking whether the code you're planning on writing will be considered for merging. You can do this by [opening an issue][1] and asking. It may also help give the maintainers context for when the time comes to review your code. -* Before creating your pull request, it's usually worth asking if the code - you're planning on writing will actually be considered for merging. You can - do this by [opening an issue][1] and asking. It may also help give the - maintainers context for when the time comes to review your code. +Ensure that your [commit messages are well-written][2]. This can double as your pull request message, so it pays to take the time to write a clear message. -* Ensure your [commit messages are well-written][2]. This can double as your - pull request message, so it pays to take the time to write a clear message. +Additionally, make sure that you have written unit tests for your changes. If you're unsure as to what to test, don't hesitate to [open an issue][1] and ask! -* Add tests for your feature. You should be able to look at other tests for - examples. If you're unsure, don't hesitate to [open an issue][1] and ask! +To submit your pull request, follow these steps: -* Submit your pull request! +1. Follow these instructions on how to [open a pull request in GitHub][11]. +2. Click _Create pull request_ to submit your pull request. -## Support Requests +Once you submit your pull request, a reviewer will revise it, and either approve it or offer suggestions. -For security reasons, any communication referencing support tickets for Coinbase -products will be ignored. The request will have its content redacted and will -be locked to prevent further discussion. +#### Security Issues + +You can send a report through Coinbase's [H1 program][12]. Check out the [Security][14] tab for more information. + +#### Support Requests All support requests must be made via [our support team][3]. 
+**For security reasons, any communication referencing support tickets for Coinbase products will be ignored.** The request will have its content redacted and will be locked to prevent further discussion. + +© 2022 Coinbase + + [1]: https://github.com/coinbase/rosetta-cli/issues -[2]: https://medium.com/brigade-engineering/the-secrets-to-great-commit-messages-106fc0a92a25 +[2]: https://chris.beams.io/posts/git-commit/#seven-rules [3]: https://support.coinbase.com/customer/en/portal/articles/2288496-how-can-i-contact-coinbase-support- + +[5]: https://github.com/coinbase/rosetta-cli/issues/new/choose +[6]: https://github.com/coinbase/rosetta-cli/issues/new?assignees=&labels=bug&template=bug_report.md&title= +[7]: https://github.com/coinbase/rosetta-cli/issues/new?assignees=&labels=enhancement&template=feature_request.md&title= +[8]: https://github.com/coinbase/rosetta-cli/pulls +[9]: https://github.com/coinbase/rosetta-cli/compare +[10]: https://docs.github.com/en/desktop/contributing-and-collaborating-using-github-desktop/working-with-your-remote-repository-on-github-or-github-enterprise/creating-an-issue-or-pull-request#creating-an-issue +[11]: https://docs.github.com/en/desktop/contributing-and-collaborating-using-github-desktop/working-with-your-remote-repository-on-github-or-github-enterprise/creating-an-issue-or-pull-request#creating-a-pull-request +[12]: https://hackerone.com/coinbase +[13]: https://community.rosetta-api.org +[14]: https://github.com/coinbase/rosetta-cli/security diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..a6bba933 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,50 @@ +# Copyright 2020 Coinbase, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Compile golang +FROM ubuntu:20.04 as cli + +RUN apt-get update && apt-get install -y curl make gcc g++ git +ENV GOLANG_VERSION 1.17.9 +ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz +ENV GOLANG_DOWNLOAD_SHA256 9dacf782028fdfc79120576c872dee488b81257b1c48e9032d122cfdb379cca6 + +RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \ + && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \ + && tar -C /usr/local -xzf golang.tar.gz \ + && rm golang.tar.gz + +ENV GOPATH /go +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH +RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" + +WORKDIR /go/src + +ARG VERSION=v0.10.3 +RUN git clone https://github.com/coinbase/rosetta-cli.git && \ + cd rosetta-cli && \ + git fetch --all --tags && \ + git checkout $VERSION && \ + make install + +FROM ubuntu:20.04 + +RUN apt-get update -y && apt-get install -y \ + curl + +# Copy all the binaries +COPY --from=cli /go/bin/ /usr/local/bin/ + +WORKDIR /app +ENTRYPOINT ["rosetta-cli"] \ No newline at end of file diff --git a/Makefile b/Makefile index efd40154..c2487590 100644 --- a/Makefile +++ b/Makefile @@ -7,20 +7,24 @@ # it is necessary to use `go run `. Running `go get` does # not install any binaries that could be used to run # the commands directly. 
-ADDLICENSE_CMD=go run github.com/google/addlicense -ADDLICENCE_SCRIPT=${ADDLICENSE_CMD} -c "Coinbase, Inc." -l "apache" -v -GOLINES_CMD=go run github.com/segmentio/golines -GOVERALLS_CMD=go run github.com/mattn/goveralls -COVERAGE_TEST_DIRECTORIES=./configuration/... ./pkg/constructor/... \ - ./pkg/logger/... ./pkg/scenario/... -TEST_SCRIPT=go test -v ./pkg/... ./configuration/... +ADDLICENSE_INSTALL=go install github.com/google/addlicense@latest +ADDLICENSE_CMD=addlicense +ADDLICENSE_IGNORE=-ignore ".github/**/*" -ignore ".idea/**/*" +ADDLICENCE_SCRIPT=${ADDLICENSE_CMD} -c "Coinbase, Inc." -l "apache" -v ${ADDLICENSE_IGNORE} +GOLINES_INSTALL=go install github.com/segmentio/golines@latest +GOLINES_CMD=golines +GOVERALLS_INSTALL=go install github.com/mattn/goveralls@latest +GOVERALLS_CMD=goveralls +COVERAGE_TEST_DIRECTORIES=./configuration/... ./pkg/results/... \ + ./pkg/logger/... ./cmd +TEST_SCRIPT=go test -v ./pkg/... ./configuration/... ./cmd COVERAGE_TEST_SCRIPT=go test -v ${COVERAGE_TEST_DIRECTORIES} deps: go get ./... lint: - golangci-lint run -v \ + golangci-lint run --timeout 2m0s -v \ -E golint,misspell,gocyclo,whitespace,goconst,gocritic,gocognit,bodyclose,unconvert,lll,unparam,gomnd; format: @@ -30,8 +34,6 @@ check-format: ! gofmt -s -l . | read; validate-configuration-files: - go run main.go configuration:validate examples/configuration/bitcoin.json; - go run main.go configuration:validate examples/configuration/ethereum.json; go run main.go configuration:validate examples/configuration/simple.json; go run main.go configuration:create examples/configuration/default.json; go run main.go configuration:validate examples/configuration/default.json; @@ -40,16 +42,20 @@ validate-configuration-files: test: | validate-configuration-files ${TEST_SCRIPT} -test-cover: +test-cover: + ${GOVERALLS_INSTALL} if [ "${COVERALLS_TOKEN}" ]; then ${COVERAGE_TEST_SCRIPT} -coverprofile=c.out -covermode=count; ${GOVERALLS_CMD} -coverprofile=c.out -repotoken ${COVERALLS_TOKEN}; fi add-license: + ${ADDLICENSE_INSTALL} ${ADDLICENCE_SCRIPT} . check-license: + ${ADDLICENSE_INSTALL} ${ADDLICENCE_SCRIPT} -check . shorten-lines: + ${GOLINES_INSTALL} ${GOLINES_CMD} -w --shorten-comments pkg cmd configuration salus: @@ -57,6 +63,7 @@ salus: release: add-license shorten-lines format test lint salus +# This command is to generate multi-platform binaries. compile: ./scripts/compile.sh $(version) @@ -65,8 +72,3 @@ build: install: go install ./... - -mocks: - rm -rf mocks; - mockery --dir pkg/constructor --all --case underscore --outpkg constructor --output mocks/constructor; - ${ADDLICENCE_SCRIPT} .; diff --git a/README.md b/README.md index 2e2f216a..01c2d69c 100644 --- a/README.md +++ b/README.md @@ -16,427 +16,128 @@ CLI to validate the correctness of Rosetta API implementations

-## Overview -The `rosetta-cli` is used by developers to test the correctness of their Rosetta -API implementations. The CLI also provides the ability to look up block contents -and account balances. +The `rosetta-cli` tool is used by developers to test the correctness of their Rosetta API implementations. The CLI also provides the ability to look up block contents and account balances. -## Documentation -Before diving into the CLI, we recommend taking a look at the Rosetta API Docs: - -* [Overview](https://www.rosetta-api.org/docs/welcome.html) -* [Data API](https://www.rosetta-api.org/docs/data_api_introduction.html) -* [Construction API](https://www.rosetta-api.org/docs/construction_api_introduction.html) +## Installation -## Install To download a binary for the latest release, run: ``` curl -sSfL https://raw.githubusercontent.com/coinbase/rosetta-cli/master/scripts/install.sh | sh -s ``` -The binary will be installed inside the `./bin` directory (relative to where the install command was run). +The binary will be installed inside the `./bin` directory (relative to the directory where you ran the installation command). _Downloading binaries from the Github UI will cause permission errors on Mac._ -## Usage -``` -CLI for the Rosetta API - -Usage: - rosetta-cli [command] - -Available Commands: - check:construction Check the correctness of a Rosetta Construction API Implementation - check:data Check the correctness of a Rosetta Data API Implementation - configuration:create Create a default configuration file at the provided path - configuration:validate Ensure a configuration file at the provided path is formatted correctly - help Help about any command - utils:asserter-configuration Generate a static configuration file for the Asserter - utils:train-zstd Generate a zstd dictionary for enhanced compression performance - version Print rosetta-cli version - view:account View an account balance - view:block View a block - view:networks View all network statuses - -Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. - - Any fields not populated in the configuration file will be populated with - default values. - -h, --help help for rosetta-cli - -Use "rosetta-cli [command] --help" for more information about a command. -``` - -### Configuration -All `rosetta-cli` parameters are populated from a configuration file (`--configuration-file`) -provided at runtime. If a configuration file is not provided, the default -configuration is used. This default configuration can be viewed -[here](examples/configuration/default.json). - -In the `examples/configuration` directory, you can find examples configuration -files for running tests against a Bitcoin Rosetta implementation -([config](examples/configuration/bitcoin.json)) and an Ethereum Rosetta -implementation ([config](examples/configuration/ethereum.json)). - -#### Disable Complex Checks -If you are just getting started with your implementation, you may want -to disable balance tracking (did any address balance go below zero?) and -reconciliation (does the balance I calculated match the balance returned -by the `/account/balance` endpoint?). Take a look at the -[simple configuration](examples/configuration/simple.json) for an example of -how to do this. - -#### Future Work -In the near future, we will add support for providing complex exit conditions -(i.e. 
did we reach tip? did we reconcile every account?) for both -`check:construction` and `check:data` so that the `rosetta-cli` -can be integrated into a CI flow. Currently, the only way to exit with a -successful status in the `rosetta-cli` is to provide an `--end` flag -when running `check:data` (returns 0 if no errors up to a block index -are observed). - -### Commands -#### version +### Installing in a Custom Location +To download the binary into a specific directory, run: ``` -Print rosetta-cli version - -Usage: - rosetta-cli version [flags] - -Flags: - -h, --help help for version - -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. - - Any fields not populated in the configuration file will be populated with - default values. +curl -sSfL https://raw.githubusercontent.com/coinbase/rosetta-cli/master/scripts/install.sh | sh -s -- -b ``` -#### check:data +### Run via Docker +Running the following command will start a Docker container and present CLI for the Rosetta API. ``` -Check all server responses are properly constructed, that -there are no duplicate blocks and transactions, that blocks can be processed -from genesis to the current block (re-orgs handled automatically), and that -computed balance changes are equal to balance changes reported by the node. - -When re-running this command, it will start where it left off if you specify -some data directory. Otherwise, it will create a new temporary directory and start -again from the genesis block. If you want to discard some number of blocks -populate the --start flag with some block index. Starting from a given index -can be useful to debug a small range of blocks for issues but it is highly -recommended you sync from start to finish to ensure all correctness checks -are performed. - -By default, account balances are looked up at specific heights (instead of -only at the current block). If your node does not support this functionality -set historical balance disabled to true. This will make reconciliation much -less efficient but it will still work. - -If check fails due to an INACTIVE reconciliation error (balance changed without -any corresponding operation), the cli will automatically try to find the block -missing an operation. If historical balance disabled is true, this automatic -debugging tool does not work. - -To debug an INACTIVE account reconciliation error without historical balance lookup, -set the interesting accounts to the path of a JSON file containing -accounts that will be actively checked for balance changes at each block. This -will return an error at the block where a balance change occurred with no -corresponding operations. - -If your blockchain has a genesis allocation of funds and you set -historical balance disabled to true, you must provide an -absolute path to a JSON file containing initial balances with the -bootstrap balance config. You can look at the examples folder for an example -of what one of these files looks like. - -Usage: - rosetta-cli check:data [flags] - -Flags: - -h, --help help for check:data - -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. 
- - Any fields not populated in the configuration file will be populated with - default values. +docker run -it [image-name] [command] ``` -##### Status Codes -If there are no issues found while running `check`, it will exit with a `0` status code. -If there are any issues, it will exit with a `1` status code. It can be useful -to run this command as an integration test for any changes to your implementation. - -#### check:construction +Example: To validate that the Data API implementation is correct, running the following command will start a Docker container with a data directory at ``. ``` -The check:construction command runs an automated test of a -Construction API implementation by creating and broadcasting transactions -on a blockchain. In short, this tool generates new addresses, requests -funds, constructs transactions, signs transactions, broadcasts transactions, -and confirms transactions land on-chain. At each phase, a series of tests -are run to ensure that intermediate representations are correct (i.e. does -an unsigned transaction return a superset of operations provided during -construction?). - -Check out the https://github.com/coinbase/rosetta-cli/tree/master/examples -directory for examples of how to configure this test for Bitcoin and -Ethereum. - -Right now, this tool only supports transfer testing (for both account-based -and UTXO-based blockchains). However, we plan to add support for testing -arbitrary scenarios (i.e. staking, governance). - -Usage: - rosetta-cli check:construction [flags] - -Flags: - -h, --help help for check:construction - -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. - - Any fields not populated in the configuration file will be populated with - default values. -``` - -#### configuration:create +docker run -v "$(pwd):/data" -it [image-name] check:data --configuration-file /data/config.json ``` -Create a default configuration file at the provided path -Usage: - rosetta-cli configuration:create [flags] +## Key Sign Tool +Rosetta CLI comes with a handy key sign tool for local testing. Please refer to this [README](./cmd/README.md) on how to use it. -Flags: - -h, --help help for configuration:create +## Updates and Releases -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. - - Any fields not populated in the configuration file will be populated with - default values. -``` - -#### configuration:validate -``` -Validate the correctness of a configuration file at the provided path +We recommend that you continually update your installation to the latest release as soon as possible. The latest release notes are available in our [Community](https://community.rosetta-api.org) board under the [Release](https://community.rosetta-api.org/c/releases/13) category. -Usage: - rosetta-cli configuration:validate [flags] +You can also view releases and change log information in the [Releases](https://github.com/coinbase/rosetta-cli/releases) section of this repository. -Flags: - -h, --help help for configuration:validate - -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. 
- If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. - - Any fields not populated in the configuration file will be populated with - default values. -``` - -#### view:networks -``` -While debugging a Data API implementation, it can be very -useful to view network(s) status. This command fetches the network -status from all available networks and prints it to the terminal. - -If this command errors, it is likely because the /network/* endpoints are -not formatted correctly. - -Usage: - rosetta-cli view:networks [flags] - -Flags: - -h, --help help for view:networks +## Documentation -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. +You can find the Rosetta API documentation at [rosetta-api.org](https://www.rosetta-api.org/docs/welcome.html) - Any fields not populated in the configuration file will be populated with - default values. -``` +For more information on the rosetta-cli tool, read our [The rosetta-cli tool](https://www.rosetta-api.org/docs/rosetta_cli.html) documentation. -#### view:account -``` -While debugging, it is often useful to inspect the state -of an account at a certain block. This command allows you to look up -any account by providing a JSON representation of a types.AccountIdentifier -(and optionally a height to perform the query). +For more information on how to test your implementation file with the `rosetta-cli` tool, read our [How to Test Your Rosetta Implementation](https://www.rosetta-api.org/docs/rosetta_test.html) documentation. -For example, you could run view:account '{"address":"interesting address"}' 1000 -to lookup the balance of an interesting address at block 1000. Allowing the -address to specified as JSON allows for querying by SubAccountIdentifier. +Our documentation is divided into the following sections: -Usage: - rosetta-cli view:account [flags] +* [Product Overview](https://www.rosetta-api.org/docs/welcome.html) +* [Getting Started](https://www.rosetta-api.org/docs/getting_started.html) +* [Rosetta API Spec](https://www.rosetta-api.org/docs/Reference.html) +* [Testing](https://www.rosetta-api.org/docs/rosetta_cli.html) +* [Best Practices](https://www.rosetta-api.org/docs/node_deployment.html) +* [Repositories](https://www.rosetta-api.org/docs/rosetta_specifications.html) -Flags: - -h, --help help for view:account +## Contributing -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. +You may contribute to the `rosetta-cli` project in various ways: - Any fields not populated in the configuration file will be populated with - default values. -``` +* [Asking Questions](CONTRIBUTING.md/#asking-questions) +* [Providing Feedback](CONTRIBUTING.md/#providing-feedback) +* [Reporting Issues](CONTRIBUTING.md/#reporting-issues) -#### view:block -``` -While debugging a Data API implementation, it can be very -useful to inspect block contents. This command allows you to fetch any -block by index to inspect its contents. 
It uses the -fetcher (https://github.com/coinbase/rosetta-sdk-go/tree/master/fetcher) package -to automatically get all transactions in the block and assert the format -of the block is correct before printing. - -If this command errors, it is likely because the block you are trying to -fetch is formatted incorrectly. +Read our [Contributing](CONTRIBUTING.MD) documentation for more information. -Usage: - rosetta-cli view:block [flags] +## rosetta-cli Tool Development -Flags: - -h, --help help for view:block +While working on improvements to this repository, we recommend that you use these commands to check your code: -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. +* `make deps` to install dependencies +* `make test` to run tests +* `make lint` to lint the source code (included generated code) +* `make release` to run one last check before opening a PR +* `make compile version=RELEASE_TAG` to generate binaries - Any fields not populated in the configuration file will be populated with - default values. +If you are developing on both the `rosetta-cli` and `rosetta-sdk-go` repositories, use [go.mod replace](https://golang.org/ref/mod#go-mod-file-replace) to reference local changes: ``` - -#### utils:asserter-configuration +replace "github.com/coinbase/rosetta-sdk-go" v0.6.8 => "" ``` -In production deployments, it is useful to initialize the response -Asserter (https://github.com/coinbase/rosetta-sdk-go/tree/master/asserter) using -a static configuration instead of intializing a configuration dynamically -from the node. This allows a client to error on new types/statuses that may -have been added in an update instead of silently erroring. - -To use this command, simply provide an absolute path as the argument for where -the configuration file should be saved (in JSON). - -Usage: - rosetta-cli utils:asserter-configuration [flags] - -Flags: - -h, --help help for utils:asserter-configuration +### Release +* When we release a new rosetta-cli version, please update the version number to follow [PR](https://github.com/coinbase/rosetta-cli/pull/334) so that `rosetta-cli version` command can print the correct value. +* Create binaries and upload all the binaries in the new release tag (e.g. https://github.com/coinbase/rosetta-cli/releases/tag/v0.7.7) + * Ensure `$GOPATH/bin` is added to `$PATH` + * Run `make compile version=` + * All the binaries will be created in the `bin` folder and should have extension as `tar.gz` and new version number -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. +### Helper/Handler +Many of the packages use a `Helper/Handler` interface pattern to acquire required information or to send events to some client implementation. An example of this is in the `reconciler` package where a `Helper` is used to get the account balance and the `Handler` is called to indicate whether the reconciliation of an account was successful. - Any fields not populated in the configuration file will be populated with - default values. 
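
A reviewer's note on the Helper/Handler pattern the new README describes: a minimal, self-contained Go sketch is shown below. The interface and method names are hypothetical illustrations of the idea (a `Helper` supplies data, a `Handler` receives outcomes), not the actual rosetta-cli or rosetta-sdk-go definitions.

```go
package reconcileexample

import (
	"context"
	"math/big"
)

// Helper supplies the data the reconciler needs from some client
// implementation (storage, a node, a test double).
type Helper interface {
	// LiveBalance returns the node-reported balance of an account
	// at a given block index.
	LiveBalance(ctx context.Context, account string, index int64) (*big.Int, error)
}

// Handler receives the outcome of each reconciliation attempt.
type Handler interface {
	ReconciliationSucceeded(ctx context.Context, account string, index int64) error
	ReconciliationFailed(ctx context.Context, account string, computed, live *big.Int, index int64) error
}

// Reconciler depends only on the two small interfaces above, so the
// client implementation can be swapped without touching the core logic.
type Reconciler struct {
	helper  Helper
	handler Handler
}

// Reconcile compares a locally computed balance against the live one
// and reports the result through the Handler.
func (r *Reconciler) Reconcile(ctx context.Context, account string, computed *big.Int, index int64) error {
	live, err := r.helper.LiveBalance(ctx, account, index)
	if err != nil {
		return err
	}
	if computed.Cmp(live) == 0 {
		return r.handler.ReconciliationSucceeded(ctx, account, index)
	}
	return r.handler.ReconciliationFailed(ctx, account, computed, live, index)
}
```
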
+### Repo Structure ``` - -#### utils:train-zstd +cmd +examples // examples of different config files +pkg + logger // logic to write syncing information to stdout/files + processor // Helper/Handler implementations for reconciler, storage, and syncer + tester // test orchestrators ``` -Zstandard (https://github.com/facebook/zstd) is used by -rosetta-sdk-go/storage to compress data stored to disk. It is possible -to improve compression performance by training a dictionary on a particular -storage namespace. This command runs this training and outputs a dictionary -that can be used with rosetta-sdk-go/storage. -The arguments for this command are: - () +### Troubleshooting -You can learn more about dictionary compression on the Zstandard -website: https://github.com/facebook/zstd#the-case-for-small-data-compression +While running the `check:data` or `check:construction` option, if you get the following error: -Usage: - rosetta-cli utils:train-zstd [flags] +```dial tcp 127.0.0.1:8080: socket: too many open files: unable to sync to 1902533: unable to sync to 1902533``` + +Please run the `ulimit -n 10000` command to increase the max concurrent opened file limit. -Flags: - -h, --help help for utils:train-zstd +_Note: MacOS users, if you face `ulimit: setrlimit failed: invalid argument` error while setting `ulimit`, please run `sudo launchctl limit maxfiles 10000 200000` before setting the `ulimit`._ -Global Flags: - --configuration-file string Configuration file that provides connection and test settings. - If you would like to generate a starter configuration file (populated - with the defaults), run rosetta-cli configuration:create. +## Related Projects - Any fields not populated in the configuration file will be populated with - default values. -``` +* [`rosetta-sdk-go`](https://github.com/coinbase/rosetta-sdk-go) — The `rosetta-sdk-go` SDK provides a collection of packages used for interaction with the Rosetta API specification. Much of the SDK code is generated from this, the [`rosetta-specifications`](https://github.com/coinbase/rosetta-specifications) repository. +* [`rosetta-specifications`](https://github.com/coinbase/rosetta-specifications) — The `rosetta-specifications` repository generates the SDK code in the [`rosetta-sdk-go`](https://github.com/coinbase/rosetta-sdk-go) repository. -## Development -* `make deps` to install dependencies -* `make test` to run tests -* `make lint` to lint the source code (included generated code) -* `make release` to run one last check before opening a PR -* `make compile version=RELEASE_TAG` to generate binaries +### Reference Implementations -### Helper/Handler -Many of the packages use a `Helper/Handler` interface pattern to acquire -required information or to send events to some client implementation. An example -of this is in the `reconciler` package where a `Helper` is used to get -the account balance and the `Handler` is called to incidate whether the -reconciliation of an account was successful. +To help you with examples, we developed complete Rosetta API reference implementations for [Bitcoin](https://github.com/coinbase/rosetta-bitcoin) and [Ethereum](https://github.com/coinbase/rosetta-ethereum). Developers of Bitcoin-like or Ethereum-like blockchains may find it easier to fork these reference implementations than to write an implementation from scratch. 
-### Repo Structure -``` -cmd -examples // examples of different config files -internal - logger // logic to write syncing information to stdout/files - processor // Helper/Handler implementations for reconciler, storage, and syncer - storage // persists block to temporary storage and allows for querying balances - utils // useful functions -``` +You can also find community implementations for a variety of blockchains in the [rosetta-ecosystem](https://github.com/coinbase/rosetta-ecosystem) repository, and in the [ecosystem category](https://community.rosetta-api.org/c/ecosystem) of our community site. -## Correctness Checks -This tool performs a variety of correctness checks using the Rosetta Server. If -any correctness check fails, the CLI will exit and print out a detailed -message explaining the error. - -### Response Correctness -The validator uses the autogenerated [Go Client package](https://github.com/coinbase/rosetta-sdk-go) -to communicate with the Rosetta Server and assert that responses adhere -to the Rosetta interface specification. - -### Duplicate Hashes -The validator checks that a block hash or transaction hash is -never duplicated. - -### Non-negative Balances -The validator checks that an account balance does not go -negative from any operations. - -### Balance Reconciliation -#### Active Addresses -The CLI checks that the balance of an account computed by -its operations is equal to the balance of the account according -to the node. If this balance is not identical, the CLI will -exit. - -#### Inactive Addresses -The CLI randomly checks the balances of accounts that aren't -involved in any transactions. The balances of accounts could change -on the blockchain node without being included in an operation -returned by the Rosetta Data API. Recall that all balance-changing -operations should be returned by the Rosetta Data API. ## License This project is available open source under the terms of the [Apache 2.0 License](https://opensource.org/licenses/Apache-2.0). - -© 2020 Coinbase +© 2022 Coinbase \ No newline at end of file diff --git a/cmd/README.md b/cmd/README.md new file mode 100644 index 00000000..0c3bd6cf --- /dev/null +++ b/cmd/README.md @@ -0,0 +1,37 @@ +## Key Sign Tool + +Rosetta CLI has a key sign tool, which you can use to sign and verify various curves supported +by rosetta-specifications. This should only be used for local development. Never share private keys anywhere. + +### Usage +#### Key Generate +``` +rosetta-cli key:gen --curve-type secp256k1 +``` +Curve Type options are specified by [rosetta-specifications](https://github.com/coinbase/rosetta-specifications/blob/master/models/CurveType.yaml) +#### Sign +``` +rosetta-cli key:sign --configuration-file config.json +``` + +A sample config file is located [here](../examples/configuration/sign.json) + +Required fields includes +- `pub_key` +- `private_key` +- `signing_payload` + + +#### Verify +``` +rosetta-cli key:verify --configuration-file verify.json +``` +A sample config file is located [here](../examples/configuration/verify.json) + +Required fields includes +- `pub_key` +- `signing_payload` +- `signature` + +### Troubleshoot +- `account_identifier` field in `signing_payload` field should've a dummy address for providing valid payload. 
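
A reviewer's note on the key sign tool documented in cmd/README.md above: the sketch below shows the same generate/sign/verify flow done locally in Go, including the dummy `account_identifier` on the signing payload that the Troubleshoot note calls out. It assumes the `keys` and `types` packages of `rosetta-sdk-go` expose `GenerateKeypair`, `KeyPair.Signer`, `Signer.Sign`, and `Signer.Verify` as in recent releases; treat it as an illustration, not as the tool's implementation.

```go
package main

import (
	"fmt"

	"github.com/coinbase/rosetta-sdk-go/keys"
	"github.com/coinbase/rosetta-sdk-go/types"
)

func main() {
	// key:gen equivalent: generate a secp256k1 keypair.
	kp, err := keys.GenerateKeypair(types.Secp256k1)
	if err != nil {
		panic(err)
	}

	signer, err := kp.Signer()
	if err != nil {
		panic(err)
	}

	// key:sign equivalent: note the dummy account_identifier, which the
	// payload must carry even for local testing.
	payload := &types.SigningPayload{
		AccountIdentifier: &types.AccountIdentifier{Address: "dummy"},
		Bytes:             make([]byte, 32), // a 32-byte digest to sign
		SignatureType:     types.Ecdsa,
	}
	sig, err := signer.Sign(payload, types.Ecdsa)
	if err != nil {
		panic(err)
	}

	// key:verify equivalent.
	if err := signer.Verify(sig); err != nil {
		panic(err)
	}
	fmt.Println("signature verified")
}
```
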
\ No newline at end of file diff --git a/cmd/check_construction.go b/cmd/check_construction.go index 470ebad0..933f5797 100644 --- a/cmd/check_construction.go +++ b/cmd/check_construction.go @@ -17,12 +17,17 @@ package cmd import ( "context" "fmt" - "log" "time" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-cli/pkg/logger" + "github.com/fatih/color" + + "github.com/coinbase/rosetta-cli/pkg/results" "github.com/coinbase/rosetta-cli/pkg/tester" "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" @@ -37,7 +42,7 @@ Construction API implementation by creating and broadcasting transactions on a blockchain. In short, this tool generates new addresses, requests funds, constructs transactions, signs transactions, broadcasts transactions, and confirms transactions land on-chain. At each phase, a series of tests -are run to ensure that intermediate representations are correct (i.e. does +are run to ensure that intermediate representations are correct (for example, does an unsigned transaction return a superset of operations provided during construction?). @@ -47,48 +52,87 @@ Ethereum. Right now, this tool only supports transfer testing (for both account-based and UTXO-based blockchains). However, we plan to add support for testing -arbitrary scenarios (i.e. staking, governance).`, - Run: runCheckConstructionCmd, +arbitrary scenarios (for example, staking and governance).`, + RunE: runCheckConstructionCmd, } + constructionMetadata string ) -func runCheckConstructionCmd(cmd *cobra.Command, args []string) { +func runCheckConstructionCmd(_ *cobra.Command, _ []string) error { if Config.Construction == nil { - log.Fatal("construction configuration is missing!") + return results.ExitConstruction( + Config, + nil, + nil, + cliErrs.ErrConstructionConfigMissing, + ) } + metadataMap := logger.ConvertStringToMap(Config.InfoMetaData) + metadataMap = logger.AddRequestUUIDToMap(metadataMap, Config.RequestUUID) + constructionMetadata = logger.ConvertMapToString(metadataMap) + ensureDataDirectoryExists() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(Context) + + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + fetcher.WithMetaData(metadata), + } + if Config.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } fetcher := fetcher.New( Config.OnlineURL, - fetcher.WithTransactionConcurrency(Config.TransactionConcurrency), - fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), - fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcherOpts..., ) - _, _, fetchErr := fetcher.InitializeAsserter(ctx, Config.Network) + _, _, fetchErr := fetcher.InitializeAsserter(ctx, Config.Network, Config.ValidationFile) if fetchErr != nil { - tester.ExitConstruction( + cancel() + err := fmt.Errorf("unable to initialize asserter for fetcher: %w%s", fetchErr.Err, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( Config, nil, nil, - fmt.Errorf("%w: unable to initialize asserter", fetchErr.Err), - 1, + err, ) } _, err := utils.CheckNetworkSupported(ctx, Config.Network, fetcher) 
if err != nil { - tester.ExitConstruction( + cancel() + err = fmt.Errorf("unable to confirm network %s is supported: %w%s", types.PrintStruct(Config.Network), err, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( Config, nil, nil, - fmt.Errorf("%w: unable to confirm network is supported", err), - 1, + err, ) } + if asserterConfigurationFile != "" { + if err := validateNetworkOptionsMatchesAsserterConfiguration( + ctx, fetcher, Config.Network, asserterConfigurationFile, + ); err != nil { + cancel() + err = fmt.Errorf("network options don't match asserter configuration file %s: %w%s", asserterConfigurationFile, err, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( + Config, + nil, + nil, + err, + ) + } + } + constructionTester, err := tester.InitializeConstruction( ctx, Config, @@ -98,22 +142,31 @@ func runCheckConstructionCmd(cmd *cobra.Command, args []string) { &SignalReceived, ) if err != nil { - tester.ExitConstruction( + err = fmt.Errorf("unable to initialize construction tester: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( Config, nil, nil, - fmt.Errorf("%w: unable to initialize construction tester", err), - 1, + err, ) } - defer constructionTester.CloseDatabase(ctx) if err := constructionTester.PerformBroadcasts(ctx); err != nil { - log.Fatalf("%s: unable to perform broadcasts", err.Error()) + err = fmt.Errorf("unable to perform broadcasts: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( + Config, + nil, + nil, + err, + ) } g, ctx := errgroup.WithContext(ctx) + ctx = logger.AddMetadataMapToContext(ctx, metadataMap) + g.Go(func() error { return constructionTester.StartPeriodicLogger(ctx) }) @@ -130,9 +183,21 @@ func runCheckConstructionCmd(cmd *cobra.Command, args []string) { return constructionTester.WatchEndConditions(ctx) }) + g.Go(func() error { + return tester.LogMemoryLoop(ctx) + }) + + g.Go(func() error { + return tester.StartServer( + ctx, + "check:construction status", + constructionTester, + Config.Construction.StatusPort, + ) + }) + sigListeners := []context.CancelFunc{cancel} - go handleSignals(sigListeners) + go handleSignals(&sigListeners) - err = g.Wait() - constructionTester.HandleErr(err) + return constructionTester.HandleErr(g.Wait(), &sigListeners) } diff --git a/cmd/check_data.go b/cmd/check_data.go index 2e1c13c7..535d6c71 100644 --- a/cmd/check_data.go +++ b/cmd/check_data.go @@ -1,4 +1,4 @@ -// Copyright 2020 Coinbase, Inc. +// Copyright 2022 Coinbase, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,10 +19,13 @@ import ( "fmt" "time" + "github.com/coinbase/rosetta-cli/pkg/logger" + "github.com/coinbase/rosetta-cli/pkg/results" "github.com/coinbase/rosetta-cli/pkg/tester" - "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/fatih/color" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) @@ -31,11 +34,11 @@ var ( checkDataCmd = &cobra.Command{ Use: "check:data", Short: "Check the correctness of a Rosetta Data API Implementation", - Long: `Check all server responses are properly constructed, that -there are no duplicate blocks and transactions, that blocks can be processed + Long: `Check all server responses are +properly constructed, that there are no duplicate blocks and transactions, that blocks can be processed from genesis to the current block (re-orgs handled automatically), and that computed balance changes are equal to balance changes reported by the node. - + When re-running this command, it will start where it left off if you specify some data directory. Otherwise, it will create a new temporary directory and start again from the genesis block. If you want to discard some number of blocks @@ -43,51 +46,67 @@ populate the --start flag with some block index. Starting from a given index can be useful to debug a small range of blocks for issues but it is highly recommended you sync from start to finish to ensure all correctness checks are performed. - + By default, account balances are looked up at specific heights (instead of only at the current block). If your node does not support this functionality set historical balance disabled to true. This will make reconciliation much less efficient but it will still work. - + If check fails due to an INACTIVE reconciliation error (balance changed without any corresponding operation), the cli will automatically try to find the block missing an operation. If historical balance disabled is true, this automatic debugging tool does not work. - + To debug an INACTIVE account reconciliation error without historical balance lookup, set the interesting accounts to the path of a JSON file containing accounts that will be actively checked for balance changes at each block. This will return an error at the block where a balance change occurred with no corresponding operations. - + If your blockchain has a genesis allocation of funds and you set historical balance disabled to true, you must provide an absolute path to a JSON file containing initial balances with the bootstrap balance config. 
You can look at the examples folder for an example of what one of these files looks like.`, - Run: runCheckDataCmd, + RunE: runCheckDataCmd, } + metadata string ) -func runCheckDataCmd(cmd *cobra.Command, args []string) { +func runCheckDataCmd(_ *cobra.Command, _ []string) error { ensureDataDirectoryExists() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(Context) + + metadataMap := logger.ConvertStringToMap(Config.InfoMetaData) + metadataMap = logger.AddRequestUUIDToMap(metadataMap, Config.RequestUUID) + metadata = logger.ConvertMapToString(metadataMap) + + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + fetcher.WithMetaData(metadata), + } + if Config.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } fetcher := fetcher.New( Config.OnlineURL, - fetcher.WithTransactionConcurrency(Config.TransactionConcurrency), - fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), - fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcherOpts..., ) - _, _, fetchErr := fetcher.InitializeAsserter(ctx, Config.Network) + _, _, fetchErr := fetcher.InitializeAsserter(ctx, Config.Network, Config.ValidationFile) if fetchErr != nil { - tester.ExitData( + cancel() + err := fmt.Errorf("unable to initialize asserter for fetcher: %w%s", fetchErr.Err, metadata) + color.Red(err.Error()) + return results.ExitData( Config, nil, nil, - fmt.Errorf("%w: unable to initialize asserter", fetchErr.Err), - 1, + err, "", "", ) @@ -95,18 +114,38 @@ func runCheckDataCmd(cmd *cobra.Command, args []string) { networkStatus, err := utils.CheckNetworkSupported(ctx, Config.Network, fetcher) if err != nil { - tester.ExitData( + cancel() + err = fmt.Errorf("unable to confirm network %s is supported: %w%s", types.PrintStruct(Config.Network), err, metadata) + color.Red(err.Error()) + return results.ExitData( Config, nil, nil, - fmt.Errorf("%w: unable to confirm network", err), - 1, + err, "", "", ) } - dataTester := tester.InitializeData( + if asserterConfigurationFile != "" { + if err := validateNetworkOptionsMatchesAsserterConfiguration( + ctx, fetcher, Config.Network, asserterConfigurationFile, + ); err != nil { + cancel() + err = fmt.Errorf("network options don't match asserter configuration file %s: %w%s", asserterConfigurationFile, err, metadata) + color.Red(err.Error()) + return results.ExitData( + Config, + nil, + nil, + err, + "", + "", + ) + } + } + + dataTester, err := tester.InitializeData( ctx, Config, Config.Network, @@ -116,10 +155,23 @@ func runCheckDataCmd(cmd *cobra.Command, args []string) { nil, // only populated when doing recursive search &SignalReceived, ) - + if err != nil { + err = fmt.Errorf("unable to initialize data tester: %w%s", err, metadata) + color.Red(err.Error()) + return results.ExitData( + Config, + nil, + nil, + err, + "", + "", + ) + } defer dataTester.CloseDatabase(ctx) g, ctx := errgroup.WithContext(ctx) + ctx = logger.AddMetadataMapToContext(ctx, metadataMap) + g.Go(func() error { return dataTester.StartPeriodicLogger(ctx) }) @@ -132,20 +184,35 @@ func runCheckDataCmd(cmd *cobra.Command, args []string) { return dataTester.StartSyncing(ctx) }) + g.Go(func() error { + return dataTester.StartPruning(ctx) + }) + g.Go(func() error { return 
dataTester.WatchEndConditions(ctx) }) - sigListeners := []context.CancelFunc{cancel} - go handleSignals(sigListeners) + g.Go(func() error { + return dataTester.StartReconcilerCountUpdater(ctx) + }) + + g.Go(func() error { + return tester.LogMemoryLoop(ctx) + }) - err = g.Wait() + g.Go(func() error { + return tester.StartServer( + ctx, + "check:data status", + dataTester, + Config.Data.StatusPort, + ) + }) - // Initialize new context because calling context - // will no longer be usable when after termination. - ctx = context.Background() + sigListeners := []context.CancelFunc{cancel} + go handleSignals(&sigListeners) // HandleErr will exit if we should not attempt // to find missing operations. - dataTester.HandleErr(ctx, err, sigListeners) + return dataTester.HandleErr(g.Wait(), &sigListeners) } diff --git a/cmd/check_perf.go b/cmd/check_perf.go new file mode 100644 index 00000000..613503af --- /dev/null +++ b/cmd/check_perf.go @@ -0,0 +1,65 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "context" + "fmt" + "time" + + "github.com/coinbase/rosetta-cli/pkg/results" + t "github.com/coinbase/rosetta-cli/pkg/tester" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +var ( + checkPerfCmd = &cobra.Command{ + Use: "check:perf", + Short: "Benchmark performance of time-critical endpoints of Asset Issuer's Rosetta Implementation", + Long: `This command can be used to benchmark the performance of time critical methods for a Rosetta server. 
+This is useful for ensuring that there are no performance degradations in the rosetta-server.`, + RunE: runCheckPerfCmd, + } +) + +func runCheckPerfCmd(_ *cobra.Command, _ []string) error { + ctx, cancel := context.WithCancel(Context) + defer cancel() + g, ctx := errgroup.WithContext(ctx) + + TotalNumEndpoints := int64(Config.Perf.NumTimesToHitEndpoints) * (Config.Perf.EndBlock - Config.Perf.StartBlock) + perfRawStats := &results.CheckPerfRawStats{AccountBalanceEndpointTotalTime: -1, BlockEndpointTotalTime: -1} + + fmt.Printf("Running Check:Perf for %s:%s for blocks %d-%d \n", Config.Network.Blockchain, Config.Network.Network, Config.Perf.StartBlock, Config.Perf.EndBlock) + + fetcher, timer, elapsed := t.SetupBenchmarking(Config) + blockEndpointTimeConstraint := time.Duration(Config.Perf.BlockEndpointTimeConstraintMs*TotalNumEndpoints) * time.Millisecond + blockEndpointCtx, blockEndpointCancel := context.WithTimeout(ctx, blockEndpointTimeConstraint) + g.Go(func() error { + return t.BmarkBlock(blockEndpointCtx, Config, fetcher, timer, elapsed, perfRawStats) + }) + defer blockEndpointCancel() + + fetcher, timer, elapsed = t.SetupBenchmarking(Config) + accountBalanceEndpointTimeConstraint := time.Duration(Config.Perf.AccountBalanceEndpointTimeConstraintMs*TotalNumEndpoints) * time.Millisecond + accountBalanceEndpointCtx, accountBalanceEndpointCancel := context.WithTimeout(ctx, accountBalanceEndpointTimeConstraint) + g.Go(func() error { + return t.BmarkAccountBalance(accountBalanceEndpointCtx, Config, fetcher, timer, elapsed, perfRawStats) + }) + defer accountBalanceEndpointCancel() + + return results.ExitPerf(Config.Perf, g.Wait(), perfRawStats) +} diff --git a/cmd/check_spec.go b/cmd/check_spec.go new file mode 100644 index 00000000..129b8c44 --- /dev/null +++ b/cmd/check_spec.go @@ -0,0 +1,449 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "context" + "fmt" + "time" + + "github.com/coinbase/rosetta-cli/pkg/results" + "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/types" + "github.com/spf13/cobra" + + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" +) + +var ( + checkSpecCmd = &cobra.Command{ + Use: "check:spec", + Short: "Check that a Rosetta implementation satisfies Rosetta spec", + Long: `Check:spec checks whether a Rosetta implementation satisfies either Coinbase-specific requirements or +minimum requirements specified in rosetta-api.org. + +By default, check:spec will verify only Coinbase spec requirements. To verify the minimum requirements as well, +add the --all flag to the check:spec command: + +rosetta-cli check:spec --all --configuration-file [filepath] + +The minimum requirements verify whether an API response contains the required fields, and that the fields are +correctly formatted with proper values. For example, it would check whether the response of /network/list +contains a list of network identifiers. 
+ +The Coinbase specific requirements are not documented in rosetta-api.org. However, we highly recommend that your +implementation satisfies them. This ensures that, when you want to integrate your asset into the Coinbase platform, +you can limit or eliminate implementation issues. + +Here are a few examples of Coinbase spec requirements: +1. The network_identifier in Rosetta configuration should be static. Network upgrade shouldn't change its value. +2. When block_identifier is not specified, the call to /block endpoint should return the tip block. +3. The online_url and offline_url should be different.`, + RunE: runCheckSpecCmd, + } +) + +type checkSpec struct { + onlineFetcher *fetcher.Fetcher + offlineFetcher *fetcher.Fetcher +} + +func newCheckSpec(ctx context.Context) (*checkSpec, error) { + if Config.Construction == nil { + return nil, cliErrs.ErrConstructionConfigMissing + } + + onlineFetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + } + + offlineFetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.Construction.MaxOfflineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + } + + if Config.ForceRetry { + onlineFetcherOpts = append(onlineFetcherOpts, fetcher.WithForceRetry()) + offlineFetcherOpts = append(offlineFetcherOpts, fetcher.WithForceRetry()) + } + + onlineFetcher := fetcher.New( + Config.OnlineURL, + onlineFetcherOpts..., + ) + offlineFetcher := fetcher.New( + Config.Construction.OfflineURL, + offlineFetcherOpts..., + ) + + _, _, fetchErr := onlineFetcher.InitializeAsserter(ctx, Config.Network, Config.ValidationFile) + if fetchErr != nil { + return nil, results.ExitData( + Config, + nil, + nil, + fmt.Errorf("unable to initialize asserter for online fetcher: %w", fetchErr.Err), + "", + "", + ) + } + + return &checkSpec{ + onlineFetcher: onlineFetcher, + offlineFetcher: offlineFetcher, + }, nil +} + +func (cs *checkSpec) networkOptions(ctx context.Context) checkSpecOutput { + if checkAllSpecs { + printInfo("validating /network/options ...\n") + output := checkSpecOutput{ + api: networkOptions, + validation: map[checkSpecRequirement]checkSpecStatus{ + version: { + status: checkSpecSuccess, + }, + allow: { + status: checkSpecSuccess, + }, + offlineMode: { + status: checkSpecSuccess, + }, + }, + } + defer printInfo("/network/options validated\n") + + // NetworkOptionsRetry handles validation of /network/options response + // This is an endpoint for offline mode + _, err := cs.offlineFetcher.NetworkOptionsRetry(ctx, Config.Network, nil) + if err != nil { + printError("unable to fetch network options: %v\n", err.Err) + markAllValidationsFailed(output) + return output + } + + return output + } + + return checkSpecOutput{} +} + +func (cs *checkSpec) networkList(ctx context.Context) checkSpecOutput { + printInfo("validating /network/list ...\n") + output := checkSpecOutput{ + api: networkList, + validation: map[checkSpecRequirement]checkSpecStatus{ + staticNetworkID: { + status: checkSpecSuccess, + coinbaseSpec: true, + }, + }, + } + + if checkAllSpecs { + output.validation[networkIDs] = checkSpecStatus{ + status: checkSpecSuccess, + } + 
output.validation[offlineMode] = checkSpecStatus{ + status: checkSpecSuccess, + } + } + + defer printInfo("/network/list validated\n") + networks, err := cs.offlineFetcher.NetworkListRetry(ctx, nil) + + // endpoint for offline mode + if err != nil { + printError("unable to fetch network list: %v\n", err.Err) + markAllValidationsFailed(output) + return output + } + + if checkAllSpecs && len(networks.NetworkIdentifiers) == 0 { + printError("network_identifiers is required") + setValidationStatusFailed(output, networkIDs) + } + + // static network ID + for _, network := range networks.NetworkIdentifiers { + if isEqual(network.Network, Config.Network.Network) && + isEqual(network.Blockchain, Config.Network.Blockchain) { + return output + } + } + + printError("network_identifier in configuration file is not returned by /network/list") + setValidationStatusFailed(output, staticNetworkID) + return output +} + +func (cs *checkSpec) accountCoins(ctx context.Context) checkSpecOutput { + if checkAllSpecs { + printInfo("validating /account/coins ...\n") + output := checkSpecOutput{ + api: accountCoins, + validation: map[checkSpecRequirement]checkSpecStatus{ + blockID: { + status: checkSpecSuccess, + }, + coins: { + status: checkSpecSuccess, + }, + }, + } + defer printInfo("/account/coins validated\n") + + if isUTXO() { + acct, _, currencies, err := cs.getAccount(ctx) + if err != nil { + printError("unable to get an account: %v\n", err) + markAllValidationsFailed(output) + return output + } + if acct == nil { + printError("%v\n", cliErrs.ErrAccountNullPointer) + markAllValidationsFailed(output) + return output + } + + _, _, _, fetchErr := cs.onlineFetcher.AccountCoinsRetry( + ctx, + Config.Network, + acct, + false, + currencies) + if fetchErr != nil { + printError("unable to get coins for account %s: %v\n", types.PrintStruct(acct), fetchErr.Err) + markAllValidationsFailed(output) + return output + } + } + + return output + } + + return checkSpecOutput{} +} + +func (cs *checkSpec) block(ctx context.Context) checkSpecOutput { + printInfo("validating /block ...\n") + output := checkSpecOutput{ + api: block, + validation: map[checkSpecRequirement]checkSpecStatus{ + defaultTip: { + status: checkSpecSuccess, + coinbaseSpec: true, + }, + }, + } + defer printInfo("/block validated\n") + + if checkAllSpecs { + output.validation[idempotent] = checkSpecStatus{ + status: checkSpecSuccess, + } + } + + res, fetchErr := cs.onlineFetcher.NetworkStatusRetry(ctx, Config.Network, nil) + if fetchErr != nil { + printError("unable to get network status: %v\n", fetchErr.Err) + markAllValidationsFailed(output) + return output + } + + if checkAllSpecs { + // multiple calls with the same hash should return the same block + var block *types.Block + tip := res.CurrentBlockIdentifier + callTimes := 3 + + for i := 0; i < callTimes; i++ { + blockID := types.PartialBlockIdentifier{ + Hash: &tip.Hash, + } + b, fetchErr := cs.onlineFetcher.BlockRetry(ctx, Config.Network, &blockID) + if fetchErr != nil { + printError("unable to fetch block %s: %v\n", types.PrintStruct(blockID), fetchErr.Err) + markAllValidationsFailed(output) + return output + } + + if block == nil { + block = b + } else if !isEqual(types.Hash(*block), types.Hash(*b)) { + printError("%v\n", cliErrs.ErrBlockNotIdempotent) + setValidationStatusFailed(output, idempotent) + } + } + } + + // fetch the tip block again + res, fetchErr = cs.onlineFetcher.NetworkStatusRetry(ctx, Config.Network, nil) + if fetchErr != nil { + printError("unable to get network status: %v\n", 
fetchErr.Err) + setValidationStatusFailed(output, defaultTip) + return output + } + tip := res.CurrentBlockIdentifier + + // tip should be returned if block_identifier is not specified + emptyBlockID := &types.PartialBlockIdentifier{} + block, fetchErr := cs.onlineFetcher.BlockRetry(ctx, Config.Network, emptyBlockID) + if fetchErr != nil { + printError("unable to fetch tip block: %v\n", fetchErr.Err) + setValidationStatusFailed(output, defaultTip) + return output + } + + // block index returned from /block should be >= the index returned by /network/status + if isNegative(block.BlockIdentifier.Index - tip.Index) { + printError("%v\n", cliErrs.ErrBlockTip) + setValidationStatusFailed(output, defaultTip) + } + + return output +} + +func (cs *checkSpec) errorObject(ctx context.Context) checkSpecOutput { + if checkAllSpecs { + printInfo("validating error object ...\n") + output := checkSpecOutput{ + api: errorObject, + validation: map[checkSpecRequirement]checkSpecStatus{ + errorCode: { + status: checkSpecSuccess, + }, + errorMessage: { + status: checkSpecSuccess, + }, + }, + } + defer printInfo("error object validated\n") + + printInfo("%v\n", "sending request to /network/status ...") + emptyNetwork := &types.NetworkIdentifier{} + _, err := cs.onlineFetcher.NetworkStatusRetry(ctx, emptyNetwork, nil) + validateErrorObject(err, output) + + printInfo("%v\n", "sending request to /network/options ...") + _, err = cs.onlineFetcher.NetworkOptionsRetry(ctx, emptyNetwork, nil) + validateErrorObject(err, output) + + printInfo("%v\n", "sending request to /account/balance ...") + emptyAcct := &types.AccountIdentifier{} + emptyPartBlock := &types.PartialBlockIdentifier{} + emptyCur := []*types.Currency{} + _, _, _, err = cs.onlineFetcher.AccountBalanceRetry(ctx, emptyNetwork, emptyAcct, emptyPartBlock, emptyCur) + validateErrorObject(err, output) + + if isUTXO() { + printInfo("%v\n", "sending request to /account/coins ...") + _, _, _, err = cs.onlineFetcher.AccountCoinsRetry(ctx, emptyNetwork, emptyAcct, false, emptyCur) + validateErrorObject(err, output) + } else { + printInfo("%v\n", "skip /account/coins for account based chain") + } + + printInfo("%v\n", "sending request to /block ...") + _, err = cs.onlineFetcher.BlockRetry(ctx, emptyNetwork, emptyPartBlock) + validateErrorObject(err, output) + + printInfo("%v\n", "sending request to /block/transaction ...") + emptyTx := []*types.TransactionIdentifier{} + emptyBlock := &types.BlockIdentifier{} + _, err = cs.onlineFetcher.UnsafeTransactions(ctx, emptyNetwork, emptyBlock, emptyTx) + validateErrorObject(err, output) + + return output + } + + return checkSpecOutput{} +} + +// Searching for an account backwards from the tip +func (cs *checkSpec) getAccount(ctx context.Context) ( + *types.AccountIdentifier, + *types.PartialBlockIdentifier, + []*types.Currency, + error) { + res, err := cs.onlineFetcher.NetworkStatusRetry(ctx, Config.Network, nil) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to get network status of network %s: %w", types.PrintStruct(Config.Network), err.Err) + } + + var acct *types.AccountIdentifier + var blockID *types.PartialBlockIdentifier + tip := res.CurrentBlockIdentifier.Index + genesis := res.GenesisBlockIdentifier.Index + currencies := []*types.Currency{} + + for i := tip; i >= genesis && acct == nil; i-- { + blockID = &types.PartialBlockIdentifier{ + Index: &i, + } + + block, err := cs.onlineFetcher.BlockRetry(ctx, Config.Network, blockID) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to fetch block 
at index %d: %w", i, err.Err) + } + + // looking for an account in block transactions + for _, tx := range block.Transactions { + for _, op := range tx.Operations { + if op.Account != nil && op.Amount.Currency != nil { + acct = op.Account + currencies = append(currencies, op.Amount.Currency) + break + } + } + + if acct != nil { + break + } + } + } + + return acct, blockID, currencies, nil +} + +func runCheckSpecCmd(_ *cobra.Command, _ []string) error { + ctx := context.Background() + cs, err := newCheckSpec(ctx) + if err != nil { + return fmt.Errorf("unable to create checkSpec object with online URL: %w", err) + } + + output := []checkSpecOutput{} + // validate api endpoints + output = append(output, cs.networkList(ctx)) + output = append(output, cs.networkOptions(ctx)) + output = append(output, cs.accountCoins(ctx)) + output = append(output, cs.block(ctx)) + output = append(output, cs.errorObject(ctx)) + output = append(output, twoModes()) + + printInfo("check:spec is complete\n") + printCheckSpecOutputHeader() + for _, o := range output { + printCheckSpecOutputBody(o) + } + + return nil +} diff --git a/cmd/check_spec_utils.go b/cmd/check_spec_utils.go new file mode 100644 index 00000000..bc119727 --- /dev/null +++ b/cmd/check_spec_utils.go @@ -0,0 +1,177 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
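Every endpoint check above returns a checkSpecOutput and relies on the helper functions defined next in check_spec_utils.go. As a rough sketch of the pattern (not part of this change), an additional validation would start every requirement at Success, flip a single requirement with setValidationStatusFailed when a specific check fails, and flip everything with markAllValidationsFailed when the underlying request itself fails. The function below is hypothetical and only reuses names that already exist in this package:

// Hypothetical validation in package cmd, shown only to illustrate the
// checkSpecOutput pattern used by networkList, block, and the other checks.
func (cs *checkSpec) tipIsPresent(ctx context.Context) checkSpecOutput {
	output := checkSpecOutput{
		api: block,
		validation: map[checkSpecRequirement]checkSpecStatus{
			defaultTip: {status: checkSpecSuccess, coinbaseSpec: true},
		},
	}

	res, fetchErr := cs.onlineFetcher.NetworkStatusRetry(ctx, Config.Network, nil)
	if fetchErr != nil {
		// A failed request invalidates every requirement in this output.
		printError("unable to get network status: %v\n", fetchErr.Err)
		markAllValidationsFailed(output)
		return output
	}

	// A specific check flips only the requirement it covers.
	if res.CurrentBlockIdentifier == nil {
		setValidationStatusFailed(output, defaultTip)
	}
	return output
}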
+ +package cmd + +import ( + "fmt" + "strconv" + + "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/fatih/color" + + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" +) + +type checkSpecAPI string +type checkSpecRequirement string + +const ( + networkList checkSpecAPI = "/network/list" + networkOptions checkSpecAPI = "/network/options" + accountCoins checkSpecAPI = "/account/coins" + block checkSpecAPI = "/block" + errorObject checkSpecAPI = "error object" + modes checkSpecAPI = "modes" + + networkIDs checkSpecRequirement = "network_identifiers is required" + offlineMode checkSpecRequirement = "endpoint should work in offline mode" + staticNetworkID checkSpecRequirement = "network_identifier must be static" + version checkSpecRequirement = "field version is required" + allow checkSpecRequirement = "field allow is required" + + blockID checkSpecRequirement = "block_identifier is required" + coins checkSpecRequirement = "field coins is required" + idempotent checkSpecRequirement = "same hash should return the same block" + defaultTip checkSpecRequirement = "tip should be returned if block_identifier is not specified" + + errorCode checkSpecRequirement = "error code is required" + errorMessage checkSpecRequirement = "error message is required" + diffURLs checkSpecRequirement = "offline_url should be different from offline_url and not empty" + + checkSpecSuccess string = "Success" + checkSpecFailure string = "Failure" + cbSpec bool = true +) + +type checkSpecStatus struct { + status string + coinbaseSpec bool +} + +type checkSpecOutput struct { + api checkSpecAPI + validation map[checkSpecRequirement]checkSpecStatus +} + +func twoModes() checkSpecOutput { + output := checkSpecOutput{ + api: modes, + validation: map[checkSpecRequirement]checkSpecStatus{ + diffURLs: { + status: checkSpecSuccess, + coinbaseSpec: cbSpec, + }, + }, + } + + if isEmpty(Config.OnlineURL) || + isEmpty(Config.Construction.OfflineURL) || + isEqual(Config.OnlineURL, Config.Construction.OfflineURL) { + setValidationStatusFailed(output, diffURLs) + } + + return output +} + +func markAllValidationsFailed(output checkSpecOutput) { + for k, v := range output.validation { + output.validation[k] = checkSpecStatus{ + status: checkSpecFailure, + coinbaseSpec: v.coinbaseSpec, + } + } +} + +func setValidationStatusFailed(output checkSpecOutput, req checkSpecRequirement) { + output.validation[req] = checkSpecStatus{ + status: checkSpecFailure, + coinbaseSpec: output.validation[req].coinbaseSpec, + } +} + +func validateErrorObject(err *fetcher.Error, output checkSpecOutput) { + if err != nil { + if err.ClientErr != nil && isNegative(int64(err.ClientErr.Code)) { + printError("%v\n", cliErrs.ErrErrorNegativeCode) + setValidationStatusFailed(output, errorCode) + } + + if err.ClientErr != nil && isEmpty(err.ClientErr.Message) { + printError("%v\n", cliErrs.ErrErrorEmptyMessage) + setValidationStatusFailed(output, errorMessage) + } + } +} + +func printInfo(format string, a ...interface{}) { + fmt.Printf(format, a...) +} + +func printError(format string, a ...interface{}) { + fmt.Print(color.RedString(format, a...)) +} + +func printSuccess(format string, a ...interface{}) { + fmt.Print(color.GreenString(format, a...)) +} + +func printValidationResult(format string, css checkSpecStatus, a ...interface{}) { + if css.status == checkSpecFailure { + printError(format, a...) + } else { + printSuccess(format, a...) 
+ } +} + +func printCheckSpecOutputHeader() { + printInfo("%v\n", "+--------------------------+-------------------------------------------------------------------+-----------+-----------------+") + printInfo("%v\n", "| API | Requirement | Status | Coinbase Spec |") + printInfo("%v\n", "+--------------------------+-------------------------------------------------------------------+-----------+-----------------+") +} + +func printCheckSpecOutputBody(output checkSpecOutput) { + for k, v := range output.validation { + // print api + printInfo("%v", "| ") + printValidationResult("%v", v, output.api) + for j := 0; j < 24-len(output.api); j++ { + printInfo("%v", " ") + } + + // print requirement description + printInfo("%v", "| ") + printValidationResult("%v", v, k) + for j := 0; j < 65-len(k); j++ { + printInfo(" ") + } + + // print validation status + printInfo("%v", "| ") + printValidationResult("%v", v, v.status) + for j := 0; j < 9-len(v.status); j++ { + printInfo("%v", " ") + } + + // print coinbase spec flag + printInfo("%v", "| ") + printValidationResult("%v", v, v.coinbaseSpec) + for j := 0; j < 11-len(strconv.FormatBool(v.coinbaseSpec)); j++ { + printInfo("%v", " ") + } + + printInfo("%v\n", "|") + printInfo("%v\n", "+--------------------------+-------------------------------------------------------------------+-----------+-----------------+") + } +} diff --git a/cmd/configuration_create.go b/cmd/configuration_create.go index 99e56d9f..42c728ef 100644 --- a/cmd/configuration_create.go +++ b/cmd/configuration_create.go @@ -15,7 +15,7 @@ package cmd import ( - "log" + "fmt" "github.com/coinbase/rosetta-cli/configuration" @@ -27,13 +27,15 @@ var ( configurationCreateCmd = &cobra.Command{ Use: "configuration:create", Short: "Create a default configuration file at the provided path", - Run: runConfigurationCreateCmd, + RunE: runConfigurationCreateCmd, Args: cobra.ExactArgs(1), } ) -func runConfigurationCreateCmd(cmd *cobra.Command, args []string) { +func runConfigurationCreateCmd(cmd *cobra.Command, args []string) error { if err := utils.SerializeAndWrite(args[0], configuration.DefaultConfiguration()); err != nil { - log.Fatalf("%s: unable to save configuration file to %s", err.Error(), args[0]) + return fmt.Errorf("unable to save configuration file to %s: %w", args[0], err) } + + return nil } diff --git a/cmd/configuration_validate.go b/cmd/configuration_validate.go index 178d2799..6fb2c9d2 100644 --- a/cmd/configuration_validate.go +++ b/cmd/configuration_validate.go @@ -15,10 +15,11 @@ package cmd import ( - "log" + "fmt" "github.com/coinbase/rosetta-cli/configuration" + "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -26,16 +27,17 @@ var ( configurationValidateCmd = &cobra.Command{ Use: "configuration:validate", Short: "Ensure a configuration file at the provided path is formatted correctly", - Run: runConfigurationValidateCmd, + RunE: runConfigurationValidateCmd, Args: cobra.ExactArgs(1), } ) -func runConfigurationValidateCmd(cmd *cobra.Command, args []string) { - _, err := configuration.LoadConfiguration(args[0]) +func runConfigurationValidateCmd(cmd *cobra.Command, args []string) error { + _, err := configuration.LoadConfiguration(Context, args[0]) if err != nil { - log.Fatalf("%s: unable to save configuration file to %s", err.Error(), args[0]) + return fmt.Errorf("configuration validation failed %s: %w", args[0], err) } - log.Println("Configuration file validated!") + color.Green("Configuration file validated!") + return nil } diff --git a/cmd/key_gen.go b/cmd/key_gen.go new 
file mode 100644 index 00000000..49834644 --- /dev/null +++ b/cmd/key_gen.go @@ -0,0 +1,56 @@ +// Copyright 2023 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "encoding/hex" + "errors" + + "github.com/coinbase/rosetta-sdk-go/keys" + "github.com/coinbase/rosetta-sdk-go/types" + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + keyGenCmd = &cobra.Command{ + Use: "key:gen", + Short: "Used to generate a public private key pair", + Long: `Used to generate a public private key pair + It supports Keypair specified by https://github.com/coinbase/rosetta-specifications + Please provide valid CurveType`, + RunE: runKeyGenCmd, + } +) + +func runKeyGenCmd(_ *cobra.Command, _ []string) error { + if len(curveType) == 0 { + color.Red("please provide a non-empty curve type") + return errors.New("invalid curve-type string") + } + + curve := types.CurveType(curveType) + + color.Yellow("Generating new %s keypair...", curve) + keyPair, err := keys.GenerateKeypair(curve) + if err != nil { + color.Red("failed to generate keypair with error %#v", err) + } + + color.Green("CurveType: %s", curve) + color.Green("Public Key (hex): %s", hex.EncodeToString(keyPair.PublicKey.Bytes)) + color.Green("Private Key (hex): %s", hex.EncodeToString(keyPair.PrivateKey)) + return nil +} diff --git a/cmd/key_sign.go b/cmd/key_sign.go new file mode 100644 index 00000000..41a114fa --- /dev/null +++ b/cmd/key_sign.go @@ -0,0 +1,79 @@ +// Copyright 2023 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
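For reference, the keypair generation performed by key:gen above can also be done directly against rosetta-sdk-go. The standalone sketch below is illustrative only; it assumes the keys and types packages behave the way key_gen.go uses them, and it simply prints the hex-encoded pair for a secp256k1 curve:

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/coinbase/rosetta-sdk-go/keys"
	"github.com/coinbase/rosetta-sdk-go/types"
)

func main() {
	// Equivalent of `rosetta-cli key:gen --curve-type secp256k1`.
	keyPair, err := keys.GenerateKeypair(types.Secp256k1)
	if err != nil {
		log.Fatalf("unable to generate keypair: %v", err)
	}

	fmt.Println("Public Key (hex): ", hex.EncodeToString(keyPair.PublicKey.Bytes))
	fmt.Println("Private Key (hex):", hex.EncodeToString(keyPair.PrivateKey))
}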
+ +package cmd + +import ( + "encoding/hex" + "errors" + + "github.com/coinbase/rosetta-sdk-go/keys" + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + keySignCmd = &cobra.Command{ + Use: "key:sign", + Short: "Sign an unsigned payload with given private key", + Long: `Sign an unsigned payload with given private key + It supports Keypair specified by https://github.com/coinbase/rosetta-specifications + Please provide valid PrivateKey, CurveType, SignaturePayload`, + RunE: runKeySignCmd, + } +) + +func runKeySignCmd(_ *cobra.Command, _ []string) error { + if Config.Sign == nil { + return errors.New("sign configuration is missing") + } + + if len(Config.Sign.PrivateKey) == 0 || + Config.Sign.PubKey.CurveType == "" || + Config.Sign.SigningPayload == nil || + Config.Sign.SigningPayload.SignatureType == "" { + color.Red("invalid sign input") + } + + keyPair, err := keys.ImportPrivateKey(Config.Sign.PrivateKey, Config.Sign.PubKey.CurveType) + if err != nil { + color.Red("unable to import private keys %#v", err) + return err + } + + err = keyPair.IsValid() + if err != nil { + color.Red("keypair invalid with err %#v", err) + return err + } + + signer, err := keyPair.Signer() + if err != nil { + color.Red("signer invalid with err %#v", err) + return err + } + + signingPayload := Config.Sign.SigningPayload + signatureType := Config.Sign.SigningPayload.SignatureType + + sign, err := signer.Sign(signingPayload, signatureType) + if err != nil { + color.Red("unable to sign with err %#v", err) + return err + } + + hexSig := hex.EncodeToString(sign.Bytes) + color.Green("Signature: %s", hexSig) + return nil +} diff --git a/cmd/key_verify.go b/cmd/key_verify.go new file mode 100644 index 00000000..838228fd --- /dev/null +++ b/cmd/key_verify.go @@ -0,0 +1,69 @@ +// Copyright 2023 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
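key:sign above and the key:verify command that follows both work through the signer returned by keys.KeyPair.Signer(). The round-trip sketch below is a hedged illustration, not part of the diff: the account address and payload bytes are placeholders, an Edwards25519 key is paired with the ed25519 signature type, and it assumes Sign and Verify behave as they are used in key_sign.go and key_verify.go:

package main

import (
	"encoding/hex"
	"log"

	"github.com/coinbase/rosetta-sdk-go/keys"
	"github.com/coinbase/rosetta-sdk-go/types"
)

func main() {
	// In the CLI, the private key and payload come from Config.Sign.
	keyPair, err := keys.GenerateKeypair(types.Edwards25519)
	if err != nil {
		log.Fatalf("keygen: %v", err)
	}

	payload := &types.SigningPayload{
		AccountIdentifier: &types.AccountIdentifier{Address: "placeholder address"},
		Bytes:             []byte("placeholder payload"),
		SignatureType:     types.Ed25519,
	}

	signer, err := keyPair.Signer()
	if err != nil {
		log.Fatalf("signer: %v", err)
	}

	signature, err := signer.Sign(payload, types.Ed25519)
	if err != nil {
		log.Fatalf("sign: %v", err)
	}

	// key:verify rebuilds the signer from only the public key; here the same
	// signer is reused for brevity.
	if err := signer.Verify(signature); err != nil {
		log.Fatalf("verify: %v", err)
	}
	log.Printf("Signature: %s", hex.EncodeToString(signature.Bytes))
}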
+ +package cmd + +import ( + "errors" + "github.com/coinbase/rosetta-sdk-go/keys" + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + keyVerifyCmd = &cobra.Command{ + Use: "key:verify", + Short: "Verify the signature using the public key", + Long: `Verify the signature using the public key + It supports Keypair specified by https://github.com/coinbase/rosetta-specifications`, + RunE: runKeyVerifyCmd, + } +) + +func runKeyVerifyCmd(_ *cobra.Command, _ []string) error { + if Config.Sign == nil { + return errors.New("sign configuration is missing") + } + + if len(Config.Sign.Signature.Bytes) == 0 || + Config.Sign.SigningPayload == nil || + Config.Sign.SigningPayload.SignatureType == "" || + Config.Sign.PubKey == nil { + color.Red("invalid verify input") + } + + keyPair := keys.KeyPair{ + PublicKey: Config.Sign.PubKey, + } + + signer, err := keyPair.Signer() + if err != nil { + color.Red("signer invalid with err %#v", err) + return err + } + + signature := Config.Sign.Signature + signature.SignatureType = Config.Sign.SigningPayload.SignatureType + signature.SigningPayload = Config.Sign.SigningPayload + signature.PublicKey = Config.Sign.PubKey + + err = signer.Verify(signature) + if err != nil { + color.Red("invalid signature with err %#v", err) + return err + } + + color.Green("Signature Verified.") + return nil +} diff --git a/cmd/root.go b/cmd/root.go index a8fab93a..db54134b 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -17,9 +17,13 @@ package cmd import ( "context" "fmt" + "github.com/coinbase/rosetta-sdk-go/types" "log" "os" "os/signal" + "path" + "runtime" + "runtime/pprof" "syscall" "github.com/coinbase/rosetta-cli/configuration" @@ -29,34 +33,171 @@ import ( "github.com/spf13/cobra" ) +const ( + // configEnvKey is an env variable name that sets a config file location + configEnvKey = "ROSETTA_CONFIGURATION_FILE" +) + var ( rootCmd = &cobra.Command{ - Use: "rosetta-cli", - Short: "CLI for the Rosetta API", + Use: "rosetta-cli", + Short: "CLI for the Rosetta API", + PersistentPreRunE: rootPreRun, } - configurationFile string + configurationFile string + cpuProfile string + memProfile string + blockProfile string + onlineURL string + offlineURL string + startIndex int64 + endIndex int64 + dataResultFile string + constructionResultFile string + dataDirectory string + inMemoryMode bool + tableSize int64 + requestUUID string + statusPort uint + InfoMetaData string + targetAccount string // Config is the populated *configuration.Configuration from // the configurationFile. If none is provided, this is set // to the default settings. Config *configuration.Configuration + // Context is the context to use for this invocation of the cli. + Context context.Context + // SignalReceived is set to true when a signal causes us to exit. This makes // determining the error message to show on exit much more easy. SignalReceived = false + + // cpuProfileCleanup is called after the root command is executed to + // cleanup a running cpu profile. + cpuProfileCleanup func() + + // blockProfileCleanup is called after the root command is executed to + // cleanup a running block profile. + blockProfileCleanup func() + + // OnlyChanges is a boolean indicating if only the balance changes should be + // logged to the console. + OnlyChanges bool + + // allSpecs is a boolean indicating whether check:spec should verify only Coinbase + // spec requirements, or the minimum requirements as well. 
+ checkAllSpecs bool + + // If non-empty, used to validate that /network/options matches the contents of the file + // located at this path. The intended use case is someone previously ran + // utils:asserter-configuration `asserterConfigurationFile`, so the validation is being done + // against the same file (or an identical copy of that file at this path). This file contains a + // snapshot of /network/options (and some other unrelated configuration options), so this will + // trigger validation of the current /network/options against that. See + // utils:asserter-configuration for more details. + // + // The main goal is to verify these core configuration options don't change across releases, + // which has caused production incidents in the past. This can be used for both check:data + // and check:construction. + asserterConfigurationFile string + + // curveType is used to specify curve type to generate a keypair using rosetta-cli key:gen + // command + curveType string ) +// rootPreRun is executed before the root command runs and sets up cpu +// profiling. +// +// Based on https://golang.org/pkg/runtime/pprof/#hdr-Profiling_a_Go_program +func rootPreRun(*cobra.Command, []string) error { + if cpuProfile != "" { + f, err := os.Create(path.Clean(cpuProfile)) + if err != nil { + return fmt.Errorf("unable to create CPU profile file: %w", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + if err := f.Close(); err != nil { + log.Printf("error while closing cpu profile file: %v\n", err) + } + return err + } + + cpuProfileCleanup = func() { + pprof.StopCPUProfile() + if err := f.Close(); err != nil { + log.Printf("error while closing cpu profile file: %v\n", err) + } + } + } + + if blockProfile != "" { + runtime.SetBlockProfileRate(1) + f, err := os.Create(path.Clean(blockProfile)) + if err != nil { + return fmt.Errorf("unable to create block profile file: %w", err) + } + + p := pprof.Lookup("block") + blockProfileCleanup = func() { + if err := p.WriteTo(f, 0); err != nil { + log.Printf("error while writing block profile file: %v\n", err) + } + if err := f.Close(); err != nil { + log.Printf("error while closing block profile file: %v\n", err) + } + } + } + + return nil +} + +// rootPostRun is executed after the root command runs and performs memory +// profiling. +func rootPostRun() { + if cpuProfileCleanup != nil { + cpuProfileCleanup() + } + + if blockProfileCleanup != nil { + blockProfileCleanup() + } + + if memProfile != "" { + f, err := os.Create(path.Clean(memProfile)) + if err != nil { + log.Printf("error while creating mem-profile file: %v", err) + return + } + + defer func() { + if err := f.Close(); err != nil { + log.Printf("error while closing mem-profile file: %v", err) + } + }() + + runtime.GC() + if err := pprof.WriteHeapProfile(f); err != nil { + log.Printf("error while writing heap profile: %v", err) + } + } +} + // Execute handles all invocations of the // rosetta-cli cmd. func Execute() error { + defer rootPostRun() return rootCmd.Execute() } func init() { cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar( + rootFlags := rootCmd.PersistentFlags() + rootFlags.StringVar( &configurationFile, "configuration-file", "", @@ -67,6 +208,24 @@ with the defaults), run rosetta-cli configuration:create. 
Any fields not populated in the configuration file will be populated with default values.`, ) + rootFlags.StringVar( + &cpuProfile, + "cpu-profile", + "", + `Save the pprof cpu profile in the specified file`, + ) + rootFlags.StringVar( + &memProfile, + "mem-profile", + "", + `Save the pprof mem profile in the specified file`, + ) + rootFlags.StringVar( + &blockProfile, + "block-profile", + "", + `Save the pprof block profile in the specified file`, + ) rootCmd.AddCommand(versionCmd) // Configuration Commands @@ -74,10 +233,142 @@ default values.`, rootCmd.AddCommand(configurationValidateCmd) // Check commands + checkDataCmd.Flags().StringVar( + &asserterConfigurationFile, + "asserter-configuration-file", + "", // Default to skip validation + `Check that /network/options matches contents of file at this path`, + ) + + checkDataCmd.Flags().StringVar( + &onlineURL, + "online-url", + "", + "Override online node url in configuration file", + ) + + checkDataCmd.Flags().StringVar( + &targetAccount, + "target-account", + "", + "Override target account in configuration file", + ) + + checkDataCmd.Flags().Int64Var( + &startIndex, + "start-block", + -1, + `Start-block is the block height to start syncing from. This will override the start_index from configuration file`, + ) + + checkDataCmd.Flags().Int64Var( + &endIndex, + "end-block", + -1, + `End-block configures the syncer to stop once reaching a particular block height. This will override the index from configuration file`, + ) + + checkDataCmd.Flags().StringVar( + &dataResultFile, + "result-file", + "", + "Result-file configures the location of validation result. This will override the results_output_file from configuration file", + ) + + checkDataCmd.Flags().StringVar( + &dataDirectory, + "data-dir", + "", + "Data-dir configures the location of logs and data for validation. This will override the data_directory from configuration file", + ) + + checkDataCmd.Flags().Int64Var( + &tableSize, + "table-size", + -1, + "Table-size configures the TableSize for badger DB. If table-size != -1, this will override the table_size from configuration file", + ) + + checkDataCmd.Flags().BoolVar( + &inMemoryMode, + "in-memory-mode", + false, + "In-memory-mode configures badger DB inMeomry option. Only when in-memory-mode=true, this will override the all_in_memory_enabled", + ) + + checkDataCmd.Flags().StringVar( + &requestUUID, + "requestUUID", + "", + "requestUUID configures the requestUUID in logs, which aims to enable search logs by requestUUID", + ) + + checkDataCmd.Flags().StringVar( + &InfoMetaData, + "info-metadata", + "", + "metadata configures the metadata which aims to show in logs", + ) + + checkDataCmd.Flags().UintVar( + &statusPort, + "status-port", + 0, + "status-port configures the status query port, this will override the status_port", + ) + rootCmd.AddCommand(checkDataCmd) + checkConstructionCmd.Flags().StringVar( + &asserterConfigurationFile, + "asserter-configuration-file", + "", // Default to skip validation + `Check that /network/options matches contents of file at this path`, + ) + + checkConstructionCmd.Flags().StringVar( + &onlineURL, + "online-url", + "", + "Override online node url in configuration file", + ) + + checkConstructionCmd.Flags().StringVar( + &offlineURL, + "offline-url", + "", + "Override offline node url in configuration file", + ) + + checkConstructionCmd.Flags().StringVar( + &constructionResultFile, + "result-file", + "", + "Result-file configures the location of validation result. 
This will override the results_output_file from configuration file", + ) + + checkConstructionCmd.Flags().StringVar( + &requestUUID, + "requestUUID", + "", + "requestUUID configures the requestUUID in logs, which aims to enable search logs by requestUUID", + ) + + checkConstructionCmd.Flags().UintVar( + &statusPort, + "status-port", + 0, + "status-port configures the status query port, this will override the status_port", + ) + rootCmd.AddCommand(checkConstructionCmd) // View Commands + viewBlockCmd.Flags().BoolVar( + &OnlyChanges, + "only-changes", + false, + `Only print balance changes for accounts in the block`, + ) rootCmd.AddCommand(viewBlockCmd) rootCmd.AddCommand(viewAccountCmd) rootCmd.AddCommand(viewNetworksCmd) @@ -85,17 +376,114 @@ default values.`, // Utils rootCmd.AddCommand(utilsAsserterConfigurationCmd) rootCmd.AddCommand(utilsTrainZstdCmd) + + // Benchmark commands + rootCmd.AddCommand(checkPerfCmd) + + // check:spec + checkSpecCmd.Flags().BoolVar( + &checkAllSpecs, + "all", + false, + `Verify both minimum and Coinbase spec requirements`, + ) + rootCmd.AddCommand(checkSpecCmd) + + // Key Sign command + rootCmd.AddCommand(keySignCmd) + + // Key Verify command + rootCmd.AddCommand(keyVerifyCmd) + + keyGenCmd.Flags().StringVar( + &curveType, + "curve-type", + string(types.Secp256k1), + "curve type used to generate the public/private keypair", + ) + // Key Gen command + rootCmd.AddCommand(keyGenCmd) } func initConfig() { + Context = context.Background() var err error + + // Use path provided by the environment variable if config path arg is not set. + // Default configuration will be used if the env var is not + if len(configurationFile) == 0 { + configurationFile = os.Getenv(configEnvKey) + } + if len(configurationFile) == 0 { Config = configuration.DefaultConfiguration() } else { - Config, err = configuration.LoadConfiguration(configurationFile) + Config, err = configuration.LoadConfiguration(Context, configurationFile) } + if err != nil { - log.Fatalf("%s: unable to load configuration", err.Error()) + log.Fatalf("unable to load configuration: %s", err.Error()) + } + + // Override node url in configuration file when it's explicitly set via CLI + if len(onlineURL) != 0 { + Config.OnlineURL = onlineURL + } + if len(offlineURL) != 0 { + Config.Construction.OfflineURL = offlineURL + } + + if len(targetAccount) != 0 { + Config.TargetAccount = targetAccount + } + + // Override start and end syncing index in configuration file when it's explicitly set via CLI + if startIndex != -1 { + Config.Data.StartIndex = &startIndex + // Configures rosetta-cli to lookup the balance of newly seen accounts at the + // parent block before applying operations. Otherwise the balance will be 0. 
+ Config.Data.InitialBalanceFetchDisabled = false + } + + if endIndex != -1 { + Config.Data.EndConditions.Index = &endIndex + } + + if len(dataResultFile) != 0 { + Config.Data.ResultsOutputFile = dataResultFile + } + + if len(constructionResultFile) != 0 { + Config.Construction.ResultsOutputFile = constructionResultFile + } + + if len(dataDirectory) != 0 { + Config.DataDirectory = dataDirectory + } + + if inMemoryMode { + Config.AllInMemoryEnabled = inMemoryMode + } + + if tableSize >= 2 && tableSize <= 100 { + Config.TableSize = &tableSize + } else if tableSize != -1 { + log.Fatalf("table-size %d is not in the range [2, 100], please check your input", tableSize) + } + + if len(requestUUID) != 0 { + Config.RequestUUID = requestUUID + } + + if statusPort > 0 { + Config.Data.StatusPort = statusPort + if Config.Construction != nil { + Config.Construction.StatusPort = statusPort + } + } + + if len(InfoMetaData) != 0 { + Config.InfoMetaData = InfoMetaData } } @@ -105,7 +493,7 @@ func ensureDataDirectoryExists() { if len(Config.DataDirectory) == 0 { tmpDir, err := utils.CreateTempDir() if err != nil { - log.Fatalf("%s: unable to create temporary directory", err.Error()) + log.Fatalf("unable to create temporary directory: %s", err.Error()) } Config.DataDirectory = tmpDir @@ -115,14 +503,14 @@ func ensureDataDirectoryExists() { // handleSignals handles OS signals so we can ensure we close database // correctly. We call multiple sigListeners because we // may need to cancel more than 1 context. -func handleSignals(listeners []context.CancelFunc) { +func handleSignals(listeners *[]context.CancelFunc) { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-sigs color.Red("Received signal: %s", sig) SignalReceived = true - for _, listener := range listeners { + for _, listener := range *listeners { listener() } }() @@ -132,6 +520,6 @@ var versionCmd = &cobra.Command{ Use: "version", Short: "Print rosetta-cli version", Run: func(cmd *cobra.Command, args []string) { - fmt.Println("v0.5.0") + fmt.Println("v0.10.3") }, } diff --git a/cmd/utils_asserter_configuration.go b/cmd/utils_asserter_configuration.go index 8b22c056..db8e4100 100644 --- a/cmd/utils_asserter_configuration.go +++ b/cmd/utils_asserter_configuration.go @@ -15,12 +15,14 @@ package cmd import ( - "context" - "log" + "fmt" + "sort" "time" + "github.com/coinbase/rosetta-sdk-go/asserter" "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -30,39 +32,53 @@ var ( Short: "Generate a static configuration file for the Asserter", Long: `In production deployments, it is useful to initialize the response Asserter (https://github.com/coinbase/rosetta-sdk-go/tree/master/asserter) using -a static configuration instead of intializing a configuration dynamically +a static configuration instead of initializing a configuration dynamically from the node. This allows a client to error on new types/statuses that may have been added in an update instead of silently erroring. 
To use this command, simply provide an absolute path as the argument for where the configuration file should be saved (in JSON).`, - Run: runCreateConfigurationCmd, + RunE: runCreateConfigurationCmd, Args: cobra.ExactArgs(1), } ) -func runCreateConfigurationCmd(cmd *cobra.Command, args []string) { - ctx := context.Background() - +func runCreateConfigurationCmd(cmd *cobra.Command, args []string) error { // Create a new fetcher newFetcher := fetcher.New( Config.OnlineURL, fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), ) // Initialize the fetcher's asserter - _, _, fetchErr := newFetcher.InitializeAsserter(ctx, Config.Network) + _, _, fetchErr := newFetcher.InitializeAsserter(Context, Config.Network, Config.ValidationFile) if fetchErr != nil { - log.Fatalf("%s: failed to initialize asserter", fetchErr.Err.Error()) + return fmt.Errorf("failed to initialize asserter for fetcher: %w", fetchErr.Err) } configuration, err := newFetcher.Asserter.ClientConfiguration() if err != nil { - log.Fatalf("%s: unable to generate spec", err.Error()) + return fmt.Errorf("unable to generate asserter configuration: %w", err) } + sortArrayFieldsOnConfiguration(configuration) + if err := utils.SerializeAndWrite(args[0], configuration); err != nil { - log.Fatalf("%s: unable to serialize asserter configuration", err.Error()) + return fmt.Errorf("unable to serialize asserter configuration: %w", err) } + + color.Green("Configuration file saved!") + return nil +} + +func sortArrayFieldsOnConfiguration(configuration *asserter.Configuration) { + sort.Strings(configuration.AllowedOperationTypes) + sort.Slice(configuration.AllowedOperationStatuses, func(i, j int) bool { + return configuration.AllowedOperationStatuses[i].Status < configuration.AllowedOperationStatuses[j].Status + }) + sort.Slice(configuration.AllowedErrors, func(i, j int) bool { + return configuration.AllowedErrors[i].Code < configuration.AllowedErrors[j].Code + }) } diff --git a/cmd/utils_asserter_configuration_test.go b/cmd/utils_asserter_configuration_test.go new file mode 100644 index 00000000..d7844e90 --- /dev/null +++ b/cmd/utils_asserter_configuration_test.go @@ -0,0 +1,111 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
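Sorting the array fields keeps the serialized output of utils:asserter-configuration deterministic, so the generated file can be committed and compared across releases. A small sketch of that comparison, illustrative only: the paths old.json and new.json are hypothetical, and it reuses the utils.LoadAndParse helper that validate_asserter_config.go later in this diff also depends on:

package main

import (
	"log"
	"reflect"

	"github.com/coinbase/rosetta-sdk-go/asserter"
	"github.com/coinbase/rosetta-sdk-go/utils"
)

func main() {
	// Hypothetical snapshots produced by `rosetta-cli utils:asserter-configuration`
	// at two different points in time.
	var previous, current asserter.Configuration
	if err := utils.LoadAndParse("old.json", &previous); err != nil {
		log.Fatalf("unable to load previous snapshot: %v", err)
	}
	if err := utils.LoadAndParse("new.json", &current); err != nil {
		log.Fatalf("unable to load current snapshot: %v", err)
	}

	// With sorted array fields, a reordering of types, statuses, or errors no
	// longer shows up as a difference; only real drift does.
	if !reflect.DeepEqual(previous, current) {
		log.Fatal("asserter configuration changed between releases")
	}
	log.Println("asserter configuration unchanged")
}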
+ +package cmd + +import "github.com/coinbase/rosetta-sdk-go/types" + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/coinbase/rosetta-sdk-go/asserter" +) + +var ( + basicNetwork = &types.NetworkIdentifier{ + Blockchain: "blockchain", + Network: "network", + } + + basicBlock = &types.BlockIdentifier{ + Index: 10, + Hash: "block 10", + } + + allowedOperationTypes = []string{"OUTPUT", "INPUT", "TRANSFER"} + + allowedOperationStatuses = []*types.OperationStatus{ + { + Status: "SUCCESS", + Successful: true, + }, + { + Status: "SKIPPED", + Successful: true, + }, + } + + allowedErrors = []*types.Error{ + { + Code: 4, + Message: "Block not found", + Retriable: false, + }, + { + Code: 0, + Message: "Endpoint not implemented", + Retriable: false, + }, + { + Code: 3, + Message: "Bitcoind error", + Retriable: false, + }, + } + + timestampStartIndex = int64(6) +) + +func TestSortArrayFields(t *testing.T) { + var clientConfiguration = &asserter.Configuration{ + NetworkIdentifier: basicNetwork, + GenesisBlockIdentifier: basicBlock, + AllowedOperationTypes: allowedOperationTypes, + AllowedOperationStatuses: allowedOperationStatuses, + AllowedErrors: allowedErrors, + AllowedTimestampStartIndex: timestampStartIndex, + } + var assert = assert.New(t) + sortArrayFieldsOnConfiguration(clientConfiguration) + assert.Equal([]string{"INPUT", "OUTPUT", "TRANSFER"}, clientConfiguration.AllowedOperationTypes) + assert.Equal([]*types.OperationStatus{ + { + Status: "SKIPPED", + Successful: true, + }, + { + Status: "SUCCESS", + Successful: true, + }, + }, clientConfiguration.AllowedOperationStatuses) + assert.Equal([]*types.Error{ + { + Code: 0, + Message: "Endpoint not implemented", + Retriable: false, + }, + { + Code: 3, + Message: "Bitcoind error", + Retriable: false, + }, + { + Code: 4, + Message: "Block not found", + Retriable: false, + }, + }, clientConfiguration.AllowedErrors) +} diff --git a/cmd/utils_shared.go b/cmd/utils_shared.go new file mode 100644 index 00000000..97147088 --- /dev/null +++ b/cmd/utils_shared.go @@ -0,0 +1,31 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cmd + +func isEmpty(s string) bool { + return s == "" +} + +func isNegative(n int64) bool { + return n < 0 +} + +func isEqual(s1 string, s2 string) bool { + return s1 == s2 +} + +func isUTXO() bool { + return Config.CoinSupported +} diff --git a/cmd/utils_train_zstd.go b/cmd/utils_train_zstd.go index d1bec9af..294436f9 100644 --- a/cmd/utils_train_zstd.go +++ b/cmd/utils_train_zstd.go @@ -15,12 +15,14 @@ package cmd import ( - "context" + "fmt" "log" "path" "strconv" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/encoder" + "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -43,25 +45,23 @@ The arguments for this command are: You can learn more about dictionary compression on the Zstandard website: https://github.com/facebook/zstd#the-case-for-small-data-compression`, - Run: runTrainZstdCmd, + RunE: runTrainZstdCmd, Args: cobra.MinimumNArgs(trainArgs), } ) -func runTrainZstdCmd(cmd *cobra.Command, args []string) { - ctx := context.Background() - +func runTrainZstdCmd(cmd *cobra.Command, args []string) error { namespace := args[0] databasePath := path.Clean(args[1]) dictionaryPath := path.Clean(args[2]) maxItems, err := strconv.Atoi(args[3]) if err != nil { - log.Fatalf("%s: unable to convert max items to integer", err.Error()) + return fmt.Errorf("unable to convert max items to integer: %w", err) } - compressorEntries := []*storage.CompressorEntry{} + compressorEntries := []*encoder.CompressorEntry{} if len(args) > trainArgs { - compressorEntries = append(compressorEntries, &storage.CompressorEntry{ + compressorEntries = append(compressorEntries, &encoder.CompressorEntry{ Namespace: namespace, DictionaryPath: args[4], }) @@ -71,8 +71,8 @@ func runTrainZstdCmd(cmd *cobra.Command, args []string) { log.Printf("Running zstd training (this could take a while)...") - _, _, err = storage.BadgerTrain( - ctx, + _, _, err = database.BadgerTrain( + Context, namespace, databasePath, dictionaryPath, @@ -80,6 +80,9 @@ func runTrainZstdCmd(cmd *cobra.Command, args []string) { compressorEntries, ) if err != nil { - log.Fatal(err) + return fmt.Errorf("badger training failed: %w", err) } + + color.Green("Training successful!") + return nil } diff --git a/cmd/validate_asserter_config.go b/cmd/validate_asserter_config.go new file mode 100644 index 00000000..8fddeecd --- /dev/null +++ b/cmd/validate_asserter_config.go @@ -0,0 +1,166 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cmd + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-sdk-go/asserter" + "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/types" + "github.com/coinbase/rosetta-sdk-go/utils" +) + +// Common helper across Construction and Data +// Issues an RPC to fetch /network/options, and extracts the `Allow` +// Reads the JSON file at `asserterConfigurationFile` and loads into a Go object +// Validates the `Allow`s across both objects match +func validateNetworkOptionsMatchesAsserterConfiguration( + ctx context.Context, f *fetcher.Fetcher, network *types.NetworkIdentifier, + asserterConfigurationFile string, +) error { + var asserterConfiguration asserter.Configuration + if err := utils.LoadAndParse(asserterConfigurationFile, &asserterConfiguration); err != nil { + return fmt.Errorf("failed to load and parse asserter configuration file %s: %w", asserterConfigurationFile, err) + } + + resp, fetchErr := f.NetworkOptions(ctx, network, nil) + if fetchErr != nil { + return fmt.Errorf("failed to get network options: %w", fetchErr.Err) + } + + err := validateNetworkAndAsserterAllowMatch(resp.Allow, &asserterConfiguration) + if err != nil { + return fmt.Errorf("failed to validate network options allowlist with asserter configuration: %w", err) + } + + return nil +} + +func validateNetworkAndAsserterAllowMatch( + networkAllow *types.Allow, asserterConfiguration *asserter.Configuration, +) error { + if networkAllow == nil { + return cliErrs.ErrNetworkOptionsAllowlistIsNil + } + if asserterConfiguration == nil { + return cliErrs.ErrAsserterConfigurationIsNil + } + + if err := verifyTimestampStartIndex( + networkAllow.TimestampStartIndex, asserterConfiguration.AllowedTimestampStartIndex, + ); err != nil { + return fmt.Errorf("failed to verify timestamp start index: %w", err) + } + + if err := verifyOperationTypes( + networkAllow.OperationTypes, asserterConfiguration.AllowedOperationTypes, + ); err != nil { + return fmt.Errorf("failed to verify operation types: %w", err) + } + + if err := verifyOperationStatuses( + networkAllow.OperationStatuses, asserterConfiguration.AllowedOperationStatuses, + ); err != nil { + return fmt.Errorf("failed to verify operation statuses: %w", err) + } + + if err := verifyErrors( + networkAllow.Errors, asserterConfiguration.AllowedErrors, + ); err != nil { + return fmt.Errorf("failed to verify errors: %w", err) + } + + return nil +} + +func verifyTimestampStartIndex(networkTsi *int64, assertTsi int64) error { + var networkTsiVal int64 = 1 + if networkTsi != nil { // This field is optional and defaults to all allowed + networkTsiVal = *networkTsi + } + if networkTsiVal != assertTsi { + return fmt.Errorf("network options timestamp start index %d, asserter configuration timestamp start index %d: %w", networkTsiVal, assertTsi, cliErrs.ErrTimestampStartIndexMismatch) + } + + return nil +} + +func verifyOperationTypes(networkOt, asserterOt []string) error { + if len(networkOt) != len(asserterOt) { + return fmt.Errorf("network options operation type length %d, asserter configuration operation type length %d: %w", len(networkOt), len(asserterOt), cliErrs.ErrOperationTypeLengthMismatch) + } + + sort.Strings(networkOt) + sort.Strings(asserterOt) + + for i, networkOperationType := range networkOt { + asserterOperationType := asserterOt[i] + if networkOperationType != asserterOperationType { + return fmt.Errorf("network options operation type %s, 
asserter configuration operation type %s: %w", networkOperationType, asserterOperationType, cliErrs.ErrOperationTypeMismatch) + } + } + + return nil +} + +func verifyOperationStatuses(networkOs, asserterOs []*types.OperationStatus) error { + if len(networkOs) != len(asserterOs) { + return fmt.Errorf("network options operation status length %d, asserter configuration operation status length %d: %w", len(networkOs), len(asserterOs), cliErrs.ErrOperationStatusLengthMismatch) + } + + sort.Slice(networkOs, func(i, j int) bool { + return strings.Compare(networkOs[i].Status, networkOs[j].Status) < 0 + }) + sort.Slice(asserterOs, func(i, j int) bool { + return strings.Compare(asserterOs[i].Status, asserterOs[j].Status) < 0 + }) + + for i, networkOperationStatus := range networkOs { + asserterOperationStatus := asserterOs[i] + if !reflect.DeepEqual(networkOperationStatus, asserterOperationStatus) { + return fmt.Errorf("network options operation type %s, asserter configuration operation type %s: %w", types.PrintStruct(networkOperationStatus), types.PrintStruct(asserterOperationStatus), cliErrs.ErrOperationStatusMismatch) + } + } + + return nil +} + +func verifyErrors(networkErrors, asserterErrors []*types.Error) error { + if len(networkErrors) != len(asserterErrors) { + return fmt.Errorf("network options error length %d, asserter configuration error length %d: %w", len(networkErrors), len(asserterErrors), cliErrs.ErrErrorLengthMismatch) + } + + sort.Slice(networkErrors, func(i, j int) bool { + return networkErrors[i].Code < networkErrors[j].Code + }) + sort.Slice(asserterErrors, func(i, j int) bool { + return asserterErrors[i].Code < asserterErrors[j].Code + }) + + for i, networkError := range networkErrors { + asserterError := asserterErrors[i] + if !reflect.DeepEqual(networkError, asserterError) { + return fmt.Errorf("network options error %s, asserter configuration error %s: %w", types.PrintStruct(networkError), types.PrintStruct(asserterError), cliErrs.ErrErrorMismatch) + } + } + + return nil +} diff --git a/cmd/validate_asserter_config_test.go b/cmd/validate_asserter_config_test.go new file mode 100644 index 00000000..2a646056 --- /dev/null +++ b/cmd/validate_asserter_config_test.go @@ -0,0 +1,162 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
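Both check:data and check:construction opt into this validation through the --asserter-configuration-file flag registered in root.go. A sketch of the expected call site inside a check command, illustrative only (f stands for an already initialized online *fetcher.Fetcher, and the error wrapping is an assumption):

	// Skip the validation entirely when no asserter configuration file was given.
	if asserterConfigurationFile != "" {
		if err := validateNetworkOptionsMatchesAsserterConfiguration(
			Context,
			f,
			Config.Network,
			asserterConfigurationFile,
		); err != nil {
			return fmt.Errorf("asserter configuration validation failed: %w", err)
		}
	}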
+ +package cmd + +import ( + "github.com/coinbase/rosetta-sdk-go/asserter" + "github.com/coinbase/rosetta-sdk-go/types" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestMatch(t *testing.T) { + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + confirmSuccess(t, networkAllow, asserterConfiguration) +} + +func TestNil(t *testing.T) { + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + confirmError(t, nil, asserterConfiguration) + confirmError(t, networkAllow, nil) +} + +func TestTsi(t *testing.T) { + // Confirm nil defaults to 1 + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + networkAllow.TimestampStartIndex = nil + confirmError(t, networkAllow, asserterConfiguration) + asserterConfiguration.AllowedTimestampStartIndex = 1 + confirmSuccess(t, networkAllow, asserterConfiguration) + + networkAllow, asserterConfiguration = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedTimestampStartIndex = 567 + confirmError(t, networkAllow, asserterConfiguration) +} + +func TestOperationTypes(t *testing.T) { + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + networkAllow.OperationTypes[1] = "mismatchType" + confirmError(t, networkAllow, asserterConfiguration) + + networkAllow.OperationTypes = append(generateOperationTypes(), "extra") + confirmError(t, networkAllow, asserterConfiguration) + + networkAllow, _ = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedOperationTypes = nil + confirmError(t, networkAllow, asserterConfiguration) +} + +func TestOperationStatuses(t *testing.T) { + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + networkAllow.OperationStatuses[0].Successful = !networkAllow.OperationStatuses[0].Successful + confirmError(t, networkAllow, asserterConfiguration) + + networkAllow, _ = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedOperationStatuses[1].Status = "mismatchStatus" + confirmError(t, networkAllow, asserterConfiguration) + + _, asserterConfiguration = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedOperationStatuses = append(generateOperationStatuses(), + &types.OperationStatus{Status: "extra"}) + confirmError(t, networkAllow, asserterConfiguration) + + _, asserterConfiguration = generateNetworkAllowAndAsserterConfiguration() + networkAllow.OperationStatuses = nil + confirmError(t, networkAllow, asserterConfiguration) +} + +func TestErrors(t *testing.T) { + networkAllow, asserterConfiguration := generateNetworkAllowAndAsserterConfiguration() + networkAllow.Errors[0].Code = 123 + confirmError(t, networkAllow, asserterConfiguration) + + networkAllow, _ = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedErrors[1].Message = "mismatchMessage" + confirmError(t, networkAllow, asserterConfiguration) + + _, asserterConfiguration = generateNetworkAllowAndAsserterConfiguration() + networkAllow.Errors[0].Details = map[string]interface{}{"key": "value"} + asserterConfiguration.AllowedErrors[0].Details = map[string]interface{}{"key": "differentValue"} + confirmError(t, networkAllow, asserterConfiguration) + + networkAllow, asserterConfiguration = generateNetworkAllowAndAsserterConfiguration() + asserterConfiguration.AllowedErrors = append(asserterConfiguration.AllowedErrors, + &types.Error{Code: 123, Message: "extra"}) + confirmError(t, 
networkAllow, asserterConfiguration) +} + +// Generate simple configs for testing. +// The generator functions are called separately below so the two configs are logically equal but can be mutated independently. +func generateNetworkAllowAndAsserterConfiguration() ( + *types.Allow, *asserter.Configuration, +) { + var tsi int64 = 5 + allow := &types.Allow{ + OperationStatuses:   generateOperationStatuses(), + OperationTypes:      generateOperationTypes(), + Errors:              generateErrors(), + TimestampStartIndex: &tsi, + } + config := &asserter.Configuration{ + AllowedOperationStatuses:   generateOperationStatuses(), + AllowedOperationTypes:      generateOperationTypes(), + AllowedErrors:              generateErrors(), + AllowedTimestampStartIndex: tsi, + } + + return allow, config +} + +func generateOperationTypes() []string { + return []string{"type0", "type1"} +} + +func generateOperationStatuses() []*types.OperationStatus { + return []*types.OperationStatus{ + { + Successful: true, + Status: "status0", + }, + { + // Successful: false + Status: "status1", + }, + } +} + +func generateErrors() []*types.Error { + return []*types.Error{ + { + Code: 1, + Message: "message1", + }, + { + Code: 2, + Message: "message2", + }, + } +} + +func confirmSuccess( + t *testing.T, networkAllow *types.Allow, asserterConfiguration *asserter.Configuration, +) { + assert.NoError(t, validateNetworkAndAsserterAllowMatch(networkAllow, asserterConfiguration)) +} + +func confirmError( + t *testing.T, networkAllow *types.Allow, asserterConfiguration *asserter.Configuration, +) { + assert.Error(t, validateNetworkAndAsserterAllowMatch(networkAllow, asserterConfiguration)) +} diff --git a/cmd/view_account.go b/cmd/view_balance.go similarity index 63% rename from cmd/view_account.go rename to cmd/view_balance.go index fb50fa42..72642452 100644 --- a/cmd/view_account.go +++ b/cmd/view_balance.go @@ -15,7 +15,6 @@ package cmd import ( - "context" "encoding/json" "fmt" "log" @@ -31,73 +30,82 @@ import ( var ( viewAccountCmd = &cobra.Command{ - Use: "view:account", + Use: "view:balance", Short: "View an account balance", Long: `While debugging, it is often useful to inspect the state of an account at a certain block. This command allows you to look up any account by providing a JSON representation of a types.AccountIdentifier (and optionally a height to perform the query). -For example, you could run view:account '{"address":"interesting address"}' 1000 +For example, you could run view:balance '{"address":"interesting address"}' 1000 to lookup the balance of an interesting address at block 1000.
Allowing the address to specified as JSON allows for querying by SubAccountIdentifier.`, - Run: runViewAccountCmd, + RunE: runViewBalanceCmd, Args: cobra.MinimumNArgs(1), } ) -func runViewAccountCmd(cmd *cobra.Command, args []string) { - ctx := context.Background() - +func runViewBalanceCmd(cmd *cobra.Command, args []string) error { account := &types.AccountIdentifier{} if err := json.Unmarshal([]byte(args[0]), account); err != nil { - log.Fatal(fmt.Errorf("%w: unable to unmarshal account %s", err, args[0])) + return fmt.Errorf("unable to unmarshal account %s: %w", args[0], err) } if err := asserter.AccountIdentifier(account); err != nil { - log.Fatal(fmt.Errorf("%w: invalid account identifier %+v", err, account)) + return fmt.Errorf("invalid account identifier %s: %w", types.PrintStruct(account), err) } // Create a new fetcher + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + } + if Config.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } + newFetcher := fetcher.New( Config.OnlineURL, - fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), - fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcherOpts..., ) // Initialize the fetcher's asserter - _, _, fetchErr := newFetcher.InitializeAsserter(ctx, Config.Network) + _, _, fetchErr := newFetcher.InitializeAsserter(Context, Config.Network, Config.ValidationFile) if fetchErr != nil { - log.Fatal(fetchErr.Err) + return fmt.Errorf("unable to initialize asserter for fetcher: %w", fetchErr.Err) } - _, err := utils.CheckNetworkSupported(ctx, Config.Network, newFetcher) + _, err := utils.CheckNetworkSupported(Context, Config.Network, newFetcher) if err != nil { - log.Fatalf("%s: unable to confirm network is supported", err.Error()) + return fmt.Errorf("unable to confirm network %s is supported: %w", types.PrintStruct(Config.Network), err) } var lookupBlock *types.PartialBlockIdentifier if len(args) > 1 { index, err := strconv.ParseInt(args[1], 10, 64) if err != nil { - log.Fatal(fmt.Errorf("%w: unable to parse index %s", err, args[0])) + return fmt.Errorf("unable to parse index %s: %w", args[0], err) } lookupBlock = &types.PartialBlockIdentifier{Index: &index} } - block, amounts, coins, metadata, fetchErr := newFetcher.AccountBalanceRetry( - ctx, + block, amounts, metadata, fetchErr := newFetcher.AccountBalanceRetry( + Context, Config.Network, account, lookupBlock, + nil, ) if fetchErr != nil { - log.Fatal(fmt.Errorf("%w: unable to fetch account %+v", fetchErr.Err, account)) + return fmt.Errorf("unable to fetch account balance for account %s: %w", types.PrintStruct(account), fetchErr.Err) } log.Printf("Amounts: %s\n", types.PrettyPrintStruct(amounts)) - log.Printf("Coins: %s\n", types.PrettyPrintStruct(coins)) log.Printf("Metadata: %s\n", types.PrettyPrintStruct(metadata)) log.Printf("Balance Fetched At: %s\n", types.PrettyPrintStruct(block)) + + return nil } diff --git a/cmd/view_block.go b/cmd/view_block.go index b0287c32..57834c46 100644 --- a/cmd/view_block.go +++ b/cmd/view_block.go @@ -15,16 +15,17 @@ package cmd import ( - "context" "fmt" - "log" "strconv" "time" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/parser" 
"github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -41,23 +42,52 @@ of the block is correct before printing. If this command errors, it is likely because the block you are trying to fetch is formatted incorrectly.`, - Run: runViewBlockCmd, + RunE: runViewBlockCmd, Args: cobra.ExactArgs(1), } ) -func runViewBlockCmd(cmd *cobra.Command, args []string) { - ctx := context.Background() +func printChanges(balanceChanges []*parser.BalanceChange) error { + for _, balanceChange := range balanceChanges { + parsedDiff, err := types.BigInt(balanceChange.Difference) + if err != nil { + return fmt.Errorf("unable to parse balance change difference: %w", err) + } + + if parsedDiff.Sign() == 0 { + continue + } + + fmt.Println( + types.PrintStruct(balanceChange.Account), + "->", + utils.PrettyAmount(parsedDiff, balanceChange.Currency), + ) + } + + return nil +} + +func runViewBlockCmd(_ *cobra.Command, args []string) error { index, err := strconv.ParseInt(args[0], 10, 64) if err != nil { - log.Fatal(fmt.Errorf("%w: unable to parse index %s", err, args[0])) + return fmt.Errorf("unable to parse index %s: %w", args[0], err) } // Create a new fetcher + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + } + if Config.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } + newFetcher := fetcher.New( Config.OnlineURL, - fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), - fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcherOpts..., ) // Initialize the fetcher's asserter @@ -65,14 +95,14 @@ func runViewBlockCmd(cmd *cobra.Command, args []string) { // Behind the scenes this makes a call to get the // network status and uses the response to inform // the asserter what are valid responses. - _, _, fetchErr := newFetcher.InitializeAsserter(ctx, Config.Network) + _, _, fetchErr := newFetcher.InitializeAsserter(Context, Config.Network, Config.ValidationFile) if fetchErr != nil { - log.Fatal(fetchErr.Err) + return fmt.Errorf("unable to initialize asserter for fetcher: %w", fetchErr.Err) } - _, err = utils.CheckNetworkSupported(ctx, Config.Network, newFetcher) + _, err = utils.CheckNetworkSupported(Context, Config.Network, newFetcher) if err != nil { - log.Fatalf("%s: unable to confirm network is supported", err.Error()) + return fmt.Errorf("unable to confirm network %s is supported: %w", types.PrintStruct(Config.Network), err) } // Fetch the specified block with retries (automatically @@ -85,34 +115,76 @@ func runViewBlockCmd(cmd *cobra.Command, args []string) { // to fully populate the block by fetching all these // transactions. 
block, fetchErr := newFetcher.BlockRetry( - ctx, + Context, Config.Network, &types.PartialBlockIdentifier{ Index: &index, }, ) if fetchErr != nil { - log.Fatal(fmt.Errorf("%w: unable to fetch block", fetchErr.Err)) + return fmt.Errorf("unable to fetch block %d: %w", index, fetchErr.Err) + } + // It's valid for a block to be omitted without triggering an error + if block == nil { + return cliErrs.ErrBlockNotFound } - log.Printf("Current Block: %s\n", types.PrettyPrintStruct(block)) + fmt.Printf("\n") + if !OnlyChanges { + color.Cyan("Current Block:") + fmt.Println(types.PrettyPrintStruct(block)) + } // Print out all balance changes in a given block. This does NOT exempt // any operations/accounts from parsing. - p := parser.New(newFetcher.Asserter, func(*types.Operation) bool { return false }) - changes, err := p.BalanceChanges(ctx, block, false) + color.Cyan("Balance Changes:") + p := parser.New(newFetcher.Asserter, func(*types.Operation) bool { return false }, nil) + balanceChanges, err := p.BalanceChanges(Context, block, false) if err != nil { - log.Fatal(fmt.Errorf("%w: unable to calculate balance changes", err)) + return fmt.Errorf("unable to calculate balance changes: %w", err) + } + + fmt.Println("Cumulative:", block.BlockIdentifier.Hash) + + if err := printChanges(balanceChanges); err != nil { + return err } - log.Printf("Balance Changes: %s\n", types.PrettyPrintStruct(changes)) + fmt.Printf("\n") - // Print out all OperationGroups for each transaction in a block. + // Print out balance changes by transaction hash + // + // TODO: modify parser to allow for calculating balance + // changes for a single transaction. for _, tx := range block.Transactions { - log.Printf( - "Transaction %s Operation Groups: %s\n", - tx.TransactionIdentifier.Hash, - types.PrettyPrintStruct(parser.GroupOperations(tx)), - ) + balanceChanges, err := p.BalanceChanges(Context, &types.Block{ + Transactions: []*types.Transaction{ + tx, + }, + }, false) + if err != nil { + return fmt.Errorf("unable to calculate balance changes: %w", err) + } + + fmt.Println("Transaction:", tx.TransactionIdentifier.Hash) + + if err := printChanges(balanceChanges); err != nil { + return err + } + fmt.Printf("\n") } + + if !OnlyChanges { + // Print out all OperationGroups for each transaction in a block. + color.Cyan("Operation Groups:") + for _, tx := range block.Transactions { + fmt.Printf( + "Transaction %s Operation Groups: %s\n", + tx.TransactionIdentifier.Hash, + types.PrettyPrintStruct(parser.GroupOperations(tx)), + ) + } + } + + return nil } diff --git a/cmd/view_networks.go b/cmd/view_networks.go index 74e2d549..375e4db3 100644 --- a/cmd/view_networks.go +++ b/cmd/view_networks.go @@ -15,10 +15,12 @@ package cmd import ( - "context" + "fmt" "log" "time" + "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/types" "github.com/fatih/color" @@ -35,51 +37,60 @@ status from all available networks and prints it to the terminal. 
If this command errors, it is likely because the /network/* endpoints are not formatted correctly.`, - Run: runViewNetworksCmd, + RunE: runViewNetworksCmd, } ) -func runViewNetworksCmd(cmd *cobra.Command, args []string) { - ctx := context.Background() +func runViewNetworksCmd(cmd *cobra.Command, args []string) error { + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(Config.MaxOnlineConnections), + fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime) * time.Second), + fetcher.WithTimeout(time.Duration(Config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(Config.MaxRetries), + } + if Config.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } f := fetcher.New( Config.OnlineURL, - fetcher.WithRetryElapsedTime(time.Duration(Config.RetryElapsedTime)*time.Second), - fetcher.WithTimeout(time.Duration(Config.HTTPTimeout)*time.Second), + fetcherOpts..., ) // Attempt to fetch network list - networkList, fetchErr := f.NetworkListRetry(ctx, nil) + networkList, fetchErr := f.NetworkListRetry(Context, nil) if fetchErr != nil { - log.Fatalf("%s: unable to fetch network list", fetchErr.Err.Error()) + return fmt.Errorf("unable to get network list: %w", fetchErr.Err) } if len(networkList.NetworkIdentifiers) == 0 { - log.Fatal("no networks available") + return errors.ErrNoAvailableNetwork } for _, network := range networkList.NetworkIdentifiers { color.Cyan(types.PrettyPrintStruct(network)) networkOptions, fetchErr := f.NetworkOptions( - ctx, + Context, network, nil, ) if fetchErr != nil { - log.Fatalf("%s: unable to get network options", fetchErr.Err.Error()) + return fmt.Errorf("unable to get network options: %w", fetchErr.Err) } log.Printf("Network options: %s\n", types.PrettyPrintStruct(networkOptions)) networkStatus, fetchErr := f.NetworkStatusRetry( - ctx, + Context, network, nil, ) if fetchErr != nil { - log.Fatalf("%s: unable to get network status", fetchErr.Err.Error()) + return fmt.Errorf("unable to get network status: %w", fetchErr.Err) } log.Printf("Network status: %s\n", types.PrettyPrintStruct(networkStatus)) } + + return nil } diff --git a/configuration/configuration.go b/configuration/configuration.go index 997c3bf8..0e4fd965 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -15,126 +15,23 @@ package configuration import ( + "context" "encoding/hex" - "errors" "fmt" "log" + "path" + "runtime" + "strings" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" "github.com/coinbase/rosetta-sdk-go/asserter" + "github.com/coinbase/rosetta-sdk-go/constructor/dsl" "github.com/coinbase/rosetta-sdk-go/constructor/job" - "github.com/coinbase/rosetta-sdk-go/storage" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/fatih/color" ) -// CheckDataEndCondition is a type of "successful" end -// for the "check:data" method. -type CheckDataEndCondition string - -const ( - // IndexEndCondition is used to indicate that the index end condition - // has been met. - IndexEndCondition CheckDataEndCondition = "Index End Condition" - - // DurationEndCondition is used to indicate that the duration - // end condition has been met. - DurationEndCondition CheckDataEndCondition = "Duration End Condition" - - // TipEndCondition is used to indicate that the tip end condition - // has been met. - TipEndCondition CheckDataEndCondition = "Tip End Condition" - - // ReconciliationCoverageEndCondition is used to indicate that the reconciliation - // coverage end condition has been met. 
- ReconciliationCoverageEndCondition CheckDataEndCondition = "Reconciliation Coverage End Condition" -) - -// Default Configuration Values -const ( - DefaultURL = "http://localhost:8080" - DefaultSyncConcurrency = 8 - DefaultTransactionConcurrency = 16 - DefaultActiveReconciliationConcurrency = 16 - DefaultInactiveReconciliationConcurrency = 4 - DefaultInactiveReconciliationFrequency = 250 - DefaultTimeout = 10 - DefaultRetryElapsedTime = 60 - DefaultConfirmationDepth = 10 - DefaultStaleDepth = 30 - DefaultBroadcastLimit = 3 - DefaultTipDelay = 300 - DefaultBlockBroadcastLimit = 5 - - // ETH Defaults - EthereumIDBlockchain = "Ethereum" - EthereumIDNetwork = "Ropsten" -) - -// Default Configuration Values -var ( - EthereumNetwork = &types.NetworkIdentifier{ - Blockchain: EthereumIDBlockchain, - Network: EthereumIDNetwork, - } -) - -// ConstructionConfiguration contains all configurations -// to run check:construction. -type ConstructionConfiguration struct { - // OfflineURL is the URL of a Rosetta API implementation in "offline mode". - OfflineURL string `json:"offline_url"` - - // StaleDepth is the number of blocks to wait before attempting - // to rebroadcast after not finding a transaction on-chain. - StaleDepth int64 `json:"stale_depth"` - - // BroadcastLimit is the number of times to attempt re-broadcast - // before giving up on a transaction broadcast. - BroadcastLimit int `json:"broadcast_limit"` - - // IgnoreBroadcastFailures determines if we should exit when there - // are broadcast failures (that surpass the BroadcastLimit). - IgnoreBroadcastFailures bool `json:"ignore_broadcast_failures"` - - // ClearBroadcasts indicates if all pending broadcasts should - // be removed from BroadcastStorage on restart. - ClearBroadcasts bool `json:"clear_broadcasts"` - - // BroadcastBehindTip indicates if we should broadcast transactions - // when we are behind tip (as defined by TipDelay). - BroadcastBehindTip bool `json:"broadcast_behind_tip"` - - // BlockBroadcastLimit is the number of transactions to attempt - // broadcast in a single block. When there are many pending - // broadcasts, it may make sense to limit the number of broadcasts. - BlockBroadcastLimit int `json:"block_broadcast_limit"` - - // RebroadcastAll indicates if all pending broadcasts should be - // rebroadcast from BroadcastStorage on restart. - RebroadcastAll bool `json:"rebroadcast_all"` - - // PrefundedAccounts is an array of prefunded accounts - // to use while testing. - PrefundedAccounts []*storage.PrefundedAccount `json:"prefunded_accounts,omitempty"` - - // Workflows are executed by the rosetta-cli to test - // certain construction flows. Make sure to define a - // "request_funds" and "create_account" workflow. - Workflows []*job.Workflow `json:"workflows"` - - // EndConditions is a map of workflow:count that - // indicates how many of each workflow should be performed - // before check:construction should stop. For example, - // {"create_account": 5} indicates that 5 "create_account" - // workflows should be performed before stopping. - EndConditions map[string]int `json:"end_conditions,omitempty"` - - // ResultsOutputFile is the absolute filepath of where to save - // the results of a check:construction run. - ResultsOutputFile string `json:"results_output_file,omitempty"` -} - // DefaultDataConfiguration returns the default *DataConfiguration // for running `check:data`. 
func DefaultDataConfiguration() *DataConfiguration { @@ -142,176 +39,55 @@ func DefaultDataConfiguration() *DataConfiguration { ActiveReconciliationConcurrency: DefaultActiveReconciliationConcurrency, InactiveReconciliationConcurrency: DefaultInactiveReconciliationConcurrency, InactiveReconciliationFrequency: DefaultInactiveReconciliationFrequency, + StatusPort: DefaultStatusPort, + } +} + +// DefaultPerfConfiguration returns the default *CheckPerfConfiguration +// for running `check:perf`. +func DefaultPerfConfiguration() *CheckPerfConfiguration { + return &CheckPerfConfiguration{ + StartBlock: 10, + BlockEndpointTimeConstraintMs: 50000000, + AccountBalanceEndpointTimeConstraintMs: 50000000, + EndBlock: 50, + NumTimesToHitEndpoints: 1, + StatsOutputFile: "./check_perf_stats.json", } } // DefaultConfiguration returns a *Configuration with the -// EthereumNetwork, DefaultURL, DefaultTimeout, -// DefaultConstructionConfiguration and DefaultDataConfiguration. +// EthereumNetwork, DefaultURL, DefaultTimeout, and DefaultDataConfiguration. func DefaultConfiguration() *Configuration { return &Configuration{ - Network: EthereumNetwork, - OnlineURL: DefaultURL, - HTTPTimeout: DefaultTimeout, - RetryElapsedTime: DefaultRetryElapsedTime, - SyncConcurrency: DefaultSyncConcurrency, - TransactionConcurrency: DefaultTransactionConcurrency, - TipDelay: DefaultTipDelay, - Data: DefaultDataConfiguration(), + Network: EthereumNetwork, + OnlineURL: DefaultURL, + MaxOnlineConnections: DefaultMaxOnlineConnections, + HTTPTimeout: DefaultTimeout, + MaxRetries: DefaultMaxRetries, + MaxSyncConcurrency: DefaultMaxSyncConcurrency, + TipDelay: DefaultTipDelay, + MaxReorgDepth: DefaultMaxReorgDepth, + Data: DefaultDataConfiguration(), } } -// DataEndConditions contains all the conditions for the syncer to stop -// when running check:data. -type DataEndConditions struct { - // Index configures the syncer to stop once reaching a particular block height. - Index *int64 `json:"index,omitempty"` - - // Tip configures the syncer to stop once it reached the tip. - // Make sure to configure `tip_delay` if you use this end - // condition. - Tip *bool `json:"tip,omitempty"` - - // Duration configures the syncer to stop after running - // for Duration seconds. - Duration *uint64 `json:"duration,omitempty"` - - // ReconciliationCoverage configures the syncer to stop - // once it has reached tip AND some proportion of - // all addresses have been reconciled at an index >= - // to when tip was first reached. The range of inputs - // for this condition are [0.0, 1.0]. - ReconciliationCoverage *float64 `json:"reconciliation_coverage,omitempty"` -} - -// DataConfiguration contains all configurations to run check:data. -type DataConfiguration struct { - // ActiveReconciliationConcurrency is the concurrency to use while fetching accounts - // during active reconciliation. - ActiveReconciliationConcurrency uint64 `json:"active_reconciliation_concurrency"` - - // InactiveReconciliationConcurrency is the concurrency to use while fetching accounts - // during inactive reconciliation. - InactiveReconciliationConcurrency uint64 `json:"inactive_reconciliation_concurrency"` - - // InactiveReconciliationFrequency is the number of blocks to wait between - // inactive reconiliations on each account. - InactiveReconciliationFrequency uint64 `json:"inactive_reconciliation_frequency"` - - // LogBlocks is a boolean indicating whether to log processed blocks. 
- LogBlocks bool `json:"log_blocks"` - - // LogTransactions is a boolean indicating whether to log processed transactions. - LogTransactions bool `json:"log_transactions"` - - // LogBalanceChanges is a boolean indicating whether to log all balance changes. - LogBalanceChanges bool `json:"log_balance_changes"` - - // LogReconciliations is a boolean indicating whether to log all reconciliations. - LogReconciliations bool `json:"log_reconciliations"` - - // IgnoreReconciliationError determines if block processing should halt on a reconciliation - // error. It can be beneficial to collect all reconciliation errors or silence - // reconciliation errors during development. - IgnoreReconciliationError bool `json:"ignore_reconciliation_error"` - - // ExemptAccounts is a path to a file listing all accounts to exempt from balance - // tracking and reconciliation. Look at the examples directory for an example of - // how to structure this file. - ExemptAccounts string `json:"exempt_accounts"` - - // BootstrapBalances is a path to a file used to bootstrap balances - // before starting syncing. If this value is populated after beginning syncing, - // it will be ignored. - BootstrapBalances string `json:"bootstrap_balances"` - - // HistoricalBalanceDisabled is a boolean that dictates how balance lookup is performed. - // When set to true, balances are looked up at the block where a balance - // change occurred instead of at the current block. Blockchains that do not support - // historical balance lookup should set this to false. - HistoricalBalanceDisabled bool `json:"historical_balance_disabled"` - - // InterestingAccounts is a path to a file listing all accounts to check on each block. Look - // at the examples directory for an example of how to structure this file. - InterestingAccounts string `json:"interesting_accounts"` - - // ReconciliationDisabled is a boolean that indicates reconciliation should not - // be attempted. When first testing an implementation, it can be useful to disable - // some of the more advanced checks to confirm syncing is working as expected. - ReconciliationDisabled bool `json:"reconciliation_disabled"` - - // InactiveDiscrepencySearchDisabled is a boolean indicating if a search - // should be performed to find any inactive reconciliation discrepencies. - // Note, a search will never be performed if historical balance lookup - // is disabled. - InactiveDiscrepencySearchDisabled bool `json:"inactive_discrepency_search_disabled"` - - // BalanceTrackingDisabled is a boolean that indicates balances calculation - // should not be attempted. When first testing an implemenation, it can be - // useful to just try to fetch all blocks before checking for balance - // consistency. - BalanceTrackingDisabled bool `json:"balance_tracking_disabled"` - - // CoinTrackingDisabled is a boolean that indicates coin (or UTXO) tracking - // should not be attempted. When first testing an implemenation, it can be - // useful to just try to fetch all blocks before checking for coin - // consistency. - CoinTrackingDisabled bool `json:"coin_tracking_disabled"` - - // StartIndex is the block height to start syncing from. If no StartIndex - // is provided, syncing will start from the last saved block. - // If no blocks have ever been synced, syncing will start from genesis. 
- StartIndex *int64 `json:"start_index,omitempty"` - - // EndCondition contains the conditions for the syncer to stop - EndConditions *DataEndConditions `json:"end_conditions,omitempty"` - - // ResultsOutputFile is the absolute filepath of where to save - // the results of a check:data run. - ResultsOutputFile string `json:"results_output_file"` -} - -// Configuration contains all configuration settings for running -// check:data or check:construction. -type Configuration struct { - // Network is the *types.NetworkIdentifier where transactions should - // be constructed and where blocks should be synced to monitor - // for broadcast success. - Network *types.NetworkIdentifier `json:"network"` - - // OnlineURL is the URL of a Rosetta API implementation in "online mode". - OnlineURL string `json:"online_url"` - - // DataDirectory is a folder used to store logs and any data used to perform validation. - DataDirectory string `json:"data_directory"` - - // HTTPTimeout is the timeout for a HTTP request in seconds. - HTTPTimeout uint64 `json:"http_timeout"` - - // RetryElapsedTime is the total time to spend retrying a HTTP request in seconds. - RetryElapsedTime uint64 `json:"retry_elapsed_time"` - - // SyncConcurrency is the concurrency to use while syncing blocks. - SyncConcurrency uint64 `json:"sync_concurrency"` - - // TransactionConcurrency is the concurrency to use while fetching transactions (if required). - TransactionConcurrency uint64 `json:"transaction_concurrency"` - - // TipDelay dictates how many seconds behind the current time is considered - // tip. If we are > TipDelay seconds from the last processed block, - // we are considered to be behind tip. - TipDelay int64 `json:"tip_delay"` +func populatePerfMissingFields( + perfConfig *CheckPerfConfiguration, +) *CheckPerfConfiguration { + if perfConfig == nil { + return nil + } - // DisableMemoryLimit uses a performance-optimized database mode - // that uses more memory. - DisableMemoryLimit bool `json:"disable_memory_limit"` + if len(perfConfig.StatsOutputFile) == 0 { + perfConfig.StatsOutputFile = DefaultOutputFile + } - // LogConfiguration determines if the configuration settings - // should be printed to the console when a file is loaded. 
- LogConfiguration bool `json:"log_configuration"` + if perfConfig.NumTimesToHitEndpoints == 0 { + perfConfig.NumTimesToHitEndpoints = DefaultNumTimesToHitEndpoints + } - Construction *ConstructionConfiguration `json:"construction"` - Data *DataConfiguration `json:"data"` + return perfConfig } func populateConstructionMissingFields( @@ -325,6 +101,10 @@ func populateConstructionMissingFields( constructionConfig.OfflineURL = DefaultURL } + if constructionConfig.MaxOfflineConnections == 0 { + constructionConfig.MaxOfflineConnections = DefaultMaxOfflineConnections + } + if constructionConfig.StaleDepth == 0 { constructionConfig.StaleDepth = DefaultStaleDepth } @@ -337,6 +117,10 @@ func populateConstructionMissingFields( constructionConfig.BlockBroadcastLimit = DefaultBlockBroadcastLimit } + if constructionConfig.StatusPort == 0 { + constructionConfig.StatusPort = DefaultStatusPort + } + return constructionConfig } @@ -357,6 +141,10 @@ func populateDataMissingFields(dataConfig *DataConfiguration) *DataConfiguration dataConfig.InactiveReconciliationFrequency = DefaultInactiveReconciliationFrequency } + if dataConfig.StatusPort == 0 { + dataConfig.StatusPort = DefaultStatusPort + } + return dataConfig } @@ -377,58 +165,82 @@ func populateMissingFields(config *Configuration) *Configuration { config.HTTPTimeout = DefaultTimeout } - if config.RetryElapsedTime == 0 { - config.RetryElapsedTime = DefaultRetryElapsedTime + if config.MaxRetries == 0 { + config.MaxRetries = DefaultMaxRetries } - if config.SyncConcurrency == 0 { - config.SyncConcurrency = DefaultSyncConcurrency + if config.MaxOnlineConnections == 0 { + config.MaxOnlineConnections = DefaultMaxOnlineConnections } - if config.TransactionConcurrency == 0 { - config.TransactionConcurrency = DefaultTransactionConcurrency + if config.MaxSyncConcurrency == 0 { + config.MaxSyncConcurrency = DefaultMaxSyncConcurrency } if config.TipDelay == 0 { config.TipDelay = DefaultTipDelay } + if config.MaxReorgDepth == 0 { + config.MaxReorgDepth = DefaultMaxReorgDepth + } + + numCPU := runtime.NumCPU() + if config.SeenBlockWorkers == 0 { + config.SeenBlockWorkers = numCPU + } + + if config.SerialBlockWorkers == 0 { + config.SerialBlockWorkers = numCPU + } + + if len(strings.TrimSpace(config.ValidationFile)) == 0 { + config.ValidationFile = "" + } + config.Construction = populateConstructionMissingFields(config.Construction) config.Data = populateDataMissingFields(config.Data) + config.Perf = populatePerfMissingFields(config.Perf) return config } -func assertConstructionConfiguration(config *ConstructionConfiguration) error { +func assertConstructionConfiguration(ctx context.Context, config *ConstructionConfiguration) error { if config == nil { return nil } - seenCreateAccount := false - seenRequestFunds := false - for _, workflow := range config.Workflows { - sawReserved := false - if workflow.Name == string(job.CreateAccount) { - sawReserved = true - seenCreateAccount = true - } + if len(config.Workflows) > 0 && len(config.ConstructorDSLFile) > 0 { + return cliErrs.ErrMultipleDSLFiles + } - if workflow.Name == string(job.RequestFunds) { - seenRequestFunds = true - sawReserved = true - } + if len(config.Workflows) == 0 && len(config.ConstructorDSLFile) == 0 { + return cliErrs.ErrNoDSLFile + } - if sawReserved && workflow.Concurrency != job.ReservedWorkflowConcurrency { - return errors.New("reserved workflow must have concurrency 1") + // Compile ConstructorDSLFile and save to Workflows + if len(config.ConstructorDSLFile) > 0 { + compiledWorkflows, err := 
dsl.Parse(ctx, config.ConstructorDSLFile) + if err != nil { + err.Log() + return fmt.Errorf("DSL file is invalid, line %d, line contents %s: %w", err.Line, err.LineContents, err.Err) } - } - if !seenCreateAccount { - return errors.New("missing create_account workflow") + config.Workflows = compiledWorkflows } - if !seenRequestFunds { - return errors.New("missing request_funds workflow") + // Parse provided Workflows + for _, workflow := range config.Workflows { + if workflow.Name == string(job.CreateAccount) || workflow.Name == string(job.RequestFunds) { + if workflow.Concurrency != job.ReservedWorkflowConcurrency { + return fmt.Errorf( + "DSL file is invalid, reserved workflow %s must have concurrency %d: %w", + workflow.Name, + job.ReservedWorkflowConcurrency, + cliErrs.ErrWrongWorkflowConcurrency, + ) + } + } } for _, account := range config.PrefundedAccounts { @@ -436,36 +248,43 @@ func assertConstructionConfiguration(config *ConstructionConfiguration) error { _, err := hex.DecodeString(account.PrivateKeyHex) if err != nil { return fmt.Errorf( - "%w: private key %s is not hex encoded for prefunded account", - err, + "private key %s is not hex encoded for prefunded account: %w", account.PrivateKeyHex, + err, ) } - // Checks if valid curvetype - err = asserter.CurveType(account.CurveType) - if err != nil { - return fmt.Errorf("%w: invalid CurveType for prefunded account", err) + // Checks if valid CurveType + if err := asserter.CurveType(account.CurveType); err != nil { + return fmt.Errorf("prefunded account curve type %s is invalid: %w", types.PrintStruct(account.CurveType), err) } - // Checks if address is not empty string - if account.Address == "" { - return fmt.Errorf("Account.Address is missing for prefunded account") + // Checks if valid AccountIdentifier + if err := asserter.AccountIdentifier(account.AccountIdentifier); err != nil { + return fmt.Errorf("prefunded account identifier %s is invalid: %w", types.PrintStruct(account.AccountIdentifier), err) } - // Check if currency is valid - err = asserter.Amount(&types.Amount{Value: "0", Currency: account.Currency}) - if err != nil { - return fmt.Errorf("%w: invalid currency for prefunded account", err) + // Check if valid Currency when Currency is specified + // If Currency is not specified, the balances of all available currencies + // for the specific pre-funded account will be stored in the balance storage + if account.Currency != nil { + err = asserter.Currency(account.Currency) + if err != nil { + return fmt.Errorf("prefunded account currency %s is invalid: %w", types.PrintStruct(account.Currency), err) + } } } return nil } -func assertDataConfiguration(config *DataConfiguration) error { +func assertDataConfiguration(config *DataConfiguration) error { // nolint:gocognit if config.StartIndex != nil && *config.StartIndex < 0 { - return fmt.Errorf("start index %d cannot be negative", *config.StartIndex) + return fmt.Errorf("start index %d is invalid: %w", *config.StartIndex, cliErrs.ErrNegativeStartIndex) + } + + if !config.ReconciliationDisabled && config.BalanceTrackingDisabled { + return cliErrs.ErrBalanceTrackingIsDisabledForReconciliation } if config.EndConditions == nil { @@ -474,66 +293,128 @@ func assertDataConfiguration(config *DataConfiguration) error { if config.EndConditions.Index != nil { if *config.EndConditions.Index < 0 { - return fmt.Errorf("end index %d cannot be negative", *config.EndConditions.Index) + return fmt.Errorf("end index %d is invalid: %w", *config.EndConditions.Index, cliErrs.ErrNegativeEndIndex) } } 
if config.EndConditions.ReconciliationCoverage != nil { - coverage := *config.EndConditions.ReconciliationCoverage + coverage := config.EndConditions.ReconciliationCoverage.Coverage if coverage < 0 || coverage > 1 { - return fmt.Errorf("reconciliation coverage %f must be [0.0,1.0]", coverage) + return fmt.Errorf("reconciliation coverage %f is invalid: %w", coverage, cliErrs.ErrReconciliationOutOfRange) } - if config.BalanceTrackingDisabled { - return errors.New( - "balance tracking must be enabled for reconciliation coverage end condition", + index := config.EndConditions.ReconciliationCoverage.Index + if index != nil && *index < 0 { + return fmt.Errorf("reconciliation coverage index %d is invalid: %w", *index, cliErrs.ErrNegativeReconciliationCoverageIndex) + } + + accountCount := config.EndConditions.ReconciliationCoverage.AccountCount + if accountCount != nil && *accountCount < 0 { + return fmt.Errorf( + "reconciliation coverage account count %d is invalid: %w", + *accountCount, + cliErrs.ErrNegativeReconciliationCoverageAccountCount, ) } + if config.BalanceTrackingDisabled { + return cliErrs.ErrBalanceTrackingIsDisabledForReconciliationCoverageEndCondition + } + if config.IgnoreReconciliationError { - return errors.New( - "reconciliation errors cannot be ignored for reconciliation coverage end condition", - ) + return cliErrs.ErrReconciliationErrorIsIgnoredForReconciliationCoverageEndCondition } if config.ReconciliationDisabled { - return errors.New( - "reconciliation cannot be disabled for reconciliation coverage end condition", - ) + return cliErrs.ErrReconciliationIsDisabledForReconciliationCoverageEndCondition } } return nil } -func assertConfiguration(config *Configuration) error { +func assertConfiguration(ctx context.Context, config *Configuration) error { if err := asserter.NetworkIdentifier(config.Network); err != nil { - return fmt.Errorf("%w: invalid network identifier", err) + return fmt.Errorf("invalid network identifier %s: %w", types.PrintStruct(config.Network), err) + } + + if config.SeenBlockWorkers <= 0 { + return fmt.Errorf("the number of seen block workers %d is invalid: %w", config.SeenBlockWorkers, cliErrs.ErrNegativeSeenBlockWorkers) + } + + if config.SerialBlockWorkers <= 0 { + return fmt.Errorf("the number of serial block workers %d is invalid: %w", config.SerialBlockWorkers, cliErrs.ErrNegativeSerialBlockWorkers) + } + + if config.TableSize != nil && (*config.TableSize < 1 || *config.TableSize > 100) { + return fmt.Errorf("table size %d is invalid: %w", *config.TableSize, cliErrs.ErrTableSizeIsOutOfRange) + } + + if config.ValueLogFileSize != nil && (*config.ValueLogFileSize < 128 || *config.ValueLogFileSize > 2048) { + return fmt.Errorf("value log file size %d is invalid: %w", *config.ValueLogFileSize, cliErrs.ErrValueLogFileSizeIsOutOfRange) } if err := assertDataConfiguration(config.Data); err != nil { - return fmt.Errorf("%w: invalid data configuration", err) + return fmt.Errorf("data configuration is invalid: %w", err) } - if err := assertConstructionConfiguration(config.Construction); err != nil { - return fmt.Errorf("%w: invalid construction configuration", err) + if err := assertConstructionConfiguration(ctx, config.Construction); err != nil { + return fmt.Errorf("construction configuration is invalid: %w", err) } return nil } +// modifyFilePaths modifies a collection of filepaths in a *Configuration +// file to make them relative to the configuration file (this makes it a lot easier +// to store all config-related files in the same directory and to 
run the rosetta-cli +// from a different directory). +func modifyFilePaths(config *Configuration, fileDir string) { + if config.Data != nil { + if len(config.Data.BootstrapBalances) > 0 { + config.Data.BootstrapBalances = path.Join(fileDir, config.Data.BootstrapBalances) + } + + if len(config.Data.InterestingAccounts) > 0 { + config.Data.InterestingAccounts = path.Join(fileDir, config.Data.InterestingAccounts) + } + + if len(config.Data.ExemptAccounts) > 0 { + config.Data.ExemptAccounts = path.Join(fileDir, config.Data.ExemptAccounts) + } + } + + if config.Construction != nil { + if len(config.Construction.ConstructorDSLFile) > 0 { + config.Construction.ConstructorDSLFile = path.Join( + fileDir, + config.Construction.ConstructorDSLFile, + ) + } + } + + if len(config.ValidationFile) > 0 { + config.ValidationFile = path.Join(fileDir, config.ValidationFile) + } +} + // LoadConfiguration returns a parsed and asserted Configuration for running // tests. -func LoadConfiguration(filePath string) (*Configuration, error) { +func LoadConfiguration(ctx context.Context, filePath string) (*Configuration, error) { var configRaw Configuration if err := utils.LoadAndParse(filePath, &configRaw); err != nil { - return nil, fmt.Errorf("%w: unable to open configuration file", err) + return nil, fmt.Errorf("unable to load and parse configuration file: %w", err) } config := populateMissingFields(&configRaw) - if err := assertConfiguration(config); err != nil { - return nil, fmt.Errorf("%w: invalid configuration", err) + // Get the configuration file directory so we can load all files + // relative to the location of the configuration file. + fileDir := path.Dir(filePath) + modifyFilePaths(config, fileDir) + + if err := assertConfiguration(ctx, config); err != nil { + return nil, fmt.Errorf("configuration is invalid: %w", err) } color.Cyan( diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 5ec55bf6..ecf88b49 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -15,31 +15,46 @@ package configuration import ( - "io/ioutil" - "os" + "context" + "os/exec" + "path" + "runtime" "testing" "github.com/coinbase/rosetta-sdk-go/constructor/job" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/stretchr/testify/assert" ) var ( - startIndex = int64(89) - badStartIndex = int64(-10) - goodCoverage = float64(0.33) - badCoverage = float64(-2) - endTip = false - fakeWorkflows = []*job.Workflow{ + startIndex = int64(89) + badStartIndex = int64(-10) + goodCoverage = float64(0.33) + badCoverage = float64(-2) + endTip = false + historicalDisabled = false + fakeWorkflows = []*job.Workflow{ { Name: string(job.CreateAccount), Concurrency: job.ReservedWorkflowConcurrency, + Scenarios: []*job.Scenario{ + { + Name: "blah", + Actions: []*job.Action{}, + }, + }, }, { Name: string(job.RequestFunds), Concurrency: job.ReservedWorkflowConcurrency, + Scenarios: []*job.Scenario{ + { + Name: "blah", + Actions: []*job.Action{}, + }, + }, }, } whackyConfig = &Configuration{ @@ -47,17 +62,23 @@ var ( Blockchain: "sweet", Network: "sweeter", }, - OnlineURL: "http://hasudhasjkdk", - HTTPTimeout: 21, - RetryElapsedTime: 1000, - SyncConcurrency: 12, - TransactionConcurrency: 2, - TipDelay: 1231, + OnlineURL: "http://hasudhasjkdk", + MaxOnlineConnections: 10, + HTTPTimeout: 21, + MaxRetries: 1000, + MaxSyncConcurrency: 12, + 
TipDelay: 1231, + MaxReorgDepth: 12, + SeenBlockWorkers: 300, + SerialBlockWorkers: 200, + ErrorStackTraceDisabled: false, Construction: &ConstructionConfiguration{ - OfflineURL: "https://ashdjaksdkjshdk", - StaleDepth: 12, - BroadcastLimit: 200, - BlockBroadcastLimit: 992, + OfflineURL: "https://ashdjaksdkjshdk", + MaxOfflineConnections: 21, + StaleDepth: 12, + BroadcastLimit: 200, + BlockBroadcastLimit: 992, + StatusPort: 21, Workflows: append( fakeWorkflows, &job.Workflow{ @@ -71,10 +92,13 @@ var ( InactiveReconciliationConcurrency: 2938, InactiveReconciliationFrequency: 3, ReconciliationDisabled: false, - HistoricalBalanceDisabled: true, + HistoricalBalanceDisabled: &historicalDisabled, StartIndex: &startIndex, + StatusPort: 123, EndConditions: &DataEndConditions{ - ReconciliationCoverage: &goodCoverage, + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + }, }, }, } @@ -85,7 +109,7 @@ var ( } invalidPrefundedAccounts = &Configuration{ Construction: &ConstructionConfiguration{ - PrefundedAccounts: []*storage.PrefundedAccount{ + PrefundedAccounts: []*modules.PrefundedAccount{ { PrivateKeyHex: "hello", }, @@ -115,13 +139,19 @@ var ( invalidReconciliationCoverage = &Configuration{ Data: &DataConfiguration{ EndConditions: &DataEndConditions{ - ReconciliationCoverage: &badCoverage, + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: badCoverage, + }, }, }, } ) func TestLoadConfiguration(t *testing.T) { + var ( + goodAccountCount = int64(10) + badAccountCount = int64(-10) + ) var tests = map[string]struct { provided *Configuration expected *Configuration @@ -130,7 +160,13 @@ func TestLoadConfiguration(t *testing.T) { }{ "nothing provided": { provided: &Configuration{}, - expected: DefaultConfiguration(), + expected: func() *Configuration { + cfg := DefaultConfiguration() + cfg.SeenBlockWorkers = runtime.NumCPU() + cfg.SerialBlockWorkers = runtime.NumCPU() + + return cfg + }(), }, "no overwrite": { provided: whackyConfig, @@ -145,12 +181,75 @@ func TestLoadConfiguration(t *testing.T) { }, expected: func() *Configuration { cfg := DefaultConfiguration() + cfg.SeenBlockWorkers = runtime.NumCPU() + cfg.SerialBlockWorkers = runtime.NumCPU() + cfg.Construction = &ConstructionConfiguration{ + OfflineURL: DefaultURL, + MaxOfflineConnections: DefaultMaxOfflineConnections, + StaleDepth: DefaultStaleDepth, + BroadcastLimit: DefaultBroadcastLimit, + BlockBroadcastLimit: DefaultBlockBroadcastLimit, + StatusPort: DefaultStatusPort, + Workflows: fakeWorkflows, + } + + return cfg + }(), + }, + "overwrite missing with DSL": { + provided: &Configuration{ + Construction: &ConstructionConfiguration{ + ConstructorDSLFile: "test.ros", + }, + Data: &DataConfiguration{}, + }, + expected: func() *Configuration { + cfg := DefaultConfiguration() + cfg.SeenBlockWorkers = runtime.NumCPU() + cfg.SerialBlockWorkers = runtime.NumCPU() + cfg.Construction = &ConstructionConfiguration{ + OfflineURL: DefaultURL, + MaxOfflineConnections: DefaultMaxOfflineConnections, + StaleDepth: DefaultStaleDepth, + BroadcastLimit: DefaultBroadcastLimit, + BlockBroadcastLimit: DefaultBlockBroadcastLimit, + StatusPort: DefaultStatusPort, + Workflows: fakeWorkflows, + ConstructorDSLFile: "test.ros", + } + + return cfg + }(), + }, + "transfer workflow": { + provided: &Configuration{ + Construction: &ConstructionConfiguration{ + Workflows: []*job.Workflow{ + { + Name: "transfer", + Concurrency: 10, + }, + }, + }, + Data: &DataConfiguration{}, + }, + expected: func() *Configuration { + cfg := 
DefaultConfiguration() + cfg.SeenBlockWorkers = runtime.NumCPU() + cfg.SerialBlockWorkers = runtime.NumCPU() cfg.Construction = &ConstructionConfiguration{ - OfflineURL: DefaultURL, - StaleDepth: DefaultStaleDepth, - BroadcastLimit: DefaultBroadcastLimit, - BlockBroadcastLimit: DefaultBlockBroadcastLimit, - Workflows: fakeWorkflows, + OfflineURL: DefaultURL, + MaxOfflineConnections: DefaultMaxOfflineConnections, + StaleDepth: DefaultStaleDepth, + BroadcastLimit: DefaultBroadcastLimit, + BlockBroadcastLimit: DefaultBlockBroadcastLimit, + StatusPort: DefaultStatusPort, + Workflows: []*job.Workflow{ + { + Name: "transfer", + Concurrency: 10, + }, + }, } return cfg @@ -181,7 +280,9 @@ func TestLoadConfiguration(t *testing.T) { Data: &DataConfiguration{ ReconciliationDisabled: true, EndConditions: &DataEndConditions{ - ReconciliationCoverage: &goodCoverage, + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + }, }, }, }, @@ -192,7 +293,9 @@ func TestLoadConfiguration(t *testing.T) { Data: &DataConfiguration{ BalanceTrackingDisabled: true, EndConditions: &DataEndConditions{ - ReconciliationCoverage: &goodCoverage, + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + }, }, }, }, @@ -203,13 +306,68 @@ func TestLoadConfiguration(t *testing.T) { Data: &DataConfiguration{ IgnoreReconciliationError: true, EndConditions: &DataEndConditions{ - ReconciliationCoverage: &goodCoverage, + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + }, + }, + }, + }, + err: true, + }, + "valid reconciliation coverage (with account count)": { + provided: &Configuration{ + Data: &DataConfiguration{ + EndConditions: &DataEndConditions{ + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + AccountCount: &goodAccountCount, + Index: &goodAccountCount, + }, + }, + }, + }, + expected: func() *Configuration { + cfg := DefaultConfiguration() + cfg.SeenBlockWorkers = runtime.NumCPU() + cfg.SerialBlockWorkers = runtime.NumCPU() + cfg.Data.EndConditions = &DataEndConditions{ + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + AccountCount: &goodAccountCount, + Index: &goodAccountCount, + }, + } + + return cfg + }(), + }, + "invalid reconciliation coverage (with account count)": { + provided: &Configuration{ + Data: &DataConfiguration{ + EndConditions: &DataEndConditions{ + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + AccountCount: &badAccountCount, + }, + }, + }, + }, + err: true, + }, + "invalid reconciliation coverage (with index)": { + provided: &Configuration{ + Data: &DataConfiguration{ + EndConditions: &DataEndConditions{ + ReconciliationCoverage: &ReconciliationCoverage{ + Coverage: goodCoverage, + Index: &badAccountCount, + }, }, }, }, err: true, }, - "missing reserved workflows": { + "empty workflows": { provided: &Configuration{ Construction: &ConstructionConfiguration{ Workflows: []*job.Workflow{}, @@ -217,10 +375,20 @@ func TestLoadConfiguration(t *testing.T) { }, err: true, }, + "non-existent dsl file": { + provided: &Configuration{ + Construction: &ConstructionConfiguration{ + ConstructorDSLFile: "blah.ros", + }, + }, + err: true, + }, "multiple end conditions": { provided: multipleEndConditions, expected: func() *Configuration { def := DefaultConfiguration() + def.SeenBlockWorkers = runtime.NumCPU() + def.SerialBlockWorkers = runtime.NumCPU() def.Data.EndConditions = multipleEndConditions.Data.EndConditions return def @@ -231,24 +399,32 @@ func 
TestLoadConfiguration(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - // Write configuration file to tempdir - tmpfile, err := ioutil.TempFile("", "test.json") + dir, err := utils.CreateTempDir() assert.NoError(t, err) - defer os.Remove(tmpfile.Name()) + defer utils.RemoveTempDir(dir) - err = utils.SerializeAndWrite(tmpfile.Name(), test.provided) + filePath := path.Join(dir, "test.json") + err = utils.SerializeAndWrite(filePath, test.provided) assert.NoError(t, err) + // Copy test.ros to temp dir + cmd := exec.Command("cp", "testdata/test.ros", path.Join(dir, "test.ros")) + assert.NoError(t, cmd.Run()) + // Check if expected fields populated - config, err := LoadConfiguration(tmpfile.Name()) + config, err := LoadConfiguration(context.Background(), filePath) if test.err { assert.Error(t, err) assert.Nil(t, config) } else { assert.NoError(t, err) + + // Ensure test.ros expected file path is right + if test.expected.Construction != nil && len(test.expected.Construction.ConstructorDSLFile) > 0 { + test.expected.Construction.ConstructorDSLFile = path.Join(dir, test.expected.Construction.ConstructorDSLFile) + } assert.Equal(t, test.expected, config) } - assert.NoError(t, tmpfile.Close()) }) } } diff --git a/configuration/testdata/test.ros b/configuration/testdata/test.ros new file mode 100644 index 00000000..86b07cc9 --- /dev/null +++ b/configuration/testdata/test.ros @@ -0,0 +1,9 @@ +create_account(1){ + blah{ + } +} + +request_funds(1){ + blah{ + } +} diff --git a/configuration/types.go b/configuration/types.go new file mode 100644 index 00000000..4e2ecfeb --- /dev/null +++ b/configuration/types.go @@ -0,0 +1,511 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configuration + +import ( + "github.com/coinbase/rosetta-sdk-go/constructor/job" + "github.com/coinbase/rosetta-sdk-go/storage/modules" + "github.com/coinbase/rosetta-sdk-go/types" +) + +// CheckDataEndCondition is a type of "successful" end +// for the "check:data" method. +type CheckDataEndCondition string + +const ( + // IndexEndCondition is used to indicate that the index end condition + // has been met. + IndexEndCondition CheckDataEndCondition = "Index End Condition" + + // DurationEndCondition is used to indicate that the duration + // end condition has been met. + DurationEndCondition CheckDataEndCondition = "Duration End Condition" + + // TipEndCondition is used to indicate that the tip end condition + // has been met. + TipEndCondition CheckDataEndCondition = "Tip End Condition" + + // ReconciliationCoverageEndCondition is used to indicate that the reconciliation + // coverage end condition has been met. 
+ ReconciliationCoverageEndCondition CheckDataEndCondition = "Reconciliation Coverage End Condition" +) + +// Default Configuration Values +const ( + DefaultURL                                     = "http://localhost:8080" + DefaultTimeout                                 = 10 + DefaultMaxRetries                              = 5 + DefaultMaxOnlineConnections                    = 120 // most OSes have a default limit of 128 + DefaultMaxOfflineConnections                   = 4   // we shouldn't need many connections for construction + DefaultMaxSyncConcurrency                      = 64 + DefaultActiveReconciliationConcurrency         = 16 + DefaultInactiveReconciliationConcurrency       = 4 + DefaultInactiveReconciliationFrequency         = 250 + DefaultConfirmationDepth                       = 10 + DefaultStaleDepth                              = 30 + DefaultBroadcastLimit                          = 3 + DefaultTipDelay                                = 300 + DefaultBlockBroadcastLimit                     = 5 + DefaultStatusPort                              = 9090 + DefaultMaxReorgDepth                           = 100 + + // Check Perf Default Configs + DefaultStartBlock                              = 100 + DefaultEndBlock                                = 10000 + DefaultNumTimesToHitEndpoints                  = 50 + DefaultOutputFile                              = "./check_perf_stats.json" + DefaultBlockEndpointTimeConstraintMs           = 5000 + DefaultAccountBalanceEndpointTimeConstraintMs  = 5000 + + // ETH Defaults + EthereumIDBlockchain = "Ethereum" + EthereumIDNetwork    = "Ropsten" +) + +// Default Configuration Values +var ( + EthereumNetwork = &types.NetworkIdentifier{ + Blockchain: EthereumIDBlockchain, + Network:    EthereumIDNetwork, + } +) + +// ConstructionConfiguration contains all configurations +// to run check:construction. +type ConstructionConfiguration struct { + // OfflineURL is the URL of a Rosetta API implementation in "offline mode". + OfflineURL string `json:"offline_url"` + + // MaxOfflineConnections is the maximum number of open connections that the offline + // fetcher will open. + MaxOfflineConnections int `json:"max_offline_connections"` + + // ForceRetry overrides the default retry handling to retry + // on all non-200 responses. + ForceRetry bool `json:"force_retry,omitempty"` + + // StaleDepth is the number of blocks to wait before attempting + // to rebroadcast after not finding a transaction on-chain. + StaleDepth int64 `json:"stale_depth"` + + // BroadcastLimit is the number of times to attempt re-broadcast + // before giving up on a transaction broadcast. + BroadcastLimit int `json:"broadcast_limit"` + + // IgnoreBroadcastFailures determines if we should exit when there + // are broadcast failures (that surpass the BroadcastLimit). + IgnoreBroadcastFailures bool `json:"ignore_broadcast_failures"` + + // ClearBroadcasts indicates if all pending broadcasts should + // be removed from BroadcastStorage on restart. + ClearBroadcasts bool `json:"clear_broadcasts"` + + // BroadcastBehindTip indicates if we should broadcast transactions + // when we are behind tip (as defined by TipDelay). + BroadcastBehindTip bool `json:"broadcast_behind_tip"` + + // BlockBroadcastLimit is the number of transactions to attempt + // broadcast in a single block. When there are many pending + // broadcasts, it may make sense to limit the number of broadcasts. + BlockBroadcastLimit int `json:"block_broadcast_limit"` + + // RebroadcastAll indicates if all pending broadcasts should be + // rebroadcast from BroadcastStorage on restart. + RebroadcastAll bool `json:"rebroadcast_all"` + + // PrefundedAccounts is an array of prefunded accounts + // to use while testing. + PrefundedAccounts []*modules.PrefundedAccount `json:"prefunded_accounts,omitempty"` + + // Workflows are executed by the rosetta-cli to test + // certain construction flows.
+ Workflows []*job.Workflow `json:"workflows"` + + // ConstructorDSLFile is the path relative to the configuration file + // of a Rosetta Constructor DSL file (*.ros) + // that describes which Workflows to test. + // + // DSL Spec: https://github.com/coinbase/rosetta-sdk-go/tree/master/constructor/dsl + ConstructorDSLFile string `json:"constructor_dsl_file"` + + // EndConditions is a map of workflow:count that + // indicates how many of each workflow should be performed + // before check:construction should stop. For example, + // {"create_account": 5} indicates that 5 "create_account" + // workflows should be performed before stopping. + EndConditions map[string]int `json:"end_conditions,omitempty"` + + // StatusPort allows the caller to query a running check:construction + // test to get stats about progress. This can be used instead + // of parsing logs to populate some sort of status dashboard. + StatusPort uint `json:"status_port,omitempty"` + + // ResultsOutputFile is the absolute filepath of where to save + // the results of a check:construction run. + ResultsOutputFile string `json:"results_output_file,omitempty"` + + // Quiet is a boolean indicating if all request and response + // logging should be silenced. + Quiet bool `json:"quiet,omitempty"` + + // InitialBalanceFetchDisabled configures rosetta-cli + // not to look up the balance of newly seen accounts at the + // parent block before applying operations. Disabling this + // is only a good idea if you create multiple new accounts each block + // and don't fund any accounts before starting check:construction. + // + // This is a separate config from the data config because it + // is usually false whereas the data config by the same name is usually true. + InitialBalanceFetchDisabled bool `json:"initial_balance_fetch_disabled"` +} + +// ReconciliationCoverage is used to add conditions +// to reconciliation coverage for exiting `check:data`. +// All provided conditions must be satisfied before +// the end condition is considered satisfied. +// +// If FromTip, Tip, Index, and AccountCount are not provided, +// `check:data` will halt as soon as coverage surpasses +// Coverage. +type ReconciliationCoverage struct { + // Coverage is some value [0.0, 1.0] that represents + // the % of accounts reconciled. + Coverage float64 `json:"coverage"` + + // FromTip is a boolean indicating if reconciliation coverage + // should only be measured from tip (i.e. reconciliations + // performed at or after tip was reached). + FromTip bool `json:"from_tip,omitempty"` + + // Tip is a boolean indicating that tip must be reached + // before reconciliation coverage is considered valid. + Tip bool `json:"tip,omitempty"` + + // Index is an int64 indicating the height that must be + // reached before reconciliation coverage is considered valid. + Index *int64 `json:"index,omitempty"` + + // AccountCount is an int64 indicating the number of accounts + // that must be observed before reconciliation coverage is considered + // valid. + AccountCount *int64 `json:"account_count,omitempty"` +} + +// DataEndConditions contains all the conditions for the syncer to stop +// when running check:data. If any one of these conditions is considered +// true, `check:data` will stop with success. +type DataEndConditions struct { + // Index configures the syncer to stop once reaching a particular block height. + Index *int64 `json:"index,omitempty"` + + // Tip configures the syncer to stop once it has reached the tip.
+ // Make sure to configure `tip_delay` if you use this end + // condition. + Tip *bool `json:"tip,omitempty"` + + // Duration configures the syncer to stop after running + // for Duration seconds. + Duration *uint64 `json:"duration,omitempty"` + + // ReconciliationCoverage configures the syncer to stop once it reaches + // some level of reconciliation coverage. + ReconciliationCoverage *ReconciliationCoverage `json:"reconciliation_coverage,omitempty"` +} + +// DataConfiguration contains all configurations to run check:data. +type DataConfiguration struct { + // ActiveReconciliationConcurrency is the concurrency to use while fetching accounts + // during active reconciliation. + ActiveReconciliationConcurrency uint64 `json:"active_reconciliation_concurrency"` + + // InactiveReconciliationConcurrency is the concurrency to use while fetching accounts + // during inactive reconciliation. + InactiveReconciliationConcurrency uint64 `json:"inactive_reconciliation_concurrency"` + + // InactiveReconciliationFrequency is the number of blocks to wait between + // inactive reconciliations on each account. + InactiveReconciliationFrequency uint64 `json:"inactive_reconciliation_frequency"` + + // LogBlocks is a boolean indicating whether to log processed blocks. + LogBlocks bool `json:"log_blocks"` + + // LogTransactions is a boolean indicating whether to log processed transactions. + LogTransactions bool `json:"log_transactions"` + + // LogBalanceChanges is a boolean indicating whether to log all balance changes. + LogBalanceChanges bool `json:"log_balance_changes"` + + // LogReconciliations is a boolean indicating whether to log all reconciliations. + LogReconciliations bool `json:"log_reconciliations"` + + // IgnoreReconciliationError determines if block processing should halt on a reconciliation + // error. It can be beneficial to collect all reconciliation errors or silence + // reconciliation errors during development. + IgnoreReconciliationError bool `json:"ignore_reconciliation_error"` + + // ExemptAccounts is a path relative to the configuration file + // to a file listing all accounts to exempt from balance + // tracking and reconciliation. Look at the examples directory for an example of + // how to structure this file. + ExemptAccounts string `json:"exempt_accounts"` + + // BootstrapBalances is a path relative to the configuration file to a file used + // to bootstrap balances before starting syncing. If this value is populated after + // beginning syncing, it will be ignored. + BootstrapBalances string `json:"bootstrap_balances"` + + // HistoricalBalanceDisabled is a boolean that dictates how balance lookup is performed. + // When set to false, balances are looked up at the block where a balance + // change occurred instead of at the current block. Blockchains that do not support + // historical balance lookup should set this to true. + HistoricalBalanceDisabled *bool `json:"historical_balance_disabled,omitempty"` + + // InterestingAccounts is a path to a file listing all accounts to check on each block. Look + // at the examples directory for an example of how to structure this file. + InterestingAccounts string `json:"interesting_accounts"` + + // ReconciliationDisabled is a boolean that indicates reconciliation should not + // be attempted. When first testing an implementation, it can be useful to disable + // some of the more advanced checks to confirm syncing is working as expected. 
+ ReconciliationDisabled bool `json:"reconciliation_disabled"` + + // ReconciliationDrainDisabled is a boolean that configures the rosetta-cli + // to exit check:data before the entire active reconciliation queue has + // been drained (if reconciliation is enabled). + ReconciliationDrainDisabled bool `json:"reconciliation_drain_disabled"` + + // InactiveDiscrepancySearchDisabled is a boolean indicating if a search + // should be performed to find any inactive reconciliation discrepancies. + // Note, a search will never be performed if historical balance lookup + // is disabled. + InactiveDiscrepancySearchDisabled bool `json:"inactive_discrepancy_search_disabled"` + + // BalanceTrackingDisabled is a boolean that indicates balance calculation + // should not be attempted. When first testing an implementation, it can be + // useful to just try to fetch all blocks before checking for balance + // consistency. + BalanceTrackingDisabled bool `json:"balance_tracking_disabled"` + + // CoinTrackingDisabled is a boolean that indicates coin (or UTXO) tracking + // should not be attempted. When first testing an implementation, it can be + // useful to just try to fetch all blocks before checking for coin + // consistency. + CoinTrackingDisabled bool `json:"coin_tracking_disabled"` + + // StartIndex is the block height to start syncing from. If no StartIndex + // is provided, syncing will start from the last saved block. + // If no blocks have ever been synced, syncing will start from genesis. + StartIndex *int64 `json:"start_index,omitempty"` + + // EndConditions contains the conditions for the syncer to stop. + EndConditions *DataEndConditions `json:"end_conditions,omitempty"` + + // StatusPort allows the caller to query a running check:data + // test to get stats about progress. This can be used instead + // of parsing logs to populate some sort of status dashboard. + StatusPort uint `json:"status_port,omitempty"` + + // ResultsOutputFile is the absolute filepath of where to save + // the results of a check:data run. + ResultsOutputFile string `json:"results_output_file"` + + // PruningBlockDisabled is a boolean that indicates storage pruning should + // not be attempted. This should really only ever be set to true if you + // wish to use `start_index` at a later point to restart from some + // previously synced block. + PruningBlockDisabled bool `json:"pruning_block_disabled"` + + // PruningBalanceDisabled is a boolean that indicates balance pruning + // should not be attempted. + PruningBalanceDisabled bool `json:"pruning_balance_disabled"` + + // PruningFrequency is the frequency (in seconds) that we attempt + // to prune blocks. If not populated, we use the default value + // provided in the `statefulsyncer` package. + PruningFrequency *int `json:"pruning_frequency,omitempty"` + + // InitialBalanceFetchDisabled configures rosetta-cli + // not to look up the balance of newly seen accounts at the + // parent block before applying operations. Disabling + // this step can significantly speed up performance + // without impacting validation accuracy (if all genesis + // accounts are provided using bootstrap_balances and + // syncing starts from genesis). + InitialBalanceFetchDisabled bool `json:"initial_balance_fetch_disabled"` + + // ReconcilerActiveBacklog is the maximum number of pending changes + // to keep in the active reconciliation backlog before skipping + // reconciliation on new changes. 
+ ReconcilerActiveBacklog *int `json:"reconciler_active_backlog,omitempty"` +} + +// Configuration contains all configuration settings for running +// check:data, check:construction, or check:perf. +type Configuration struct { + // Network is the *types.NetworkIdentifier where transactions should + // be constructed and where blocks should be synced to monitor + // for broadcast success. + Network *types.NetworkIdentifier `json:"network"` + + // OnlineURL is the URL of a Rosetta API implementation in "online mode". + OnlineURL string `json:"online_url"` + + // TargetAccount, if provided, will be the only account of interest. + TargetAccount string `json:"target_account,omitempty"` + + // DataDirectory is a folder used to store logs and any data used to perform validation. + // The path can be absolute, or it can be relative to where the rosetta-cli + // binary is being executed. + DataDirectory string `json:"data_directory"` + + // RequestUUID makes it easier to search logs when the validation pool is working on different requests. + RequestUUID string `json:"requestUUID,omitempty"` + + // HTTPTimeout is the timeout for an HTTP request in seconds. + HTTPTimeout uint64 `json:"http_timeout"` + + // MaxRetries is the number of times we will retry an HTTP request. If retry_elapsed_time + // is also populated, we may stop attempting retries early. + MaxRetries uint64 `json:"max_retries"` + + // RetryElapsedTime is the total time to spend retrying an HTTP request in seconds. + RetryElapsedTime uint64 `json:"retry_elapsed_time"` + + // MaxOnlineConnections is the maximum number of open connections that the online + // fetcher will open. + MaxOnlineConnections int `json:"max_online_connections"` + + // ForceRetry overrides the default retry handling to retry + // on all non-200 responses. + ForceRetry bool `json:"force_retry,omitempty"` + + // MaxSyncConcurrency is the maximum sync concurrency to use while syncing blocks. + // Sync concurrency is managed automatically by the `syncer` package. + MaxSyncConcurrency int64 `json:"max_sync_concurrency"` + + // TipDelay dictates how many seconds behind the current time is considered + // tip. If we are > TipDelay seconds from the last processed block, + // we are considered to be behind tip. + TipDelay int64 `json:"tip_delay"` + + // MaxReorgDepth specifies the maximum possible reorg depth of the blockchain + // being synced. This value is used to determine how aggressively to prune + // old block data. + // + // It is better to be overly cautious here as keeping a few + // too many blocks around is much better than running into an + // error caused by missing block data! + MaxReorgDepth int `json:"max_reorg_depth,omitempty"` + + // LogConfiguration determines if the configuration settings + // should be printed to the console when a file is loaded. + LogConfiguration bool `json:"log_configuration"` + + // CompressionDisabled configures the storage layer to not + // perform data compression before writing to disk. This leads + // to significantly more on-disk storage usage but can lead + // to performance gains. + CompressionDisabled bool `json:"compression_disabled"` + + // L0InMemoryEnabled configures storage to increase memory + // usage. Enabling this massively increases performance + // but can use tens of GB of RAM, even with pruning enabled. + L0InMemoryEnabled bool `json:"l0_in_memory_enabled"` + + // AllInMemoryEnabled configures storage to increase memory + // usage. Enabling this massively increases performance + // but can use more than 20 GB of RAM, even with pruning enabled. 
+ AllInMemoryEnabled bool `json:"all_in_memory_enabled"` + + // TableSize (in GB) enables users to define the max table size. + // It is only read when AllInMemoryEnabled == true or L0InMemoryEnabled == true. + // The default value is 2 and the allowed range is [1, 100]. + TableSize *int64 `json:"table_size,omitempty"` + + // ValueLogFileSize (in MB) enables users to define the value log file size. + // It is only read when AllInMemoryEnabled == true or L0InMemoryEnabled == true. + // The default value is 512 and the allowed range is [128, 2048]. + ValueLogFileSize *int64 `json:"value_log_file_size,omitempty"` + + // SeenBlockWorkers is the number of goroutines spawned to store + // seen blocks in storage before we attempt to sequence. If not populated, + // this value defaults to runtime.NumCPU(). + SeenBlockWorkers int `json:"seen_block_workers,omitempty"` + + // SerialBlockWorkers is the number of goroutines spawned to help + // with block sequencing (i.e. updating balances, updating coins, etc). + // If not populated, this value defaults to runtime.NumCPU(). + SerialBlockWorkers int `json:"serial_block_workers,omitempty"` + + // ValidationFile is the file used for asset-specific validation. + // If not provided, this defaults to an empty string and no asset-specific + // validation will be done. + ValidationFile string `json:"validation_file,omitempty"` + + // ErrorStackTraceDisabled, if false, causes the error stack trace to be printed + // when the data or construction check fails. + ErrorStackTraceDisabled bool `json:"error_stack_trace_disabled"` + + // CoinSupported indicates whether your implementation supports coins. + // If your implementation is account-based (e.g. Ethereum), + // this value must be false. If your implementation is UTXO-based (e.g. Bitcoin), + // then this value must be true. + CoinSupported bool `json:"coin_supported"` + + // InfoMetaData is a string that rosetta-cli converts into a map[string]string. + // Keys and values are separated by ":" + // and different key-value pairs are separated by ",". + // For example, to record "instance_name" as "1234" and "blockchain_name" as "Bitcoin", + // this field would be "instance_name:1234,blockchain_name:Bitcoin". + // Spaces before and after ":" and "," are trimmed when building the map, + // so " instance_name : xxxx , blockchain_name : xxxx " is recorded the same as + // "instance_name:xxxx,blockchain_name:xxxx" + InfoMetaData string `json:"info_metadata,omitempty"` + + Construction *ConstructionConfiguration `json:"construction"` + Data *DataConfiguration `json:"data"` + Perf *CheckPerfConfiguration `json:"perf"` + Sign *SignConfiguration `json:"sign"` +} + +// SignConfiguration is the configuration for signing. +type SignConfiguration struct { + PubKey *types.PublicKey `json:"pub_key"` + PrivateKey string `json:"private_key"` + SigningPayload *types.SigningPayload `json:"signing_payload"` + Signature *types.Signature `json:"signature,omitempty"` +} + +// CheckPerfConfiguration is the configuration for check:perf. +type CheckPerfConfiguration struct { + + // StartBlock is the starting block for running check:perf. + // If not provided, this defaults to 0 (the genesis block). + StartBlock int64 `json:"start_block,omitempty"` + + BlockEndpointTimeConstraintMs int64 `json:"block_endpoint_time_constraint_ms"` + + AccountBalanceEndpointTimeConstraintMs int64 `json:"account_balance_endpoint_time_constraint_ms"` + + // EndBlock is the ending block for running check:perf. 
+ // Must be provided when running check:perf + EndBlock int64 `json:"end_block"` + + // NumTimesToHitEndpoints is the number of times each rosetta-server endpoint will be benchmarked + NumTimesToHitEndpoints int `json:"num_times_to_hit_endpoints"` + + // Location to output test results + StatsOutputFile string `json:"check_perf_output_dir"` +} diff --git a/examples/configuration/bitcoin.json b/examples/configuration/bitcoin.json deleted file mode 100644 index 86f6989f..00000000 --- a/examples/configuration/bitcoin.json +++ /dev/null @@ -1,212 +0,0 @@ -{ - "network": { - "blockchain": "Bitcoin", - "network": "Testnet3" - }, - "online_url": "", - "data_directory": "bitcoin-data", - "http_timeout": 300, - "retry_elapsed_time": 0, - "sync_concurrency": 0, - "transaction_concurrency": 0, - "tip_delay": 1800, - "disable_memory_limit": true, - "log_configuration": false, - "construction": { - "offline_url": "", - "stale_depth": 0, - "broadcast_limit": 0, - "ignore_broadcast_failures": false, - "clear_broadcasts": false, - "broadcast_behind_tip": false, - "block_broadcast_limit": 0, - "rebroadcast_all": false, - "workflows": [ - { - "name": "request_funds", - "concurrency": 1, - "scenarios": [ - { - "name": "find_address", - "actions": [ - { - "input": "{\"symbol\":\"tBTC\", \"decimals\":8}", - "type": "set_variable", - "output_path": "currency" - }, - { - "input": "{\"minimum_balance\":{\"value\": \"0\", \"currency\": {{currency}}}, \"create_limit\":1}", - "type": "find_balance", - "output_path": "random_address" - } - ] - }, - { - "name": "request", - "actions": [ - { - "input": "{\"address\": {{random_address.account.address}}, \"minimum_balance\":{\"value\": \"1000000\", \"currency\": {{currency}}}}", - "type": "find_balance", - "output_path": "loaded_address" - } - ] - } - ] - }, - { - "name": "create_account", - "concurrency": 1, - "scenarios": [ - { - "name": "create_account", - "actions": [ - { - "input": "{\"network\":\"Testnet3\", \"blockchain\":\"Bitcoin\"}", - "type": "set_variable", - "output_path": "network" - }, - { - "input": "{\"curve_type\": \"secp256k1\"}", - "type": "generate_key", - "output_path": "key" - }, - { - "input": "{\"network_identifier\": {{network}}, \"public_key\": {{key.public_key}}}", - "type": "derive", - "output_path": "address" - }, - { - "input": "{\"address\": {{address.address}}, \"keypair\": {{key}}}", - "type": "save_address" - } - ] - } - ] - }, - { - "name": "transfer", - "concurrency": 10, - "scenarios": [ - { - "name": "transfer", - "actions": [ - { - "input": "{\"network\":\"Testnet3\", \"blockchain\":\"Bitcoin\"}", - "type": "set_variable", - "output_path": "transfer.network" - }, - { - "input": "{\"symbol\":\"tBTC\", \"decimals\":8}", - "type": "set_variable", - "output_path": "currency" - }, - { - "input": "\"600\"", - "type": "set_variable", - "output_path": "dust_amount" - }, - { - "input": "\"600\"", - "type": "set_variable", - "output_path": "fee_amount" - }, - { - "input": "{\"operation\":\"addition\", \"left_value\": {{dust_amount}}, \"right_value\": {{fee_amount}}}", - "type": "math", - "output_path": "send_buffer" - }, - { - "input": "\"1800\"", - "type": "set_variable", - "output_path": "reserved_amount" - }, - { - "input": "{\"require_coin\":true, \"minimum_balance\":{\"value\": {{reserved_amount}}, \"currency\": {{currency}}}}", - "type": "find_balance", - "output_path": "sender" - }, - { - "input": "{\"operation\":\"subtraction\", \"left_value\": {{sender.balance.value}}, \"right_value\": {{send_buffer}}}", - "type": "math", - 
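Taken together, the structs above define the JSON shape that a rosetta-cli configuration file is decoded into. As a minimal sketch, assuming only the Go standard library and the json tags shown in this diff (the partialConfiguration type and the hard-coded path to examples/configuration/default.json are illustrative, not part of the repository), a subset of such a file could be decoded like this:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// partialConfiguration mirrors a few of the json tags defined above;
// it is an illustrative subset, not the full Configuration struct.
type partialConfiguration struct {
	OnlineURL   string `json:"online_url"`
	HTTPTimeout uint64 `json:"http_timeout"`
	TipDelay    int64  `json:"tip_delay"`
}

func main() {
	// os.ReadFile requires Go 1.16+, matching the go.mod change later in this diff.
	raw, err := os.ReadFile("examples/configuration/default.json")
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	var cfg partialConfiguration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("online_url=%s http_timeout=%d tip_delay=%d\n", cfg.OnlineURL, cfg.HTTPTimeout, cfg.TipDelay)
}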
"output_path": "available_amount" - }, - { - "input": "{\"minimum\": {{dust_amount}}, \"maximum\": {{available_amount}}}", - "type": "random_number", - "output_path": "recipient_amount" - }, - { - "input": "{\"recipient_amount\":{{recipient_amount}}}", - "type": "print_message" - }, - { - "input": "{\"operation\":\"subtraction\", \"left_value\": {{sender.balance.value}}, \"right_value\": {{recipient_amount}}}", - "type": "math", - "output_path": "change_amount" - }, - { - "input": "{\"operation\":\"subtraction\", \"left_value\": {{change_amount}}, \"right_value\": {{fee_amount}}}", - "type": "math", - "output_path": "change_amount" - }, - { - "input": "{\"change_amount\":{{change_amount}}}", - "type": "print_message" - }, - { - "input": "{\"operation\":\"subtraction\", \"left_value\": \"0\", \"right_value\":{{sender.balance.value}}}", - "type": "math", - "output_path": "sender_amount" - }, - { - "input": "{\"not_address\":[{{sender.account.address}}], \"not_coins\":[{{sender.coin}}], \"minimum_balance\":{\"value\": \"0\", \"currency\": {{currency}}}, \"create_limit\": 100, \"create_probability\": 50}", - "type": "find_balance", - "output_path": "recipient" - }, - { - "input": "\"1\"", - "type": "set_variable", - "output_path": "transfer.confirmation_depth" - }, - { - "input": "[{\"operation_identifier\":{\"index\":0},\"type\":\"INPUT\",\"account\":{\"address\":{{sender.account.address}}},\"amount\":{\"value\":{{sender_amount}},\"currency\":{{currency}}}, \"coin_change\":{\"coin_action\":\"coin_spent\", \"coin_identifier\":{{sender.coin}}}},{\"operation_identifier\":{\"index\":1},\"type\":\"OUTPUT\",\"account\":{\"address\":{{recipient.account.address}}},\"amount\":{\"value\":{{recipient_amount}},\"currency\":{{currency}}}}, {\"operation_identifier\":{\"index\":2},\"type\":\"OUTPUT\",\"account\":{\"address\":{{sender.account.address}}},\"amount\":{\"value\":{{change_amount}},\"currency\":{{currency}}}}]", - "type": "set_variable", - "output_path": "transfer.operations" - }, - { - "input": "{{transfer.operations}}", - "type": "print_message" - } - ] - } - ] - } - ], - "end_conditions": { - "create_account": 10, - "transfer": 10 - } - }, - "data": { - "active_reconciliation_concurrency": 0, - "inactive_reconciliation_concurrency": 0, - "inactive_reconciliation_frequency": 0, - "log_blocks": false, - "log_transactions": false, - "log_balance_changes": false, - "log_reconciliations": false, - "ignore_reconciliation_error": false, - "exempt_accounts": "", - "bootstrap_balances": "", - "historical_balance_disabled": true, - "interesting_accounts": "", - "reconciliation_disabled": false, - "inactive_discrepency_search_disabled": false, - "balance_tracking_disabled": false, - "coin_tracking_disabled": false, - "end_conditions": { - "reconciliation_coverage": 0.95 - }, - "results_output_file": "" - } -} diff --git a/examples/configuration/default.json b/examples/configuration/default.json index 530cd1f8..97e38a82 100644 --- a/examples/configuration/default.json +++ b/examples/configuration/default.json @@ -6,12 +6,18 @@ "online_url": "http://localhost:8080", "data_directory": "", "http_timeout": 10, - "retry_elapsed_time": 60, - "sync_concurrency": 8, - "transaction_concurrency": 16, + "max_retries": 5, + "retry_elapsed_time": 0, + "max_online_connections": 120, + "max_sync_concurrency": 64, "tip_delay": 300, - "disable_memory_limit": false, + "max_reorg_depth": 100, "log_configuration": false, + "compression_disabled": false, + "l0_in_memory_enabled": false, + "all_in_memory_enabled": false, + 
"error_stack_trace_disabled": false, + "coin_supported": false, "construction": null, "data": { "active_reconciliation_concurrency": 16, @@ -24,12 +30,18 @@ "ignore_reconciliation_error": false, "exempt_accounts": "", "bootstrap_balances": "", - "historical_balance_disabled": false, "interesting_accounts": "", "reconciliation_disabled": false, - "inactive_discrepency_search_disabled": false, + "reconciliation_drain_disabled": false, + "inactive_discrepancy_search_disabled": false, "balance_tracking_disabled": false, "coin_tracking_disabled": false, - "results_output_file": "" - } + "status_port": 9090, + "results_output_file": "", + "pruning_block_disabled": false, + "pruning_balance_disabled": false, + "initial_balance_fetch_disabled": false + }, + "perf": null, + "sign": null } \ No newline at end of file diff --git a/examples/configuration/ethereum.json b/examples/configuration/ethereum.json deleted file mode 100644 index 1b79d344..00000000 --- a/examples/configuration/ethereum.json +++ /dev/null @@ -1,179 +0,0 @@ -{ - "network": { - "blockchain": "Ethereum", - "network": "Ropsten" - }, - "online_url": "", - "data_directory": "ethereum-data", - "http_timeout": 300, - "retry_elapsed_time": 0, - "sync_concurrency": 0, - "transaction_concurrency": 0, - "tip_delay": 60, - "disable_memory_limit": true, - "log_configuration": false, - "construction": { - "offline_url": "", - "stale_depth": 0, - "broadcast_limit": 0, - "ignore_broadcast_failures": false, - "clear_broadcasts": false, - "broadcast_behind_tip": false, - "block_broadcast_limit": 0, - "rebroadcast_all": false, - "workflows": [ - { - "name": "request_funds", - "concurrency": 1, - "scenarios": [ - { - "name": "find_address", - "actions": [ - { - "input": "{\"symbol\":\"ETH\", \"decimals\":18}", - "type": "set_variable", - "output_path": "currency" - }, - { - "input": "{\"minimum_balance\":{\"value\": \"0\", \"currency\": {{currency}}}, \"create_limit\":1}", - "type": "find_balance", - "output_path": "random_address" - } - ] - }, - { - "name": "request", - "actions": [ - { - "input": "{\"address\": {{random_address.account.address}}, \"minimum_balance\":{\"value\": \"10000000000000000\", \"currency\": {{currency}}}}", - "type": "find_balance", - "output_path": "loaded_address" - } - ] - } - ] - }, - { - "name": "create_account", - "concurrency": 1, - "scenarios": [ - { - "name": "create_account", - "actions": [ - { - "input": "{\"network\":\"Ropsten\", \"blockchain\":\"Ethereum\"}", - "type": "set_variable", - "output_path": "network" - }, - { - "input": "{\"curve_type\": \"secp256k1\"}", - "type": "generate_key", - "output_path": "key" - }, - { - "input": "{\"network_identifier\": {{network}}, \"public_key\": {{key.public_key}}}", - "type": "derive", - "output_path": "address" - }, - { - "input": "{\"address\": {{address.address}}, \"keypair\": {{key}}}", - "type": "save_address" - } - ] - } - ] - }, - { - "name": "transfer", - "concurrency": 10, - "scenarios": [ - { - "name": "transfer", - "actions": [ - { - "input": "{\"network\":\"Ropsten\", \"blockchain\":\"Ethereum\"}", - "type": "set_variable", - "output_path": "transfer.network" - }, - { - "input": "{\"symbol\":\"ETH\", \"decimals\":18}", - "type": "set_variable", - "output_path": "currency" - }, - { - "input": "{\"minimum_balance\":{\"value\": \"10000000000000000\", \"currency\": {{currency}}}}", - "type": "find_balance", - "output_path": "sender" - }, - { - "input": "\"42000000000000\"", - "type": "set_variable", - "output_path": "max_fee" - }, - { - "input": 
"{\"operation\":\"subtraction\", \"left_value\": {{sender.balance.value}}, \"right_value\": {{max_fee}}}", - "type": "math", - "output_path": "available_amount" - }, - { - "input": "{\"minimum\": \"1\", \"maximum\": {{available_amount}}}", - "type": "random_number", - "output_path": "recipient_amount" - }, - { - "input": "{\"recipient_amount\":{{recipient_amount}}}", - "type": "print_message" - }, - { - "input": "{\"operation\":\"subtraction\", \"left_value\": \"0\", \"right_value\":{{recipient_amount}}}", - "type": "math", - "output_path": "sender_amount" - }, - { - "input": "{\"not_address\":[{{sender.account.address}}], \"minimum_balance\":{\"value\": \"0\", \"currency\": {{currency}}}, \"create_limit\": 100, \"create_probability\": 50}", - "type": "find_balance", - "output_path": "recipient" - }, - { - "input": "\"1\"", - "type": "set_variable", - "output_path": "transfer.confirmation_depth" - }, - { - "input": "[{\"operation_identifier\":{\"index\":0},\"type\":\"transfer\",\"account\":{\"address\":{{sender.account.address}}},\"amount\":{\"value\":{{sender_amount}},\"currency\":{{currency}}}},{\"operation_identifier\":{\"index\":1},\"type\":\"transfer\",\"account\":{\"address\":{{recipient.account.address}}},\"amount\":{\"value\":{{recipient_amount}},\"currency\":{{currency}}}}]", - "type": "set_variable", - "output_path": "transfer.operations" - } - ] - } - ] - } - ], - "end_conditions": { - "create_account": 10, - "transfer": 10 - } - }, - "data": { - "active_reconciliation_concurrency": 0, - "inactive_reconciliation_concurrency": 0, - "inactive_reconciliation_frequency": 0, - "log_blocks": false, - "log_transactions": false, - "log_balance_changes": false, - "log_reconciliations": false, - "ignore_reconciliation_error": false, - "exempt_accounts": "", - "bootstrap_balances": "", - "historical_balance_disabled": false, - "interesting_accounts": "", - "reconciliation_disabled": false, - "inactive_discrepency_search_disabled": false, - "balance_tracking_disabled": false, - "coin_tracking_disabled": false, - "end_conditions": { - "reconciliation_coverage": 0.95 - }, - "results_output_file": "" - } -} \ No newline at end of file diff --git a/examples/configuration/sign.json b/examples/configuration/sign.json new file mode 100644 index 00000000..def4ebbe --- /dev/null +++ b/examples/configuration/sign.json @@ -0,0 +1,16 @@ +{ + "sign": { + "pub_key": { + "curve_type": "secp256k1", + "hex_bytes": "03c7e625aa08cad8f257d9ee2b9b7a0214f19f981afd5b498c728ad7ed6c0c3df6" + }, + "private_key": "", + "signing_payload": { + "hex_bytes": "370e74254e8cbaa343af3564901456082ec7af967e45ff24ba061233b1a1b04f", + "signature_type": "ecdsa", + "account_identifier": { + "address": "dummy" + } + } + } +} \ No newline at end of file diff --git a/examples/configuration/simple.json b/examples/configuration/simple.json index 90b21c8f..95e9479a 100644 --- a/examples/configuration/simple.json +++ b/examples/configuration/simple.json @@ -6,13 +6,11 @@ "online_url": "http://localhost:8080", "data_directory": "", "http_timeout": 10, - "sync_concurrency": 8, - "transaction_concurrency": 16, "tip_delay": 300, "data": { "historical_balance_disabled": true, "reconciliation_disabled": true, - "inactive_discrepency_search_disabled": true, + "inactive_discrepancy_search_disabled": true, "balance_tracking_disabled": true, "end_conditions": { "tip": true diff --git a/examples/configuration/verify.json b/examples/configuration/verify.json new file mode 100644 index 00000000..9554d13c --- /dev/null +++ 
b/examples/configuration/verify.json @@ -0,0 +1,18 @@ +{ + "sign": { + "pub_key": { + "curve_type": "secp256k1", + "hex_bytes": "03c7e625aa08cad8f257d9ee2b9b7a0214f19f981afd5b498c728ad7ed6c0c3df6" + }, + "signing_payload": { + "hex_bytes": "370e74254e8cbaa343af3564901456082ec7af967e45ff24ba061233b1a1b04f", + "signature_type": "ecdsa", + "account_identifier": { + "address": "dummy" + } + }, + "signature": { + "hex_bytes": "c80547470b7e4d3fc17c988b2244dfebc909b3e9f7fd0c1387763263cc70d16d24f326b9c12ba2ea278164c0b30f128a809585fc503eda43de429aadb9f893ef" + } + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index 0a081964..df02ef7f 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,20 @@ module github.com/coinbase/rosetta-cli -go 1.13 +go 1.16 require ( - github.com/coinbase/rosetta-sdk-go v0.3.5-0.20200901205702-b739db12905a - github.com/fatih/color v1.9.0 - github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a - github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c - github.com/spf13/cobra v1.0.0 - github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.6.1 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - src.techknowlogick.com/xgo v1.1.1-0.20200814033943-12cf2e8194ca // indirect + github.com/coinbase/rosetta-sdk-go v0.8.3 + github.com/coinbase/rosetta-sdk-go/types v1.0.0 + github.com/fatih/color v1.13.0 + github.com/google/go-cmp v0.5.6 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/olekukonko/tablewriter v0.0.5 + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.4.0 + github.com/stretchr/testify v1.7.2 + go.uber.org/zap v1.21.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + google.golang.org/protobuf v1.27.1 // indirect ) replace github.com/coinbase/rosetta-sdk-go v0.3.5-0.20200824221853-d7b1fe2f9239 => github.com/CodaProtocol/rosetta-sdk-go v0.3.5-0.20200903180025-a94555f1b459 diff --git a/go.sum b/go.sum index dfa83c11..c9d5ca80 100644 --- a/go.sum +++ b/go.sum @@ -1,42 +1,79 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go 
v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw= +git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= 
+github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6 h1:1d9pzdbkth4D9AX6ndKSl7of3UTV0RYl3z64U2dXMGo= +github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.20.1-beta/go.mod 
h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= @@ -44,6 +81,9 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bwesterb/go-ristretto v1.2.0 h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFADiZcWtw= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -51,295 +91,481 @@ github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= -github.com/coinbase/rosetta-sdk-go v0.3.5-0.20200901205702-b739db12905a h1:virCY7xRX4Qi9EvWDordUyvCDlaP3k7ftIMLgcLzs1s= -github.com/coinbase/rosetta-sdk-go v0.3.5-0.20200901205702-b739db12905a/go.mod h1:CE6c0Ws+rKwv4yiQDtAOuYwlC3tpXr2Cq5RTE/oVivY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/coinbase/kryptology v1.8.0 h1:Aoq4gdTsJhSU3lNWsD5BWmFSz2pE0GlmrljaOxepdYY= +github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI= +github.com/coinbase/rosetta-sdk-go v0.8.3 h1:IYqd+Ser5NVh0s7p8p2Ir82iCvi75E1l0NH2H4NEr0Y= +github.com/coinbase/rosetta-sdk-go v0.8.3/go.mod h1:ChOHc+BNq7zqJDDkui0DA124GOvlAiRbdgAc1U9GMDQ= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/consensys/gnark-crypto v0.5.3 h1:4xLFGZR3NWEH2zy+YzvzHicpToQR8FXFbfLNvpGB+rE= +github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/dave/dst v0.23.1/go.mod h1:LjPcLEauK4jC5hQ1fE/wr05O41zK91Pr4Qs22Ljq7gs= -github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= -github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= -github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/dgraph-io/badger/v2 v2.2007.1 h1:t36VcBCpo4SsmAD5M8wVv1ieVzcALyGfaJ92z4ccULM= -github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= 
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.20 h1:kk/J5OIoaoz3DRrCXznz3RGi212mHHXwzXlY/ZQxcj0= -github.com/ethereum/go-ethereum v1.9.20/go.mod h1:JSSTypSMTkGZtAdAChH2wP5dZEvPGh3nUTuDpH+hNrg= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/ethereum/go-ethereum v1.10.21 h1:5lqsEx92ZaZzRyOqBEXux4/UR06m296RGzN3ol3teJY= +github.com/ethereum/go-ethereum v1.10.21/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod 
h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 h1:lMm2hD9Fy0ynom5+85/pbdkiYcBqM1JWmhpAXLmy0fw= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/addlicense 
v0.0.0-20200817051935-6f4cd4aacc89/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/promql/v2 v2.12.0/go.mod 
h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= 
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77 h1:6xiz3+ZczT3M4+I+JLpcPGG1bQKm8067HktB17EDWEE= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable 
v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/neilotoole/errgroup v0.1.6 
h1:PODGqPXdT5BC/zCYIMoTrwV+ujKcW+gBXM6Ye9Ve3R8= +github.com/neilotoole/errgroup v0.1.6/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/segmentio/golines v0.0.0-20200808004416-0a9796b248e8/go.mod h1:bQSh5qdVR67XiCKbaVvYO41s50c5hQo+3cY/1CQQ3xQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/gjson v1.6.1 h1:LRbvNuNuvAiISWg6gxLEFuCe72UKy5hDqhxW/8183ws= -github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU= -github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U= -github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.4 h1:cuiLzLnaMeBhRmEv00Lpk3tkYrcxpmbU81tAY4Dw0tc= +github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1 h1:d71/KA0LhvkrJ/Ok+Wx9qK7bU8meKA1Hk0jpVI5kJjk= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/urfave/cli/v2 v2.3.0/go.mod 
h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net 
v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -347,68 +573,170 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191024172528-b4ff53e7a1cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443 h1:X18bCaipMcoJGm27Nv7zr4XYPKGUy92GtqboKC2Hxaw= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191024220359-3d91e92cde03/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= 
-gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -src.techknowlogick.com/xgo v1.1.1-0.20200814033943-12cf2e8194ca/go.mod h1:31CE1YKtDOrKTk9PSnjTpe6YbO6W/0LTYZ1VskL09oU= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/main.go b/main.go index a68c3eea..1d25c276 100644 --- a/main.go +++ b/main.go @@ -15,13 +15,17 @@ package main import ( - "log" + "os" "github.com/coinbase/rosetta-cli/cmd" + + "github.com/fatih/color" ) func main() { - if err := cmd.Execute(); err != nil { - log.Fatal(err) + err := cmd.Execute() + if err != nil { + color.Red("Command Failed: %s", err.Error()) + os.Exit(1) } } diff --git a/mocks/constructor/handler.go b/mocks/constructor/handler.go deleted file mode 100644 index 4afe30ef..00000000 --- a/mocks/constructor/handler.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package constructor - -import ( - context "context" - - types "github.com/coinbase/rosetta-sdk-go/types" - mock "github.com/stretchr/testify/mock" -) - -// Handler is an autogenerated mock type for the Handler type -type Handler struct { - mock.Mock -} - -// AddressCreated provides a mock function with given fields: _a0, _a1 -func (_m *Handler) AddressCreated(_a0 context.Context, _a1 string) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// TransactionCreated provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Handler) TransactionCreated(_a0 context.Context, _a1 string, _a2 *types.TransactionIdentifier) error { - ret := _m.Called(_a0, _a1, _a2) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, *types.TransactionIdentifier) error); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/mocks/constructor/helper.go b/mocks/constructor/helper.go deleted file mode 100644 index be7a65cb..00000000 --- a/mocks/constructor/helper.go +++ /dev/null @@ -1,441 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package constructor - -import ( - big "math/big" - - context "context" - - keys "github.com/coinbase/rosetta-sdk-go/keys" - - mock "github.com/stretchr/testify/mock" - - storage "github.com/coinbase/rosetta-sdk-go/storage" - - types "github.com/coinbase/rosetta-sdk-go/types" -) - -// Helper is an autogenerated mock type for the Helper type -type Helper struct { - mock.Mock -} - -// AccountBalance provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Helper) AccountBalance(_a0 context.Context, _a1 *types.AccountIdentifier, _a2 *types.Currency) (*big.Int, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(context.Context, *types.AccountIdentifier, *types.Currency) *big.Int); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *types.AccountIdentifier, *types.Currency) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AllAddresses provides a mock function with given fields: ctx -func (_m *Helper) AllAddresses(ctx context.Context) ([]string, error) { - ret := _m.Called(ctx) - - var r0 []string - if rf, ok := ret.Get(0).(func(context.Context) []string); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AllBroadcasts provides a mock function with given fields: ctx -func (_m *Helper) AllBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) { - ret := _m.Called(ctx) - - var r0 []*storage.Broadcast - if rf, ok := ret.Get(0).(func(context.Context) []*storage.Broadcast); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*storage.Broadcast) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Broadcast provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4 -func (_m *Helper) Broadcast(_a0 context.Context, _a1 string, _a2 []*types.Operation, _a3 *types.TransactionIdentifier, _a4 string) error { 
- ret := _m.Called(_a0, _a1, _a2, _a3, _a4) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, []*types.Operation, *types.TransactionIdentifier, string) error); ok { - r0 = rf(_a0, _a1, _a2, _a3, _a4) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ClearBroadcasts provides a mock function with given fields: ctx -func (_m *Helper) ClearBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) { - ret := _m.Called(ctx) - - var r0 []*storage.Broadcast - if rf, ok := ret.Get(0).(func(context.Context) []*storage.Broadcast); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*storage.Broadcast) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CoinBalance provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Helper) CoinBalance(_a0 context.Context, _a1 *types.AccountIdentifier, _a2 *types.Currency) (*big.Int, *types.CoinIdentifier, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(context.Context, *types.AccountIdentifier, *types.Currency) *big.Int); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 *types.CoinIdentifier - if rf, ok := ret.Get(1).(func(context.Context, *types.AccountIdentifier, *types.Currency) *types.CoinIdentifier); ok { - r1 = rf(_a0, _a1, _a2) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*types.CoinIdentifier) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, *types.AccountIdentifier, *types.Currency) error); ok { - r2 = rf(_a0, _a1, _a2) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Combine provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Helper) Combine(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 string, _a3 []*types.Signature) (string, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, string, []*types.Signature) string); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, string, []*types.Signature) error); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Derive provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Helper) Derive(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 *types.PublicKey, _a3 map[string]interface{}) (string, map[string]interface{}, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, *types.PublicKey, map[string]interface{}) string); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - r0 = ret.Get(0).(string) - } - - var r1 map[string]interface{} - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, *types.PublicKey, map[string]interface{}) map[string]interface{}); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(map[string]interface{}) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, *types.NetworkIdentifier, *types.PublicKey, map[string]interface{}) error); ok { - r2 = rf(_a0, _a1, _a2, _a3) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Hash provides a mock function with given fields: _a0, _a1, _a2 
-func (_m *Helper) Hash(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 string) (*types.TransactionIdentifier, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 *types.TransactionIdentifier - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, string) *types.TransactionIdentifier); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.TransactionIdentifier) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, string) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HeadBlockExists provides a mock function with given fields: _a0 -func (_m *Helper) HeadBlockExists(_a0 context.Context) bool { - ret := _m.Called(_a0) - - var r0 bool - if rf, ok := ret.Get(0).(func(context.Context) bool); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// LockedAddresses provides a mock function with given fields: _a0 -func (_m *Helper) LockedAddresses(_a0 context.Context) ([]string, error) { - ret := _m.Called(_a0) - - var r0 []string - if rf, ok := ret.Get(0).(func(context.Context) []string); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Metadata provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Helper) Metadata(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 map[string]interface{}) (map[string]interface{}, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 map[string]interface{} - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, map[string]interface{}) map[string]interface{}); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, map[string]interface{}) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Parse provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Helper) Parse(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 bool, _a3 string) ([]*types.Operation, []string, map[string]interface{}, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 []*types.Operation - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, bool, string) []*types.Operation); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.Operation) - } - } - - var r1 []string - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, bool, string) []string); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([]string) - } - } - - var r2 map[string]interface{} - if rf, ok := ret.Get(2).(func(context.Context, *types.NetworkIdentifier, bool, string) map[string]interface{}); ok { - r2 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(2) != nil { - r2 = ret.Get(2).(map[string]interface{}) - } - } - - var r3 error - if rf, ok := ret.Get(3).(func(context.Context, *types.NetworkIdentifier, bool, string) error); ok { - r3 = rf(_a0, _a1, _a2, _a3) - } else { - r3 = ret.Error(3) - } - - return r0, r1, r2, r3 -} - -// Payloads provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Helper) Payloads(_a0 context.Context, _a1 
*types.NetworkIdentifier, _a2 []*types.Operation, _a3 map[string]interface{}) (string, []*types.SigningPayload, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, []*types.Operation, map[string]interface{}) string); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - r0 = ret.Get(0).(string) - } - - var r1 []*types.SigningPayload - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, []*types.Operation, map[string]interface{}) []*types.SigningPayload); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([]*types.SigningPayload) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, *types.NetworkIdentifier, []*types.Operation, map[string]interface{}) error); ok { - r2 = rf(_a0, _a1, _a2, _a3) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Preprocess provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Helper) Preprocess(_a0 context.Context, _a1 *types.NetworkIdentifier, _a2 []*types.Operation, _a3 map[string]interface{}) (map[string]interface{}, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 map[string]interface{} - if rf, ok := ret.Get(0).(func(context.Context, *types.NetworkIdentifier, []*types.Operation, map[string]interface{}) map[string]interface{}); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *types.NetworkIdentifier, []*types.Operation, map[string]interface{}) error); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RandomAmount provides a mock function with given fields: _a0, _a1 -func (_m *Helper) RandomAmount(_a0 *big.Int, _a1 *big.Int) *big.Int { - ret := _m.Called(_a0, _a1) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - return r0 -} - -// Sign provides a mock function with given fields: _a0, _a1 -func (_m *Helper) Sign(_a0 context.Context, _a1 []*types.SigningPayload) ([]*types.Signature, error) { - ret := _m.Called(_a0, _a1) - - var r0 []*types.Signature - if rf, ok := ret.Get(0).(func(context.Context, []*types.SigningPayload) []*types.Signature); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.Signature) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []*types.SigningPayload) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StoreKey provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Helper) StoreKey(_a0 context.Context, _a1 string, _a2 *keys.KeyPair) error { - ret := _m.Called(_a0, _a1, _a2) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, *keys.KeyPair) error); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go new file mode 100644 index 00000000..da824fa0 --- /dev/null +++ b/pkg/errors/errors.go @@ -0,0 +1,76 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "errors" +) + +var ( + // Configuration errors + ErrMultipleDSLFiles = errors.New("multiple DSL files are found") + ErrNoDSLFile = errors.New("no DSL file") + ErrWrongWorkflowConcurrency = errors.New("reserved workflow concurrency doesn't match") + ErrNegativeStartIndex = errors.New("start index is negative") + ErrNegativeEndIndex = errors.New("end index is negative") + ErrNegativeReconciliationCoverageIndex = errors.New("reconciliation coverage index is negative") + ErrNegativeReconciliationCoverageAccountCount = errors.New("reconciliation coverage account is negative") + ErrNegativeSeenBlockWorkers = errors.New("the number of seen block workers is negative") + ErrNegativeSerialBlockWorkers = errors.New("the number of serial block workers is negative") + ErrReconciliationOutOfRange = errors.New("reconciliation is out of range, it must be in the range [0, 1]") + ErrTableSizeIsOutOfRange = errors.New("table size is out of range, it must be in the range [1, 100]") + ErrValueLogFileSizeIsOutOfRange = errors.New("value log file size is out of range, it must be in the range [128, 2048]") + ErrBalanceTrackingIsDisabledForReconciliation = errors.New("balance tracking cannot be disabled for reconciliation") + ErrBalanceTrackingIsDisabledForReconciliationCoverageEndCondition = errors.New("balance tracking cannot be disabled for reconciliation coverage end condition") + ErrReconciliationErrorIsIgnoredForReconciliationCoverageEndCondition = errors.New("reconciliation error cannot be ignored for reconciliation coverage end condition") + ErrReconciliationIsDisabledForReconciliationCoverageEndCondition = errors.New("reconciliation cannot be disabled for reconciliation coverage end condition") + ErrConstructionConfigMissing = errors.New("construction configuration is missing") + + // Data check errors + ErrDataCheckHalt = errors.New("data check halted") + ErrReconciliationFailure = errors.New("reconciliation failure") + ErrInitDataTester = errors.New("unexpected error occurred while trying to initialize data tester") + ErrReconcilerDrainHalt = errors.New("reconciler queue drain halted") + ErrMissingOps = errors.New("search for block with missing ops halted") + ErrUnableToFindMissingOps = errors.New("unable to find missing ops") + + // Spec check errors + ErrErrorEmptyMessage = errors.New("error object can't have empty message") + ErrErrorNegativeCode = errors.New("error object can't have negative code") + ErrAccountNullPointer = errors.New("account is nil") + ErrBlockNotIdempotent = errors.New("multiple calls with the same hash don't return the same block") + ErrBlockTip = errors.New("unspecified block_identifier doesn't give the tip block") + + // Construction check errors + ErrConstructionCheckHalt = errors.New("construction check halted") + ErrBalanceExemptionsWithInitialBalanceFetchDisabled = errors.New("found balance exemptions but initial balance fetch disabled") + + // Command errors + ErrBlockNotFound = errors.New("block not found") + ErrNoAvailableNetwork = errors.New("no networks available") + ErrNetworkOptionsAllowlistIsNil = errors.New("network 
options allowlist is nil") + ErrAsserterConfigurationIsNil = errors.New("asserter configuration is nil") + ErrTimestampStartIndexMismatch = errors.New("timestamp start index mismatch") + ErrOperationTypeLengthMismatch = errors.New("operation type length mismatch") + ErrOperationTypeMismatch = errors.New("operation type mismatch") + ErrOperationStatusLengthMismatch = errors.New("operation status length mismatch") + ErrOperationStatusMismatch = errors.New("operation status mismatch") + ErrErrorLengthMismatch = errors.New("error length mismatch") + ErrErrorMismatch = errors.New("error mismatch") + ErrAsserterConfigError = errors.New("asserter configuration validation failed") + ErrNoHeadBlock = errors.New("no head block") + ErrBlockBenchmarkTimeout = errors.New("/block endpoint benchmarking timed out") + ErrAccountBalanceBenchmarkTimeout = errors.New("/account/balance endpoint benchmarking timed out") +) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 053f536f..f18d34fe 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -18,14 +18,18 @@ import ( "context" "fmt" "log" - "math/big" "os" "path" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/coinbase/rosetta-cli/pkg/results" "github.com/coinbase/rosetta-sdk-go/parser" "github.com/coinbase/rosetta-sdk-go/reconciler" "github.com/coinbase/rosetta-sdk-go/statefulsyncer" - "github.com/coinbase/rosetta-sdk-go/storage" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/fatih/color" @@ -33,7 +37,15 @@ import ( var _ statefulsyncer.Logger = (*Logger)(nil) +type CheckType string + +type contextKey int + const ( + RequestUUID contextKey = iota + + MetadataMapKey contextKey = iota + // blockStreamFile contains the stream of processed // blocks and whether they were added or removed. blockStreamFile = "blocks.txt" @@ -58,6 +70,11 @@ const ( // removeEvent is printed in a stream // when an event is orphaned. removeEvent = "Remove" + + // Construction identifies construction check + Construction CheckType = "construction" + // Data identifies data check + Data CheckType = "data" ) // Logger contains all logic to record validator output @@ -68,148 +85,155 @@ type Logger struct { logTransactions bool logBalanceChanges bool logReconciliation bool + logMetadataMap map[string]string - lastStatsMessage string + lastStatsMessage string + lastProgressMessage string - CounterStorage *storage.CounterStorage - BalanceStorage *storage.BalanceStorage + zapLogger *zap.Logger } // NewLogger constructs a new Logger. func NewLogger( - counterStorage *storage.CounterStorage, - balanceStorage *storage.BalanceStorage, logDir string, logBlocks bool, logTransactions bool, logBalanceChanges bool, logReconciliation bool, -) *Logger { + checkType CheckType, + network *types.NetworkIdentifier, + logMetadataMap map[string]string, + fields ...zap.Field, +) (*Logger, error) { + zapLogger, err := buildZapLogger(checkType, network, fields...) + if err != nil { + return nil, fmt.Errorf("failed to build zap logger: %w", err) + } return &Logger{ - CounterStorage: counterStorage, - BalanceStorage: balanceStorage, logDir: logDir, logBlocks: logBlocks, logTransactions: logTransactions, logBalanceChanges: logBalanceChanges, logReconciliation: logReconciliation, - } + logMetadataMap: logMetadataMap, + zapLogger: zapLogger, + }, nil } -// LogDataStats logs all data values in CounterStorage. 
-func (l *Logger) LogDataStats(ctx context.Context) error { - blocks, err := l.CounterStorage.Get(ctx, storage.BlockCounter) - if err != nil { - return fmt.Errorf("%w cannot get block counter", err) - } +func buildZapLogger( + checkType CheckType, + network *types.NetworkIdentifier, + fields ...zap.Field, +) (*zap.Logger, error) { + config := zap.NewProductionConfig() + config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder - if blocks.Sign() == 0 { // wait for at least 1 block to be processed - return nil + baseSlice := []zap.Field{ + zap.String("blockchain", network.Blockchain), + zap.String("network", network.Network), + zap.String("check_type", string(checkType)), } + mergedSlice := append(baseSlice, fields...) - orphans, err := l.CounterStorage.Get(ctx, storage.OrphanCounter) - if err != nil { - return fmt.Errorf("%w cannot get orphan counter", err) - } - - txs, err := l.CounterStorage.Get(ctx, storage.TransactionCounter) - if err != nil { - return fmt.Errorf("%w cannot get transaction counter", err) - } - - ops, err := l.CounterStorage.Get(ctx, storage.OperationCounter) - if err != nil { - return fmt.Errorf("%w cannot get operations counter", err) - } - - activeReconciliations, err := l.CounterStorage.Get(ctx, storage.ActiveReconciliationCounter) - if err != nil { - return fmt.Errorf("%w cannot get active reconciliations counter", err) - } + zapLogger, err := config.Build( + zap.Fields(mergedSlice...), + ) + return zapLogger, err +} - inactiveReconciliations, err := l.CounterStorage.Get(ctx, storage.InactiveReconciliationCounter) - if err != nil { - return fmt.Errorf("%w cannot get inactive reconciliations counter", err) +// LogDataStatus logs results.CheckDataStatus. +func (l *Logger) LogDataStatus(ctx context.Context, status *results.CheckDataStatus) { + if status.Stats.Blocks == 0 { // wait for at least 1 block to be processed + return } statsMessage := fmt.Sprintf( - "[STATS] Blocks: %s (Orphaned: %s) Transactions: %s Operations: %s", - blocks.String(), - orphans.String(), - txs.String(), - ops.String(), + "[STATS] Blocks: %d (Orphaned: %d) Transactions: %d Operations: %d Accounts: %d Reconciliations: %d (Inactive: %d, Exempt: %d, Skipped: %d, Coverage: %f%%)", // nolint:lll + status.Stats.Blocks, + status.Stats.Orphans, + status.Stats.Transactions, + status.Stats.Operations, + status.Stats.Accounts, + status.Stats.ActiveReconciliations+status.Stats.InactiveReconciliations, + status.Stats.InactiveReconciliations, + status.Stats.ExemptReconciliations, + status.Stats.SkippedReconciliations, + status.Stats.ReconciliationCoverage*utils.OneHundred, ) - if l.BalanceStorage != nil { - coverage, err := l.BalanceStorage.ReconciliationCoverage(ctx, 0) - if err != nil { - return fmt.Errorf("%w: cannot get reconcile coverage", err) - } - - statsMessage = fmt.Sprintf( - "%s Reconciliations: %s (Inactive: %s, Coverage: %f%%)", - statsMessage, - new(big.Int).Add(activeReconciliations, inactiveReconciliations).String(), - inactiveReconciliations.String(), - coverage*utils.OneHundred, - ) - } + statsMessage = AddMetadata(statsMessage, l.logMetadataMap) // Don't print out the same stats message twice. if statsMessage == l.lastStatsMessage { - return nil + return } l.lastStatsMessage = statsMessage color.Cyan(statsMessage) - return nil -} - -// LogConstructionStats logs all construction values in CounterStorage. 
-func (l *Logger) LogConstructionStats(ctx context.Context, inflightTransactions int) error { - transactionsCreated, err := l.CounterStorage.Get(ctx, storage.TransactionsCreatedCounter) - if err != nil { - return fmt.Errorf("%w cannot get transactions created counter", err) + // If Progress is nil, it means we're already done. + if status.Progress == nil { + return } - transactionsConfirmed, err := l.CounterStorage.Get(ctx, storage.TransactionsConfirmedCounter) - if err != nil { - return fmt.Errorf("%w cannot get transactions confirmed counter", err) - } + progressMessage := fmt.Sprintf( + "[PROGRESS] Blocks Synced: %d/%d (Completed: %f%%, Rate: %f/second) Time Remaining: %s Reconciler Queue: %d (Last Index Checked: %d)", // nolint:lll + status.Progress.Blocks, + status.Progress.Tip, + status.Progress.Completed, + status.Progress.Rate, + status.Progress.TimeRemaining, + status.Progress.ReconcilerQueueSize, + status.Progress.ReconcilerLastIndex, + ) - staleBroadcasts, err := l.CounterStorage.Get(ctx, storage.StaleBroadcastsCounter) - if err != nil { - return fmt.Errorf("%w cannot get stale broadcasts counter", err) - } + progressMessage = AddMetadata(progressMessage, l.logMetadataMap) - failedBroadcasts, err := l.CounterStorage.Get(ctx, storage.FailedBroadcastsCounter) - if err != nil { - return fmt.Errorf("%w cannot get failed broadcasts counter", err) + // Don't print out the same progress message twice. + if progressMessage == l.lastProgressMessage { + return } - addressesCreated, err := l.CounterStorage.Get(ctx, storage.AddressesCreatedCounter) - if err != nil { - return fmt.Errorf("%w cannot get addresses created counter", err) - } + l.lastProgressMessage = progressMessage + color.Cyan(progressMessage) +} +// LogConstructionStatus logs results.CheckConstructionStatus. +func (l *Logger) LogConstructionStatus( + ctx context.Context, + status *results.CheckConstructionStatus, +) { statsMessage := fmt.Sprintf( "[STATS] Transactions Confirmed: %d (Created: %d, In Progress: %d, Stale: %d, Failed: %d) Addresses Created: %d", - transactionsConfirmed, - transactionsCreated, - inflightTransactions, - staleBroadcasts, - failedBroadcasts, - addressesCreated, + status.Stats.TransactionsConfirmed, + status.Stats.TransactionsCreated, + status.Progress.Broadcasting, + status.Stats.StaleBroadcasts, + status.Stats.FailedBroadcasts, + status.Stats.AddressesCreated, ) if statsMessage == l.lastStatsMessage { - return nil + return } + statsMessage = AddMetadata(statsMessage, l.logMetadataMap) + l.lastStatsMessage = statsMessage color.Cyan(statsMessage) +} - return nil +// LogMemoryStats logs memory usage information. 
+func LogMemoryStats(ctx context.Context) { + memUsage := utils.MonitorMemoryUsage(ctx, -1) + statsMessage := fmt.Sprintf( + "[MEMORY] Heap: %fMB Stack: %fMB System: %fMB GCs: %d", + memUsage.Heap, + memUsage.Stack, + memUsage.System, + memUsage.GarbageCollections, + ) + statsMessage = AddMetadataMapFromContext(ctx, statsMessage) + color.Cyan(statsMessage) } // AddBlockStream writes the next processed block to the end of the @@ -228,21 +252,25 @@ func (l *Logger) AddBlockStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, blockStreamFile), err) + color.Red(err.Error()) return err } defer closeFile(f) - _, err = f.WriteString(fmt.Sprintf( - "%s Block %d:%s with Parent Block %d:%s\n", + blockString := fmt.Sprintf( + "%s Block %d:%s with Parent Block %d:%s", addEvent, block.BlockIdentifier.Index, block.BlockIdentifier.Hash, block.ParentBlockIdentifier.Index, block.ParentBlockIdentifier.Hash, - )) - if err != nil { - return err + ) + blockString = AddMetadata(blockString, l.logMetadataMap) + color.Cyan(blockString) + if _, err := f.WriteString(blockString); err != nil { + return fmt.Errorf("failed to write block string %s: %w", blockString, err) } return l.TransactionStream(ctx, block) @@ -264,18 +292,25 @@ func (l *Logger) RemoveBlockStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, blockStreamFile), err) + color.Red(err.Error()) return err } defer closeFile(f) - _, err = f.WriteString(fmt.Sprintf( - "%s Block %d:%s\n", + blockString := fmt.Sprintf( + "%s Block %d:%s", removeEvent, block.Index, block.Hash, - )) + ) + blockString = AddMetadata(blockString, l.logMetadataMap) + color.Cyan(blockString) + _, err = f.WriteString(blockString) if err != nil { + err = fmt.Errorf("failed to write block string %s: %w", blockString, err) + color.Red(err.Error()) return err } @@ -298,19 +333,26 @@ func (l *Logger) TransactionStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, transactionStreamFile), err) + color.Red(err.Error()) return err } defer closeFile(f) for _, tx := range block.Transactions { - _, err = f.WriteString(fmt.Sprintf( - "Transaction %s at Block %d:%s\n", + transactionString := fmt.Sprintf( + "Transaction %s at Block %d:%s", tx.TransactionIdentifier.Hash, block.BlockIdentifier.Index, block.BlockIdentifier.Hash, - )) + ) + transactionString = AddMetadata(transactionString, l.logMetadataMap) + color.Cyan(transactionString) + _, err = f.WriteString(transactionString) if err != nil { + err = fmt.Errorf("failed to write transaction string %s: %w", transactionString, err) + color.Red(err.Error()) return err } @@ -331,17 +373,22 @@ func (l *Logger) TransactionStream( networkIndex = *op.OperationIdentifier.NetworkIndex } - _, err = f.WriteString(fmt.Sprintf( - "TxOp %d(%d) %s %s %s %s %s\n", + transactionOperationString := fmt.Sprintf( + "TxOp %d(%d) %s %s %s %s %s", op.OperationIdentifier.Index, networkIndex, op.Type, participant, amount, symbol, - op.Status, - )) + *op.Status, + ) + transactionOperationString = AddMetadata(transactionOperationString, l.logMetadataMap) + color.Cyan(transactionOperationString) + _, err = f.WriteString(transactionOperationString) if err != nil { + err = fmt.Errorf("failed to write transaction operation string %s: %w", transactionOperationString, err) + color.Red(err.Error()) return err } } @@ -366,6 +413,8 
@@ func (l *Logger) BalanceStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, balanceStreamFile), err) + color.Red(err.Error()) return err } @@ -380,8 +429,11 @@ func (l *Logger) BalanceStream( balanceChange.Block.Index, balanceChange.Block.Hash, ) - + balanceLog = AddMetadata(balanceLog, l.logMetadataMap) + color.Cyan(balanceLog) if _, err := f.WriteString(fmt.Sprintf("%s\n", balanceLog)); err != nil { + err = fmt.Errorf("failed to write balance log %s: %w", balanceLog, err) + color.Red(err.Error()) return err } } @@ -408,28 +460,38 @@ func (l *Logger) ReconcileSuccessStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, reconcileSuccessStreamFile), err) + color.Red(err.Error()) return err } defer closeFile(f) - log.Printf( - "%s Reconciled %s at %d\n", + reconciledLog := fmt.Sprintf( + "%s Reconciled %s at %d", reconciliationType, types.AccountString(account), block.Index, ) + reconciledLog = AddMetadata(reconciledLog, l.logMetadataMap) + color.Cyan(reconciledLog) - _, err = f.WriteString(fmt.Sprintf( - "Type:%s Account: %s Currency: %s Balance: %s Block: %d:%s\n", + reconciliationSuccessString := fmt.Sprintf( + "Type:%s Account: %s Currency: %s Balance: %s Block: %d:%s", reconciliationType, types.AccountString(account), types.CurrencyString(currency), balance, block.Index, block.Hash, - )) + ) + reconciliationSuccessString = AddMetadata(reconciliationSuccessString, l.logMetadataMap) + color.Cyan(reconciliationSuccessString) + + _, err = f.WriteString(reconciliationSuccessString) if err != nil { + err = fmt.Errorf("failed to write reconciliation success string %s: %w", reconciliationSuccessString, err) + color.Red(err.Error()) return err } @@ -444,27 +506,27 @@ func (l *Logger) ReconcileFailureStream( account *types.AccountIdentifier, currency *types.Currency, computedBalance string, - nodeBalance string, + liveBalance string, block *types.BlockIdentifier, ) error { // Always print out reconciliation failures if reconciliationType == reconciler.InactiveReconciliation { color.Yellow( - "Missing balance-changing operation detected for %s computed balance: %s%s node balance: %s%s", + "Missing balance-changing operation detected for %s computed: %s%s live: %s%s", types.AccountString(account), computedBalance, currency.Symbol, - nodeBalance, + liveBalance, currency.Symbol, ) } else { color.Yellow( - "Reconciliation failed for %s at %d computed: %s%s node: %s%s", + "Reconciliation failed for %s at %d computed: %s%s live: %s%s", types.AccountString(account), block.Index, computedBalance, currency.Symbol, - nodeBalance, + liveBalance, currency.Symbol, ) } @@ -479,33 +541,83 @@ func (l *Logger) ReconcileFailureStream( os.FileMode(utils.DefaultFilePermissions), ) if err != nil { + err = fmt.Errorf("failed to open file %s: %w", path.Join(l.logDir, reconcileFailureStreamFile), err) + color.Red(err.Error()) return err } defer closeFile(f) - _, err = f.WriteString(fmt.Sprintf( - "Type:%s Account: %s Currency: %s Block: %s:%d computed: %s node: %s\n", + reconciliationFailureString := fmt.Sprintf( + "Type:%s Account: %s Currency: %s Block: %s:%d computed: %s live: %s", reconciliationType, types.AccountString(account), types.CurrencyString(currency), block.Hash, block.Index, computedBalance, - nodeBalance, - )) + liveBalance, + ) + reconciliationFailureString = AddMetadata(reconciliationFailureString, l.logMetadataMap) + 
color.Cyan(reconciliationFailureString) + _, err = f.WriteString(reconciliationFailureString) if err != nil { + err = fmt.Errorf("failed to write reconciliation failure string %s: %w", reconciliationFailureString, err) + color.Red(err.Error()) return err } return nil } +// Info logs at Info level +func (l *Logger) Info(msg string, fields ...zap.Field) { + l.zapLogger.Info(msg, fields...) +} + +// Debug logs at Debug level +func (l *Logger) Debug(msg string, fields ...zap.Field) { + l.zapLogger.Debug(msg, fields...) +} + +// Error logs at Error level +func (l *Logger) Error(msg string, fields ...zap.Field) { + l.zapLogger.Error(msg, fields...) +} + +// Warn logs at Warn level +func (l *Logger) Warn(msg string, fields ...zap.Field) { + l.zapLogger.Warn(msg, fields...) +} + +// Panic logs at Panic level +func (l *Logger) Panic(msg string, fields ...zap.Field) { + l.zapLogger.Panic(msg, fields...) +} + +// Fatal logs at Fatal level +func (l *Logger) Fatal(msg string, fields ...zap.Field) { + l.zapLogger.Fatal(msg, fields...) +} + +// GetMetadata returns the logger's metadata as a string +func (l *Logger) GetMetadata() string { + metadataMap := l.logMetadataMap + metadata := ConvertMapToString(metadataMap) + return metadata +} + +// GetMetadataMap returns the logger's metadata map +func (l *Logger) GetMetadataMap() map[string]string { + metadataMap := l.logMetadataMap + return metadataMap +} + // Helper function to close log file func closeFile(f *os.File) { err := f.Close() if err != nil { - log.Fatal(fmt.Errorf("%w: unable to close file", err)) + log.Fatal(fmt.Errorf("unable to close file: %w", err)) } } @@ -519,3 +631,85 @@ func LogTransactionCreated( transactionIdentifier.Hash, ) } + +// AddMetadataMapFromContext appends the metadata k-v pairs stored in the context to the message +func AddMetadataMapFromContext(ctx context.Context, msg string) string { + metadataMap := metadataMapFromContext(ctx) + if len(metadataMap) != 0 { + for k, v := range metadataMap { + if len(k) != 0 && len(v) != 0 { + msg = fmt.Sprintf("%s, %s: %s", msg, k, v) + } + } + } + return msg +} + +// AddMetadataMapToContext adds the metadata map to the context and returns the new context +func AddMetadataMapToContext(ctx context.Context, metadataMap map[string]string) context.Context { + return context.WithValue(ctx, MetadataMapKey, metadataMap) +} + +// AddMetadata appends metadata k-v pairs to the message +func AddMetadata(msg string, metadataMap map[string]string) string { + if len(metadataMap) != 0 { + for k, v := range metadataMap { + if len(k) != 0 && len(v) != 0 { + msg = fmt.Sprintf("%s, %s: %s", msg, k, v) + } + } + } + return msg +} + +// metadataMapFromContext is used to extract metadataMap from a context +func metadataMapFromContext(ctx context.Context) map[string]string { + var metadataMap map[string]string + switch v := ctx.Value(MetadataMapKey).(type) { + case map[string]string: + metadataMap = v + default: + metadataMap = nil + } + return metadataMap +} + +// ConvertStringToMap converts a string to a map by splitting on , and : +func ConvertStringToMap(metadata string) map[string]string { + metadataMap := make(map[string]string) + if len(metadata) == 0 { + return metadataMap + } + pairs := strings.Split(metadata, ",") + for _, pair := range pairs { + kv := strings.Split(pair, ":") + if len(kv) != 2 { + log := fmt.Sprintf("the pair %s from %s could not be converted to a key-value pair", pair, metadata) + color.Yellow(log) + } else { + metadataMap[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1]) + } + } + return metadataMap +} + +// AddRequestUUIDToMap adds the requestUUID to the metadataMap +func AddRequestUUIDToMap(metadataMap map[string]string, requestUUID string)
map[string]string { + if len(requestUUID) > 0 { + metadataMap["RequestID"] = requestUUID + } + return metadataMap +} + +// convert metadataMap to a string, aims to support fmt.Errorf +func ConvertMapToString(metadataMap map[string]string) string { + metadata := "" + if len(metadataMap) != 0 { + for k, v := range metadataMap { + if len(k) != 0 && len(v) != 0 { + metadata = fmt.Sprintf("%s, %s: %s", metadata, k, v) + } + } + } + return metadata +} diff --git a/pkg/processor/balance_storage_handler.go b/pkg/processor/balance_storage_handler.go index b7623b4d..d94f51d0 100644 --- a/pkg/processor/balance_storage_handler.go +++ b/pkg/processor/balance_storage_handler.go @@ -16,38 +16,44 @@ package processor import ( "context" + "fmt" + "math/big" "github.com/coinbase/rosetta-cli/pkg/logger" "github.com/coinbase/rosetta-sdk-go/parser" "github.com/coinbase/rosetta-sdk-go/reconciler" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) -var _ storage.BalanceStorageHandler = (*BalanceStorageHandler)(nil) +var _ modules.BalanceStorageHandler = (*BalanceStorageHandler)(nil) // BalanceStorageHandler is invoked whenever a block is added // or removed from block storage so that balance changes // can be sent to other functions (ex: reconciler). type BalanceStorageHandler struct { - logger *logger.Logger - reconciler *reconciler.Reconciler + logger *logger.Logger + reconciler *reconciler.Reconciler + counterStorage *modules.CounterStorage reconcile bool - interestingAccount *reconciler.AccountCurrency + interestingAccount *types.AccountCurrency } // NewBalanceStorageHandler returns a new *BalanceStorageHandler. func NewBalanceStorageHandler( logger *logger.Logger, reconciler *reconciler.Reconciler, + counterStorage *modules.CounterStorage, reconcile bool, - interestingAccount *reconciler.AccountCurrency, + interestingAccount *types.AccountCurrency, ) *BalanceStorageHandler { return &BalanceStorageHandler{ logger: logger, reconciler: reconciler, + counterStorage: counterStorage, reconcile: reconcile, interestingAccount: interestingAccount, } @@ -73,7 +79,7 @@ func (h *BalanceStorageHandler) BlockAdded( if h.interestingAccount != nil { var interestingChange *parser.BalanceChange for _, change := range changes { - if types.Hash(&reconciler.AccountCurrency{ + if types.Hash(&types.AccountCurrency{ Account: change.Account, Currency: change.Currency, }) == types.Hash(h.interestingAccount) { @@ -106,3 +112,39 @@ func (h *BalanceStorageHandler) BlockRemoved( // not removed return nil } + +// AccountsReconciled updates the total accounts reconciled by count. +func (h *BalanceStorageHandler) AccountsReconciled( + ctx context.Context, + dbTx database.Transaction, + count int, +) error { + _, err := h.counterStorage.UpdateTransactional( + ctx, + dbTx, + modules.ReconciledAccounts, + big.NewInt(int64(count)), + ) + if err != nil { + return fmt.Errorf("failed to update the total accounts reconciled by count: %w", err) + } + return nil +} + +// AccountsSeen updates the total accounts seen by count. 
+func (h *BalanceStorageHandler) AccountsSeen( + ctx context.Context, + dbTx database.Transaction, + count int, +) error { + _, err := h.counterStorage.UpdateTransactional( + ctx, + dbTx, + modules.SeenAccounts, + big.NewInt(int64(count)), + ) + if err != nil { + return fmt.Errorf("failed to update the total accounts seen by count: %w", err) + } + return nil +} diff --git a/pkg/processor/balance_storage_helper.go b/pkg/processor/balance_storage_helper.go index b55cbccd..704ec6a9 100644 --- a/pkg/processor/balance_storage_helper.go +++ b/pkg/processor/balance_storage_helper.go @@ -17,27 +17,32 @@ package processor import ( "context" "fmt" + "math/big" "github.com/coinbase/rosetta-sdk-go/asserter" "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/parser" - "github.com/coinbase/rosetta-sdk-go/reconciler" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" + "github.com/coinbase/rosetta-sdk-go/syncer" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" ) -var _ storage.BalanceStorageHelper = (*BalanceStorageHelper)(nil) +var _ modules.BalanceStorageHelper = (*BalanceStorageHelper)(nil) // BalanceStorageHelper implements the storage.Helper // interface. type BalanceStorageHelper struct { - network *types.NetworkIdentifier - fetcher *fetcher.Fetcher + network *types.NetworkIdentifier + fetcher *fetcher.Fetcher + counterStorage *modules.CounterStorage // Configuration settings lookupBalanceByBlock bool exemptAccounts map[string]struct{} + balanceExemptions []*types.BalanceExemption + initialFetchDisabled bool // Interesting-only Parsing interestingOnly bool @@ -48,25 +53,37 @@ type BalanceStorageHelper struct { func NewBalanceStorageHelper( network *types.NetworkIdentifier, fetcher *fetcher.Fetcher, + counterStorage *modules.CounterStorage, lookupBalanceByBlock bool, - exemptAccounts []*reconciler.AccountCurrency, + exemptAccounts []*types.AccountCurrency, interestingOnly bool, + balanceExemptions []*types.BalanceExemption, + initialFetchDisabled bool, ) *BalanceStorageHelper { exemptMap := map[string]struct{}{} // Pre-process exemptAccounts on initialization // to provide fast lookup while syncing. 
for _, account := range exemptAccounts { - exemptMap[types.Hash(account)] = struct{}{} + // if users do not specify a Currency, we add only the address; + // this way, all currencies under this address will be skipped + if account.Currency == nil { + exemptMap[account.Account.Address] = struct{}{} + } else { + exemptMap[types.Hash(account)] = struct{}{} + } } return &BalanceStorageHelper{ network: network, fetcher: fetcher, + counterStorage: counterStorage, lookupBalanceByBlock: lookupBalanceByBlock, exemptAccounts: exemptMap, interestingAddresses: map[string]struct{}{}, interestingOnly: interestingOnly, + balanceExemptions: balanceExemptions, + initialFetchDisabled: initialFetchDisabled, } } @@ -78,9 +95,9 @@ func (h *BalanceStorageHelper) AccountBalance( ctx context.Context, account *types.AccountIdentifier, currency *types.Currency, - block *types.BlockIdentifier, + lookupBlock *types.BlockIdentifier, ) (*types.Amount, error) { - if !h.lookupBalanceByBlock { + if !h.lookupBalanceByBlock || h.initialFetchDisabled { return &types.Amount{ Value: "0", Currency: currency, @@ -90,16 +107,22 @@ func (h *BalanceStorageHelper) AccountBalance( // In the case that we are syncing from arbitrary height, // we may need to recover the balance of an account to // perform validations. - amount, _, _, err := utils.CurrencyBalance( + amount, block, err := utils.CurrencyBalance( ctx, h.network, h.fetcher, account, currency, - block, + lookupBlock.Index, ) if err != nil { - return nil, fmt.Errorf("%w: unable to get currency balance", err) + return nil, fmt.Errorf("unable to get balance of currency %s for account %s: %w", types.PrintStruct(currency), types.PrintStruct(account), err) + } + + // If the returned balance block does not match the intended + // block, a re-org could've occurred. + if types.Hash(lookupBlock) != types.Hash(block) { + return nil, syncer.ErrOrphanHead } return &types.Amount{ @@ -127,8 +150,14 @@ func (h *BalanceStorageHelper) ExemptFunc() parser.ExemptOperation { return true } } + // if exemptAccounts contains the Account address, all the + // currencies under this Account address need to be skipped + _, existsAddress := h.exemptAccounts[op.Account.Address] + if existsAddress { + return existsAddress + } - thisAcct := types.Hash(&reconciler.AccountCurrency{ + thisAcct := types.Hash(&types.AccountCurrency{ Account: op.Account, Currency: op.Amount.Currency, }) @@ -137,3 +166,24 @@ func (h *BalanceStorageHelper) ExemptFunc() parser.ExemptOperation { return exists } } + +// BalanceExemptions returns a list of *types.BalanceExemption. +func (h *BalanceStorageHelper) BalanceExemptions() []*types.BalanceExemption { + return h.balanceExemptions +} + +// AccountsReconciled returns the total accounts reconciled by count. +func (h *BalanceStorageHelper) AccountsReconciled( + ctx context.Context, + dbTx database.Transaction, +) (*big.Int, error) { + return h.counterStorage.GetTransactional(ctx, dbTx, modules.ReconciledAccounts) +} + +// AccountsSeen returns the total accounts seen by count.
+func (h *BalanceStorageHelper) AccountsSeen( + ctx context.Context, + dbTx database.Transaction, +) (*big.Int, error) { + return h.counterStorage.GetTransactional(ctx, dbTx, modules.SeenAccounts) +} diff --git a/pkg/processor/balance_storage_helper_test.go b/pkg/processor/balance_storage_helper_test.go index f1731537..2e7cc5ee 100644 --- a/pkg/processor/balance_storage_helper_test.go +++ b/pkg/processor/balance_storage_helper_test.go @@ -17,13 +17,12 @@ package processor import ( "testing" - "github.com/coinbase/rosetta-sdk-go/reconciler" "github.com/coinbase/rosetta-sdk-go/types" "github.com/stretchr/testify/assert" ) var ( - opAmountCurrency = &reconciler.AccountCurrency{ + opAmountCurrency = &types.AccountCurrency{ Account: &types.AccountIdentifier{ Address: "hello", }, @@ -36,12 +35,12 @@ var ( func TestExemptFuncExemptAccounts(t *testing.T) { var tests = map[string]struct { - exemptAccounts []*reconciler.AccountCurrency + exemptAccounts []*types.AccountCurrency exempt bool }{ "no exempt accounts": {}, "account not exempt": { - exemptAccounts: []*reconciler.AccountCurrency{ + exemptAccounts: []*types.AccountCurrency{ { Account: &types.AccountIdentifier{ Address: "addr1", @@ -57,7 +56,7 @@ func TestExemptFuncExemptAccounts(t *testing.T) { }, }, "account is exempt": { - exemptAccounts: []*reconciler.AccountCurrency{ + exemptAccounts: []*types.AccountCurrency{ { Account: &types.AccountIdentifier{ Address: "addr1", @@ -79,11 +78,14 @@ func TestExemptFuncExemptAccounts(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { helper := NewBalanceStorageHelper( + nil, nil, nil, false, test.exemptAccounts, false, + nil, + false, ) result := helper.ExemptFunc()(&types.Operation{ @@ -120,11 +122,14 @@ func TestExemptFuncInterestingParsing(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { helper := NewBalanceStorageHelper( + nil, nil, nil, false, nil, true, + nil, + false, ) for _, addr := range test.interestingAddresses { diff --git a/pkg/processor/broadcast_storage_handler.go b/pkg/processor/broadcast_storage_handler.go index 4bfafe78..aad34491 100644 --- a/pkg/processor/broadcast_storage_handler.go +++ b/pkg/processor/broadcast_storage_handler.go @@ -18,23 +18,26 @@ import ( "context" "fmt" "math/big" + "reflect" "github.com/coinbase/rosetta-cli/configuration" "github.com/coinbase/rosetta-sdk-go/constructor/coordinator" "github.com/coinbase/rosetta-sdk-go/parser" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) -var _ storage.BroadcastStorageHandler = (*BroadcastStorageHandler)(nil) +var _ modules.BroadcastStorageHandler = (*BroadcastStorageHandler)(nil) // BroadcastStorageHandler is invoked whenever a block is added // or removed from block storage so that balance changes // can be sent to other functions (ex: reconciler). type BroadcastStorageHandler struct { config *configuration.Configuration - counterStorage *storage.CounterStorage + blockStorage *modules.BlockStorage + counterStorage *modules.CounterStorage coordinator *coordinator.Coordinator parser *parser.Parser } @@ -42,12 +45,14 @@ type BroadcastStorageHandler struct { // NewBroadcastStorageHandler returns a new *BroadcastStorageHandler. 
func NewBroadcastStorageHandler( config *configuration.Configuration, - counterStorage *storage.CounterStorage, + blockStorage *modules.BlockStorage, + counterStorage *modules.CounterStorage, coordinator *coordinator.Coordinator, parser *parser.Parser, ) *BroadcastStorageHandler { return &BroadcastStorageHandler{ config: config, + blockStorage: blockStorage, counterStorage: counterStorage, coordinator: coordinator, parser: parser, @@ -58,20 +63,42 @@ func NewBroadcastStorageHandler( // last time at a block height < current block height - confirmationDepth. func (h *BroadcastStorageHandler) TransactionConfirmed( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, identifier string, blockIdentifier *types.BlockIdentifier, transaction *types.Transaction, intent []*types.Operation, + intentMetadata map[string]interface{}, ) error { - if err := h.parser.ExpectedOperations(intent, transaction.Operations, false, true); err != nil { - return fmt.Errorf("%w: confirmed transaction did not match intent", err) + _, _, relatedTransactions, err := h.blockStorage.FindRelatedTransactions(ctx, transaction.TransactionIdentifier, dbTx) + if err != nil { + return fmt.Errorf("failed to find related transactions for %s: %w", types.PrintStruct(transaction.TransactionIdentifier), err) + } + + observed := transaction.Operations + for _, relatedTransaction := range relatedTransactions { + observed = append(observed, relatedTransaction.Operations...) + } + + if err := h.parser.ExpectedOperations(intent, observed, false, true); err != nil { + return fmt.Errorf("confirmed transaction did not match intent: %w", err) + } + + // Validate destination memo if needed + if intentMemo, found := intentMetadata["memo"]; found { + if observedMemo, found := transaction.Metadata["memo"]; found { + if !reflect.DeepEqual(intentMemo, observedMemo) { + return fmt.Errorf("observed destination memo did not match intent destination memo, observed destination memo: %v, intent destination memo: %v", observedMemo, intentMemo) + } + } else { + return fmt.Errorf("observed destination memo was not found, observed destination memo: %v, intent destination memo: %v", observedMemo, intentMemo) + } } _, _ = h.counterStorage.UpdateTransactional( ctx, dbTx, - storage.TransactionsConfirmedCounter, + modules.TransactionsConfirmedCounter, big.NewInt(1), ) @@ -81,7 +108,7 @@ func (h *BroadcastStorageHandler) TransactionConfirmed( identifier, transaction, ); err != nil { - return fmt.Errorf("%w: coordinator could not handle transaction", err) + return fmt.Errorf("coordinator could not handle transaction: %w", err) } return nil @@ -92,14 +119,14 @@ func (h *BroadcastStorageHandler) TransactionConfirmed( // current block height - last broadcast > staleDepth. func (h *BroadcastStorageHandler) TransactionStale( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, identifier string, transactionIdentifier *types.TransactionIdentifier, ) error { _, _ = h.counterStorage.UpdateTransactional( ctx, dbTx, - storage.StaleBroadcastsCounter, + modules.StaleBroadcastsCounter, big.NewInt(1), ) @@ -110,7 +137,7 @@ func (h *BroadcastStorageHandler) TransactionStale( // put it over the provided broadcast limit.
func (h *BroadcastStorageHandler) BroadcastFailed( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, identifier string, transactionIdentifier *types.TransactionIdentifier, intent []*types.Operation, @@ -118,7 +145,7 @@ func (h *BroadcastStorageHandler) BroadcastFailed( _, _ = h.counterStorage.UpdateTransactional( ctx, dbTx, - storage.FailedBroadcastsCounter, + modules.FailedBroadcastsCounter, big.NewInt(1), ) @@ -128,7 +155,7 @@ func (h *BroadcastStorageHandler) BroadcastFailed( identifier, nil, ); err != nil { - return fmt.Errorf("%w: coordinator could not handle transaction", err) + return fmt.Errorf("coordinator could not handle transaction: %w", err) } if h.config.Construction.IgnoreBroadcastFailures { diff --git a/pkg/processor/broadcast_storage_helper.go b/pkg/processor/broadcast_storage_helper.go index f8e4f133..c5095599 100644 --- a/pkg/processor/broadcast_storage_helper.go +++ b/pkg/processor/broadcast_storage_helper.go @@ -18,26 +18,32 @@ import ( "context" "fmt" + "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/coinbase/rosetta-sdk-go/fetcher" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) -var _ storage.BroadcastStorageHelper = (*BroadcastStorageHelper)(nil) +var _ modules.BroadcastStorageHelper = (*BroadcastStorageHelper)(nil) // BroadcastStorageHelper implements the storage.Helper // interface. type BroadcastStorageHelper struct { - blockStorage *storage.BlockStorage + network *types.NetworkIdentifier + blockStorage *modules.BlockStorage fetcher *fetcher.Fetcher } // NewBroadcastStorageHelper returns a new BroadcastStorageHelper. func NewBroadcastStorageHelper( - blockStorage *storage.BlockStorage, + network *types.NetworkIdentifier, + blockStorage *modules.BlockStorage, fetcher *fetcher.Fetcher, ) *BroadcastStorageHelper { return &BroadcastStorageHelper{ + network: network, blockStorage: blockStorage, fetcher: fetcher, } @@ -48,9 +54,9 @@ func (h *BroadcastStorageHelper) AtTip( ctx context.Context, tipDelay int64, ) (bool, error) { - atTip, _, err := h.blockStorage.AtTip(ctx, tipDelay) + atTip, _, err := utils.CheckStorageTip(ctx, h.network, tipDelay, h.fetcher, h.blockStorage) if err != nil { - return false, fmt.Errorf("%w: unable to determine if at tip", err) + return false, fmt.Errorf("failed to check storage tip: %w", err) } return atTip, nil @@ -63,7 +69,7 @@ func (h *BroadcastStorageHelper) CurrentBlockIdentifier( ) (*types.BlockIdentifier, error) { blockIdentifier, err := h.blockStorage.GetHeadBlockIdentifier(ctx) if err != nil { - return nil, fmt.Errorf("%w: unable to get head block identifier", err) + return nil, fmt.Errorf("unable to get head block identifier: %w", err) } return blockIdentifier, nil @@ -75,11 +81,11 @@ func (h *BroadcastStorageHelper) CurrentBlockIdentifier( func (h *BroadcastStorageHelper) FindTransaction( ctx context.Context, transactionIdentifier *types.TransactionIdentifier, - txn storage.DatabaseTransaction, + txn database.Transaction, ) (*types.BlockIdentifier, *types.Transaction, error) { newestBlock, transaction, err := h.blockStorage.FindTransaction(ctx, transactionIdentifier, txn) if err != nil { - return nil, nil, fmt.Errorf("%w: unable to perform transaction search", err) + return nil, nil, fmt.Errorf("unable to perform transaction search for transaction %s: %w", types.PrintStruct(transactionIdentifier), err) } return newestBlock, 
transaction, nil @@ -98,7 +104,7 @@ func (h *BroadcastStorageHelper) BroadcastTransaction( networkTransaction, ) if fetchErr != nil { - return nil, fmt.Errorf("%w: unable to broadcast transaction", fetchErr.Err) + return nil, fmt.Errorf("unable to broadcast transaction %s: %w", networkTransaction, fetchErr.Err) } return transactionIdentifier, nil diff --git a/pkg/processor/coin_storage_helper.go b/pkg/processor/coin_storage_helper.go index dae4b41a..afad8c5f 100644 --- a/pkg/processor/coin_storage_helper.go +++ b/pkg/processor/coin_storage_helper.go @@ -17,20 +17,21 @@ package processor import ( "context" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) -var _ storage.CoinStorageHelper = (*CoinStorageHelper)(nil) +var _ modules.CoinStorageHelper = (*CoinStorageHelper)(nil) // CoinStorageHelper implements the storage.CoinStorageHelper // interface. type CoinStorageHelper struct { - blockStorage *storage.BlockStorage + blockStorage *modules.BlockStorage } // NewCoinStorageHelper returns a new *CoinStorageHelper. -func NewCoinStorageHelper(blockStorage *storage.BlockStorage) *CoinStorageHelper { +func NewCoinStorageHelper(blockStorage *modules.BlockStorage) *CoinStorageHelper { return &CoinStorageHelper{blockStorage: blockStorage} } @@ -38,7 +39,7 @@ func NewCoinStorageHelper(blockStorage *storage.BlockStorage) *CoinStorageHelper // the context of a storage.DatabaseTransaction. func (c *CoinStorageHelper) CurrentBlockIdentifier( ctx context.Context, - transaction storage.DatabaseTransaction, + transaction database.Transaction, ) (*types.BlockIdentifier, error) { return c.blockStorage.GetHeadBlockIdentifierTransactional(ctx, transaction) } diff --git a/pkg/processor/coordinator_handler.go b/pkg/processor/coordinator_handler.go index 2201800d..e78ae765 100644 --- a/pkg/processor/coordinator_handler.go +++ b/pkg/processor/coordinator_handler.go @@ -19,7 +19,7 @@ import ( "math/big" "github.com/coinbase/rosetta-sdk-go/constructor/coordinator" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) @@ -28,13 +28,13 @@ var _ coordinator.Handler = (*CoordinatorHandler)(nil) // CoordinatorHandler is invoked by the Coordinator // when addresses are created or transactions are created. type CoordinatorHandler struct { - counterStorage *storage.CounterStorage + counterStorage *modules.CounterStorage } // NewCoordinatorHandler returns a new // *CoordinatorHandler. 
func NewCoordinatorHandler( - counterStorage *storage.CounterStorage, + counterStorage *modules.CounterStorage, ) *CoordinatorHandler { return &CoordinatorHandler{ counterStorage: counterStorage, @@ -50,7 +50,7 @@ func (h *CoordinatorHandler) TransactionCreated( ) error { _, _ = h.counterStorage.Update( ctx, - storage.TransactionsCreatedCounter, + modules.TransactionsCreatedCounter, big.NewInt(1), ) diff --git a/pkg/processor/coordinator_helper.go b/pkg/processor/coordinator_helper.go index 8edd7bb7..f21d91aa 100644 --- a/pkg/processor/coordinator_helper.go +++ b/pkg/processor/coordinator_helper.go @@ -17,15 +17,46 @@ package processor import ( "context" "fmt" + "log" "math/big" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" "github.com/coinbase/rosetta-sdk-go/constructor/coordinator" "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/keys" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) +const ( + request = "REQUEST" + response = "RESPONSE" + reqerror = "ERROR" + queue = "QUEUE" + + constructionDerive = "/construction/derive" + constructionPreprocess = "/construction/preprocess" + constructionMetadata = "/construction/metadata" + constructionPayloads = "/construction/payloads" + constructionParse = "/construction/parse" + constructionCombine = "/construction/combine" + constructionHash = "/construction/hash" + constructionSubmit = "/construction/submit" + + argNetwork = "network_identifier" + argMetadata = "metadata" + argError = "error" + argAccount = "account_identifier" + argIntent = "intent" + argPublicKeys = "public_keys" + argUnsignedTransaction = "unsigned_transaction" + argTransactionIdentifier = "transaction_identifier" + argNetworkTransaction = "network_transaction" + + kvPrefix = "coordinator_kv" +) + var _ coordinator.Helper = (*CoordinatorHelper)(nil) // CoordinatorHelper implements the Coordinator.Helper @@ -34,29 +65,34 @@ type CoordinatorHelper struct { offlineFetcher *fetcher.Fetcher onlineFetcher *fetcher.Fetcher - database storage.Database - blockStorage *storage.BlockStorage - keyStorage *storage.KeyStorage - balanceStorage *storage.BalanceStorage - coinStorage *storage.CoinStorage - broadcastStorage *storage.BroadcastStorage - counterStorage *storage.CounterStorage + database database.Database + blockStorage *modules.BlockStorage + keyStorage *modules.KeyStorage + balanceStorage *modules.BalanceStorage + coinStorage *modules.CoinStorage + broadcastStorage *modules.BroadcastStorage + counterStorage *modules.CounterStorage balanceStorageHelper *BalanceStorageHelper + + // quiet determines if requests/responses logging + // should be silenced. + quiet bool } // NewCoordinatorHelper returns a new *CoordinatorHelper. 
func NewCoordinatorHelper( offlineFetcher *fetcher.Fetcher, onlineFetcher *fetcher.Fetcher, - database storage.Database, - blockStorage *storage.BlockStorage, - keyStorage *storage.KeyStorage, - balanceStorage *storage.BalanceStorage, - coinStorage *storage.CoinStorage, - broadcastStorage *storage.BroadcastStorage, + database database.Database, + blockStorage *modules.BlockStorage, + keyStorage *modules.KeyStorage, + balanceStorage *modules.BalanceStorage, + coinStorage *modules.CoinStorage, + broadcastStorage *modules.BroadcastStorage, balanceStorageHelper *BalanceStorageHelper, - counterStorage *storage.CounterStorage, + counterStorage *modules.CounterStorage, + quiet bool, ) *CoordinatorHelper { return &CoordinatorHelper{ offlineFetcher: offlineFetcher, @@ -69,12 +105,32 @@ func NewCoordinatorHelper( broadcastStorage: broadcastStorage, counterStorage: counterStorage, balanceStorageHelper: balanceStorageHelper, + quiet: quiet, } } -// DatabaseTransaction returns a new write-ready storage.DatabaseTransaction. -func (c *CoordinatorHelper) DatabaseTransaction(ctx context.Context) storage.DatabaseTransaction { - return c.database.NewDatabaseTransaction(ctx, true) +// DatabaseTransaction returns a new write-ready database.Transaction. +func (c *CoordinatorHelper) DatabaseTransaction(ctx context.Context) database.Transaction { + return c.database.Transaction(ctx) +} + +type arg struct { + name string + val interface{} +} + +// verboseLog logs a request or response unless c.quiet is true. +func (c *CoordinatorHelper) verboseLog(reqres string, endpoint string, args ...arg) { + if c.quiet { + return + } + + l := fmt.Sprintf("%s %s", reqres, endpoint) + for _, a := range args { + l = fmt.Sprintf("%s %s:%s", l, a.name, types.PrintStruct(a.val)) + } + + log.Println(l) } // Derive returns a new address for a provided publicKey.
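// Editor's sketch (illustrative only, not part of this change): unless the
// helper was built with quiet=true, each construction call below is traced by
// verboseLog as a single line of the form
// "<REQUEST|RESPONSE|ERROR> <endpoint> <name>:<value> ...". A self-contained
// approximation of that formatting, using only the standard library and a
// hypothetical kv pair type:
//
//	type kv struct{ name, val string }
//
//	func logCall(reqres, endpoint string, args ...kv) {
//		l := fmt.Sprintf("%s %s", reqres, endpoint)
//		for _, a := range args {
//			l = fmt.Sprintf("%s %s:%s", l, a.name, a.val)
//		}
//		log.Println(l) // e.g. "REQUEST /construction/derive network_identifier:{...}"
//	}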
@@ -83,18 +139,28 @@ func (c *CoordinatorHelper) Derive( networkIdentifier *types.NetworkIdentifier, publicKey *types.PublicKey, metadata map[string]interface{}, -) (string, map[string]interface{}, error) { - add, metadata, fetchErr := c.offlineFetcher.ConstructionDerive( +) (*types.AccountIdentifier, map[string]interface{}, error) { + c.verboseLog(request, constructionDerive, + arg{argNetwork, networkIdentifier}, + arg{"public_key", publicKey}, + arg{argMetadata, metadata}, + ) + account, metadata, fetchErr := c.offlineFetcher.ConstructionDerive( ctx, networkIdentifier, publicKey, metadata, ) if fetchErr != nil { - return "", nil, fetchErr.Err + c.verboseLog(reqerror, constructionDerive, arg{argError, fetchErr}) + return nil, nil, fmt.Errorf("/construction/derive call failed: %w", fetchErr.Err) } - return add, metadata, nil + c.verboseLog(response, constructionDerive, + arg{argAccount, account}, + arg{argMetadata, metadata}, + ) + return account, metadata, nil } // Preprocess calls the /construction/preprocess endpoint @@ -104,8 +170,13 @@ func (c *CoordinatorHelper) Preprocess( networkIdentifier *types.NetworkIdentifier, intent []*types.Operation, metadata map[string]interface{}, -) (map[string]interface{}, error) { - res, fetchErr := c.offlineFetcher.ConstructionPreprocess( +) (map[string]interface{}, []*types.AccountIdentifier, error) { + c.verboseLog(request, constructionPreprocess, + arg{argNetwork, networkIdentifier}, + arg{argIntent, intent}, + arg{argMetadata, metadata}, + ) + options, requiredPublicKeys, fetchErr := c.offlineFetcher.ConstructionPreprocess( ctx, networkIdentifier, intent, @@ -113,10 +184,15 @@ ) if fetchErr != nil { - return nil, fetchErr.Err + c.verboseLog(reqerror, constructionPreprocess, arg{argError, fetchErr}) + return nil, nil, fmt.Errorf("/construction/preprocess call failed: %w", fetchErr.Err) } - return res, nil + c.verboseLog(response, constructionPreprocess, + arg{"options", options}, + arg{"required_public_keys", requiredPublicKeys}, + ) + return options, requiredPublicKeys, nil } // Metadata calls the /construction/metadata endpoint @@ -125,18 +201,30 @@ func (c *CoordinatorHelper) Metadata( ctx context.Context, networkIdentifier *types.NetworkIdentifier, metadataRequest map[string]interface{}, -) (map[string]interface{}, error) { - res, fetchErr := c.offlineFetcher.ConstructionMetadata( + publicKeys []*types.PublicKey, +) (map[string]interface{}, []*types.Amount, error) { + c.verboseLog(request, constructionMetadata, + arg{argNetwork, networkIdentifier}, + arg{argMetadata, metadataRequest}, + arg{argPublicKeys, publicKeys}, + ) + metadata, suggestedFee, fetchErr := c.onlineFetcher.ConstructionMetadata( ctx, networkIdentifier, metadataRequest, + publicKeys, ) if fetchErr != nil { - return nil, fetchErr.Err + c.verboseLog(reqerror, constructionMetadata, arg{argError, fetchErr}) + return nil, nil, fmt.Errorf("/construction/metadata call failed: %w", fetchErr.Err) } - return res, nil + c.verboseLog(response, constructionMetadata, + arg{argMetadata, metadata}, + arg{"suggested_fee", suggestedFee}, + ) + return metadata, suggestedFee, nil } // Payloads calls the /construction/payloads endpoint @@ -146,18 +234,30 @@ func (c *CoordinatorHelper) Payloads( networkIdentifier *types.NetworkIdentifier, intent []*types.Operation, requiredMetadata map[string]interface{}, + publicKeys []*types.PublicKey, ) (string, []*types.SigningPayload, error) { + c.verboseLog(request, constructionPayloads, + arg{argNetwork,
networkIdentifier}, + arg{argIntent, intent}, + arg{argPublicKeys, publicKeys}, + ) res, payloads, fetchErr := c.offlineFetcher.ConstructionPayloads( ctx, networkIdentifier, intent, requiredMetadata, + publicKeys, ) if fetchErr != nil { - return "", nil, fetchErr.Err + c.verboseLog(reqerror, constructionPayloads, arg{argError, fetchErr}) + return "", nil, fmt.Errorf("/construction/payloads call failed: %w", fetchErr.Err) } + c.verboseLog(response, constructionPayloads, + arg{argUnsignedTransaction, res}, + arg{"payloads", payloads}, + ) return res, payloads, nil } @@ -168,7 +268,12 @@ func (c *CoordinatorHelper) Parse( networkIdentifier *types.NetworkIdentifier, signed bool, transaction string, -) ([]*types.Operation, []string, map[string]interface{}, error) { +) ([]*types.Operation, []*types.AccountIdentifier, map[string]interface{}, error) { + c.verboseLog(request, constructionParse, + arg{argNetwork, networkIdentifier}, + arg{"signed", signed}, + arg{"transaction", transaction}, + ) ops, signers, metadata, fetchErr := c.offlineFetcher.ConstructionParse( ctx, networkIdentifier, @@ -177,9 +282,15 @@ ) if fetchErr != nil { - return nil, nil, nil, fetchErr.Err + c.verboseLog(reqerror, constructionParse, arg{argError, fetchErr}) + return nil, nil, nil, fmt.Errorf("/construction/parse call failed: %w", fetchErr.Err) } + c.verboseLog(response, constructionParse, + arg{"operations", ops}, + arg{"signers", signers}, + arg{argMetadata, metadata}, + ) return ops, signers, metadata, nil } @@ -191,6 +302,11 @@ func (c *CoordinatorHelper) Combine( unsignedTransaction string, signatures []*types.Signature, ) (string, error) { + c.verboseLog(request, constructionCombine, + arg{argNetwork, networkIdentifier}, + arg{argUnsignedTransaction, unsignedTransaction}, + arg{"signatures", signatures}, + ) res, fetchErr := c.offlineFetcher.ConstructionCombine( ctx, networkIdentifier, @@ -199,9 +315,11 @@ ) if fetchErr != nil { - return "", fetchErr.Err + c.verboseLog(reqerror, constructionCombine, arg{argError, fetchErr}) + return "", fmt.Errorf("/construction/combine call failed: %w", fetchErr.Err) } + c.verboseLog(response, constructionCombine, arg{argNetworkTransaction, res}) return res, nil } @@ -212,6 +330,10 @@ func (c *CoordinatorHelper) Hash( networkIdentifier *types.NetworkIdentifier, networkTransaction string, ) (*types.TransactionIdentifier, error) { + c.verboseLog(request, constructionHash, + arg{argNetwork, networkIdentifier}, + arg{argNetworkTransaction, networkTransaction}, + ) res, fetchErr := c.offlineFetcher.ConstructionHash( ctx, networkIdentifier, @@ -219,9 +341,11 @@ ) if fetchErr != nil { - return nil, fetchErr.Err + c.verboseLog(reqerror, constructionHash, arg{argError, fetchErr}) + return nil, fmt.Errorf("/construction/hash call failed: %w", fetchErr.Err) } + c.verboseLog(response, constructionHash, arg{argTransactionIdentifier, res}) return res, nil } @@ -234,19 +358,34 @@ func (c *CoordinatorHelper) Sign( return c.keyStorage.Sign(ctx, payloads) } +// GetKey is called to get the *keys.KeyPair +// associated with an account. +func (c *CoordinatorHelper) GetKey( + ctx context.Context, + dbTx database.Transaction, + account *types.AccountIdentifier, +) (*keys.KeyPair, error) { + return c.keyStorage.GetTransactional(ctx, dbTx, account) +} + // StoreKey stores a KeyPair and address // in KeyStorage.
func (c *CoordinatorHelper) StoreKey( ctx context.Context, - dbTx storage.DatabaseTransaction, - address string, + dbTx database.Transaction, + account *types.AccountIdentifier, keyPair *keys.KeyPair, ) error { // We optimisically add the interesting address although the dbTx could be reverted. - c.balanceStorageHelper.AddInterestingAddress(address) + c.balanceStorageHelper.AddInterestingAddress(account.Address) - _, _ = c.counterStorage.UpdateTransactional(ctx, dbTx, storage.AddressesCreatedCounter, big.NewInt(1)) - return c.keyStorage.StoreTransactional(ctx, address, keyPair, dbTx) + _, _ = c.counterStorage.UpdateTransactional( + ctx, + dbTx, + modules.AddressesCreatedCounter, + big.NewInt(1), + ) + return c.keyStorage.StoreTransactional(ctx, account, keyPair, dbTx) } // Balance returns the balance @@ -255,26 +394,32 @@ func (c *CoordinatorHelper) StoreKey( // 0 will be returned. func (c *CoordinatorHelper) Balance( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, accountIdentifier *types.AccountIdentifier, currency *types.Currency, ) (*types.Amount, error) { - amount, _, err := c.balanceStorage.GetBalanceTransactional( + headBlock, err := c.blockStorage.GetHeadBlockIdentifier(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get head block identifier: %w", err) + } + if headBlock == nil { + return nil, cliErrs.ErrNoHeadBlock + } + + return c.balanceStorage.GetOrSetBalanceTransactional( ctx, dbTx, accountIdentifier, currency, - nil, + headBlock, ) - - return amount, err } // Coins returns all *types.Coin owned by // an account. func (c *CoordinatorHelper) Coins( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, accountIdentifier *types.AccountIdentifier, currency *types.Currency, ) ([]*types.Coin, error) { @@ -284,7 +429,7 @@ func (c *CoordinatorHelper) Coins( accountIdentifier, ) if err != nil { - return nil, fmt.Errorf("%w: unable to get coins", err) + return nil, fmt.Errorf("unable to get coins for account %s: %w", types.PrintStruct(accountIdentifier), err) } coinsToReturn := []*types.Coin{} @@ -299,36 +444,44 @@ func (c *CoordinatorHelper) Coins( return coinsToReturn, nil } -// LockedAddresses returns a slice of all addresses currently sending or receiving +// LockedAccounts returns a slice of all accounts currently sending or receiving // funds. -func (c *CoordinatorHelper) LockedAddresses( +func (c *CoordinatorHelper) LockedAccounts( ctx context.Context, - dbTx storage.DatabaseTransaction, -) ([]string, error) { - return c.broadcastStorage.LockedAddresses(ctx, dbTx) + dbTx database.Transaction, +) ([]*types.AccountIdentifier, error) { + return c.broadcastStorage.LockedAccounts(ctx, dbTx) } // AllBroadcasts returns a slice of all in-progress broadcasts in BroadcastStorage. -func (c *CoordinatorHelper) AllBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) { +func (c *CoordinatorHelper) AllBroadcasts(ctx context.Context) ([]*modules.Broadcast, error) { return c.broadcastStorage.GetAllBroadcasts(ctx) } // ClearBroadcasts deletes all pending broadcasts. -func (c *CoordinatorHelper) ClearBroadcasts(ctx context.Context) ([]*storage.Broadcast, error) { +func (c *CoordinatorHelper) ClearBroadcasts(ctx context.Context) ([]*modules.Broadcast, error) { return c.broadcastStorage.ClearBroadcasts(ctx) } // Broadcast enqueues a particular intent for broadcast. 
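// Editor's sketch (illustrative only, not part of this change): the new
// transactionMetadata argument on Broadcast lets a caller forward construction
// metadata (for example a destination memo) alongside the signed payload, so
// that TransactionConfirmed can later compare the observed memo against the
// intent. Hypothetical call, assuming an open write transaction dbTx and
// values produced earlier in the job:
//
//	err := helper.Broadcast(
//		ctx, dbTx,
//		"transfer-job-1",      // broadcast identifier
//		network,               // *types.NetworkIdentifier
//		intent,                // []*types.Operation
//		txIdentifier,          // *types.TransactionIdentifier
//		signedPayload,         // network transaction blob
//		64,                    // confirmation depth
//		map[string]interface{}{"memo": "invoice-123"},
//	)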
func (c *CoordinatorHelper) Broadcast( ctx context.Context, - dbTx storage.DatabaseTransaction, + dbTx database.Transaction, identifier string, network *types.NetworkIdentifier, intent []*types.Operation, transactionIdentifier *types.TransactionIdentifier, payload string, confirmationDepth int64, + transactionMetadata map[string]interface{}, ) error { + c.verboseLog(queue, constructionSubmit, + arg{argNetwork, network}, + arg{argIntent, intent}, + arg{argTransactionIdentifier, transactionIdentifier}, + arg{argNetworkTransaction, payload}, + arg{argMetadata, transactionMetadata}, + ) return c.broadcastStorage.Broadcast( ctx, dbTx, @@ -338,6 +491,7 @@ func (c *CoordinatorHelper) Broadcast( transactionIdentifier, payload, confirmationDepth, + transactionMetadata, ) } @@ -348,12 +502,12 @@ func (c *CoordinatorHelper) BroadcastAll( return c.broadcastStorage.BroadcastAll(ctx, true) } -// AllAddresses returns a slice of all known addresses. -func (c *CoordinatorHelper) AllAddresses( +// AllAccounts returns a slice of all known accounts. +func (c *CoordinatorHelper) AllAccounts( ctx context.Context, - dbTx storage.DatabaseTransaction, -) ([]string, error) { - return c.keyStorage.GetAllAddressesTransactional(ctx, dbTx) + dbTx database.Transaction, +) ([]*types.AccountIdentifier, error) { + return c.keyStorage.GetAllAccountsTransactional(ctx, dbTx) } // HeadBlockExists returns a boolean indicating if a block has been @@ -363,3 +517,30 @@ func (c *CoordinatorHelper) HeadBlockExists(ctx context.Context) bool { return headBlock != nil } + +func kvKey(key string) []byte { + return []byte(fmt.Sprintf("%s/%s", kvPrefix, key)) +} + +// SetBlob transactionally persists +// a key and value. +func (c *CoordinatorHelper) SetBlob( + ctx context.Context, + dbTx database.Transaction, + key string, + value []byte, +) error { + // We defensively don't claim the value slice + // in our buffer pool. + return dbTx.Set(ctx, kvKey(key), value, false) +} + +// GetBlob transactionally retrieves +// a key and value. +func (c *CoordinatorHelper) GetBlob( + ctx context.Context, + dbTx database.Transaction, + key string, +) (bool, []byte, error) { + return dbTx.Get(ctx, kvKey(key)) +} diff --git a/pkg/processor/reconciler_handler.go b/pkg/processor/reconciler_handler.go index 95f58ab3..b6a6154b 100644 --- a/pkg/processor/reconciler_handler.go +++ b/pkg/processor/reconciler_handler.go @@ -16,47 +16,109 @@ package processor import ( "context" - "errors" "fmt" "math/big" + "sync" + "time" "github.com/coinbase/rosetta-cli/pkg/logger" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" "github.com/coinbase/rosetta-sdk-go/reconciler" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" ) +const ( + updateFrequency = 10 * time.Second +) + +var _ reconciler.Handler = (*ReconcilerHandler)(nil) + var ( - // ErrReconciliationFailure is returned if reconciliation fails. - ErrReconciliationFailure = errors.New("reconciliation failure") + countKeys = []string{ + modules.FailedReconciliationCounter, + modules.SkippedReconciliationsCounter, + modules.ExemptReconciliationCounter, + modules.ActiveReconciliationCounter, + modules.InactiveReconciliationCounter, + } ) // ReconcilerHandler implements the Reconciler.Handler interface. 
type ReconcilerHandler struct { logger *logger.Logger - balanceStorage *storage.BalanceStorage + counterStorage *modules.CounterStorage + balanceStorage *modules.BalanceStorage haltOnReconciliationError bool - InactiveFailure *reconciler.AccountCurrency + InactiveFailure *types.AccountCurrency InactiveFailureBlock *types.BlockIdentifier ActiveFailureBlock *types.BlockIdentifier + + counterLock sync.Mutex + counts map[string]int64 } // NewReconcilerHandler creates a new ReconcilerHandler. func NewReconcilerHandler( logger *logger.Logger, - balanceStorage *storage.BalanceStorage, + counterStorage *modules.CounterStorage, + balanceStorage *modules.BalanceStorage, haltOnReconciliationError bool, ) *ReconcilerHandler { + counts := map[string]int64{} + for _, key := range countKeys { + counts[key] = 0 + } + return &ReconcilerHandler{ logger: logger, + counterStorage: counterStorage, balanceStorage: balanceStorage, haltOnReconciliationError: haltOnReconciliationError, + counts: counts, + } +} + +// Updater periodically updates counter storage with cached counts. +func (h *ReconcilerHandler) Updater(ctx context.Context) error { + tc := time.NewTicker(updateFrequency) + defer tc.Stop() + + for { + select { + case <-tc.C: + if err := h.UpdateCounts(ctx); err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + } } } +// UpdateCounts forces cached counts to be written to counter storage. +func (h *ReconcilerHandler) UpdateCounts(ctx context.Context) error { + for _, key := range countKeys { + h.counterLock.Lock() + count := h.counts[key] + h.counts[key] = 0 + h.counterLock.Unlock() + + if count == 0 { + continue + } + + if _, err := h.counterStorage.Update(ctx, key, big.NewInt(count)); err != nil { + return fmt.Errorf("failed to update key %s in counter storage: %w", key, err) + } + } + + return nil +} + // ReconciliationFailed is called each time a reconciliation fails. // In this Handler implementation, we halt if haltOnReconciliationError // was set to true. We also cancel the context. @@ -66,60 +128,108 @@ func (h *ReconcilerHandler) ReconciliationFailed( account *types.AccountIdentifier, currency *types.Currency, computedBalance string, - nodeBalance string, + liveBalance string, block *types.BlockIdentifier, ) error { + h.counterLock.Lock() + h.counts[modules.FailedReconciliationCounter]++ + h.counterLock.Unlock() + err := h.logger.ReconcileFailureStream( ctx, reconciliationType, account, currency, computedBalance, - nodeBalance, + liveBalance, block, ) if err != nil { - return err + return fmt.Errorf("failed to log reconciliation checks when reconciliation fails: %w", err) } if h.haltOnReconciliationError { + // Update counts before exiting + _ = h.UpdateCounts(ctx) + if reconciliationType == reconciler.InactiveReconciliation { // Populate inactive failure information so we can try to find block with // missing ops. - h.InactiveFailure = &reconciler.AccountCurrency{ + h.InactiveFailure = &types.AccountCurrency{ Account: account, Currency: currency, } h.InactiveFailureBlock = block return fmt.Errorf( - "%w: inactive reconciliation error for %s at %d (computed: %s%s, live: %s%s)", - ErrReconciliationFailure, + "inactive reconciliation error for account address %s at block index %d (computed: %s%s, live: %s%s): %w", account.Address, block.Index, computedBalance, currency.Symbol, - nodeBalance, + liveBalance, currency.Symbol, + cliErrs.ErrReconciliationFailure, ) } // If we halt on an active reconciliation error, store in the handler.
h.ActiveFailureBlock = block return fmt.Errorf( - "%w: active reconciliation error for %s at %d (computed: %s%s, live: %s%s)", - ErrReconciliationFailure, + "active reconciliation error for account address %s at block index %d (computed: %s%s, live: %s%s): %w", account.Address, block.Index, computedBalance, currency.Symbol, - nodeBalance, + liveBalance, currency.Symbol, + cliErrs.ErrReconciliationFailure, ) } return nil } +// ReconciliationExempt is called each time a reconciliation fails +// but is considered exempt because of provided []*types.BalanceExemption. +func (h *ReconcilerHandler) ReconciliationExempt( + ctx context.Context, + reconciliationType string, + account *types.AccountIdentifier, + currency *types.Currency, + computedBalance string, + liveBalance string, + block *types.BlockIdentifier, + exemption *types.BalanceExemption, +) error { + h.counterLock.Lock() + h.counts[modules.ExemptReconciliationCounter]++ + h.counterLock.Unlock() + + // Although the reconciliation was exempt (non-zero difference that was ignored), + // we still mark the account as being reconciled because the balance was in the range + // specified by exemption. + if err := h.balanceStorage.Reconciled(ctx, account, currency, block); err != nil { + return fmt.Errorf("unable to store updated reconciliation currency %s of account %s at block %s: %w", types.PrintStruct(currency), types.PrintStruct(account), types.PrintStruct(block), err) + } + + return nil +} + +// ReconciliationSkipped is called each time a reconciliation is skipped. +func (h *ReconcilerHandler) ReconciliationSkipped( + ctx context.Context, + reconciliationType string, + account *types.AccountIdentifier, + currency *types.Currency, + cause string, +) error { + h.counterLock.Lock() + h.counts[modules.SkippedReconciliationsCounter]++ + h.counterLock.Unlock() + + return nil +} + // ReconciliationSucceeded is called each time a reconciliation succeeds. 
func (h *ReconcilerHandler) ReconciliationSucceeded( ctx context.Context, @@ -130,18 +240,17 @@ func (h *ReconcilerHandler) ReconciliationSucceeded( block *types.BlockIdentifier, ) error { // Update counters + counter := modules.ActiveReconciliationCounter if reconciliationType == reconciler.InactiveReconciliation { - _, _ = h.logger.CounterStorage.Update( - ctx, - storage.InactiveReconciliationCounter, - big.NewInt(1), - ) - } else { - _, _ = h.logger.CounterStorage.Update(ctx, storage.ActiveReconciliationCounter, big.NewInt(1)) + counter = modules.InactiveReconciliationCounter } + h.counterLock.Lock() + h.counts[counter]++ + h.counterLock.Unlock() + if err := h.balanceStorage.Reconciled(ctx, account, currency, block); err != nil { - return fmt.Errorf("%w: unable to store updated reconciliation", err) + return fmt.Errorf("unable to store updated reconciliation currency %s of account %s at block %s: %w", types.PrintStruct(currency), types.PrintStruct(account), types.PrintStruct(block), err) } return h.logger.ReconcileSuccessStream( diff --git a/pkg/processor/reconciler_helper.go b/pkg/processor/reconciler_helper.go index a23e5630..a87fbc20 100644 --- a/pkg/processor/reconciler_helper.go +++ b/pkg/processor/reconciler_helper.go @@ -16,57 +16,88 @@ package processor import ( "context" - "errors" + "fmt" + + "github.com/coinbase/rosetta-cli/configuration" "github.com/coinbase/rosetta-sdk-go/fetcher" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/reconciler" + "github.com/coinbase/rosetta-sdk-go/storage/database" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" ) +var _ reconciler.Helper = (*ReconcilerHelper)(nil) + // ReconcilerHelper implements the Reconciler.Helper // interface. type ReconcilerHelper struct { + config *configuration.Configuration + network *types.NetworkIdentifier fetcher *fetcher.Fetcher - blockStorage *storage.BlockStorage - balanceStorage *storage.BalanceStorage + database database.Database + blockStorage *modules.BlockStorage + balanceStorage *modules.BalanceStorage + forceInactiveReconciliation *bool } // NewReconcilerHelper returns a new ReconcilerHelper. func NewReconcilerHelper( + config *configuration.Configuration, network *types.NetworkIdentifier, fetcher *fetcher.Fetcher, - blockStorage *storage.BlockStorage, - balanceStorage *storage.BalanceStorage, + database database.Database, + blockStorage *modules.BlockStorage, + balanceStorage *modules.BalanceStorage, + forceInactiveReconciliation *bool, ) *ReconcilerHelper { return &ReconcilerHelper{ - network: network, - fetcher: fetcher, - blockStorage: blockStorage, - balanceStorage: balanceStorage, + config: config, + network: network, + fetcher: fetcher, + database: database, + blockStorage: blockStorage, + balanceStorage: balanceStorage, + forceInactiveReconciliation: forceInactiveReconciliation, } } -// BlockExists returns a boolean indicating if block_storage -// contains a block. This is necessary to reconcile across +// DatabaseTransaction returns a new read-only database.Transaction. +func (h *ReconcilerHelper) DatabaseTransaction( + ctx context.Context, +) database.Transaction { + return h.database.ReadTransaction(ctx) +} + +// CanonicalBlock returns a boolean indicating if a block +// is in the canonical chain. This is necessary to reconcile across // reorgs. If the block returned on an account balance fetch // does not exist, reconciliation will be skipped. 
-func (h *ReconcilerHelper) BlockExists( +func (h *ReconcilerHelper) CanonicalBlock( ctx context.Context, + dbTx database.Transaction, block *types.BlockIdentifier, ) (bool, error) { - _, err := h.blockStorage.GetBlock(ctx, types.ConstructPartialBlockIdentifier(block)) - if err == nil { - return true, nil - } - - if errors.Is(err, storage.ErrBlockNotFound) { - return false, nil - } + return h.blockStorage.CanonicalBlockTransactional(ctx, block, dbTx) +} - return false, err +// IndexAtTip returns a boolean indicating if a block +// index is at tip (provided some acceptable +// tip delay). If the index is ahead of the head block +// and the head block is at tip, we consider the +// index at tip. +func (h *ReconcilerHelper) IndexAtTip( + ctx context.Context, + index int64, +) (bool, error) { + return h.blockStorage.IndexAtTip( + ctx, + h.config.TipDelay, + index, + ) } // CurrentBlock returns the last processed block and is used @@ -74,8 +105,9 @@ func (h *ReconcilerHelper) BlockExists( // inactive reconciliation. func (h *ReconcilerHelper) CurrentBlock( ctx context.Context, + dbTx database.Transaction, ) (*types.BlockIdentifier, error) { - return h.blockStorage.GetHeadBlockIdentifier(ctx) + return h.blockStorage.GetHeadBlockIdentifierTransactional(ctx, dbTx) } // ComputedBalance returns the balance of an account in block storage. @@ -83,11 +115,12 @@ func (h *ReconcilerHelper) CurrentBlock( // package to allow for separation from a default storage backend. func (h *ReconcilerHelper) ComputedBalance( ctx context.Context, + dbTx database.Transaction, account *types.AccountIdentifier, currency *types.Currency, - headBlock *types.BlockIdentifier, -) (*types.Amount, *types.BlockIdentifier, error) { - return h.balanceStorage.GetBalance(ctx, account, currency, headBlock) + index int64, +) (*types.Amount, error) { + return h.balanceStorage.GetBalanceTransactional(ctx, dbTx, account, currency, index) } // LiveBalance returns the live balance of an account. @@ -95,18 +128,56 @@ func (h *ReconcilerHelper) LiveBalance( ctx context.Context, account *types.AccountIdentifier, currency *types.Currency, - headBlock *types.BlockIdentifier, + index int64, ) (*types.Amount, *types.BlockIdentifier, error) { - amt, block, _, err := utils.CurrencyBalance( + amt, block, err := utils.CurrencyBalance( ctx, h.network, h.fetcher, account, currency, - headBlock, + index, ) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to get current balance of currency %s of account %s: %w", types.PrintStruct(currency), types.PrintStruct(account), err) } return amt, block, nil } + +// PruneBalances removes all historical balance states +// <= some index. This can significantly reduce storage +// usage in scenarios where historical balances are only +// retrieved once (like reconciliation). +func (h *ReconcilerHelper) PruneBalances( + ctx context.Context, + account *types.AccountIdentifier, + currency *types.Currency, + index int64, +) error { + if h.config.Data.PruningBalanceDisabled { + return nil + } + + return h.balanceStorage.PruneBalances( + ctx, + account, + currency, + index, + ) +} + +// ForceInactiveReconciliation overrides the default +// calculation to determine if an account should be +// reconciled inactively. 
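// Editor's note (not part of this change): the implementation below ignores
// the account, currency, and lastChecked arguments and simply dereferences the
// shared *bool captured at construction time, so the caller can flip inactive
// reconciliation on for every account at once. Hypothetical wiring:
//
//	force := false
//	helper := NewReconcilerHelper(cfg, network, fetcher, db, blockStorage, balanceStorage, &force)
//	// ... later, for example once the end of a sync range is reached:
//	force = true // ForceInactiveReconciliation now reports true for all accounts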
+func (h *ReconcilerHelper) ForceInactiveReconciliation( + ctx context.Context, + account *types.AccountIdentifier, + currency *types.Currency, + lastChecked *types.BlockIdentifier, +) bool { + if h.forceInactiveReconciliation == nil { + return false + } + + return *h.forceInactiveReconciliation +} diff --git a/pkg/tester/construction_results.go b/pkg/results/construction_results.go similarity index 67% rename from pkg/tester/construction_results.go rename to pkg/results/construction_results.go index 55ff54b0..3d968247 100644 --- a/pkg/tester/construction_results.go +++ b/pkg/results/construction_results.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package tester +package results import ( "context" @@ -21,9 +21,11 @@ import ( "os" "strconv" + pkgError "github.com/pkg/errors" + "github.com/coinbase/rosetta-cli/configuration" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/fatih/color" @@ -51,8 +53,10 @@ func (c *CheckConstructionResults) Print() { } fmt.Printf("\n") - c.Stats.Print() - fmt.Printf("\n") + if c.Stats != nil { + c.Stats.Print() + fmt.Printf("\n") + } } // Output writes CheckConstructionResults to the provided @@ -61,7 +65,7 @@ func (c *CheckConstructionResults) Output(path string) { if len(path) > 0 { writeErr := utils.SerializeAndWrite(path, c) if writeErr != nil { - log.Printf("%s: unable to save results\n", writeErr.Error()) + log.Printf("unable to save results: %s\n", writeErr.Error()) } } } @@ -71,8 +75,8 @@ func (c *CheckConstructionResults) Output(path string) { func ComputeCheckConstructionResults( cfg *configuration.Configuration, err error, - counterStorage *storage.CounterStorage, - jobStorage *storage.JobStorage, + counterStorage *modules.CounterStorage, + jobStorage *modules.JobStorage, ) *CheckConstructionResults { ctx := context.Background() stats := ComputeCheckConstructionStats(ctx, cfg, counterStorage, jobStorage) @@ -81,7 +85,7 @@ func ComputeCheckConstructionResults( } if err != nil { - results.Error = err.Error() + results.Error = fmt.Sprintf("%+v", err) // We never want to populate an end condition // if there was an error! 
@@ -167,38 +171,38 @@ func (c *CheckConstructionStats) Print() { func ComputeCheckConstructionStats( ctx context.Context, config *configuration.Configuration, - counters *storage.CounterStorage, - jobs *storage.JobStorage, + counters *modules.CounterStorage, + jobs *modules.JobStorage, ) *CheckConstructionStats { if counters == nil || jobs == nil { return nil } - transactionsCreated, err := counters.Get(ctx, storage.TransactionsCreatedCounter) + transactionsCreated, err := counters.Get(ctx, modules.TransactionsCreatedCounter) if err != nil { log.Printf("%s cannot get transactions created counter\n", err.Error()) return nil } - transactionsConfirmed, err := counters.Get(ctx, storage.TransactionsConfirmedCounter) + transactionsConfirmed, err := counters.Get(ctx, modules.TransactionsConfirmedCounter) if err != nil { log.Printf("%s cannot get transactions confirmed counter\n", err.Error()) return nil } - staleBroadcasts, err := counters.Get(ctx, storage.StaleBroadcastsCounter) + staleBroadcasts, err := counters.Get(ctx, modules.StaleBroadcastsCounter) if err != nil { log.Printf("%s cannot get stale broadcasts counter\n", err) return nil } - failedBroadcasts, err := counters.Get(ctx, storage.FailedBroadcastsCounter) + failedBroadcasts, err := counters.Get(ctx, modules.FailedBroadcastsCounter) if err != nil { log.Printf("%s cannot get failed broadcasts counter\n", err.Error()) return nil } - addressesCreated, err := counters.Get(ctx, storage.AddressesCreatedCounter) + addressesCreated, err := counters.Get(ctx, modules.AddressesCreatedCounter) if err != nil { log.Printf("%s cannot get addresses created counter\n", err.Error()) return nil @@ -225,23 +229,94 @@ func ComputeCheckConstructionStats( } } -// ExitConstruction exits check:data, logs the test results to the console, +// CheckConstructionProgress contains the number of +// currently broadcasting transactions and processing +// jobs. +type CheckConstructionProgress struct { + Broadcasting int `json:"broadcasting"` + Processing int `json:"processing"` +} + +// ComputeCheckConstructionProgress computes +// *CheckConstructionProgress. +func ComputeCheckConstructionProgress( + ctx context.Context, + broadcasts *modules.BroadcastStorage, + jobs *modules.JobStorage, +) *CheckConstructionProgress { + inflight, err := broadcasts.GetAllBroadcasts(ctx) + if err != nil { + log.Printf("%s cannot get all broadcasts\n", err.Error()) + return nil + } + + processing, err := jobs.AllProcessing(ctx) + if err != nil { + log.Printf("%s cannot get all jobs\n", err.Error()) + return nil + } + + return &CheckConstructionProgress{ + Broadcasting: len(inflight), + Processing: len(processing), + } +} + +// CheckConstructionStatus contains CheckConstructionStats. +type CheckConstructionStatus struct { + Stats *CheckConstructionStats `json:"stats"` + Progress *CheckConstructionProgress `json:"progress"` +} + +// ComputeCheckConstructionStatus returns a populated +// *CheckConstructionStatus. +func ComputeCheckConstructionStatus( + ctx context.Context, + config *configuration.Configuration, + counters *modules.CounterStorage, + broadcasts *modules.BroadcastStorage, + jobs *modules.JobStorage, +) *CheckConstructionStatus { + return &CheckConstructionStatus{ + Stats: ComputeCheckConstructionStats(ctx, config, counters, jobs), + Progress: ComputeCheckConstructionProgress(ctx, broadcasts, jobs), + } +} + +// FetchCheckConstructionStatus fetches *CheckConstructionStatus. 
+func FetchCheckConstructionStatus(url string) (*CheckConstructionStatus, error) { + var status CheckConstructionStatus + if err := JSONFetch(url, &status); err != nil { + return nil, fmt.Errorf("unable to fetch check construction status: %w", err) + } + + return &status, nil +} + +// ExitConstruction exits check:construction, logs the test results to the console, // and to a provided output path. func ExitConstruction( config *configuration.Configuration, - counterStorage *storage.CounterStorage, - jobStorage *storage.JobStorage, + counterStorage *modules.CounterStorage, + jobStorage *modules.JobStorage, err error, - status int, -) { +) error { + if !config.ErrorStackTraceDisabled { + err = pkgError.WithStack(err) + } + results := ComputeCheckConstructionResults( config, err, counterStorage, jobStorage, ) - results.Print() - results.Output(config.Construction.ResultsOutputFile) + if results != nil { + results.Print() + if config.Construction != nil { + results.Output(config.Construction.ResultsOutputFile) + } + } - os.Exit(status) + return err } diff --git a/pkg/results/data_results.go b/pkg/results/data_results.go new file mode 100644 index 00000000..7ff08dfa --- /dev/null +++ b/pkg/results/data_results.go @@ -0,0 +1,729 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package results + +import ( + "context" + "errors" + "fmt" + "log" + "math/big" + "os" + "strconv" + + pkgError "github.com/pkg/errors" + + "github.com/coinbase/rosetta-cli/configuration" + + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-sdk-go/asserter" + "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/reconciler" + storageErrs "github.com/coinbase/rosetta-sdk-go/storage/errors" + "github.com/coinbase/rosetta-sdk-go/storage/modules" + "github.com/coinbase/rosetta-sdk-go/syncer" + "github.com/coinbase/rosetta-sdk-go/types" + "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/fatih/color" + "github.com/olekukonko/tablewriter" +) + +var ( + f = false + tr = true +) + +// EndCondition contains the type of +// end condition and any detail associated +// with the stop. +type EndCondition struct { + Type configuration.CheckDataEndCondition `json:"type"` + Detail string `json:"detail"` +} + +// CheckDataResults contains any error that occurred +// on a check:data run, the outcome of certain tests, +// and a collection of interesting stats. +type CheckDataResults struct { + Error string `json:"error"` + EndCondition *EndCondition `json:"end_condition"` + Tests *CheckDataTests `json:"tests"` + Stats *CheckDataStats `json:"stats"` +} + +// Print logs CheckDataResults to the console. 
+func (c *CheckDataResults) Print() { + if len(c.Error) > 0 { + fmt.Printf("\n") + color.Red("Error: %s", c.Error) + } + + if c.EndCondition != nil { + fmt.Printf("\n") + color.Green("Success: %s [%s]", c.EndCondition.Type, c.EndCondition.Detail) + } + + fmt.Printf("\n") + if c.Tests != nil { + c.Tests.Print() + fmt.Printf("\n") + } + if c.Stats != nil { + c.Stats.Print() + fmt.Printf("\n") + } +} + +// Output writes *CheckDataResults to the provided +// path. +func (c *CheckDataResults) Output(path string) { + if len(path) > 0 { + writeErr := utils.SerializeAndWrite(path, c) + if writeErr != nil { + log.Printf("unable to save results: %s\n", writeErr.Error()) + } + } +} + +// CheckDataStats contains interesting stats that +// are counted while running the check:data. +type CheckDataStats struct { + Blocks int64 `json:"blocks"` + Orphans int64 `json:"orphans"` + Transactions int64 `json:"transactions"` + Operations int64 `json:"operations"` + Accounts int64 `json:"accounts"` + ActiveReconciliations int64 `json:"active_reconciliations"` + InactiveReconciliations int64 `json:"inactive_reconciliations"` + ExemptReconciliations int64 `json:"exempt_reconciliations"` + FailedReconciliations int64 `json:"failed_reconciliations"` + SkippedReconciliations int64 `json:"skipped_reconciliations"` + ReconciliationCoverage float64 `json:"reconciliation_coverage"` +} + +// Print logs CheckDataStats to the console. +func (c *CheckDataStats) Print() { + table := tablewriter.NewWriter(os.Stdout) + table.SetRowLine(true) + table.SetRowSeparator("-") + table.SetHeader([]string{"check:data Stats", "Description", "Value"}) + table.Append([]string{"Blocks", "# of blocks synced", strconv.FormatInt(c.Blocks, 10)}) + table.Append([]string{"Orphans", "# of blocks orphaned", strconv.FormatInt(c.Orphans, 10)}) + table.Append( + []string{ + "Transactions", + "# of transaction processed", + strconv.FormatInt(c.Transactions, 10), + }, + ) + table.Append( + []string{"Operations", "# of operations processed", strconv.FormatInt(c.Operations, 10)}, + ) + table.Append( + []string{"Accounts", "# of accounts seen", strconv.FormatInt(c.Accounts, 10)}, + ) + table.Append( + []string{ + "Active Reconciliations", + "# of reconciliations performed after seeing an account in a block", + strconv.FormatInt(c.ActiveReconciliations, 10), + }, + ) + table.Append( + []string{ + "Inactive Reconciliations", + "# of reconciliations performed on randomly selected accounts", + strconv.FormatInt(c.InactiveReconciliations, 10), + }, + ) + table.Append( + []string{ + "Exempt Reconciliations", + "# of reconciliation failures considered exempt", + strconv.FormatInt(c.ExemptReconciliations, 10), + }, + ) + table.Append( + []string{ + "Failed Reconciliations", + "# of reconciliation failures", + strconv.FormatInt(c.FailedReconciliations, 10), + }, + ) + table.Append( + []string{ + "Skipped Reconciliations", + "# of reconciliations skipped", + strconv.FormatInt(c.SkippedReconciliations, 10), + }, + ) + table.Append( + []string{ + "Reconciliation Coverage", + "% of accounts that have been reconciled", + fmt.Sprintf("%f%%", c.ReconciliationCoverage*utils.OneHundred), + }, + ) + + table.Render() +} + +// ComputeCheckDataStats returns a populated CheckDataStats. 
+func ComputeCheckDataStats( + ctx context.Context, + counters *modules.CounterStorage, + balances *modules.BalanceStorage, +) *CheckDataStats { + if counters == nil { + return nil + } + + blocks, err := counters.Get(ctx, modules.BlockCounter) + if err != nil { + log.Printf("cannot get block counter: %s", err.Error()) + return nil + } + + orphans, err := counters.Get(ctx, modules.OrphanCounter) + if err != nil { + log.Printf("cannot get orphan counter: %s", err.Error()) + return nil + } + + txs, err := counters.Get(ctx, modules.TransactionCounter) + if err != nil { + log.Printf("cannot get transaction counter: %s", err.Error()) + return nil + } + + ops, err := counters.Get(ctx, modules.OperationCounter) + if err != nil { + log.Printf("cannot get operations counter: %s", err.Error()) + return nil + } + + accounts, err := counters.Get(ctx, modules.SeenAccounts) + if err != nil { + log.Printf("cannot get accounts counter: %s", err.Error()) + return nil + } + + activeReconciliations, err := counters.Get(ctx, modules.ActiveReconciliationCounter) + if err != nil { + log.Printf("cannot get active reconciliations counter: %s", err.Error()) + return nil + } + + inactiveReconciliations, err := counters.Get(ctx, modules.InactiveReconciliationCounter) + if err != nil { + log.Printf("cannot get inactive reconciliations counter: %s", err.Error()) + return nil + } + + exemptReconciliations, err := counters.Get(ctx, modules.ExemptReconciliationCounter) + if err != nil { + log.Printf("cannot get exempt reconciliations counter: %s", err.Error()) + return nil + } + + failedReconciliations, err := counters.Get(ctx, modules.FailedReconciliationCounter) + if err != nil { + log.Printf("cannot get failed reconciliations counter: %s", err.Error()) + return nil + } + + skippedReconciliations, err := counters.Get(ctx, modules.SkippedReconciliationsCounter) + if err != nil { + log.Printf("cannot get skipped reconciliations counter: %s", err.Error()) + return nil + } + + stats := &CheckDataStats{ + Blocks: blocks.Int64(), + Orphans: orphans.Int64(), + Transactions: txs.Int64(), + Operations: ops.Int64(), + Accounts: accounts.Int64(), + ActiveReconciliations: activeReconciliations.Int64(), + InactiveReconciliations: inactiveReconciliations.Int64(), + ExemptReconciliations: exemptReconciliations.Int64(), + FailedReconciliations: failedReconciliations.Int64(), + SkippedReconciliations: skippedReconciliations.Int64(), + } + + if balances != nil { + coverage, err := balances.EstimatedReconciliationCoverage(ctx) + switch { + case err == nil: + stats.ReconciliationCoverage = coverage + case errors.Is(err, storageErrs.ErrHelperHandlerMissing): + // In this case, we use the default 0 value for the reconciliation + // coverage in stats. + case err != nil: + log.Printf("cannot get reconciliation coverage: %s", err.Error()) + return nil + } + } + + return stats +} + +// CheckDataProgress contains information +// about check:data's syncing progress. +type CheckDataProgress struct { + Blocks int64 `json:"blocks"` + Tip int64 `json:"tip"` + Completed float64 `json:"completed"` + Rate float64 `json:"rate"` + TimeRemaining string `json:"time_remaining"` + ReconcilerQueueSize int `json:"reconciler_queue_size"` + ReconcilerLastIndex int64 `json:"reconciler_last_index"` +} + +// ComputeCheckDataProgress returns +// a populated *CheckDataProgress. 
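// Editor's note (illustrative only, not part of this change): the progress
// figures computed below reduce to two ratios, shown here with sample numbers:
//
//	rate      = (blocks - orphans) / elapsedSeconds
//	          = (12000 - 50) / 600                  ~ 19.9 blocks/s
//	completed = (head - genesis) / (tip - genesis) * 100
//	          = (11950 - 0) / (700000 - 0) * 100    ~ 1.7%
//
// The implementation performs these divisions with big.Float to avoid integer
// truncation before converting to float64 for display.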
+func ComputeCheckDataProgress( + ctx context.Context, + fetcher *fetcher.Fetcher, + network *types.NetworkIdentifier, + counters *modules.CounterStorage, + blockStorage *modules.BlockStorage, + reconciler *reconciler.Reconciler, +) *CheckDataProgress { + networkStatus, fetchErr := fetcher.NetworkStatusRetry(ctx, network, nil) + if fetchErr != nil { + fmt.Printf("cannot get network status: %s", fetchErr.Err.Error()) + return nil + } + tipIndex := networkStatus.CurrentBlockIdentifier.Index + + genesisBlockIndex := int64(0) + if networkStatus.GenesisBlockIdentifier != nil { + genesisBlockIndex = networkStatus.GenesisBlockIdentifier.Index + } + + // Get current tip in the case that re-orgs occurred + // or a custom start index was provided. + headBlock, err := blockStorage.GetHeadBlockIdentifier(ctx) + if errors.Is(err, storageErrs.ErrHeadBlockNotFound) { + return nil + } + if err != nil { + fmt.Printf("cannot get head block: %s", err.Error()) + return nil + } + + blocks, err := counters.Get(ctx, modules.BlockCounter) + if err != nil { + fmt.Printf("cannot get block counter: %s", err.Error()) + return nil + } + + if blocks.Sign() == 0 { // wait for at least 1 block to be processed + return nil + } + + orphans, err := counters.Get(ctx, modules.OrphanCounter) + if err != nil { + fmt.Printf("cannot get orphan counter: %s", err.Error()) + return nil + } + + // adjustedBlocks is used to calculate the sync rate (regardless + // of which block we started syncing at) + adjustedBlocks := blocks.Int64() - orphans.Int64() + if tipIndex-adjustedBlocks <= 0 { // return if no blocks to sync + return nil + } + + elapsedTime, err := counters.Get(ctx, TimeElapsedCounter) + if err != nil { + fmt.Printf("cannot get elapsed time: %s", err.Error()) + return nil + } + + if elapsedTime.Sign() == 0 { // wait for at least some elapsed time + return nil + } + + blocksPerSecond := new( + big.Float, + ).Quo( + new(big.Float).SetInt64(adjustedBlocks), + new(big.Float).SetInt(elapsedTime), + ) + blocksPerSecondFloat, _ := blocksPerSecond.Float64() + + // some blockchains don't start their genesis block from 0 height + // So take the height of genesis block and calculate sync percentage based on that + blocksSynced := new( + big.Float, + ).Quo( + new(big.Float).SetInt64(headBlock.Index-genesisBlockIndex), + new(big.Float).SetInt64(tipIndex-genesisBlockIndex), + ) + blocksSyncedFloat, _ := blocksSynced.Float64() + + return &CheckDataProgress{ + Blocks: headBlock.Index, + Tip: tipIndex, + Completed: blocksSyncedFloat * utils.OneHundred, + Rate: blocksPerSecondFloat, + TimeRemaining: utils.TimeToTip( + blocksPerSecondFloat, + headBlock.Index, + tipIndex, + ).String(), + ReconcilerQueueSize: reconciler.QueueSize(), + ReconcilerLastIndex: reconciler.LastIndexReconciled(), + } +} + +// CheckDataStatus contains both CheckDataStats +// and CheckDataProgress. +type CheckDataStatus struct { + Stats *CheckDataStats `json:"stats"` + Progress *CheckDataProgress `json:"progress"` +} + +// ComputeCheckDataStatus returns a populated +// *CheckDataStatus. 
+func ComputeCheckDataStatus( + ctx context.Context, + blocks *modules.BlockStorage, + counters *modules.CounterStorage, + balances *modules.BalanceStorage, + fetcher *fetcher.Fetcher, + network *types.NetworkIdentifier, + reconciler *reconciler.Reconciler, +) *CheckDataStatus { + return &CheckDataStatus{ + Stats: ComputeCheckDataStats( + ctx, + counters, + balances, + ), + Progress: ComputeCheckDataProgress( + ctx, + fetcher, + network, + counters, + blocks, + reconciler, + ), + } +} + +// FetchCheckDataStatus fetches *CheckDataStatus. +func FetchCheckDataStatus(url string) (*CheckDataStatus, error) { + var status CheckDataStatus + if err := JSONFetch(url, &status); err != nil { + return nil, fmt.Errorf("unable to fetch check data status: %w", err) + } + + return &status, nil +} + +// CheckDataTests indicates which tests passed. +// If a test is nil, it did not apply to the run. +// +// TODO: add CoinTracking +type CheckDataTests struct { + RequestResponse bool `json:"request_response"` + ResponseAssertion bool `json:"response_assertion"` + BlockSyncing *bool `json:"block_syncing"` + BalanceTracking *bool `json:"balance_tracking"` + Reconciliation *bool `json:"reconciliation"` +} + +// convertBool converts a *bool +// to a test result. +func convertBool(v *bool) string { + if v == nil { + return "NOT TESTED" + } + + if *v { + return "PASSED" + } + + return "FAILED" +} + +// Print logs CheckDataTests to the console. +func (c *CheckDataTests) Print() { + table := tablewriter.NewWriter(os.Stdout) + table.SetRowLine(true) + table.SetRowSeparator("-") + table.SetHeader([]string{"check:data Tests", "Description", "Status"}) + table.Append( + []string{ + "Request/Response", + "Rosetta implementation serviced all requests", + convertBool(&c.RequestResponse), + }, + ) + table.Append( + []string{ + "Response Assertion", + "All responses are correctly formatted", + convertBool(&c.ResponseAssertion), + }, + ) + table.Append( + []string{ + "Block Syncing", + "Blocks are connected into a single canonical chain", + convertBool(c.BlockSyncing), + }, + ) + table.Append( + []string{ + "Balance Tracking", + "Account balances did not go negative", + convertBool(c.BalanceTracking), + }, + ) + table.Append( + []string{ + "Reconciliation", + "No balance discrepancies were found between computed and live balances", + convertBool(c.Reconciliation), + }, + ) + + table.Render() +} + +// RequestResponseTest returns a boolean +// indicating if all endpoints received +// a non-500 response. +func RequestResponseTest(err error) bool { + return !(fetcher.Err(err) || + errors.Is(err, utils.ErrNetworkNotSupported)) +} + +// ResponseAssertionTest returns a boolean +// indicating if all responses received from +// the server were correctly formatted. +func ResponseAssertionTest(err error) bool { + is, _ := asserter.Err(err) + return !is +} + +// BlockSyncingTest returns a boolean +// indicating if it was possible to sync +// blocks. +func BlockSyncingTest(err error, blocksSynced bool) *bool { + syncPass := true + storageFailed, _ := storageErrs.Err(err) + if syncer.Err(err) || + (storageFailed && !errors.Is(err, storageErrs.ErrNegativeBalance)) { + syncPass = false + } + + if !blocksSynced && syncPass { + return nil + } + + return &syncPass +} + +// BalanceTrackingTest returns a boolean +// indicating if any balances went negative +// while syncing. 
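// Editor's note (not part of this change): like the other check:data tests,
// the function below returns a tri-state *bool: nil when the test did not
// apply (balance tracking disabled or no operations seen, and no balance
// error was hit), a pointer to true when no balance-storage errors occurred,
// and a pointer to false otherwise. convertBool above renders those states
// for the results table, e.g.:
//
//	res := BalanceTrackingTest(cfg, err, operationsSeen)
//	fmt.Println(convertBool(res)) // "NOT TESTED", "PASSED", or "FAILED"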
+func BalanceTrackingTest(cfg *configuration.Configuration, err error, operationsSeen bool) *bool { + balancePass := true + for _, balanceStorageErr := range storageErrs.BalanceStorageErrs { + if errors.Is(err, balanceStorageErr) { + balancePass = false + break + } + } + + if (cfg.Data.BalanceTrackingDisabled || !operationsSeen) && balancePass { + return nil + } + + return &balancePass +} + +// ReconciliationTest returns a boolean +// if no reconciliation errors were received. +func ReconciliationTest( + cfg *configuration.Configuration, + err error, + reconciliationsPerformed bool, + reconciliationsFailed bool, +) *bool { + if errors.Is(err, cliErrs.ErrReconciliationFailure) { + return &f + } + + if cfg.Data.BalanceTrackingDisabled || + cfg.Data.ReconciliationDisabled || + (!reconciliationsPerformed && !reconciliationsFailed) { + return nil + } + + if reconciliationsFailed { + return &f + } + + return &tr +} + +// ComputeCheckDataTests returns a populated CheckDataTests. +func ComputeCheckDataTests( // nolint:gocognit + ctx context.Context, + cfg *configuration.Configuration, + err error, + counterStorage *modules.CounterStorage, +) *CheckDataTests { + operationsSeen := false + reconciliationsPerformed := false + reconciliationsFailed := false + blocksSynced := false + if counterStorage != nil { + blocks, err := counterStorage.Get(ctx, modules.BlockCounter) + if err == nil && blocks.Int64() > 0 { + blocksSynced = true + } + + ops, err := counterStorage.Get(ctx, modules.OperationCounter) + if err == nil && ops.Int64() > 0 { + operationsSeen = true + } + + activeReconciliations, err := counterStorage.Get(ctx, modules.ActiveReconciliationCounter) + if err == nil && activeReconciliations.Int64() > 0 { + reconciliationsPerformed = true + } + + inactiveReconciliations, err := counterStorage.Get( + ctx, + modules.InactiveReconciliationCounter, + ) + if err == nil && inactiveReconciliations.Int64() > 0 { + reconciliationsPerformed = true + } + + exemptReconciliations, err := counterStorage.Get( + ctx, + modules.ExemptReconciliationCounter, + ) + if err == nil && exemptReconciliations.Int64() > 0 { + reconciliationsPerformed = true + } + + failedReconciliations, err := counterStorage.Get( + ctx, + modules.FailedReconciliationCounter, + ) + if err == nil && failedReconciliations.Int64() > 0 { + reconciliationsPerformed = true + reconciliationsFailed = true + } + } + + return &CheckDataTests{ + RequestResponse: RequestResponseTest(err), + ResponseAssertion: ResponseAssertionTest(err), + BlockSyncing: BlockSyncingTest(err, blocksSynced), + BalanceTracking: BalanceTrackingTest(cfg, err, operationsSeen), + Reconciliation: ReconciliationTest( + cfg, + err, + reconciliationsPerformed, + reconciliationsFailed, + ), + } +} + +// ComputeCheckDataResults returns a populated CheckDataResults. 
+func ComputeCheckDataResults( + cfg *configuration.Configuration, + err error, + counterStorage *modules.CounterStorage, + balanceStorage *modules.BalanceStorage, + endCondition configuration.CheckDataEndCondition, + endConditionDetail string, +) *CheckDataResults { + ctx := context.Background() + tests := ComputeCheckDataTests(ctx, cfg, err, counterStorage) + stats := ComputeCheckDataStats(ctx, counterStorage, balanceStorage) + results := &CheckDataResults{ + Tests: tests, + Stats: stats, + } + + if err != nil { + results.Error = fmt.Sprintf("%+v", err) + + // If all tests pass, but we still encountered an error, + // then we hard exit without showing check:data results + // because the error falls beyond our test coverage. + if tests.RequestResponse && + tests.ResponseAssertion && + (tests.BlockSyncing == nil || *tests.BlockSyncing) && + (tests.BalanceTracking == nil || *tests.BalanceTracking) && + (tests.Reconciliation == nil || *tests.Reconciliation) { + results.Tests = nil + } + + // We never want to populate an end condition + // if there was an error! + return results + } + + if len(endCondition) > 0 { + results.EndCondition = &EndCondition{ + Type: endCondition, + Detail: endConditionDetail, + } + } + + return results +} + +// ExitData exits check:data, logs the test results to the console, +// and to a provided output path. +func ExitData( + config *configuration.Configuration, + counterStorage *modules.CounterStorage, + balanceStorage *modules.BalanceStorage, + err error, + endCondition configuration.CheckDataEndCondition, + endConditionDetail string, +) error { + if !config.ErrorStackTraceDisabled { + err = pkgError.WithStack(err) + } + + results := ComputeCheckDataResults( + config, + err, + counterStorage, + balanceStorage, + endCondition, + endConditionDetail, + ) + if results != nil { + results.Print() + results.Output(config.Data.ResultsOutputFile) + } + + return err +} diff --git a/pkg/tester/data_results_test.go b/pkg/results/data_results_test.go similarity index 68% rename from pkg/tester/data_results_test.go rename to pkg/results/data_results_test.go index 25950f4e..b5816fc8 100644 --- a/pkg/tester/data_results_test.go +++ b/pkg/results/data_results_test.go @@ -12,30 +12,64 @@ // See the License for the specific language governing permissions and // limitations under the License. 
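The `CheckDataTests` helpers above follow a tri-state convention: a plain `bool` is always evaluated, while a `*bool` that is `nil` means the check did not apply to the run (`convertBool` renders it as `NOT TESTED`). Below is a minimal, hedged sketch of how a caller in this package might assemble a result set and poll a running `check:data` via `FetchCheckDataStatus`; the status address and the `demo` helper name are assumptions for illustration only.

```go
package results

import (
	"fmt"
	"log"
)

// demoCheckDataStatus is a hypothetical helper showing the tri-state
// convention used by CheckDataTests: nil = not tested, true/false = pass/fail.
func demoCheckDataStatus() {
	passed := true
	tests := &CheckDataTests{
		RequestResponse:   true,
		ResponseAssertion: true,
		BlockSyncing:      &passed, // blocks connected into a single chain
		BalanceTracking:   nil,     // balance tracking disabled for this run
		Reconciliation:    nil,     // reconciliation disabled for this run
	}
	tests.Print() // renders the same table as check:data

	// Poll a running check:data process; the address is illustrative and
	// would come from the rosetta-cli configuration in practice.
	status, err := FetchCheckDataStatus("http://localhost:9090/status")
	if err != nil {
		log.Printf("status not available yet: %v", err)
		return
	}
	fmt.Printf("synced %d blocks so far\n", status.Stats.Blocks)
}
```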
-package tester +package results import ( "context" + "errors" "fmt" "math/big" "path" "testing" "github.com/coinbase/rosetta-cli/configuration" - "github.com/coinbase/rosetta-cli/pkg/processor" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-sdk-go/asserter" "github.com/coinbase/rosetta-sdk-go/fetcher" - "github.com/coinbase/rosetta-sdk-go/storage" + sdkMocks "github.com/coinbase/rosetta-sdk-go/mocks/storage/modules" + "github.com/coinbase/rosetta-sdk-go/parser" + "github.com/coinbase/rosetta-sdk-go/storage/database" + storageErrs "github.com/coinbase/rosetta-sdk-go/storage/errors" + "github.com/coinbase/rosetta-sdk-go/storage/modules" "github.com/coinbase/rosetta-sdk-go/syncer" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) -var ( - tr = true - f = false -) +func baseAsserter() *asserter.Asserter { + a, _ := asserter.NewClientWithOptions( + &types.NetworkIdentifier{ + Blockchain: "bitcoin", + Network: "mainnet", + }, + &types.BlockIdentifier{ + Hash: "block 0", + Index: 0, + }, + []string{"Transfer"}, + []*types.OperationStatus{ + { + Status: "Success", + Successful: true, + }, + }, + []*types.Error{}, + nil, + &asserter.Validations{ + Enabled: false, + }, + ) + return a +} + +func exemptFunc() parser.ExemptOperation { + return func(op *types.Operation) bool { + return false + } +} func TestComputeCheckDataResults(t *testing.T) { var tests = map[string]struct { @@ -47,6 +81,7 @@ func TestComputeCheckDataResults(t *testing.T) { operationCount int64 activeReconciliations int64 inactiveReconciliations int64 + reconciliationFailures int64 // balance storage values provideBalanceStorage bool @@ -91,7 +126,7 @@ func TestComputeCheckDataResults(t *testing.T) { }, "default configuration, no storage, assertion errors": { cfg: configuration.DefaultConfiguration(), - err: []error{fetcher.ErrAssertionFailed}, + err: []error{asserter.ErrAmountValueMissing}, result: &CheckDataResults{ Tests: &CheckDataTests{ RequestResponse: true, @@ -104,8 +139,8 @@ func TestComputeCheckDataResults(t *testing.T) { err: []error{ syncer.ErrCannotRemoveGenesisBlock, syncer.ErrOutOfOrder, - storage.ErrDuplicateKey, - storage.ErrDuplicateTransactionHash, + storageErrs.ErrDuplicateKey, + storageErrs.ErrDuplicateTransactionHash, }, result: &CheckDataResults{ Tests: &CheckDataTests{ @@ -118,7 +153,7 @@ func TestComputeCheckDataResults(t *testing.T) { "default configuration, counter storage no blocks, balance errors": { cfg: configuration.DefaultConfiguration(), provideCounterStorage: true, - err: []error{storage.ErrNegativeBalance}, + err: []error{storageErrs.ErrNegativeBalance}, result: &CheckDataResults{ Tests: &CheckDataTests{ RequestResponse: true, @@ -132,7 +167,7 @@ func TestComputeCheckDataResults(t *testing.T) { cfg: configuration.DefaultConfiguration(), provideCounterStorage: true, blockCount: 100, - err: []error{storage.ErrNegativeBalance}, + err: []error{storageErrs.ErrNegativeBalance}, result: &CheckDataResults{ Tests: &CheckDataTests{ RequestResponse: true, @@ -268,7 +303,7 @@ func TestComputeCheckDataResults(t *testing.T) { }, "default configuration, no storage, balance errors": { cfg: configuration.DefaultConfiguration(), - err: []error{storage.ErrNegativeBalance}, + err: []error{storageErrs.ErrNegativeBalance}, result: &CheckDataResults{ Tests: &CheckDataTests{ RequestResponse: true, @@ -279,13 +314,55 @@ func TestComputeCheckDataResults(t *testing.T) { }, 
"default configuration, no storage, reconciliation errors": { cfg: configuration.DefaultConfiguration(), - err: []error{processor.ErrReconciliationFailure}, + err: []error{cliErrs.ErrReconciliationFailure}, + result: &CheckDataResults{ + Tests: &CheckDataTests{ + RequestResponse: true, + ResponseAssertion: true, + Reconciliation: &f, + }, + }, + }, + "default configuration, counter storage, reconciliation errors": { + cfg: configuration.DefaultConfiguration(), + err: []error{cliErrs.ErrReconciliationFailure}, + provideCounterStorage: true, + activeReconciliations: 10, + reconciliationFailures: 19, result: &CheckDataResults{ Tests: &CheckDataTests{ RequestResponse: true, ResponseAssertion: true, Reconciliation: &f, }, + Stats: &CheckDataStats{ + ActiveReconciliations: 10, + FailedReconciliations: 19, + }, + }, + }, + "default configuration, no storage, unknown errors": { + cfg: configuration.DefaultConfiguration(), + err: []error{errors.New("unsure how to handle this error")}, + result: &CheckDataResults{}, + }, + "default configuration, counter storage no blocks, unknown errors": { + cfg: configuration.DefaultConfiguration(), + provideCounterStorage: true, + err: []error{errors.New("unsure how to handle this error")}, + result: &CheckDataResults{ + Stats: &CheckDataStats{}, + }, + }, + "default configuration, counter storage with blocks, unknown errors": { + cfg: configuration.DefaultConfiguration(), + provideCounterStorage: true, + blockCount: 100, + err: []error{errors.New("unsure how to handle this error")}, + result: &CheckDataResults{ + Stats: &CheckDataStats{ + Blocks: 100, + }, }, }, } @@ -297,7 +374,7 @@ func TestComputeCheckDataResults(t *testing.T) { var testErr error if err != nil { testName = err.Error() - testErr = fmt.Errorf("%w: test wrapping", err) + testErr = fmt.Errorf("test wrapping: %w", err) test.result.Error = testErr.Error() } @@ -305,77 +382,81 @@ func TestComputeCheckDataResults(t *testing.T) { assert.NoError(t, err) ctx := context.Background() - localStore, err := storage.NewBadgerStorage(ctx, dir) + localStore, err := database.NewBadgerDatabase( + ctx, + dir, + database.WithIndexCacheSize(database.TinyIndexCacheSize), + ) assert.NoError(t, err) logPath := path.Join(dir, "results.json") - var counterStorage *storage.CounterStorage + var counterStorage *modules.CounterStorage if test.provideCounterStorage { - counterStorage = storage.NewCounterStorage(localStore) + counterStorage = modules.NewCounterStorage(localStore) _, err = counterStorage.Update( ctx, - storage.BlockCounter, + modules.BlockCounter, big.NewInt(test.blockCount), ) assert.NoError(t, err) _, err = counterStorage.Update( ctx, - storage.OperationCounter, + modules.OperationCounter, big.NewInt(test.operationCount), ) assert.NoError(t, err) _, err = counterStorage.Update( ctx, - storage.ActiveReconciliationCounter, + modules.ActiveReconciliationCounter, big.NewInt(test.activeReconciliations), ) assert.NoError(t, err) _, err = counterStorage.Update( ctx, - storage.InactiveReconciliationCounter, + modules.InactiveReconciliationCounter, big.NewInt(test.inactiveReconciliations), ) assert.NoError(t, err) + + _, err = counterStorage.Update( + ctx, + modules.FailedReconciliationCounter, + big.NewInt(test.reconciliationFailures), + ) + assert.NoError(t, err) } - var balanceStorage *storage.BalanceStorage + var balanceStorage *modules.BalanceStorage if test.provideBalanceStorage { - balanceStorage = storage.NewBalanceStorage(localStore) - - j := 0 - currency := &types.Currency{Symbol: "BLAH"} - block := 
&types.BlockIdentifier{Hash: "0", Index: 0} - for i := 0; i < test.totalAccounts; i++ { - dbTransaction := localStore.NewDatabaseTransaction(ctx, true) - acct := &types.AccountIdentifier{ - Address: fmt.Sprintf("account %d", i), - } - assert.NoError(t, balanceStorage.SetBalance( - ctx, - dbTransaction, - acct, - &types.Amount{Value: "1", Currency: currency}, - block, - )) - assert.NoError(t, dbTransaction.Commit(ctx)) - - if j >= test.reconciledAccounts { - continue - } - - assert.NoError(t, balanceStorage.Reconciled( - ctx, - acct, - currency, - block, - )) - - j++ - } + balanceStorage = modules.NewBalanceStorage(localStore) + mockHelper := &sdkMocks.BalanceStorageHelper{} + mockHelper.On("Asserter").Return(baseAsserter()) + mockHelper.On("ExemptFunc").Return(exemptFunc()) + mockHelper.On("BalanceExemptions").Return([]*types.BalanceExemption{}) + mockHelper.On( + "AccountsSeen", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return( + big.NewInt(int64(test.totalAccounts)), + nil, + ) + mockHelper.On( + "AccountsReconciled", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return( + big.NewInt(int64(test.reconciledAccounts)), + nil, + ) + mockHandler := &sdkMocks.BalanceStorageHandler{} + balanceStorage.Initialize(mockHelper, mockHandler) } t.Run(testName, func(t *testing.T) { diff --git a/pkg/results/perf_results.go b/pkg/results/perf_results.go new file mode 100644 index 00000000..9d5c7ee2 --- /dev/null +++ b/pkg/results/perf_results.go @@ -0,0 +1,157 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package results + +import ( + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/coinbase/rosetta-cli/configuration" + "github.com/coinbase/rosetta-sdk-go/utils" + "github.com/olekukonko/tablewriter" +) + +// Output writes *CheckPerfResults to the provided +// path. +func (c *CheckPerfStats) Output(path string) { + if len(path) > 0 { + writeErr := utils.SerializeAndWrite(path, c) + if writeErr != nil { + log.Printf("unable to save results: %s\n", writeErr.Error()) + } + } +} + +type CheckPerfRawStats struct { + BlockEndpointTotalTime time.Duration + BlockEndpointNumErrors int64 + AccountBalanceEndpointTotalTime time.Duration + AccountBalanceNumErrors int64 +} + +// CheckPerfStats contains interesting stats that +// are counted while running the check:perf. 
+type CheckPerfStats struct { + StartBlock int64 `json:"start_block"` + EndBlock int64 `json:"end_block"` + NumTimesHitEachEndpoint int `json:"num_times_hit_each_endpoint"` + AccountBalanceEndpointAverageTimeMs int64 `json:"account_balance_endpoint_average_time_ms"` + AccountBalanceEndpointTotalTimeMs int64 `json:"account_balance_endpoint_total_time_ms"` + AccountBalanceEndpointNumErrors int64 `json:"account_balance_endpoint_num_errors"` + BlockEndpointAverageTimeMs int64 `json:"block_endpoint_average_time_ms"` + BlockEndpointTotalTimeMs int64 `json:"block_endpoint_total_time_ms"` + BlockEndpointNumErrors int64 `json:"block_endpoint_num_errors"` +} + +// Print logs CheckPerfStats to the console. +func (c *CheckPerfStats) Print() { + table := tablewriter.NewWriter(os.Stdout) + table.SetRowLine(true) + table.SetRowSeparator("-") + table.SetHeader([]string{"check:perf Stats", "Description", "Value"}) + table.Append([]string{"Start Block", "The Starting Block", strconv.FormatInt(c.StartBlock, 10)}) + table.Append([]string{"End Block", "The Ending Block", strconv.FormatInt(c.EndBlock, 10)}) + table.Append([]string{"Num Times Each Endpoint", "Number of times that each endpoint was hit", strconv.FormatInt(int64(c.NumTimesHitEachEndpoint), 10)}) + table.Append( + []string{ + "/Block Endpoint Total Time", + "Total elapsed time taken to fetch all blocks (ms)", + strconv.FormatInt(c.BlockEndpointTotalTimeMs, 10), + }, + ) + table.Append( + []string{ + "/Block Endpoint Average Time", + "Average time taken to fetch each block (ms)", + strconv.FormatInt(c.BlockEndpointAverageTimeMs, 10), + }, + ) + table.Append( + []string{ + "/Block Endpoint Num Errors", + "Total num errors occurred while fetching blocks", + strconv.FormatInt(c.BlockEndpointNumErrors, 10), + }, + ) + table.Append( + []string{ + "/Account/Balance Endpoint Average Time", + "Average time taken to fetch each account balance (ms)", + strconv.FormatInt(c.AccountBalanceEndpointAverageTimeMs, 10), + }, + ) + table.Append( + []string{ + "/Account/Balance Endpoint Total Time", + "Total elapsed time taken to fetch all account balances (ms)", + strconv.FormatInt(c.AccountBalanceEndpointTotalTimeMs, 10), + }, + ) + table.Append( + []string{ + "/Account/Balance Endpoint Num Errors", + "Total num errors occurred while fetching account balances", + strconv.FormatInt(c.AccountBalanceEndpointNumErrors, 10), + }, + ) + + table.Render() +} + +// ComputeCheckPerfStats returns a populated CheckPerfStats. +func ComputeCheckPerfStats( + config *configuration.CheckPerfConfiguration, + rawStats *CheckPerfRawStats, +) *CheckPerfStats { + totalNumEndpointsHit := (config.EndBlock - config.StartBlock) * int64(config.NumTimesToHitEndpoints) + stats := &CheckPerfStats{ + BlockEndpointAverageTimeMs: rawStats.BlockEndpointTotalTime.Milliseconds() / totalNumEndpointsHit, + BlockEndpointTotalTimeMs: rawStats.BlockEndpointTotalTime.Milliseconds(), + BlockEndpointNumErrors: rawStats.BlockEndpointNumErrors, + AccountBalanceEndpointAverageTimeMs: rawStats.AccountBalanceEndpointTotalTime.Milliseconds() / totalNumEndpointsHit, + AccountBalanceEndpointTotalTimeMs: rawStats.AccountBalanceEndpointTotalTime.Milliseconds(), + AccountBalanceEndpointNumErrors: rawStats.AccountBalanceNumErrors, + StartBlock: config.StartBlock, + EndBlock: config.EndBlock, + NumTimesHitEachEndpoint: config.NumTimesToHitEndpoints, + } + + return stats +} + +// ExitPerf exits check:perf, logs the test results to the console, +// and to a provided output path. 
+func ExitPerf( + config *configuration.CheckPerfConfiguration, + err error, + rawStats *CheckPerfRawStats, +) error { + if err != nil { + log.Fatal(fmt.Errorf("Check:Perf Failed!: %w", err)) + } + + stats := ComputeCheckPerfStats( + config, + rawStats, + ) + + stats.Print() + stats.Output(config.StatsOutputFile) + + return err +} diff --git a/pkg/results/types.go b/pkg/results/types.go new file mode 100644 index 00000000..a4215928 --- /dev/null +++ b/pkg/results/types.go @@ -0,0 +1,20 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package results + +const ( + // TimeElapsedCounter tracks the total time elapsed in seconds. + TimeElapsedCounter = "time_elapsed" +) diff --git a/pkg/results/utils.go b/pkg/results/utils.go new file mode 100644 index 00000000..e9de29af --- /dev/null +++ b/pkg/results/utils.go @@ -0,0 +1,47 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package results + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// JSONFetch makes a GET request to the URL and unmarshal +// the response into output. +func JSONFetch(url string, output interface{}) error { + resp, err := http.Get(url) // #nosec + if err != nil { + return fmt.Errorf("unable to fetch url %s: %w", url, err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("unable to read body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received %d status with body %s", resp.StatusCode, string(body)) + } + + if err := json.Unmarshal(body, output); err != nil { + return fmt.Errorf("unable to unmarshal: %w", err) + } + + return nil +} diff --git a/pkg/results/utils_test.go b/pkg/results/utils_test.go new file mode 100644 index 00000000..8079600d --- /dev/null +++ b/pkg/results/utils_test.go @@ -0,0 +1,75 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
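To make the averaging in `ComputeCheckPerfStats` concrete: every endpoint is hit `(EndBlock - StartBlock) * NumTimesToHitEndpoints` times, and each average is that endpoint's total elapsed milliseconds divided by this count. A hedged sketch with invented numbers follows (nothing below is a real measurement, and the helper name is hypothetical).

```go
package results

import (
	"fmt"
	"time"

	"github.com/coinbase/rosetta-cli/configuration"
)

// demoPerfStats is a hypothetical caller of ComputeCheckPerfStats.
// (200-100) blocks * 5 passes = 500 calls per endpoint.
func demoPerfStats() {
	cfg := &configuration.CheckPerfConfiguration{
		StartBlock:             100,
		EndBlock:               200,
		NumTimesToHitEndpoints: 5,
	}
	raw := &CheckPerfRawStats{
		BlockEndpointTotalTime:          25 * time.Second, // 25,000 ms total
		AccountBalanceEndpointTotalTime: 10 * time.Second, // 10,000 ms total
	}

	stats := ComputeCheckPerfStats(cfg, raw)
	fmt.Println(stats.BlockEndpointAverageTimeMs)          // 25000 / 500 = 50
	fmt.Println(stats.AccountBalanceEndpointAverageTimeMs) // 10000 / 500 = 20
}
```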
+ +package results + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJSONFetch(t *testing.T) { + var tests = map[string]struct { + status int + body string + + expectedResult map[string]interface{} + expectedError string + }{ + "simple 200": { + status: http.StatusOK, + body: `{"test":"123"}`, + expectedResult: map[string]interface{}{ + "test": "123", + }, + }, + "not 200": { + status: http.StatusUnsupportedMediaType, + body: `hello`, + expectedError: "received 415 status with body hello\n", + }, + "not JSON": { + status: http.StatusOK, + body: `hello`, + expectedError: "unable to unmarshal: invalid character 'h' looking for beginning of value", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(test.status) + fmt.Fprintln(w, test.body) + })) + defer ts.Close() + + var obj map[string]interface{} + err := JSONFetch(ts.URL, &obj) + if len(test.expectedError) > 0 { + assert.EqualError(t, err, test.expectedError) + assert.Len(t, obj, 0) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedResult, obj) + } + }) + } +} diff --git a/pkg/tester/benchmark_utils.go b/pkg/tester/benchmark_utils.go new file mode 100644 index 00000000..eb64dbe5 --- /dev/null +++ b/pkg/tester/benchmark_utils.go @@ -0,0 +1,24 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tester + +import "time" + +func timerFactory() func() time.Duration { + start := time.Now() + return func() time.Duration { + return time.Since(start) + } +} diff --git a/pkg/tester/construction.go b/pkg/tester/construction.go index 850d8881..9c181388 100644 --- a/pkg/tester/construction.go +++ b/pkg/tester/construction.go @@ -16,24 +16,32 @@ package tester import ( "context" + "encoding/json" "errors" "fmt" "log" - "os" + "net/http" "time" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-cli/configuration" "github.com/coinbase/rosetta-cli/pkg/logger" "github.com/coinbase/rosetta-cli/pkg/processor" + "github.com/coinbase/rosetta-cli/pkg/results" "github.com/coinbase/rosetta-sdk-go/constructor/coordinator" "github.com/coinbase/rosetta-sdk-go/fetcher" "github.com/coinbase/rosetta-sdk-go/parser" "github.com/coinbase/rosetta-sdk-go/statefulsyncer" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + storageErrs "github.com/coinbase/rosetta-sdk-go/storage/errors" + "github.com/coinbase/rosetta-sdk-go/storage/modules" + "github.com/coinbase/rosetta-sdk-go/syncer" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/fatih/color" + "golang.org/x/sync/errgroup" ) const ( @@ -42,20 +50,24 @@ const ( constructionCmdName = "check-construction" endConditionsCheckInterval = 10 * time.Second + tipWaitInterval = 10 * time.Second ) +var _ http.Handler = (*ConstructionTester)(nil) +var constructionMetadata string + // ConstructionTester coordinates the `check:construction` test. type ConstructionTester struct { network *types.NetworkIdentifier - database storage.Database + database database.Database config *configuration.Configuration syncer *statefulsyncer.StatefulSyncer logger *logger.Logger onlineFetcher *fetcher.Fetcher - broadcastStorage *storage.BroadcastStorage - blockStorage *storage.BlockStorage - jobStorage *storage.JobStorage - counterStorage *storage.CounterStorage + broadcastStorage *modules.BroadcastStorage + blockStorage *modules.BlockStorage + jobStorage *modules.JobStorage + counterStorage *modules.CounterStorage coordinator *coordinator.Coordinator cancel context.CancelFunc signalReceived *bool @@ -73,54 +85,94 @@ func InitializeConstruction( signalReceived *bool, ) (*ConstructionTester, error) { dataPath, err := utils.CreateCommandPath(config.DataDirectory, constructionCmdName, network) + metadataMap := logger.ConvertStringToMap(config.InfoMetaData) + metadataMap = logger.AddRequestUUIDToMap(metadataMap, config.RequestUUID) + constructionMetadata = logger.ConvertMapToString(metadataMap) if err != nil { - log.Fatalf("%s: cannot create command path", err.Error()) + err = fmt.Errorf("failed to create command path: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err } - opts := []storage.BadgerOption{} - if !config.DisableMemoryLimit { - opts = append(opts, storage.WithMemoryLimit()) + opts := []database.BadgerOption{} + // add constructionMetadata into localStore + opts = append(opts, database.WithMetaData(constructionMetadata)) + if config.CompressionDisabled { + opts = append(opts, database.WithoutCompression()) } - localStore, err := storage.NewBadgerStorage(ctx, dataPath, opts...) 
+ if config.L0InMemoryEnabled { + opts = append( + opts, + database.WithCustomSettings(database.PerformanceBadgerOptions(dataPath)), + ) + } + + // add constructionMetadata into localStore + localStore, err := database.NewBadgerDatabase(ctx, dataPath, opts...) if err != nil { - log.Fatalf("%s: unable to initialize database", err.Error()) + err = fmt.Errorf("unable to initialize database: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err } - counterStorage := storage.NewCounterStorage(localStore) - logger := logger.NewLogger( - counterStorage, - nil, + networkOptions, fetchErr := onlineFetcher.NetworkOptionsRetry(ctx, network, nil) + if fetchErr != nil { + err := fmt.Errorf("unable to get network options: %w%s", fetchErr.Err, constructionMetadata) + color.Red(err.Error()) + return nil, err + } + + if len(networkOptions.Allow.BalanceExemptions) > 0 && + config.Construction.InitialBalanceFetchDisabled { + return nil, cliErrs.ErrBalanceExemptionsWithInitialBalanceFetchDisabled + } + + counterStorage := modules.NewCounterStorage(localStore) + //add constructionMetadata into logger + logger, err := logger.NewLogger( dataPath, false, false, false, false, + logger.Construction, + network, + metadataMap, ) + if err != nil { + err = fmt.Errorf("unable to initialize logger with error: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err + } - blockStorage := storage.NewBlockStorage(localStore) - keyStorage := storage.NewKeyStorage(localStore) + blockStorage := modules.NewBlockStorage(localStore, config.SerialBlockWorkers) + keyStorage := modules.NewKeyStorage(localStore) coinStorageHelper := processor.NewCoinStorageHelper(blockStorage) - coinStorage := storage.NewCoinStorage(localStore, coinStorageHelper, onlineFetcher.Asserter) - balanceStorage := storage.NewBalanceStorage(localStore) + coinStorage := modules.NewCoinStorage(localStore, coinStorageHelper, onlineFetcher.Asserter) + balanceStorage := modules.NewBalanceStorage(localStore) balanceStorageHelper := processor.NewBalanceStorageHelper( network, onlineFetcher, + counterStorage, false, nil, true, + networkOptions.Allow.BalanceExemptions, + config.Construction.InitialBalanceFetchDisabled, ) balanceStorageHandler := processor.NewBalanceStorageHandler( logger, nil, + counterStorage, false, nil, ) balanceStorage.Initialize(balanceStorageHelper, balanceStorageHandler) - broadcastStorage := storage.NewBroadcastStorage( + broadcastStorage := modules.NewBroadcastStorage( localStore, config.Construction.StaleDepth, config.Construction.BroadcastLimit, @@ -129,67 +181,121 @@ func InitializeConstruction( config.Construction.BlockBroadcastLimit, ) - parser := parser.New(onlineFetcher.Asserter, nil) + parser := parser.New(onlineFetcher.Asserter, nil, networkOptions.Allow.BalanceExemptions) broadcastHelper := processor.NewBroadcastStorageHelper( + network, blockStorage, onlineFetcher, ) + //add constructionMetadata into fetcher + fetcherOpts := []fetcher.Option{ + fetcher.WithMaxConnections(config.Construction.MaxOfflineConnections), + fetcher.WithAsserter(onlineFetcher.Asserter), + fetcher.WithTimeout(time.Duration(config.HTTPTimeout) * time.Second), + fetcher.WithMaxRetries(config.MaxRetries), + fetcher.WithMetaData(constructionMetadata), + } + if config.Construction.ForceRetry { + fetcherOpts = append(fetcherOpts, fetcher.WithForceRetry()) + } + offlineFetcher := fetcher.New( config.Construction.OfflineURL, - fetcher.WithAsserter(onlineFetcher.Asserter), - 
fetcher.WithTimeout(time.Duration(config.HTTPTimeout)*time.Second), + fetcherOpts..., ) // Import prefunded account and save to database err = keyStorage.ImportAccounts(ctx, config.Construction.PrefundedAccounts) if err != nil { + err = fmt.Errorf("%w%s", err, constructionMetadata) + color.Red(err.Error()) return nil, err } // Load all accounts for network - addresses, err := keyStorage.GetAllAddresses(ctx) + accounts, err := keyStorage.GetAllAccounts(ctx) if err != nil { - return nil, fmt.Errorf("%w: unable to load addresses", err) + err = fmt.Errorf("unable to load addresses: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err } - log.Printf("construction tester initialized with %d addresses\n", len(addresses)) - // Track balances on all addresses - for _, address := range addresses { - balanceStorageHelper.AddInterestingAddress(address) + for _, account := range accounts { + balanceStorageHelper.AddInterestingAddress(account.Address) } + color.Cyan("construction tester initialized with %d accounts%s\n", len(accounts), constructionMetadata) + // Load prefunded accounts var accountBalanceRequests []*utils.AccountBalanceRequest + var acctCoinsReqs []*utils.AccountCoinsRequest for _, prefundedAcc := range config.Construction.PrefundedAccounts { - address := prefundedAcc.Address accountBalance := &utils.AccountBalanceRequest{ - Account: &types.AccountIdentifier{ - Address: address, - }, + Account: prefundedAcc.AccountIdentifier, Network: network, Currency: prefundedAcc.Currency, } accountBalanceRequests = append(accountBalanceRequests, accountBalance) + + if config.CoinSupported { + acctCoinsReq := &utils.AccountCoinsRequest{ + Account: prefundedAcc.AccountIdentifier, + Network: network, + Currencies: []*types.Currency{prefundedAcc.Currency}, + IncludeMempool: false, + } + + acctCoinsReqs = append(acctCoinsReqs, acctCoinsReq) + } } accBalances, err := utils.GetAccountBalances(ctx, onlineFetcher, accountBalanceRequests) if err != nil { - return nil, fmt.Errorf("%w: unable to get account balances", err) + err = fmt.Errorf("unable to get account balances: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err } err = balanceStorage.SetBalanceImported(ctx, nil, accBalances) if err != nil { - return nil, fmt.Errorf("%w: unable to set balances", err) + err = fmt.Errorf("unable to set balances: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err } - err = coinStorage.SetCoinsImported(ctx, accBalances) - if err != nil { - return nil, fmt.Errorf("%w: unable to set coin balances", err) + // ------------------------------------------------------------------------- + // ------------ Get account coins and add them in coins storage ------------ + // ------------------------------------------------------------------------- + + if config.CoinSupported { + acctCoins, errAccCoins := utils.GetAccountCoins(ctx, onlineFetcher, acctCoinsReqs) + if errAccCoins != nil { + err = fmt.Errorf("unable to get account coins: %w%s", errAccCoins, constructionMetadata) + color.Red(err.Error()) + return nil, err + } + + // Extract accounts from account coins requests + var accts []*types.AccountIdentifier + for _, req := range acctCoinsReqs { + accts = append(accts, req.Account) + } + + err = coinStorage.SetCoinsImported(ctx, accts, acctCoins) + if err != nil { + err = fmt.Errorf("unable to set coin balances: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return nil, err + } } - jobStorage := storage.NewJobStorage(localStore) + // 
-------------------------------------------------------------------------- + // ---------------------- End of adding account coins ----------------------- + // -------------------------------------------------------------------------- + + jobStorage := modules.NewJobStorage(localStore) coordinatorHelper := processor.NewCoordinatorHelper( offlineFetcher, onlineFetcher, @@ -201,6 +307,7 @@ func InitializeConstruction( broadcastStorage, balanceStorageHelper, counterStorage, + config.Construction.Quiet, ) coordinatorHandler := processor.NewCoordinatorHandler( @@ -214,11 +321,14 @@ func InitializeConstruction( config.Construction.Workflows, ) if err != nil { - log.Fatalf("%s: unable to create coordinator", err.Error()) + msg := fmt.Sprintf("unable to create coordinator: %s%s", err.Error(), constructionMetadata) + color.Red(msg) + log.Fatalf(msg) } broadcastHandler := processor.NewBroadcastStorageHandler( config, + blockStorage, counterStorage, coordinator, parser, @@ -226,6 +336,7 @@ func InitializeConstruction( broadcastStorage.Initialize(broadcastHelper, broadcastHandler) + //add constructionMetadata into syncer syncer := statefulsyncer.New( ctx, network, @@ -234,8 +345,12 @@ func InitializeConstruction( counterStorage, logger, cancel, - []storage.BlockWorker{balanceStorage, coinStorage, broadcastStorage}, - config.SyncConcurrency, + []modules.BlockWorker{counterStorage, balanceStorage, coinStorage, broadcastStorage}, + statefulsyncer.WithCacheSize(syncer.DefaultCacheSize), + statefulsyncer.WithMaxConcurrency(config.MaxSyncConcurrency), + statefulsyncer.WithPastBlockLimit(config.MaxReorgDepth), + statefulsyncer.WithSeenConcurrency(int64(config.SeenBlockWorkers)), + statefulsyncer.WithMetaData(constructionMetadata), ) return &ConstructionTester{ @@ -258,7 +373,9 @@ func InitializeConstruction( // CloseDatabase closes the database used by ConstructionTester. func (t *ConstructionTester) CloseDatabase(ctx context.Context) { if err := t.database.Close(ctx); err != nil { - log.Fatalf("%s: error closing database", err.Error()) + msg := fmt.Sprintf("error closing database: %s%s", err.Error(), constructionMetadata) + color.Red(msg) + log.Fatalf(msg) } } @@ -273,14 +390,65 @@ func (t *ConstructionTester) StartPeriodicLogger( for { select { case <-ctx.Done(): - // Print stats one last time before exiting - inflight, _ := t.broadcastStorage.GetAllBroadcasts(ctx) - _ = t.logger.LogConstructionStats(ctx, len(inflight)) - return ctx.Err() case <-tc.C: - inflight, _ := t.broadcastStorage.GetAllBroadcasts(ctx) - _ = t.logger.LogConstructionStats(ctx, len(inflight)) + status := results.ComputeCheckConstructionStatus( + ctx, + t.config, + t.counterStorage, + t.broadcastStorage, + t.jobStorage, + ) + t.logger.LogConstructionStatus(ctx, status) + } + } +} + +func (t *ConstructionTester) checkTip(ctx context.Context) (int64, error) { + atTip, blockIdentifier, err := utils.CheckNetworkTip( + ctx, + t.network, + t.config.TipDelay, + t.onlineFetcher, + ) + if err != nil { + err = fmt.Errorf("failed to check network tip: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return -1, err + } + + if atTip { + return blockIdentifier.Index, nil + } + + return -1, nil +} + +// waitForTip loops until the Rosetta implementation is at tip. +func (t *ConstructionTester) waitForTip(ctx context.Context) (int64, error) { + tc := time.NewTicker(tipWaitInterval) + defer tc.Stop() + + for { + // Don't wait any time before first tick if at tip. 
+ tipIndex, err := t.checkTip(ctx) + if err != nil { + err = fmt.Errorf("failed to check tip: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return -1, err + } + + if tipIndex != -1 { + return tipIndex, nil + } + + color.Cyan("waiting for implementation to reach tip before testing...%s", constructionMetadata) + + select { + case <-ctx.Done(): + return -1, ctx.Err() + case <-tc.C: + continue } } } @@ -294,17 +462,19 @@ func (t *ConstructionTester) StartSyncer( ) error { startIndex := int64(-1) _, err := t.blockStorage.GetHeadBlockIdentifier(ctx) - if errors.Is(err, storage.ErrHeadBlockNotFound) { - // If a block has yet to be synced, start syncing from tip. - // TODO: make configurable - status, fetchErr := t.onlineFetcher.NetworkStatusRetry(ctx, t.network, nil) - if fetchErr != nil { - return fmt.Errorf("%w: unable to fetch network status", fetchErr.Err) + if errors.Is(err, storageErrs.ErrHeadBlockNotFound) { + // If no head block exists, ensure we are at tip before starting. Otherwise, + // we will unnecessarily sync tons of blocks before reaching any that matter. + startIndex, err = t.waitForTip(ctx) + if err != nil { + err = fmt.Errorf("unable to wait for tip: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return err } - - startIndex = status.CurrentBlockIdentifier.Index } else if err != nil { - return fmt.Errorf("%w: unable to get last block synced", err) + err = fmt.Errorf("unable to get last block synced: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return err } return t.syncer.Sync(ctx, startIndex, -1) @@ -319,15 +489,35 @@ func (t *ConstructionTester) StartConstructor( if t.config.Construction.ClearBroadcasts { broadcasts, err := t.broadcastStorage.ClearBroadcasts(ctx) if err != nil { - return fmt.Errorf("%w: unable to clear broadcasts", err) + err = fmt.Errorf("unable to clear broadcasts: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return err } - log.Printf("cleared %d broadcasts\n", len(broadcasts)) + color.Cyan("cleared %d broadcasts%s\n", len(broadcasts), constructionMetadata) } return t.coordinator.Process(ctx) } +// ServeHTTP serves a CheckDataStatus response on all paths. +func (t *ConstructionTester) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(http.StatusOK) + + status := results.ComputeCheckConstructionStatus( + r.Context(), + t.config, + t.counterStorage, + t.broadcastStorage, + t.jobStorage, + ) + + if err := json.NewEncoder(w).Encode(status); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + // PerformBroadcasts attempts to rebroadcast all pending transactions // if the RebroadcastAll configuration is set to true. 
func (t *ConstructionTester) PerformBroadcasts(ctx context.Context) error { @@ -335,10 +525,12 @@ func (t *ConstructionTester) PerformBroadcasts(ctx context.Context) error { return nil } - color.Magenta("Rebroadcasting all transactions...") + color.Magenta("Rebroadcasting all transactions...%s", constructionMetadata) if err := t.broadcastStorage.BroadcastAll(ctx, false); err != nil { - return fmt.Errorf("%w: unable to broadcast all transactions", err) + err = fmt.Errorf("unable to broadcast all transactions: %w%s", err, constructionMetadata) + color.Red(err.Error()) + return err } return nil @@ -367,7 +559,9 @@ func (t *ConstructionTester) WatchEndConditions( for workflow, minOccurences := range endConditions { completed, err := t.jobStorage.Completed(ctx, workflow) if err != nil { - return fmt.Errorf("%w: unable to fetch completed %s", err, workflow) + err = fmt.Errorf("unable to fetch completed %s: %w%s", workflow, err, constructionMetadata) + color.Red(err.Error()) + return err } if len(completed) < minOccurences { @@ -385,17 +579,75 @@ func (t *ConstructionTester) WatchEndConditions( } } -// HandleErr is called when `check:construction` returns an error. -func (t *ConstructionTester) HandleErr(err error) { +func (t *ConstructionTester) returnFunds( + ctx context.Context, + sigListeners *[]context.CancelFunc, +) { + // To cancel all execution, need to call multiple cancel functions. + ctx, cancel := context.WithCancel(ctx) + *sigListeners = append(*sigListeners, cancel) + + var returnFundsSuccess bool + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + return t.StartSyncer(ctx, cancel) + }) + g.Go(func() error { + return t.StartPeriodicLogger(ctx) + }) + g.Go(func() error { + err := t.coordinator.ReturnFunds(ctx) + + // If the error is nil, we need to cancel the syncer + // or we will sync forever. + if err == nil { + returnFundsSuccess = true // makes error parsing much easier + cancel() + return nil + } + + return err + }) + + err := g.Wait() if *t.signalReceived { - color.Red("Check halted") - os.Exit(1) + color.Red("Fund return halted%s", constructionMetadata) return } - if t.reachedEndConditions { - ExitConstruction(t.config, t.counterStorage, t.jobStorage, nil, 0) + if !returnFundsSuccess { + color.Cyan("unable to return funds %v%s\n", err, constructionMetadata) } +} + +// HandleErr is called when `check:construction` returns an error. +func (t *ConstructionTester) HandleErr( + err error, + sigListeners *[]context.CancelFunc, +) error { + if *t.signalReceived { + err = fmt.Errorf("%v: %w%s", err.Error(), cliErrs.ErrConstructionCheckHalt, constructionMetadata) + color.Red(err.Error()) + return results.ExitConstruction( + t.config, + t.counterStorage, + t.jobStorage, + err, + ) + } + + if !t.reachedEndConditions { + color.Red("%v%s", err, constructionMetadata) + return results.ExitConstruction(t.config, t.counterStorage, t.jobStorage, err) + } + + // We optimistically run the ReturnFunds function on the coordinator + // and only log if it fails. If there is no ReturnFunds workflow defined, + // this will just return nil. 
+ t.returnFunds( + context.Background(), + sigListeners, + ) - ExitConstruction(t.config, t.counterStorage, t.jobStorage, err, 1) + return results.ExitConstruction(t.config, t.counterStorage, t.jobStorage, nil) } diff --git a/pkg/tester/data.go b/pkg/tester/data.go index 36f24be9..1de767a5 100644 --- a/pkg/tester/data.go +++ b/pkg/tester/data.go @@ -16,20 +16,28 @@ package tester import ( "context" + "encoding/json" "errors" "fmt" "log" - "os" + "math/big" + "net/http" "time" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-cli/configuration" "github.com/coinbase/rosetta-cli/pkg/logger" "github.com/coinbase/rosetta-cli/pkg/processor" - + "github.com/coinbase/rosetta-cli/pkg/results" "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/parser" "github.com/coinbase/rosetta-sdk-go/reconciler" "github.com/coinbase/rosetta-sdk-go/statefulsyncer" - "github.com/coinbase/rosetta-sdk-go/storage" + "github.com/coinbase/rosetta-sdk-go/storage/database" + storageErrs "github.com/coinbase/rosetta-sdk-go/storage/errors" + "github.com/coinbase/rosetta-sdk-go/storage/modules" + "github.com/coinbase/rosetta-sdk-go/syncer" "github.com/coinbase/rosetta-sdk-go/types" "github.com/coinbase/rosetta-sdk-go/utils" "github.com/fatih/color" @@ -48,35 +56,56 @@ const ( // until the client halts the search or the block is found). InactiveFailureLookbackWindow = 250 + // periodicLoggingSeconds is the frequency to print stats in seconds. + periodicLoggingSeconds = 10 + // PeriodicLoggingFrequency is the frequency that stats are printed // to the terminal. - // - // TODO: make configurable - PeriodicLoggingFrequency = 10 * time.Second + PeriodicLoggingFrequency = periodicLoggingSeconds * time.Second // EndAtTipCheckInterval is the frequency that EndAtTip condition - // is evaludated - // - // TODO: make configurable + // is evaluated EndAtTipCheckInterval = 10 * time.Second + + //MinTableSize unit is GB + MinTableSize = int64(1) + + //MaxTableSize unit is GB + MaxTableSize = int64(100) + + //MinTableSize unit is MB + MinValueLogFileSize = int64(128) + + //MaxTableSize unit is MB + MaxValueLogFileSize = int64(2048) + + // empty requestUUID + EmptyRequestUUID = "" ) +var _ http.Handler = (*DataTester)(nil) +var _ statefulsyncer.PruneHelper = (*DataTester)(nil) +var metadata string + // DataTester coordinates the `check:data` test. 
type DataTester struct { - network *types.NetworkIdentifier - database storage.Database - config *configuration.Configuration - syncer *statefulsyncer.StatefulSyncer - reconciler *reconciler.Reconciler - logger *logger.Logger - balanceStorage *storage.BalanceStorage - blockStorage *storage.BlockStorage - counterStorage *storage.CounterStorage - reconcilerHandler *processor.ReconcilerHandler - fetcher *fetcher.Fetcher - signalReceived *bool - genesisBlock *types.BlockIdentifier - cancel context.CancelFunc + network *types.NetworkIdentifier + database database.Database + config *configuration.Configuration + syncer *statefulsyncer.StatefulSyncer + reconciler *reconciler.Reconciler + logger *logger.Logger + balanceStorage *modules.BalanceStorage + blockStorage *modules.BlockStorage + counterStorage *modules.CounterStorage + reconcilerHandler *processor.ReconcilerHandler + fetcher *fetcher.Fetcher + signalReceived *bool + genesisBlock *types.BlockIdentifier + cancel context.CancelFunc + historicalBalanceEnabled bool + parser *parser.Parser + forceInactiveReconciliation *bool endCondition configuration.CheckDataEndCondition endConditionDetail string @@ -94,32 +123,62 @@ func shouldReconcile(config *configuration.Configuration) bool { return true } -// loadAccounts is a utility function to parse the []*reconciler.AccountCurrency +// loadAccounts is a utility function to parse the []*types.AccountCurrency // in a file. -func loadAccounts(filePath string) ([]*reconciler.AccountCurrency, error) { +func loadAccounts(filePath string) ([]*types.AccountCurrency, error) { if len(filePath) == 0 { - return []*reconciler.AccountCurrency{}, nil + return []*types.AccountCurrency{}, nil } - accounts := []*reconciler.AccountCurrency{} + accounts := []*types.AccountCurrency{} if err := utils.LoadAndParse(filePath, &accounts); err != nil { - return nil, fmt.Errorf("%w: unable to open account file", err) + err = fmt.Errorf("unable to load and parse %s: %w%s", filePath, err, metadata) + color.Red(err.Error()) + return nil, err } - log.Printf( - "Found %d accounts at %s: %s\n", + msg := fmt.Sprintf( + "Found %d accounts at %s: %s%s\n", len(accounts), filePath, types.PrettyPrintStruct(accounts), + metadata, ) + color.Cyan(msg) return accounts, nil } +// loadAccount is a utility function to parse the []*types.AccountCurrency +// from a string. +func loadAccount(accountAddress string) []*types.AccountCurrency { + if len(accountAddress) == 0 { + return []*types.AccountCurrency{} + } + + accounts := []*types.AccountCurrency{} + accountIndentifier := &types.AccountIdentifier{ + Address: accountAddress, + // You can set other fields of AccountIdentifier here if needed. + } + + // Create an AccountCurrency instance with the Account field set to the created AccountIdentifier. + targetAccount := &types.AccountCurrency{ + Account: accountIndentifier, + // You can set other fields of AccountCurrency here if needed. + } + + accounts = append(accounts, targetAccount) + + return accounts +} + // CloseDatabase closes the database used by DataTester. 
func (t *DataTester) CloseDatabase(ctx context.Context) { if err := t.database.Close(ctx); err != nil { - log.Fatalf("%s: error closing database", err.Error()) + msg := fmt.Sprintf("error closing database: %s%s", err.Error(), metadata) + color.Red(msg) + log.Fatalf(msg) } } @@ -131,61 +190,130 @@ func InitializeData( fetcher *fetcher.Fetcher, cancel context.CancelFunc, genesisBlock *types.BlockIdentifier, - interestingAccount *reconciler.AccountCurrency, + interestingAccount *types.AccountCurrency, signalReceived *bool, -) *DataTester { +) (*DataTester, error) { dataPath, err := utils.CreateCommandPath(config.DataDirectory, dataCmdName, network) + metadataMap := logger.ConvertStringToMap(config.InfoMetaData) + metadataMap = logger.AddRequestUUIDToMap(metadataMap, config.RequestUUID) + metadata = logger.ConvertMapToString(metadataMap) + if err != nil { - log.Fatalf("%s: cannot create command path", err.Error()) + err = fmt.Errorf("failed to create command path: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err + } + + //add metadata into localStore + opts := []database.BadgerOption{} + opts = append(opts, database.WithMetaData(metadata)) + dataPathBackup := dataPath + + if config.AllInMemoryEnabled { + opts = append( + opts, + database.WithCustomSettings(database.AllInMemoryBadgerOptions(dataPath)), + database.WithoutCompression(), + ) + // for all in memory mode, the path need to be "", as badgerDB will not write to disk + dataPathBackup = "" + } else { + if config.CompressionDisabled { + opts = append(opts, database.WithoutCompression()) + } + if config.L0InMemoryEnabled { + opts = append( + opts, + database.WithCustomSettings(database.PerformanceBadgerOptions(dataPath)), + ) + } } - opts := []storage.BadgerOption{} - if !config.DisableMemoryLimit { - opts = append(opts, storage.WithMemoryLimit()) + // If we enable all-in-memory or L0-in-memory mode, badger DB's TableSize and ValueLogFileSize will change + // according to users config. tableSize means the LSM table size, when the table more than the tableSize, + // will trigger a compact. + // In default mode, we will not change the badger DB's TableSize and ValueLogFileSize for limiting memory usage + if config.AllInMemoryEnabled || config.L0InMemoryEnabled { + if config.TableSize != nil { + if *config.TableSize >= MinTableSize && *config.TableSize <= MaxTableSize { + opts = append( + opts, + database.WithTableSize(*config.TableSize), + ) + } + } + if config.ValueLogFileSize != nil { + if *config.TableSize >= MinValueLogFileSize && *config.TableSize <= MinValueLogFileSize { + opts = append( + opts, + database.WithValueLogFileSize(*config.TableSize), + ) + } + } } - localStore, err := storage.NewBadgerStorage(ctx, dataPath, opts...) + + //add metadata into localStore + localStore, err := database.NewBadgerDatabase(ctx, dataPathBackup, opts...) 
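As a side note on the sizing knobs applied above, here is a condensed, hedged sketch of how the optional Badger overrides relate to the declared bounds (assuming, per the constant comments, that TableSize is expressed in GB and ValueLogFileSize in MB, and that each value is validated against its own min/max); the helper name is hypothetical and not part of this change.

```go
package tester

import (
	"github.com/coinbase/rosetta-cli/configuration"
	"github.com/coinbase/rosetta-sdk-go/storage/database"
)

// badgerSizingOptions is an illustrative helper collecting the optional
// Badger sizing overrides. They only take effect in the in-memory modes and
// only when they fall inside the declared bounds.
func badgerSizingOptions(config *configuration.Configuration) []database.BadgerOption {
	opts := []database.BadgerOption{}
	if !config.AllInMemoryEnabled && !config.L0InMemoryEnabled {
		return opts
	}
	if config.TableSize != nil &&
		*config.TableSize >= MinTableSize &&
		*config.TableSize <= MaxTableSize {
		opts = append(opts, database.WithTableSize(*config.TableSize)) // GB
	}
	if config.ValueLogFileSize != nil &&
		*config.ValueLogFileSize >= MinValueLogFileSize &&
		*config.ValueLogFileSize <= MaxValueLogFileSize {
		opts = append(opts, database.WithValueLogFileSize(*config.ValueLogFileSize)) // MB
	}
	return opts
}
```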
if err != nil { - log.Fatalf("%s: unable to initialize database", err.Error()) + err = fmt.Errorf("unable to initialize database: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } exemptAccounts, err := loadAccounts(config.Data.ExemptAccounts) if err != nil { - log.Fatalf("%s: unable to load exempt accounts", err.Error()) + err = fmt.Errorf("unable to load exempt accounts: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } interestingAccounts, err := loadAccounts(config.Data.InterestingAccounts) if err != nil { - log.Fatalf("%s: unable to load interesting accounts", err.Error()) + err = fmt.Errorf("unable to load interesting accounts: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err + } + if len(config.TargetAccount) != 0 { + interestingAccounts = loadAccount(config.TargetAccount) } - counterStorage := storage.NewCounterStorage(localStore) - blockStorage := storage.NewBlockStorage(localStore) - balanceStorage := storage.NewBalanceStorage(localStore) - - loggerBalanceStorage := balanceStorage - if !shouldReconcile(config) { - loggerBalanceStorage = nil + interestingOnly := false + if len(interestingAccounts) != 0 { + interestingOnly = true } - logger := logger.NewLogger( - counterStorage, - loggerBalanceStorage, + counterStorage := modules.NewCounterStorage(localStore) + blockStorage := modules.NewBlockStorage(localStore, config.SerialBlockWorkers) + balanceStorage := modules.NewBalanceStorage(localStore) + logger, err := logger.NewLogger( dataPath, config.Data.LogBlocks, config.Data.LogTransactions, config.Data.LogBalanceChanges, config.Data.LogReconciliations, + logger.Data, + network, + metadataMap, ) + if err != nil { + err = fmt.Errorf("unable to initialize logger with error: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err + } + var forceInactiveReconciliation bool reconcilerHelper := processor.NewReconcilerHelper( + config, network, fetcher, + localStore, blockStorage, balanceStorage, + &forceInactiveReconciliation, ) - reconcilerHandler := processor.NewReconcilerHandler( logger, + counterStorage, balanceStorage, !config.Data.IgnoreReconciliationError, ) @@ -193,67 +321,155 @@ func InitializeData( // Get all previously seen accounts seenAccounts, err := balanceStorage.GetAllAccountCurrency(ctx) if err != nil { - log.Fatalf("%s: unable to get previously seen accounts", err.Error()) + err = fmt.Errorf("unable to get previously seen accounts: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } - r := reconciler.New( - reconcilerHelper, - reconcilerHandler, + networkOptions, fetchErr := fetcher.NetworkOptionsRetry(ctx, network, nil) + if fetchErr != nil { + msg := fmt.Sprintf("unable to get network options: %s%s", fetchErr.Err.Error(), metadata) + color.Red(msg) + log.Fatalf(msg) + } + + if len(networkOptions.Allow.BalanceExemptions) > 0 && config.Data.InitialBalanceFetchDisabled { + err = fmt.Errorf("found balance exemptions but initial balance fetch disabled%s", metadata) + color.Red(err.Error()) + return nil, err + } + + parser := parser.New( + fetcher.Asserter, + nil, + networkOptions.Allow.BalanceExemptions, + ) + + // Determine if we should perform historical balance lookups + var historicalBalanceEnabled bool + if config.Data.HistoricalBalanceDisabled != nil { + historicalBalanceEnabled = !*config.Data.HistoricalBalanceDisabled + } else { // we must look it up + historicalBalanceEnabled = networkOptions.Allow.HistoricalBalanceLookup + } + + //add metadata into reconciler + rOpts := []reconciler.Option{ 
reconciler.WithActiveConcurrency(int(config.Data.ActiveReconciliationConcurrency)), reconciler.WithInactiveConcurrency(int(config.Data.InactiveReconciliationConcurrency)), - reconciler.WithLookupBalanceByBlock(!config.Data.HistoricalBalanceDisabled), reconciler.WithInterestingAccounts(interestingAccounts), reconciler.WithSeenAccounts(seenAccounts), - reconciler.WithDebugLogging(config.Data.LogReconciliations), reconciler.WithInactiveFrequency(int64(config.Data.InactiveReconciliationFrequency)), + reconciler.WithBalancePruning(), + reconciler.WithMetaData(metadata), + } + if config.Data.ReconcilerActiveBacklog != nil { + rOpts = append(rOpts, reconciler.WithBacklogSize(*config.Data.ReconcilerActiveBacklog)) + } + if historicalBalanceEnabled { + rOpts = append(rOpts, reconciler.WithLookupBalanceByBlock()) + } + if config.Data.LogReconciliations { + rOpts = append(rOpts, reconciler.WithDebugLogging()) + } + + //add metadata into reconciler + r := reconciler.New( + reconcilerHelper, + reconcilerHandler, + parser, + rOpts..., ) - blockWorkers := []storage.BlockWorker{} + blockWorkers := []modules.BlockWorker{counterStorage} if !config.Data.BalanceTrackingDisabled { balanceStorageHelper := processor.NewBalanceStorageHelper( network, fetcher, - !config.Data.HistoricalBalanceDisabled, + counterStorage, + historicalBalanceEnabled, exemptAccounts, - false, + interestingOnly, + networkOptions.Allow.BalanceExemptions, + config.Data.InitialBalanceFetchDisabled, ) + if interestingOnly { + for _, interesinterestingAccount := range interestingAccounts { + balanceStorageHelper.AddInterestingAddress(interesinterestingAccount.Account.Address) + } + } + balanceStorageHandler := processor.NewBalanceStorageHandler( logger, r, + counterStorage, shouldReconcile(config), interestingAccount, ) balanceStorage.Initialize(balanceStorageHelper, balanceStorageHandler) - // Bootstrap balances if provided - if len(config.Data.BootstrapBalances) > 0 { + blockWorkers = append(blockWorkers, balanceStorage) + + // Bootstrap balances, if provided. We need to do before initializing + // the reconciler otherwise we won't reconcile bootstrapped accounts + // until rosetta-cli restart. + // + // We need to do this after instantiating the balance storage handler + // because it is invoked within BootstrapBalances. + // + // We only need to bootstrap balances when we run this test from + // genesis block. 
If it is not genesis block, we use the balances from + // previous block + if (config.Data.StartIndex == nil || *config.Data.StartIndex == genesisBlock.Index) && + len(config.Data.BootstrapBalances) > 0 { _, err := blockStorage.GetHeadBlockIdentifier(ctx) - if err == storage.ErrHeadBlockNotFound { + switch { + case err == storageErrs.ErrHeadBlockNotFound: err = balanceStorage.BootstrapBalances( ctx, config.Data.BootstrapBalances, genesisBlock, ) if err != nil { - log.Fatalf("%s: unable to bootstrap balances", err.Error()) + err = fmt.Errorf("unable to bootstrap balances: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } - } else { - log.Println("Skipping balance bootstrapping because already started syncing") + case err != nil: + err = fmt.Errorf("unable to get head block identifier: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err + default: + color.Cyan("Skipping balance bootstrapping because already started syncing%s", metadata) } } - - blockWorkers = append(blockWorkers, balanceStorage) } if !config.Data.CoinTrackingDisabled { coinStorageHelper := processor.NewCoinStorageHelper(blockStorage) - coinStorage := storage.NewCoinStorage(localStore, coinStorageHelper, fetcher.Asserter) + coinStorage := modules.NewCoinStorage(localStore, coinStorageHelper, fetcher.Asserter) blockWorkers = append(blockWorkers, coinStorage) } + //add metadata into statefulsyncer + statefulSyncerOptions := []statefulsyncer.Option{ + statefulsyncer.WithCacheSize(syncer.DefaultCacheSize), + statefulsyncer.WithMaxConcurrency(config.MaxSyncConcurrency), + statefulsyncer.WithPastBlockLimit(config.MaxReorgDepth), + statefulsyncer.WithSeenConcurrency(int64(config.SeenBlockWorkers)), + statefulsyncer.WithMetaData(metadata), + } + if config.Data.PruningFrequency != nil { + statefulSyncerOptions = append( + statefulSyncerOptions, + statefulsyncer.WithPruneSleepTime(*config.Data.PruningFrequency), + ) + } + + //add metadata into syncer syncer := statefulsyncer.New( ctx, network, @@ -263,25 +479,28 @@ func InitializeData( logger, cancel, blockWorkers, - config.SyncConcurrency, + statefulSyncerOptions..., ) return &DataTester{ - network: network, - database: localStore, - config: config, - syncer: syncer, - cancel: cancel, - reconciler: r, - logger: logger, - balanceStorage: balanceStorage, - blockStorage: blockStorage, - counterStorage: counterStorage, - reconcilerHandler: reconcilerHandler, - fetcher: fetcher, - signalReceived: signalReceived, - genesisBlock: genesisBlock, - } + network: network, + database: localStore, + config: config, + syncer: syncer, + cancel: cancel, + reconciler: r, + logger: logger, + balanceStorage: balanceStorage, + blockStorage: blockStorage, + counterStorage: counterStorage, + reconcilerHandler: reconcilerHandler, + fetcher: fetcher, + signalReceived: signalReceived, + genesisBlock: genesisBlock, + historicalBalanceEnabled: historicalBalanceEnabled, + parser: parser, + forceInactiveReconciliation: &forceInactiveReconciliation, + }, nil } // StartSyncing syncs from startIndex to endIndex. @@ -304,6 +523,39 @@ func (t *DataTester) StartSyncing( return t.syncer.Sync(ctx, startIndex, endIndex) } +// StartPruning attempts to prune block storage +// every 10 seconds. +func (t *DataTester) StartPruning( + ctx context.Context, +) error { + if t.config.Data.PruningBlockDisabled { + return nil + } + + return t.syncer.Prune(ctx, t) +} + +// StartReconcilerCountUpdater attempts to periodically +// write cached reconciler count updates to storage. 
+func (t *DataTester) StartReconcilerCountUpdater( + ctx context.Context, +) error { + return t.reconcilerHandler.Updater(ctx) +} + +// PruneableIndex is the index that is +// safe for pruning. +func (t *DataTester) PruneableIndex( + ctx context.Context, + headIndex int64, +) (int64, error) { + // We don't need blocks to exist to reconcile + // balances at their index. + // + // It is ok if the returned value here is negative. + return headIndex - int64(t.config.MaxReorgDepth), nil +} + // StartReconciler starts the reconciler if // reconciliation is enabled. func (t *DataTester) StartReconciler( @@ -327,86 +579,251 @@ func (t *DataTester) StartPeriodicLogger( for { select { case <-ctx.Done(): - // Print stats one last time before exiting - _ = t.logger.LogDataStats(ctx) - return ctx.Err() case <-tc.C: - _ = t.logger.LogDataStats(ctx) + // Update the elapsed time in counter storage so that + // we can log metrics about the current check:data run. + _, _ = t.counterStorage.Update( + ctx, + results.TimeElapsedCounter, + big.NewInt(periodicLoggingSeconds), + ) + + status := results.ComputeCheckDataStatus( + ctx, + t.blockStorage, + t.counterStorage, + t.balanceStorage, + t.fetcher, + t.config.Network, + t.reconciler, + ) + t.logger.LogDataStatus(ctx, status) } } } +// ServeHTTP serves a CheckDataStatus response on all paths. +func (t *DataTester) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(http.StatusOK) + + status := results.ComputeCheckDataStatus( + r.Context(), + t.blockStorage, + t.counterStorage, + t.balanceStorage, + t.fetcher, + t.network, + t.reconciler, + ) + + if err := json.NewEncoder(w).Encode(status); err != nil { + msg := err.Error() + msg = fmt.Sprintf("%s%s", msg, metadata) + color.Red(msg) + http.Error(w, msg, http.StatusInternalServerError) + } +} + +// syncedStatus returns a boolean indicating if we are synced to tip and +// the last synced block. +func (t *DataTester) syncedStatus(ctx context.Context) (bool, int64, error) { + atTip, blockIdentifier, err := utils.CheckStorageTip( + ctx, + t.network, + t.config.TipDelay, + t.fetcher, + t.blockStorage, + ) + if err != nil { + err = fmt.Errorf("failed to check storage tip: %w%s", err, metadata) + color.Red(err.Error()) + return false, -1, err + } + + var blockIndex int64 = -1 + + if blockIdentifier != nil { + blockIndex = blockIdentifier.Index + } + + return atTip, blockIndex, nil +} + // EndAtTipLoop runs a loop that evaluates end condition EndAtTip func (t *DataTester) EndAtTipLoop( ctx context.Context, - minReconciliationCoverage float64, ) { tc := time.NewTicker(EndAtTipCheckInterval) defer tc.Stop() - firstTipIndex := int64(-1) - for { select { case <-ctx.Done(): return case <-tc.C: - atTip, blockIdentifier, err := t.blockStorage.AtTip(ctx, t.config.TipDelay) + atTip, blockIndex, err := t.syncedStatus(ctx) if err != nil { - log.Printf( - "%s: unable to evaluate if syncer is at tip", + color.Red( + "unable to evaluate if syncer is at tip: %s%s", err.Error(), + metadata, ) continue } - // If we fall behind tip, we must reset the firstTipIndex. - if !atTip { - firstTipIndex = int64(-1) - continue - } - - // If minReconciliationCoverage is less than 0, - // we should just stop at tip. 
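// Editor's sketch (not part of the patch): the DataTester above now exposes
// several long-running entry points (StartSyncing, StartPruning,
// StartReconciler, StartReconcilerCountUpdater, StartPeriodicLogger,
// WatchEndConditions) and implements http.Handler via ServeHTTP. The wiring
// below is an assumption about how a caller might drive them together; the
// errgroup layout, index arguments, and status address are illustrative only
// (rosetta-cli's own runner may instead use the StartServer helper added in
// pkg/tester/general.go later in this change).
//
// Assumed imports: "context", "net/http", "golang.org/x/sync/errgroup".
func runDataChecks(
	ctx context.Context,
	t *DataTester,
	startIndex, endIndex int64,
	sigListeners *[]context.CancelFunc,
) error {
	g, gctx := errgroup.WithContext(ctx)
	g.Go(func() error { return t.StartSyncing(gctx, startIndex, endIndex) })
	g.Go(func() error { return t.StartPruning(gctx) })
	g.Go(func() error { return t.StartReconciler(gctx) })
	g.Go(func() error { return t.StartReconcilerCountUpdater(gctx) })
	g.Go(func() error { return t.StartPeriodicLogger(gctx) })
	g.Go(func() error { return t.WatchEndConditions(gctx) })

	// DataTester is an http.Handler, so the check:data status JSON can be
	// served directly; ":9090" is a placeholder address.
	go func() { _ = http.ListenAndServe(":9090", t) }()

	// HandleErr turns the final error (or a reached end condition) into the
	// formatted check:data results.
	return t.HandleErr(g.Wait(), sigListeners)
}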
- if minReconciliationCoverage < 0 { + if atTip { t.endCondition = configuration.TipEndCondition t.endConditionDetail = fmt.Sprintf( "Tip: %d", - blockIdentifier.Index, + blockIndex, ) + msg := fmt.Sprintf( + "%s%s", + t.endConditionDetail, + metadata, + ) + color.Cyan(msg) t.cancel() return } + } + } +} + +// EndReconciliationCoverage runs a loop that evaluates ReconciliationEndCondition +func (t *DataTester) EndReconciliationCoverage( // nolint:gocognit + ctx context.Context, + reconciliationCoverage *configuration.ReconciliationCoverage, +) { + tc := time.NewTicker(EndAtTipCheckInterval) + defer tc.Stop() + + firstTipIndex := int64(-1) + + for { + select { + case <-ctx.Done(): + return + + case <-tc.C: + atTip, blockIndex, err := t.syncedStatus(ctx) + if err != nil { + color.Red( + "unable to evaluate syncer height or if at tip: %s%s", + err.Error(), + metadata, + ) + continue + } + + // Check if we are at tip and set tip height if fromTip is true. + if reconciliationCoverage.Tip || reconciliationCoverage.FromTip { + // If we fall behind tip, we must reset the firstTipIndex. + var disableForceReconciliation bool + if !atTip { + disableForceReconciliation = true + firstTipIndex = int64(-1) + continue + } + + // forceInactiveReconciliation should NEVER be nil + // by this point but we check just to be sure. + if t.forceInactiveReconciliation != nil { + *t.forceInactiveReconciliation = !disableForceReconciliation + } + + // Once at tip, we want to consider + // coverage. It is not feasible that we could + // get high reconciliation coverage at the tip + // block, so we take the range from when first + // at tip to the current block. + if firstTipIndex < 0 { + firstTipIndex = blockIndex + } + } + + // minIndex is the greater of firstTipIndex + // and reconciliationCoverage.Index + minIndex := firstTipIndex + + // Check if at required minimum index + if reconciliationCoverage.Index != nil { + if *reconciliationCoverage.Index < blockIndex { + continue + } + + // Override the firstTipIndex if reconciliationCoverage.Index + // is greater + if *reconciliationCoverage.Index > minIndex { + minIndex = *reconciliationCoverage.Index + } + } + + // Check if all accounts reconciled at index (+1). If last index reconciled + // is less than the minimum allowed index but the QueueSize is 0, then + // we consider the reconciler to be caught up. + if t.reconciler.LastIndexReconciled() <= minIndex && t.reconciler.QueueSize() > 0 { + continue + } - // Once at tip, we want to consider - // coverage. It is not feasible that we could - // get high reconciliation coverage at the tip - // block, so we take the range from when first - // at tip to the current block. 
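// Editor's illustration (values invented): the EndReconciliationCoverage loop
// introduced here is driven by the configuration.ReconciliationCoverage fields
// it reads (Coverage, Tip, FromTip, Index, AccountCount). One way those fields
// could be combined; the exact config spelling and defaults may differ.
//
//	minIndex := int64(100)
//	minAccounts := int64(500)
//	coverage := &configuration.ReconciliationCoverage{
//		Coverage:     0.99,         // required share of reconciled accounts
//		FromTip:      true,         // measure coverage from the first tip block onward
//		Index:        &minIndex,    // do not end the check before this block index
//		AccountCount: &minAccounts, // require at least this many tracked accounts
//	}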
- if firstTipIndex < 0 { - firstTipIndex = blockIdentifier.Index + // Check if account count is above minimum index + if reconciliationCoverage.AccountCount != nil { + allAccounts, err := t.balanceStorage.GetAllAccountCurrency(ctx) + if err != nil { + color.Red( + "unable to get account count: %s%s", + err.Error(), + metadata, + ) + continue + } + + if int64(len(allAccounts)) < *reconciliationCoverage.AccountCount { + continue + } } - coverage, err := t.balanceStorage.ReconciliationCoverage(ctx, firstTipIndex) + coverageIndex := int64(0) + if reconciliationCoverage.FromTip { + coverageIndex = firstTipIndex + } + + coverage, err := t.balanceStorage.ReconciliationCoverage(ctx, coverageIndex) if err != nil { - log.Printf( - "%s: unable to get reconciliations coverage", + color.Red( + "unable to get reconciliation coverage: %s%s", err.Error(), + metadata, ) continue } - if coverage >= minReconciliationCoverage { + if coverage >= reconciliationCoverage.Coverage { t.endCondition = configuration.ReconciliationCoverageEndCondition t.endConditionDetail = fmt.Sprintf( - "Coverage: %f%%", + "Coverage: %f", coverage*utils.OneHundred, ) + msg := fmt.Sprintf( + "%s%s", + t.endConditionDetail, + metadata, + ) + color.Cyan(msg) t.cancel() return } + + color.Cyan(fmt.Sprintf( + "[END CONDITIONS] Waiting for reconciliation coverage after block %d (%f%%) to surpass requirement (%f%%)%s", + firstTipIndex, + coverage*utils.OneHundred, + reconciliationCoverage.Coverage*utils.OneHundred, + metadata, + )) } } } @@ -430,6 +847,12 @@ func (t *DataTester) EndDurationLoop( "Seconds: %d", int(duration.Seconds()), ) + msg := fmt.Sprintf( + "%s%s", + t.endConditionDetail, + metadata, + ) + color.Cyan(msg) t.cancel() return } @@ -447,7 +870,7 @@ func (t *DataTester) WatchEndConditions( if endConds.Tip != nil && *endConds.Tip { // runs a go routine that ends when reaching tip - go t.EndAtTipLoop(ctx, -1) + go t.EndAtTipLoop(ctx) } if endConds.Duration != nil && *endConds.Duration != 0 { @@ -456,20 +879,169 @@ func (t *DataTester) WatchEndConditions( } if endConds.ReconciliationCoverage != nil { - go t.EndAtTipLoop(ctx, *endConds.ReconciliationCoverage) + go t.EndReconciliationCoverage(ctx, endConds.ReconciliationCoverage) } return nil } +// CompleteReconciliations returns the sum of all failed, exempt, and successful +// reconciliations. 
+func (t *DataTester) CompleteReconciliations(ctx context.Context) (int64, error) { + activeReconciliations, err := t.counterStorage.Get(ctx, modules.ActiveReconciliationCounter) + if err != nil { + err = fmt.Errorf("failed to get active reconciliations counter: %w%s", err, metadata) + color.Red(err.Error()) + return -1, fmt.Errorf("failed to get active reconciliations counter: %w%s", err, metadata) + } + + exemptReconciliations, err := t.counterStorage.Get(ctx, modules.ExemptReconciliationCounter) + if err != nil { + err = fmt.Errorf("failed to get exempt reconciliations counter: %w%s", err, metadata) + color.Red(err.Error()) + return -1, err + } + + failedReconciliations, err := t.counterStorage.Get(ctx, modules.FailedReconciliationCounter) + if err != nil { + err = fmt.Errorf("failed to get failed reconciliations counter: %w%s", err, metadata) + color.Red(err.Error()) + return -1, err + } + + skippedReconciliations, err := t.counterStorage.Get(ctx, modules.SkippedReconciliationsCounter) + if err != nil { + err = fmt.Errorf("failed to get skipped reconciliations counter: %w%s", err, metadata) + color.Red(err.Error()) + return -1, err + } + + return activeReconciliations.Int64() + + exemptReconciliations.Int64() + + failedReconciliations.Int64() + + skippedReconciliations.Int64(), nil +} + +// WaitForEmptyQueue exits once the active reconciler +// queue is empty and all reconciler goroutines are idle. +func (t *DataTester) WaitForEmptyQueue( + ctx context.Context, +) error { + // To ensure we don't exit while a reconciliation is ongoing + // (i.e. when queue size is 0 but there are busy threads), + // we keep track of how many reconciliations we must complete + // and only exit when that many reconciliations have been performed. + startingComplete, err := t.CompleteReconciliations(ctx) + if err != nil { + err = fmt.Errorf("failed to complete reconciliations: %w%s", err, metadata) + color.Red(err.Error()) + return err + } + startingRemaining := t.reconciler.QueueSize() + + tc := time.NewTicker(EndAtTipCheckInterval) + defer tc.Stop() + + color.Cyan( + "[PROGRESS] remaining reconciliations: %d%s", + startingRemaining, + metadata, + ) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + + case <-tc.C: + // We force cached counts to be written before + // determining if we should exit. + if err := t.reconcilerHandler.UpdateCounts(ctx); err != nil { + err = fmt.Errorf("failed to update count: %w%s", err, metadata) + color.Red(err.Error()) + return err + } + + nowComplete, err := t.CompleteReconciliations(ctx) + if err != nil { + err = fmt.Errorf("failed to complete reconciliations: %w%s", err, metadata) + color.Red(err.Error()) + return err + } + + completed := nowComplete - startingComplete + remaining := int64(startingRemaining) - completed + if remaining <= 0 { + t.cancel() + return nil + } + + color.Cyan( + "[PROGRESS] remaining reconciliations: %d%s", + remaining, + metadata, + ) + } + } +} + +// DrainReconcilerQueue returns once the reconciler queue has been drained +// or an error is encountered. +func (t *DataTester) DrainReconcilerQueue( + ctx context.Context, + sigListeners *[]context.CancelFunc, +) error { + color.Cyan("draining reconciler backlog (you can disable this in your configuration file)%s", metadata) + + // To cancel all execution, need to call multiple cancel functions. 
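// Editor's worked example (numbers invented): WaitForEmptyQueue above decides
// when the drain is done by snapshotting the completed-reconciliation counters
// and the queue size once, then waiting for the delta in completed
// reconciliations to cover everything that was queued at the start:
//
//	startingRemaining := int64(250) // reconciler.QueueSize() when draining begins
//	startingComplete := int64(1000) // CompleteReconciliations() at the same moment
//	nowComplete := int64(1180)      // counter sum after a few ticks
//	completed := nowComplete - startingComplete // 180
//	remaining := startingRemaining - completed  // 70 > 0, keep waiting
//
// Once completed reaches 250, remaining drops to <= 0 and the loop calls
// t.cancel(), which unwinds the errgroup started in DrainReconcilerQueue below.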
+ ctx, cancel := context.WithCancel(ctx) + t.cancel = cancel + *sigListeners = append(*sigListeners, cancel) + + // Disable inactive lookups + t.reconciler.InactiveConcurrency = 0 + + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + return t.StartReconciler(ctx) + }) + g.Go(func() error { + return t.WaitForEmptyQueue(ctx) + }) + + err := g.Wait() + + if *t.signalReceived { + return cliErrs.ErrReconcilerDrainHalt + } + + if errors.Is(err, context.Canceled) { + color.Cyan("drained reconciler backlog%s", metadata) + return nil + } + + return err +} + // HandleErr is called when `check:data` returns an error. // If historical balance lookups are enabled, HandleErr will attempt to // automatically find any missing balance-changing operations. -func (t *DataTester) HandleErr(ctx context.Context, err error, sigListeners []context.CancelFunc) { +func (t *DataTester) HandleErr(err error, sigListeners *[]context.CancelFunc) error { + // Initialize new context because calling context + // will no longer be usable when after termination. + ctx := context.Background() + if *t.signalReceived { - color.Red("Check halted") - os.Exit(1) - return + err = fmt.Errorf("%v: %w%s", err.Error(), cliErrs.ErrDataCheckHalt, metadata) + color.Red(err.Error()) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + err, + "", + "", + ) } if (err == nil || errors.Is(err, context.Canceled)) && @@ -480,15 +1052,47 @@ func (t *DataTester) HandleErr(ctx context.Context, err error, sigListeners []co "Index: %d", *t.config.Data.EndConditions.Index, ) + msg := fmt.Sprintf( + "%s%s", + t.endConditionDetail, + metadata, + ) + color.Cyan(msg) } + // End condition will only be populated if there is + // no error. if len(t.endCondition) != 0 { - ExitData( + // Wait for reconciliation queue to drain (only if end condition reached) + if shouldReconcile(t.config) && + t.reconciler.QueueSize() > 0 { + if t.config.Data.ReconciliationDrainDisabled { + color.Cyan( + "skipping reconciler backlog drain (you can enable this in your configuration file)%s", + metadata, + ) + } else { + drainErr := t.DrainReconcilerQueue(ctx, sigListeners) + if drainErr != nil { + err = fmt.Errorf("%w%s", drainErr, metadata) + color.Red(err.Error()) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + err, + "", + "", + ) + } + } + } + + return results.ExitData( t.config, t.counterStorage, t.balanceStorage, nil, - 0, t.endCondition, t.endConditionDetail, ) @@ -496,59 +1100,106 @@ func (t *DataTester) HandleErr(ctx context.Context, err error, sigListeners []co fmt.Printf("\n") if t.reconcilerHandler.InactiveFailure == nil { - ExitData(t.config, t.counterStorage, t.balanceStorage, err, 1, "", "") + err = fmt.Errorf("%w%s", err, metadata) + color.Red(err.Error()) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + err, + "", + "", + ) } - if t.config.Data.HistoricalBalanceDisabled { + if !t.historicalBalanceEnabled { color.Yellow( - "Can't find the block missing operations automatically, please enable --lookup-balance-by-block", + "Can't find the block missing operations automatically, please enable historical balance lookup%s", + metadata, + ) + err = fmt.Errorf("%w%s", err, metadata) + color.Red(err.Error()) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + err, + "", + "", ) - ExitData(t.config, t.counterStorage, t.balanceStorage, err, 1, "", "") } - if t.config.Data.InactiveDiscrepencySearchDisabled { - color.Yellow("Search for inactive 
reconciliation discrepency is disabled") - ExitData(t.config, t.counterStorage, t.balanceStorage, err, 1, "", "") + if t.config.Data.InactiveDiscrepancySearchDisabled { + color.Yellow("Search for inactive reconciliation discrepancy is disabled%s", metadata) + err = fmt.Errorf("%w%s", err, metadata) + color.Red(err.Error()) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + err, + "", + "", + ) } - t.FindMissingOps(ctx, err, sigListeners) + return t.FindMissingOps( + ctx, + err, + sigListeners, + ) } // FindMissingOps logs the types.BlockIdentifier of a block // that is missing balance-changing operations for a -// *reconciler.AccountCurrency. +// *types.AccountCurrency. func (t *DataTester) FindMissingOps( ctx context.Context, originalErr error, - sigListeners []context.CancelFunc, -) { - color.Cyan("Searching for block with missing operations...hold tight") + sigListeners *[]context.CancelFunc, +) error { + color.Cyan("Searching for block with missing operations...hold tight%s", metadata) badBlock, err := t.recursiveOpSearch( ctx, - &sigListeners, + sigListeners, t.reconcilerHandler.InactiveFailure, t.reconcilerHandler.InactiveFailureBlock.Index-InactiveFailureLookbackWindow, t.reconcilerHandler.InactiveFailureBlock.Index, ) if err != nil { - color.Yellow("%s: could not find block with missing ops", err.Error()) - ExitData(t.config, t.counterStorage, t.balanceStorage, originalErr, 1, "", "") + color.Yellow("could not find block with missing ops: %s%s", err.Error(), metadata) + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + originalErr, + "", + "", + ) } color.Yellow( - "Missing ops for %s in block %d:%s", + "Missing ops for %s in block %d:%s%s", types.AccountString(t.reconcilerHandler.InactiveFailure.Account), badBlock.Index, badBlock.Hash, + metadata, ) - ExitData(t.config, t.counterStorage, t.balanceStorage, originalErr, 1, "", "") + return results.ExitData( + t.config, + t.counterStorage, + t.balanceStorage, + originalErr, + "", + "", + ) } func (t *DataTester) recursiveOpSearch( ctx context.Context, sigListeners *[]context.CancelFunc, - accountCurrency *reconciler.AccountCurrency, + accountCurrency *types.AccountCurrency, startIndex int64, endIndex int64, ) (*types.BlockIdentifier, error) { @@ -559,42 +1210,54 @@ func (t *DataTester) recursiveOpSearch( // Always use a temporary directory to find missing ops tmpDir, err := utils.CreateTempDir() if err != nil { - return nil, fmt.Errorf("%w: unable to create temporary directory", err) + err = fmt.Errorf("unable to create temporary directory: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } defer utils.RemoveTempDir(tmpDir) - opts := []storage.BadgerOption{} - if !t.config.DisableMemoryLimit { - opts = append(opts, storage.WithMemoryLimit()) - } - localStore, err := storage.NewBadgerStorage(ctx, tmpDir, opts...) 
+ localStore, err := database.NewBadgerDatabase(ctx, tmpDir) if err != nil { - return nil, fmt.Errorf("%w: unable to initialize database", err) + err = fmt.Errorf("unable to initialize database: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err } - counterStorage := storage.NewCounterStorage(localStore) - blockStorage := storage.NewBlockStorage(localStore) - balanceStorage := storage.NewBalanceStorage(localStore) + counterStorage := modules.NewCounterStorage(localStore) + blockStorage := modules.NewBlockStorage(localStore, t.config.SerialBlockWorkers) + balanceStorage := modules.NewBalanceStorage(localStore) - logger := logger.NewLogger( - counterStorage, - nil, + logger, err := logger.NewLogger( tmpDir, false, false, false, false, + logger.Data, + t.network, + t.logger.GetMetadataMap(), ) + if err != nil { + err = fmt.Errorf("unable to initialize logger with error: %w%s", err, metadata) + color.Red(err.Error()) + return nil, err + } + + t.forceInactiveReconciliation = types.Bool(false) reconcilerHelper := processor.NewReconcilerHelper( + t.config, t.network, t.fetcher, + localStore, blockStorage, balanceStorage, + t.forceInactiveReconciliation, ) reconcilerHandler := processor.NewReconcilerHandler( logger, + counterStorage, balanceStorage, true, // halt on reconciliation error ) @@ -602,6 +1265,7 @@ func (t *DataTester) recursiveOpSearch( r := reconciler.New( reconcilerHelper, reconcilerHandler, + t.parser, // When using concurrency > 1, we could start looking up balance changes // on multiple blocks at once. This can cause us to return the wrong block @@ -611,21 +1275,25 @@ func (t *DataTester) recursiveOpSearch( // Do not do any inactive lookups when looking for the block with missing // operations. reconciler.WithInactiveConcurrency(0), - reconciler.WithLookupBalanceByBlock(!t.config.Data.HistoricalBalanceDisabled), - reconciler.WithInterestingAccounts([]*reconciler.AccountCurrency{accountCurrency}), + reconciler.WithLookupBalanceByBlock(), + reconciler.WithInterestingAccounts([]*types.AccountCurrency{accountCurrency}), ) balanceStorageHelper := processor.NewBalanceStorageHelper( t.network, t.fetcher, - !t.config.Data.HistoricalBalanceDisabled, + counterStorage, + t.historicalBalanceEnabled, nil, false, + t.parser.BalanceExemptions, + false, // we will need to perform an initial balance fetch when finding issues ) balanceStorageHandler := processor.NewBalanceStorageHandler( logger, r, + counterStorage, true, accountCurrency, ) @@ -640,8 +1308,11 @@ func (t *DataTester) recursiveOpSearch( counterStorage, logger, cancel, - []storage.BlockWorker{balanceStorage}, - t.config.SyncConcurrency, + []modules.BlockWorker{balanceStorage}, + statefulsyncer.WithCacheSize(syncer.DefaultCacheSize), + statefulsyncer.WithMaxConcurrency(t.config.MaxSyncConcurrency), + statefulsyncer.WithPastBlockLimit(t.config.MaxReorgDepth), + statefulsyncer.WithSeenConcurrency(int64(t.config.SeenBlockWorkers)), ) g, ctx := errgroup.WithContext(ctx) @@ -662,14 +1333,20 @@ func (t *DataTester) recursiveOpSearch( // Close database before starting another search, otherwise we will // have n databases open when we find the offending block. 
if storageErr := localStore.Close(ctx); storageErr != nil { - return nil, fmt.Errorf("%w: unable to close database", storageErr) + err = fmt.Errorf("unable to close database: %w%s", storageErr, metadata) + color.Red(err.Error()) + return nil, err } if *t.signalReceived { - return nil, errors.New("Search for block with missing ops halted") + return nil, cliErrs.ErrMissingOps } - if err == nil || err == context.Canceled { + if err == nil || errors.Is(err, context.Canceled) { + if startIndex <= t.genesisBlock.Index { + return nil, cliErrs.ErrUnableToFindMissingOps + } + newStart := startIndex - InactiveFailureLookbackWindow if newStart < t.genesisBlock.Index { newStart = t.genesisBlock.Index @@ -677,18 +1354,22 @@ func (t *DataTester) recursiveOpSearch( newEnd := endIndex - InactiveFailureLookbackWindow if newEnd <= newStart { - return nil, fmt.Errorf( - "Next window to check has start index %d <= end index %d", + err = fmt.Errorf( + "next window to check has start index %d <= end index %d%s", newStart, newEnd, + metadata, ) + color.Red(err.Error()) + return nil, err } color.Cyan( - "Unable to find missing ops in block range %d-%d, now searching %d-%d", + "Unable to find missing ops in block range %d-%d, now searching %d-%d%s", startIndex, endIndex, newStart, newEnd, + metadata, ) return t.recursiveOpSearch( @@ -704,7 +1385,7 @@ func (t *DataTester) recursiveOpSearch( } if reconcilerHandler.ActiveFailureBlock == nil { - return nil, errors.New("unable to find missing ops") + return nil, cliErrs.ErrUnableToFindMissingOps } return reconcilerHandler.ActiveFailureBlock, nil diff --git a/pkg/tester/data_perf.go b/pkg/tester/data_perf.go new file mode 100644 index 00000000..0ce279ad --- /dev/null +++ b/pkg/tester/data_perf.go @@ -0,0 +1,96 @@ +// Copyright 2022 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tester + +import ( + "context" + "time" + + "github.com/coinbase/rosetta-cli/configuration" + cliErrs "github.com/coinbase/rosetta-cli/pkg/errors" + "github.com/coinbase/rosetta-cli/pkg/results" + "github.com/coinbase/rosetta-sdk-go/fetcher" + "github.com/coinbase/rosetta-sdk-go/types" +) + +func SetupBenchmarking(config *configuration.Configuration) (*fetcher.Fetcher, func() time.Duration, chan time.Duration) { + // Create a new fetcher + fetcher := fetcher.New( + config.OnlineURL, + fetcher.WithMaxRetries(0), + ) + timer := timerFactory() + elapsed := make(chan time.Duration, 1) + return fetcher, timer, elapsed +} + +// Benchmark the asset issuer's /block endpoint +func BmarkBlock(ctx context.Context, config *configuration.Configuration, fetcher *fetcher.Fetcher, timer func() time.Duration, elapsed chan time.Duration, rawStats *results.CheckPerfRawStats) error { + total_errors := 0 + go func() { + for m := config.Perf.StartBlock; m < config.Perf.EndBlock; m++ { + for n := 0; n < config.Perf.NumTimesToHitEndpoints; n++ { + partialBlockId := &types.PartialBlockIdentifier{ + Hash: nil, + Index: &m, + } + _, err := fetcher.Block(ctx, config.Network, partialBlockId) + if err != nil { + total_errors++ + } + } + } + elapsed <- timer() + }() + select { + case <-ctx.Done(): + return cliErrs.ErrBlockBenchmarkTimeout + case timeTaken := <-elapsed: + rawStats.BlockEndpointTotalTime = timeTaken + rawStats.BlockEndpointNumErrors = int64(total_errors) + return nil + } +} + +// Benchmark the asset issuers /account/balance endpoint +func BmarkAccountBalance(ctx context.Context, config *configuration.Configuration, fetcher *fetcher.Fetcher, timer func() time.Duration, elapsed chan time.Duration, rawStats *results.CheckPerfRawStats) error { + total_errors := 0 + go func() { + for m := config.Perf.StartBlock; m < config.Perf.EndBlock; m++ { + for n := 0; n < config.Perf.NumTimesToHitEndpoints; n++ { + account := &types.AccountIdentifier{ + Address: "address", + } + partialBlockId := &types.PartialBlockIdentifier{ + Hash: nil, + Index: &m, + } + _, _, _, err := fetcher.AccountBalance(ctx, config.Network, account, partialBlockId, nil) + if err != nil { + total_errors++ + } + } + } + elapsed <- timer() + }() + select { + case <-ctx.Done(): + return cliErrs.ErrAccountBalanceBenchmarkTimeout + case timeTaken := <-elapsed: + rawStats.AccountBalanceEndpointTotalTime = timeTaken + rawStats.AccountBalanceNumErrors = int64(total_errors) + return nil + } +} diff --git a/pkg/tester/data_results.go b/pkg/tester/data_results.go deleted file mode 100644 index 93696b0f..00000000 --- a/pkg/tester/data_results.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2020 Coinbase, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
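// Editor's sketch (not part of the patch): one plausible way check:perf could
// drive the new pkg/tester/data_perf.go helpers above. The timeout, the config
// contents (config.Perf.StartBlock, EndBlock, NumTimesToHitEndpoints), and the
// error handling are assumptions, not the actual command wiring.
//
// Assumed imports: "context", "time",
// "github.com/coinbase/rosetta-cli/configuration",
// "github.com/coinbase/rosetta-cli/pkg/results".
func runBlockBenchmark(cfg *configuration.Configuration) (*results.CheckPerfRawStats, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	fetcher, timer, elapsed := SetupBenchmarking(cfg)
	rawStats := &results.CheckPerfRawStats{}

	// BmarkBlock hits /block NumTimesToHitEndpoints times per block in
	// [StartBlock, EndBlock) and records the total time and error count.
	if err := BmarkBlock(ctx, cfg, fetcher, timer, elapsed, rawStats); err != nil {
		return nil, err // ErrBlockBenchmarkTimeout if the deadline fired first
	}

	// BmarkAccountBalance can be driven the same way, typically with a fresh
	// SetupBenchmarking so its timer starts from zero.
	return rawStats, nil
}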
- -package tester - -import ( - "context" - "errors" - "fmt" - "log" - "os" - "strconv" - - "github.com/coinbase/rosetta-cli/configuration" - "github.com/coinbase/rosetta-cli/pkg/processor" - - "github.com/coinbase/rosetta-sdk-go/fetcher" - "github.com/coinbase/rosetta-sdk-go/storage" - "github.com/coinbase/rosetta-sdk-go/syncer" - "github.com/coinbase/rosetta-sdk-go/utils" - "github.com/fatih/color" - "github.com/olekukonko/tablewriter" -) - -// EndCondition contains the type of -// end condition and any detail associated -// with the stop. -type EndCondition struct { - Type configuration.CheckDataEndCondition `json:"type"` - Detail string `json:"detail"` -} - -// CheckDataResults contains any error that occurred -// on a check:data run, the outcome of certain tests, -// and a collection of interesting stats. -type CheckDataResults struct { - Error string `json:"error"` - EndCondition *EndCondition `json:"end_condition"` - Tests *CheckDataTests `json:"tests"` - Stats *CheckDataStats `json:"stats"` -} - -// Print logs CheckDataResults to the console. -func (c *CheckDataResults) Print() { - if len(c.Error) > 0 { - fmt.Printf("\n") - color.Red("Error: %s", c.Error) - } - - if c.EndCondition != nil { - fmt.Printf("\n") - color.Green("Success: %s [%s]", c.EndCondition.Type, c.EndCondition.Detail) - } - - fmt.Printf("\n") - c.Tests.Print() - fmt.Printf("\n") - if c.Stats != nil { - c.Stats.Print() - fmt.Printf("\n") - } -} - -// Output writes *CheckDataResults to the provided -// path. -func (c *CheckDataResults) Output(path string) { - if len(path) > 0 { - writeErr := utils.SerializeAndWrite(path, c) - if writeErr != nil { - log.Printf("%s: unable to save results\n", writeErr.Error()) - } - } -} - -// CheckDataStats contains interesting stats that -// are counted while running the check:data. -type CheckDataStats struct { - Blocks int64 `json:"blocks"` - Orphans int64 `json:"orphans"` - Transactions int64 `json:"transactions"` - Operations int64 `json:"operations"` - ActiveReconciliations int64 `json:"active_reconciliations"` - InactiveReconciliations int64 `json:"inactive_reconciliations"` - ReconciliationCoverage float64 `json:"reconciliation_coverage"` -} - -// Print logs CheckDataStats to the console. -func (c *CheckDataStats) Print() { - table := tablewriter.NewWriter(os.Stdout) - table.SetRowLine(true) - table.SetRowSeparator("-") - table.SetHeader([]string{"check:data Stats", "Description", "Value"}) - table.Append([]string{"Blocks", "# of blocks synced", strconv.FormatInt(c.Blocks, 10)}) - table.Append([]string{"Orphans", "# of blocks orphaned", strconv.FormatInt(c.Orphans, 10)}) - table.Append( - []string{ - "Transactions", - "# of transaction processed", - strconv.FormatInt(c.Transactions, 10), - }, - ) - table.Append( - []string{"Operations", "# of operations processed", strconv.FormatInt(c.Operations, 10)}, - ) - table.Append( - []string{ - "Active Reconciliations", - "# of reconciliations performed after seeing an account in a block", - strconv.FormatInt(c.ActiveReconciliations, 10), - }, - ) - table.Append( - []string{ - "Inactive Reconciliations", - "# of reconciliation performed on randomly selected accounts", - strconv.FormatInt(c.InactiveReconciliations, 10), - }, - ) - table.Append( - []string{ - "Reconciliation Coverage", - "% of accounts that have been reconciled", - fmt.Sprintf("%f%%", c.ReconciliationCoverage*utils.OneHundred), - }, - ) - - table.Render() -} - -// ComputeCheckDataStats returns a populated CheckDataStats. 
-func ComputeCheckDataStats( - ctx context.Context, - counters *storage.CounterStorage, - balances *storage.BalanceStorage, -) *CheckDataStats { - if counters == nil { - return nil - } - - blocks, err := counters.Get(ctx, storage.BlockCounter) - if err != nil { - log.Printf("%s: cannot get block counter", err.Error()) - return nil - } - - orphans, err := counters.Get(ctx, storage.OrphanCounter) - if err != nil { - log.Printf("%s: cannot get orphan counter", err.Error()) - return nil - } - - txs, err := counters.Get(ctx, storage.TransactionCounter) - if err != nil { - log.Printf("%s: cannot get transaction counter", err.Error()) - return nil - } - - ops, err := counters.Get(ctx, storage.OperationCounter) - if err != nil { - log.Printf("%s: cannot get operations counter", err.Error()) - return nil - } - - activeReconciliations, err := counters.Get(ctx, storage.ActiveReconciliationCounter) - if err != nil { - log.Printf("%s: cannot get active reconciliations counter", err.Error()) - return nil - } - - inactiveReconciliations, err := counters.Get(ctx, storage.InactiveReconciliationCounter) - if err != nil { - log.Printf("%s: cannot get inactive reconciliations counter", err.Error()) - return nil - } - - stats := &CheckDataStats{ - Blocks: blocks.Int64(), - Orphans: orphans.Int64(), - Transactions: txs.Int64(), - Operations: ops.Int64(), - ActiveReconciliations: activeReconciliations.Int64(), - InactiveReconciliations: inactiveReconciliations.Int64(), - } - - if balances != nil { - coverage, err := balances.ReconciliationCoverage(ctx, 0) - if err != nil { - log.Printf("%s: cannot get reconcile coverage", err.Error()) - return nil - } - - stats.ReconciliationCoverage = coverage - } - - return stats -} - -// CheckDataTests indicates which tests passed. -// If a test is nil, it did not apply to the run. -// -// TODO: add CoinTracking -type CheckDataTests struct { - RequestResponse bool `json:"request_response"` - ResponseAssertion bool `json:"response_assertion"` - BlockSyncing *bool `json:"block_syncing"` - BalanceTracking *bool `json:"balance_tracking"` - Reconciliation *bool `json:"reconciliation"` -} - -// convertBool converts a *bool -// to a test result. -func convertBool(v *bool) string { - if v == nil { - return "NOT TESTED" - } - - if *v { - return "PASSED" - } - - return "FAILED" -} - -// Print logs CheckDataTests to the console. -func (c *CheckDataTests) Print() { - table := tablewriter.NewWriter(os.Stdout) - table.SetRowLine(true) - table.SetRowSeparator("-") - table.SetHeader([]string{"check:data Tests", "Description", "Status"}) - table.Append( - []string{ - "Request/Response", - "Rosetta implementation serviced all requests", - convertBool(&c.RequestResponse), - }, - ) - table.Append( - []string{ - "Response Assertion", - "All responses are correctly formatted", - convertBool(&c.ResponseAssertion), - }, - ) - table.Append( - []string{ - "Block Syncing", - "Blocks are connected into a single canonical chain", - convertBool(c.BlockSyncing), - }, - ) - table.Append( - []string{ - "Balance Tracking", - "Account balances did not go negative", - convertBool(c.BalanceTracking), - }, - ) - table.Append( - []string{ - "Reconciliation", - "No balance discrepencies were found between computed and live balances", - convertBool(c.Reconciliation), - }, - ) - - table.Render() -} - -// RequestResponseTest returns a boolean -// indicating if all endpoints received -// a non-500 response. 
-func RequestResponseTest(err error) bool { - if errors.Is(err, fetcher.ErrExhaustedRetries) || errors.Is(err, fetcher.ErrRequestFailed) || - errors.Is(err, fetcher.ErrNoNetworks) || errors.Is(err, utils.ErrNetworkNotSupported) { - return false - } - - return true -} - -// ResponseAssertionTest returns a boolean -// indicating if all responses received from -// the server were correctly formatted. -func ResponseAssertionTest(err error) bool { - if errors.Is(err, fetcher.ErrAssertionFailed) { // nolint - return false - } - - return true -} - -// BlockSyncingTest returns a boolean -// indicating if it was possible to sync -// blocks. -func BlockSyncingTest(err error, blocksSynced bool) *bool { - relatedErrors := []error{ - syncer.ErrCannotRemoveGenesisBlock, - syncer.ErrOutOfOrder, - storage.ErrDuplicateKey, - storage.ErrDuplicateTransactionHash, - } - syncPass := true - for _, relatedError := range relatedErrors { - if errors.Is(err, relatedError) { - syncPass = false - break - } - } - - if !blocksSynced && syncPass { - return nil - } - - return &syncPass -} - -// BalanceTrackingTest returns a boolean -// indicating if any balances went negative -// while syncing. -func BalanceTrackingTest(cfg *configuration.Configuration, err error, operationsSeen bool) *bool { - relatedErrors := []error{ - storage.ErrNegativeBalance, - } - balancePass := true - for _, relatedError := range relatedErrors { - if errors.Is(err, relatedError) { - balancePass = false - break - } - } - - if (cfg.Data.BalanceTrackingDisabled || !operationsSeen) && balancePass { - return nil - } - - return &balancePass -} - -// ReconciliationTest returns a boolean -// if no reconciliation errors were received. -func ReconciliationTest( - cfg *configuration.Configuration, - err error, - reconciliationsPerformed bool, -) *bool { - relatedErrors := []error{ - processor.ErrReconciliationFailure, - } - reconciliationPass := true - for _, relatedError := range relatedErrors { - if errors.Is(err, relatedError) { - reconciliationPass = false - break - } - } - - if (cfg.Data.BalanceTrackingDisabled || cfg.Data.ReconciliationDisabled || cfg.Data.IgnoreReconciliationError || - !reconciliationsPerformed) && - reconciliationPass { - return nil - } - - return &reconciliationPass -} - -// ComputeCheckDataTests returns a populated CheckDataTests. 
-func ComputeCheckDataTests( - ctx context.Context, - cfg *configuration.Configuration, - err error, - counterStorage *storage.CounterStorage, -) *CheckDataTests { - operationsSeen := false - reconciliationsPerformed := false - blocksSynced := false - if counterStorage != nil { - blocks, err := counterStorage.Get(ctx, storage.BlockCounter) - if err == nil && blocks.Int64() > 0 { - blocksSynced = true - } - - ops, err := counterStorage.Get(ctx, storage.OperationCounter) - if err == nil && ops.Int64() > 0 { - operationsSeen = true - } - - activeReconciliations, err := counterStorage.Get(ctx, storage.ActiveReconciliationCounter) - if err == nil && activeReconciliations.Int64() > 0 { - reconciliationsPerformed = true - } - - inactiveReconciliations, err := counterStorage.Get( - ctx, - storage.InactiveReconciliationCounter, - ) - if err == nil && inactiveReconciliations.Int64() > 0 { - reconciliationsPerformed = true - } - } - - return &CheckDataTests{ - RequestResponse: RequestResponseTest(err), - ResponseAssertion: ResponseAssertionTest(err), - BlockSyncing: BlockSyncingTest(err, blocksSynced), - BalanceTracking: BalanceTrackingTest(cfg, err, operationsSeen), - Reconciliation: ReconciliationTest(cfg, err, reconciliationsPerformed), - } -} - -// ComputeCheckDataResults returns a populated CheckDataResults. -func ComputeCheckDataResults( - cfg *configuration.Configuration, - err error, - counterStorage *storage.CounterStorage, - balanceStorage *storage.BalanceStorage, - endCondition configuration.CheckDataEndCondition, - endConditionDetail string, -) *CheckDataResults { - ctx := context.Background() - tests := ComputeCheckDataTests(ctx, cfg, err, counterStorage) - stats := ComputeCheckDataStats(ctx, counterStorage, balanceStorage) - results := &CheckDataResults{ - Tests: tests, - Stats: stats, - } - - if err != nil { - results.Error = err.Error() - - // We never want to populate an end condition - // if there was an error! - return results - } - - if len(endCondition) > 0 { - results.EndCondition = &EndCondition{ - Type: endCondition, - Detail: endConditionDetail, - } - } - - return results -} - -// ExitData exits check:data, logs the test results to the console, -// and to a provided output path. -func ExitData( - config *configuration.Configuration, - counterStorage *storage.CounterStorage, - balanceStorage *storage.BalanceStorage, - err error, - status int, - endCondition configuration.CheckDataEndCondition, - endConditionDetail string, -) { - results := ComputeCheckDataResults( - config, - err, - counterStorage, - balanceStorage, - endCondition, - endConditionDetail, - ) - results.Print() - results.Output(config.Data.ResultsOutputFile) - - os.Exit(status) -} diff --git a/pkg/tester/general.go b/pkg/tester/general.go new file mode 100644 index 00000000..0edc1a3f --- /dev/null +++ b/pkg/tester/general.go @@ -0,0 +1,84 @@ +// Copyright 2020 Coinbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tester + +import ( + "context" + "fmt" + "log" + "net/http" + "time" + + "github.com/coinbase/rosetta-cli/pkg/logger" +) + +const ( + // MemoryLoggingFrequency is the frequency that memory + // usage stats are logged to the terminal. + MemoryLoggingFrequency = 10 * time.Second + + // ReadHeaderTimeout is the header timeout for server + ReadHeaderTimeout = 5 * time.Second +) + +// LogMemoryLoop runs a loop that logs memory usage. +func LogMemoryLoop( + ctx context.Context, +) error { + ticker := time.NewTicker(MemoryLoggingFrequency) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + logger.LogMemoryStats(ctx) + return ctx.Err() + case <-ticker.C: + logger.LogMemoryStats(ctx) + } + } +} + +// StartServer stats a server at a port with a particular handler. +// This is often used to support a status endpoint for a particular test. +func StartServer( + ctx context.Context, + name string, + handler http.Handler, + port uint, +) error { + server := &http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: handler, + ReadHeaderTimeout: ReadHeaderTimeout, + } + + go func() { + log.Printf("%s server running on port %d\n", name, port) + _ = server.ListenAndServe() + }() + + go func() { + // If we don't shutdown server, it will + // never stop because server.ListenAndServe doesn't + // take any context. + <-ctx.Done() + log.Printf("%s server shutting down", name) + + _ = server.Shutdown(ctx) + }() + + return ctx.Err() +} diff --git a/scripts/compile.sh b/scripts/compile.sh index 77fc9255..764b9fbb 100755 --- a/scripts/compile.sh +++ b/scripts/compile.sh @@ -16,12 +16,22 @@ VERSION=$1; -xgo --targets=darwin/*,windows/*,linux/* -out "bin/rosetta-cli-${VERSION}" .; +go install github.com/crazy-max/xgo@latest + +MAC_TARGETS="darwin/amd64,darwin/arm64" +LINUX_TARGETS="linux/amd64,linux/arm64,linux/mips64,linux/mips64le,linux/ppc64le,linux/s390x" +WINDOWS_TARGET="windows/amd64" +TARGETS="${MAC_TARGETS},${LINUX_TARGETS},${WINDOWS_TARGET}" + +xgo -go 1.16.3 --targets=${TARGETS} -out "bin/rosetta-cli-${VERSION}" .; # Rename some files -mv "bin/rosetta-cli-${VERSION}-darwin-10.6-amd64" "bin/rosetta-cli-${VERSION}-darwin-amd64" +mv "bin/rosetta-cli-${VERSION}-darwin-10.16-amd64" "bin/rosetta-cli-${VERSION}-darwin-amd64" +mv "bin/rosetta-cli-${VERSION}-darwin-10.16-arm64" "bin/rosetta-cli-${VERSION}-darwin-arm64" mv "bin/rosetta-cli-${VERSION}-windows-4.0-amd64.exe" "bin/rosetta-cli-${VERSION}-windows-amd64" # Tar all files cd bin || exit; for i in *; do tar -czf "$i.tar.gz" "$i" && rm "$i"; done + +go mod tidy diff --git a/scripts/install.sh b/scripts/install.sh index 86ccad9b..669b8460 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -67,6 +67,7 @@ execute() { get_binaries() { case "$PLATFORM" in darwin/amd64) BINARY="rosetta-cli" ;; + darwin/arm64) BINARY="rosetta-cli" ;; linux/amd64) BINARY="rosetta-cli" ;; linux/arm64) BINARY="rosetta-cli" ;; linux/mips64) BINARY="rosetta-cli" ;;