diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md
index 444463e9ded..bf3b5bf2c0a 100644
--- a/.github/ISSUE_TEMPLATE/bug.md
+++ b/.github/ISSUE_TEMPLATE/bug.md
@@ -16,9 +16,9 @@ Commit hash:
Erigon Command (with flags/config):
-Concensus Layer:
+Consensus Layer:
-Concensus Layer Command (with flags/config):
+Consensus Layer Command (with flags/config):
Chain/Network:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2470c3099f6..82267c6fc31 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,12 +4,12 @@ on:
branches:
- devel
- alpha
- - stable
+ - 'release/**'
pull_request:
branches:
- devel
- alpha
- - stable
+ - 'release/**'
types:
- opened
- reopened
@@ -25,14 +25,14 @@ jobs:
if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }}
strategy:
matrix:
- os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments
+ os: [ ubuntu-22.04, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
- go-version: '1.19'
+ go-version: '1.20'
- name: Install dependencies on Linux
if: runner.os == 'Linux'
run: sudo apt update && sudo apt install build-essential
@@ -50,11 +50,21 @@ jobs:
echo >&2 "Reproducible build broken"; cat erigon1.sha256; cat erigon2.sha256; exit 1
fi
- - name: Lint
+ - name: Install golangci-lint
if: runner.os == 'Linux'
uses: golangci/golangci-lint-action@v3
with:
- version: v1.52
+ version: v1.55.2
+ skip-build-cache: true
+ args: --help
+
+ - name: Install go-licenses
+ if: runner.os == 'Linux'
+ run: cd erigon-lib && make lint-licenses-deps
+
+ - name: Lint
+ if: runner.os == 'Linux'
+ run: make lint
- name: Test
run: make test
@@ -67,10 +77,14 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
+ - name: configure Pagefile
+ uses: al-cheb/configure-pagefile-action@v1.3
+ with:
+ minimum-size: 8GB
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
- go-version: '1.19'
+ go-version: '1.20'
- uses: actions/cache@v3
with:
@@ -80,8 +94,8 @@ jobs:
key: chocolatey-${{ matrix.os }}
- name: Install dependencies
run: |
- choco upgrade mingw -y --no-progress --version 11.2.0.07112021
- choco install cmake -y --no-progress --version 3.23.1
+ choco upgrade mingw -y --no-progress --version 13.2.0
+ choco install cmake -y --no-progress --version 3.27.8
- name: Build
run: .\wmake.ps1 all
@@ -89,10 +103,13 @@ jobs:
- name: Test
run: .\wmake.ps1 test
+ - name: Test erigon-lib
+ run: cd erigon-lib && make test-no-fuzz
+
docker-build-check:
# don't run this on devel - the PR must have run it to be merged and it misleads that this pushes the docker image
if: (${{ github.event_name == 'push' || !github.event.pull_request.draft }}) && ${{ github.ref != 'refs/heads/devel' }}
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3
@@ -108,7 +125,7 @@ jobs:
# automated-tests:
# runs-on:
-# ubuntu-20.04
+# ubuntu-22.04
# if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }}
# steps:
# - uses: actions/checkout@v3
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 474d71e6126..3982becbc53 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -12,7 +12,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
- go-version: '1.19'
+ go-version: '1.20'
- name: install dependencies on Linux
if: runner.os == 'Linux'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index ece2667dcae..e3e0d0e2499 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -19,6 +19,13 @@ jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
+ - name: Maximize build space
+ uses: AdityaGarg8/remove-unwanted-software@v1
+ with:
+ remove-dotnet: 'true'
+ remove-android: 'true'
+ remove-haskell: 'true'
+
- name: Checkout
uses: actions/checkout@v3
with:
diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml
deleted file mode 100644
index 98ae6d4716c..00000000000
--- a/.github/workflows/stale-issues.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: 'Close stale issues and PRs'
-on:
- schedule:
- - cron: '30 1 * * *'
-
-permissions:
- issues: write
- pull-requests: write
-
-jobs:
- stale:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/stale@v7
- with: # core team are exempt
- exempt-issue-assignees: 'AskAlexSharov,realLedgerwatch,AndreaLanfranchi,yperbasis,vorot93,b00ris,JekaMas,mandrigin,Giulio2002,tjayrush,revitteth,hexoscott,nanevardanyan'
- exempt-pr-assignees: 'AskAlexSharov,realLedgerwatch,AndreaLanfranchi,yperbasis,vorot93,b00ris,JekaMas,mandrigin,Giulio2002,tjayrush,revitteth,hexoscott,nanevardanyan'
- stale-issue-message: 'This issue is stale because it has been open for 40 days with no activity. Remove stale label or comment, or this will be closed in 7 days.'
- stale-pr-message: 'This PR is stale because it has been open for 40 days with no activity.'
- close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
- days-before-stale: 40
- days-before-close: 7
- days-before-pr-close: -1 # don't close PRs
diff --git a/.github/workflows/test-integration-caplin.yml b/.github/workflows/test-integration-caplin.yml
new file mode 100644
index 00000000000..ca687f6df70
--- /dev/null
+++ b/.github/workflows/test-integration-caplin.yml
@@ -0,0 +1,63 @@
+name: Consensus specification tests
+on:
+ push:
+ branches:
+ - devel
+ - alpha
+ - 'release/**'
+ pull_request:
+ branches:
+ - devel
+ - alpha
+ - 'release/**'
+ types:
+ - opened
+ - reopened
+ - synchronize
+ - ready_for_review
+
+jobs:
+ tests:
+ strategy:
+ matrix:
+# disable macos-11 until https://github.com/ledgerwatch/erigon/issues/8789
+ os: [ ubuntu-22.04 ] # list of os: https://github.com/actions/virtual-environments
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: '1.20'
+ - name: Install dependencies on Linux
+ if: runner.os == 'Linux'
+ run: sudo apt update && sudo apt install build-essential
+
+ - name: test-integration-caplin
+ run: cd cl/spectest && make tests && make mainnet
+
+ tests-windows:
+ strategy:
+ matrix:
+ os: [ windows-2022 ]
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: '1.20'
+
+ - uses: actions/cache@v3
+ with:
+ path: |
+ C:\ProgramData\chocolatey\lib\mingw
+ C:\ProgramData\chocolatey\lib\cmake
+ key: chocolatey-${{ matrix.os }}
+ - name: Install dependencies
+ run: |
+ choco upgrade mingw -y --no-progress --version 13.2.0
+ choco install cmake -y --no-progress --version 3.27.8
+
+ - name: test-integration-caplin
+ run: cd ./cl/spectest/ && .\wmake.ps1 Tests Mainnet
diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index e2afd1a3d76..22e68e3fa0f 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -4,7 +4,7 @@ on:
branches:
- devel
- alpha
- - stable
+ - 'release/**'
schedule:
- cron: '20 16 * * *' # daily at 16:20 UTC
workflow_dispatch:
@@ -13,7 +13,7 @@ jobs:
tests:
strategy:
matrix:
- os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments
+ os: [ ubuntu-22.04, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments
runs-on: ${{ matrix.os }}
steps:
@@ -21,7 +21,7 @@ jobs:
- run: git submodule update --init --recursive --force
- uses: actions/setup-go@v4
with:
- go-version: '1.19'
+ go-version: '1.20'
- name: Install dependencies on Linux
if: runner.os == 'Linux'
run: sudo apt update && sudo apt install build-essential
@@ -29,8 +29,13 @@ jobs:
- name: test-integration
run: make test-integration
- - name: history-v3-test-integration
- run: make test3-integration
+ - name: Test erigon as a library
+ env:
+ GIT_COMMIT: ${{ github.event.pull_request.head.sha || github.sha }}
+ run: make test-erigon-ext GIT_COMMIT=$GIT_COMMIT
+
+ # name: history-v3-test-integration
+ # run: make test3-integration
tests-windows:
strategy:
@@ -43,7 +48,7 @@ jobs:
- run: git submodule update --init --recursive --force
- uses: actions/setup-go@v4
with:
- go-version: '1.19'
+ go-version: '1.20'
- uses: actions/cache@v3
with:
@@ -53,8 +58,8 @@ jobs:
key: chocolatey-${{ matrix.os }}
- name: Install dependencies
run: |
- choco upgrade mingw -y --no-progress --version 11.2.0.07112021
- choco install cmake -y --no-progress --version 3.23.1
+ choco upgrade mingw -y --no-progress --version 13.2.0
+ choco install cmake -y --no-progress --version 3.27.8
- name: test-integration
run: .\wmake.ps1 test-integration
diff --git a/.gitignore b/.gitignore
index 5df094e3162..3ce4eeca701 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,6 +47,7 @@ profile.cov
# VS Code
.vscode
+*.code-workspace
# dashboard
/dashboard/assets/flow-typed
@@ -70,9 +71,7 @@ __pycache__
libmdbx/build/*
tests/testdata/*
-go.work
-go.work.sum
-erigon-lib/
+go.work*
/goerli
@@ -82,7 +81,20 @@ coverage.out
dist
__debug_bin
+*.exe
*.log
-caplin-recordings
\ No newline at end of file
+caplin-recordings
+
+jwt.hex
+
+.tool-versions
+
+*__debug_bin*
+yarn.lock
+node_modules
+
+/config.toml
+/config.yaml
+/config.yml
diff --git a/.golangci.yml b/.golangci.yml
index e32d7f45c09..ea4a442c1de 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,9 @@
run:
deadline: 10m
+ build-tags:
+ - nosqlite
+ - noboltdb
+ - integration
linters:
presets:
@@ -8,6 +12,7 @@ linters:
- unused
- performance
disable:
+ - gosec # enable it after 1-2 weeks. it failing CI withoutt enough info to fix: https://github.com/ledgerwatch/erigon/actions/runs/5928644129/job/16074701625
- exhaustive
- musttag
- contextcheck
@@ -20,7 +25,10 @@ linters:
- errorlint #TODO: enable me
- errchkjson #TODO: enable me
- unused #TODO: enable me
+ - testifylint #TODO: enable me
+ - perfsprint #TODO: enable me
- gocheckcompilerdirectives
+ - protogetter
enable:
- unconvert
# - predeclared #TODO: enable me
@@ -119,6 +127,7 @@ issues:
- unused
- deadcode
- gocritic
+ - perfsprint
- path: hack\.go
linters:
- gosec
diff --git a/.goreleaser.yml b/.goreleaser.yml
index a6cda699d66..47f44d303a1 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -14,7 +14,7 @@ builds:
env:
- CC=o64-clang
- CXX=o64-clang++
- tags: [ nosqlite, noboltdb ]
+ tags: [ nosqlite, noboltdb, nosilkworm ]
ldflags: -s -w
- id: darwin-arm64
@@ -25,7 +25,7 @@ builds:
env:
- CC=oa64-clang
- CXX=oa64-clang++
- tags: [ nosqlite, noboltdb ]
+ tags: [ nosqlite, noboltdb, nosilkworm ]
ldflags: -s -w
- id: linux-amd64
@@ -36,7 +36,7 @@ builds:
env:
- CC=x86_64-linux-gnu-gcc
- CXX=x86_64-linux-gnu-g++
- tags: [ nosqlite, noboltdb ]
+ tags: [ nosqlite, noboltdb, nosilkworm ]
ldflags: -s -w -extldflags "-static" # We need to build a static binary because we are building in a glibc based system and running in a musl container
- id: linux-arm64
@@ -47,7 +47,7 @@ builds:
env:
- CC=aarch64-linux-gnu-gcc
- CXX=aarch64-linux-gnu-g++
- tags: [ nosqlite, noboltdb ]
+ tags: [ nosqlite, noboltdb, nosilkworm ]
ldflags: -s -w -extldflags "-static" # We need to build a static binary because we are building in a glibc based system and running in a musl container
- id: windows-amd64
@@ -58,7 +58,7 @@ builds:
env:
- CC=x86_64-w64-mingw32-gcc
- CXX=x86_64-w64-mingw32-g++
- tags: [ nosqlite, noboltdb ]
+ tags: [ nosqlite, noboltdb, nosilkworm ]
ldflags: -s -w
diff --git a/Dockerfile b/Dockerfile
index 8c5ffc9fe96..e9850144a85 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,6 +6,8 @@ RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc
WORKDIR /app
ADD go.mod go.mod
ADD go.sum go.sum
+ADD erigon-lib/go.mod erigon-lib/go.mod
+ADD erigon-lib/go.sum erigon-lib/go.sum
RUN go mod download
ADD . .
@@ -13,7 +15,7 @@ ADD . .
RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/tmp/go-build \
--mount=type=cache,target=/go/pkg/mod \
- make all
+ make BUILD_TAGS=nosqlite,noboltdb,nosilkworm all
FROM docker.io/library/golang:1.20-alpine3.17 AS tools-builder
@@ -24,6 +26,8 @@ ADD Makefile Makefile
ADD tools.go tools.go
ADD go.mod go.mod
ADD go.sum go.sum
+ADD erigon-lib/go.mod erigon-lib/go.mod
+ADD erigon-lib/go.sum erigon-lib/go.sum
RUN mkdir -p /app/build/bin
@@ -74,7 +78,7 @@ COPY --from=builder /app/build/bin/sentry /usr/local/bin/sentry
COPY --from=builder /app/build/bin/state /usr/local/bin/state
COPY --from=builder /app/build/bin/txpool /usr/local/bin/txpool
COPY --from=builder /app/build/bin/verkle /usr/local/bin/verkle
-COPY --from=builder /app/build/bin/caplin-phase1 /usr/local/bin/caplin-phase1
+COPY --from=builder /app/build/bin/caplin /usr/local/bin/caplin
COPY --from=builder /app/build/bin/caplin-regression /usr/local/bin/caplin-regression
diff --git a/Dockerfile.debian b/Dockerfile.debian
index 5d7d4e54968..e3e8c9d5d89 100644
--- a/Dockerfile.debian
+++ b/Dockerfile.debian
@@ -1,5 +1,5 @@
# syntax = docker/dockerfile:1.2
-FROM docker.io/library/golang:1.19-bullseye AS builder
+FROM docker.io/library/golang:1.20-bullseye AS builder
RUN apt update
RUN apt install -y build-essential git bash ca-certificates libstdc++6
@@ -17,7 +17,7 @@ RUN --mount=type=cache,target=/root/.cache \
make all
-FROM docker.io/library/golang:1.19-alpine3.16 AS tools-builder
+FROM docker.io/library/golang:1.20-alpine3.17 AS tools-builder
RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
WORKDIR /app
@@ -74,7 +74,7 @@ COPY --from=builder /app/build/bin/sentry /usr/local/bin/sentry
COPY --from=builder /app/build/bin/state /usr/local/bin/state
COPY --from=builder /app/build/bin/txpool /usr/local/bin/txpool
COPY --from=builder /app/build/bin/verkle /usr/local/bin/verkle
-COPY --from=builder /app/build/bin/caplin-phase1 /usr/local/bin/caplin-phase1
+COPY --from=builder /app/build/bin/caplin /usr/local/bin/caplin
COPY --from=builder /app/build/bin/caplin-regression /usr/local/bin/caplin-regression
EXPOSE 8545 \
diff --git a/Makefile b/Makefile
index d9746ce0d73..8890d6c1dc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-GO = go # if using docker, should not need to be installed/linked
+GO ?= go # if using docker, should not need to be installed/linked
GOBIN = $(CURDIR)/build/bin
UNAME = $(shell uname) # Supported: Darwin, Linux
DOCKER := $(shell command -v docker 2> /dev/null)
@@ -24,25 +24,37 @@ CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=0 # Enable MDBX's asserts by default in 'd
#CGO_CFLAGS += -DMDBX_ENV_CHECKPID=0 # Erigon doesn't do fork() syscall
CGO_CFLAGS += -O
CGO_CFLAGS += -D__BLST_PORTABLE__
-CGO_CFLAGS += -Wno-error=strict-prototypes # for Clang15, remove it when can https://github.com/ledgerwatch/erigon/issues/6113#issuecomment-1359526277
+CGO_CFLAGS += -Wno-unknown-warning-option -Wno-enum-int-mismatch -Wno-strict-prototypes -Wno-unused-but-set-variable
+
+CGO_LDFLAGS := $(shell $(GO) env CGO_LDFLAGS 2> /dev/null)
+ifeq ($(shell uname -s), Darwin)
+ ifeq ($(filter-out 13.%,$(shell sw_vers --productVersion)),)
+ CGO_LDFLAGS += -mmacosx-version-min=13.3
+ endif
+endif
# about netgo see: https://github.com/golang/go/issues/30310#issuecomment-471669125 and https://github.com/golang/go/issues/57757
BUILD_TAGS = nosqlite,noboltdb
+
+ifneq ($(shell "$(CURDIR)/turbo/silkworm/silkworm_compat_check.sh"),)
+ BUILD_TAGS := $(BUILD_TAGS),nosilkworm
+endif
+
PACKAGE = github.com/ledgerwatch/erigon
GO_FLAGS += -trimpath -tags $(BUILD_TAGS) -buildvcs=false
GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE}/params.GitBranch=${GIT_BRANCH} -X ${PACKAGE}/params.GitTag=${GIT_TAG}"
-GOBUILD = CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GO_FLAGS)
-GO_DBG_BUILD = CGO_CFLAGS="$(CGO_CFLAGS) -DMDBX_DEBUG=1" $(GO) build -tags $(BUILD_TAGS),debug -gcflags=all="-N -l" # see delve docs
-GOTEST = CGO_CFLAGS="$(CGO_CFLAGS)" GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2
+GOBUILD = CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GO) build $(GO_FLAGS)
+GO_DBG_BUILD = CGO_CFLAGS="$(CGO_CFLAGS) -DMDBX_DEBUG=1" CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GO) build -tags $(BUILD_TAGS),debug -gcflags=all="-N -l" # see delve docs
+GOTEST = CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2
default: all
## go-version: print and verify go version
go-version:
- @if [ $(shell $(GO) version | cut -c 16-17) -lt 19 ]; then \
- echo "minimum required Golang version is 1.19"; \
+ @if [ $(shell $(GO) version | cut -c 16-17) -lt 20 ]; then \
+ echo "minimum required Golang version is 1.20"; \
exit 1 ;\
fi
@@ -105,7 +117,7 @@ erigon: go-version erigon.cmd
@rm -f $(GOBIN)/tg # Remove old binary to prevent confusion where users still use it because of the scripts
COMMANDS += devnet
-COMMANDS += erigon-el-mock
+COMMANDS += capcli
COMMANDS += downloader
COMMANDS += hack
COMMANDS += integration
@@ -119,8 +131,10 @@ COMMANDS += txpool
COMMANDS += verkle
COMMANDS += evm
COMMANDS += sentinel
-COMMANDS += caplin-phase1
+COMMANDS += caplin
COMMANDS += caplin-regression
+COMMANDS += tooling
+
# build each command using %.cmd rule
@@ -134,39 +148,46 @@ db-tools:
@echo "Building db-tools"
go mod vendor
- cd vendor/github.com/torquem-ch/mdbx-go && MDBX_BUILD_TIMESTAMP=unknown make tools
+ cd vendor/github.com/erigontech/mdbx-go && MDBX_BUILD_TIMESTAMP=unknown make tools
mkdir -p $(GOBIN)
- cd vendor/github.com/torquem-ch/mdbx-go/mdbxdist && cp mdbx_chk $(GOBIN) && cp mdbx_copy $(GOBIN) && cp mdbx_dump $(GOBIN) && cp mdbx_drop $(GOBIN) && cp mdbx_load $(GOBIN) && cp mdbx_stat $(GOBIN)
+ cd vendor/github.com/erigontech/mdbx-go/mdbxdist && cp mdbx_chk $(GOBIN) && cp mdbx_copy $(GOBIN) && cp mdbx_dump $(GOBIN) && cp mdbx_drop $(GOBIN) && cp mdbx_load $(GOBIN) && cp mdbx_stat $(GOBIN)
rm -rf vendor
@echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file."
+test-erigon-lib:
+ @cd erigon-lib && $(MAKE) test
+
+test-erigon-ext:
+ @cd tests/erigon-ext-test && ./test.sh $(GIT_COMMIT)
+
## test: run unit tests with a 100s timeout
-test:
- $(GOTEST) --timeout 100s
+test: test-erigon-lib
+ $(GOTEST) --timeout 10m
-test3:
- $(GOTEST) --timeout 100s -tags $(BUILD_TAGS),e3
+test3: test-erigon-lib
+ $(GOTEST) --timeout 10m -tags $(BUILD_TAGS),e3
## test-integration: run integration tests with a 30m timeout
-test-integration:
- $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration
+test-integration: test-erigon-lib
+ $(GOTEST) --timeout 240m -tags $(BUILD_TAGS),integration
-test3-integration:
- $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration,e3
+test3-integration: test-erigon-lib
+ $(GOTEST) --timeout 240m -tags $(BUILD_TAGS),integration,e3
-## lint: run golangci-lint with .golangci.yml config file
-lint:
- @./build/bin/golangci-lint run --config ./.golangci.yml
+## lint-deps: install lint dependencies
+lint-deps:
+ @cd erigon-lib && $(MAKE) lint-deps
-## lintci: run golangci-lint (additionally outputs message before run)
+## lintci: run golangci-lint linters
lintci:
- @echo "--> Running linter for code"
- @./build/bin/golangci-lint run --config ./.golangci.yml
+ @cd erigon-lib && $(MAKE) lintci
+ @./erigon-lib/tools/golangci_lint.sh
-## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint
-lintci-deps:
- rm -f ./build/bin/golangci-lint
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.53.3
+## lint: run all linters
+lint:
+ @cd erigon-lib && $(MAKE) lint
+ @./erigon-lib/tools/golangci_lint.sh
+ @./erigon-lib/tools/mod_tidy_check.sh
## clean: cleans the go cache, build dir, libmdbx db dir
clean:
@@ -213,8 +234,18 @@ git-submodules:
@git submodule sync --quiet --recursive || true
@git submodule update --quiet --init --recursive --force || true
+## install: copies binaries and libraries to DIST
+DIST ?= $(CURDIR)/build/dist
+.PHONY: install
+install:
+ mkdir -p "$(DIST)"
+ cp -f "$$($(CURDIR)/turbo/silkworm/silkworm_lib_path.sh)" "$(DIST)"
+ cp -f "$(GOBIN)/"* "$(DIST)"
+ @echo "Copied files to $(DIST):"
+ @ls -al "$(DIST)"
+
PACKAGE_NAME := github.com/ledgerwatch/erigon
-GOLANG_CROSS_VERSION ?= v1.20.5
+GOLANG_CROSS_VERSION ?= v1.20.7
.PHONY: release-dry-run
release-dry-run: git-submodules
@@ -253,7 +284,6 @@ release: git-submodules
# we need separate envvars to facilitate creation of the erigon user on the host OS.
ERIGON_USER_UID ?= 3473
ERIGON_USER_GID ?= 3473
-ERIGON_USER_XDG_DATA_HOME ?= ~$(ERIGON_USER)/.local/share
## user_linux: create "erigon" user (Linux)
user_linux:
@@ -267,7 +297,7 @@ endif
ifdef DOCKER
sudo usermod -aG docker $(ERIGON_USER)
endif
- sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME)
+ sudo -u $(ERIGON_USER) mkdir -p /home/$(ERIGON_USER)/.local/share
## user_macos: create "erigon" user (MacOS)
user_macos:
@@ -277,7 +307,7 @@ user_macos:
sudo dscl . -create /Users/$(ERIGON_USER) PrimaryGroupID $(ERIGON_USER_GID)
sudo dscl . -create /Users/$(ERIGON_USER) NFSHomeDirectory /Users/$(ERIGON_USER)
sudo dscl . -append /Groups/admin GroupMembership $(ERIGON_USER)
- sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME)
+ sudo -u $(ERIGON_USER) mkdir -p /Users/$(ERIGON_USER)/.local/share
## coverage: run code coverage report and output total coverage %
.PHONY: coverage
diff --git a/README.md b/README.md
index 1687b7fbce9..7ada8d94f97 100644
--- a/README.md
+++ b/README.md
@@ -55,16 +55,17 @@ System Requirements
* Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022).
-* Gnosis Chain Archive: 370GB (January 2023).
+* Gnosis Chain Archive: 600GB (October 2023).
-* Polygon Mainnet Archive: 5TB. Polygon Mumbai Archive: 1TB. (April 2022).
+* Polygon Mainnet Archive: 5TB. (April 2022). `--prune.*.older 15768000`: 5.1Tb (Sept 2023). Polygon Mumbai Archive:
+ 1TB. (April 2022).
SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind.
Bear in mind that SSD performance deteriorates when close to capacity.
RAM: >=16GB, 64-bit architecture.
-[Golang version >= 1.19](https://golang.org/doc/install); GCC 10+ or Clang; On Linux: kernel > v4
+[Golang version >= 1.20](https://golang.org/doc/install); GCC 10+ or Clang; On Linux: kernel > v4
🔬 more details on disk storage [here](https://erigon.substack.com/p/disk-footprint-changes-in-new-erigon?s=r)
and [here](https://ledgerwatch.github.io/turbo_geth_release.html#Disk-space).
@@ -74,10 +75,10 @@ Usage
### Getting Started
-For building the latest stable release (this will be suitable for most users just wanting to run a node):
+For building the latest release (this will be suitable for most users just wanting to run a node):
```sh
-git clone --branch stable --single-branch https://github.com/ledgerwatch/erigon.git
+git clone --branch release/ --single-branch https://github.com/ledgerwatch/erigon.git
cd erigon
make erigon
./build/bin/erigon
@@ -101,8 +102,8 @@ download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloade
Use `--datadir` to choose where to store data.
-Use `--chain=gnosis` for [Gnosis Chain](https://www.gnosis.io/), `--chain=bor-mainnet` for Polygon Mainnet,
-and `--chain=mumbai` for Polygon Mumbai.
+Use `--chain=gnosis` for [Gnosis Chain](https://www.gnosis.io/), `--chain=bor-mainnet` for Polygon Mainnet,
+`--chain=mumbai` for Polygon Mumbai and `--chain=amoy` for Polygon Amoy.
For Gnosis Chain you need a [Consensus Layer](#beacon-chain-consensus-layer) client alongside
Erigon (https://docs.gnosischain.com/node/guide/beacon).
@@ -111,7 +112,8 @@ Running `make help` will list and describe the convenience commands available in
### Datadir structure
- chaindata: recent blocks, state, recent state history. low-latency disk recommended.
-- snapshots: old blocks, old state history. can symlink/mount it to cheaper disk. mostly immutable. must have ~100gb free space (for merge recent files to bigger one).
+- snapshots: old blocks, old state history. can symlink/mount it to cheaper disk. mostly immutable. must have ~100gb
+ free space (for merge recent files to bigger one).
- temp: can grow to ~100gb, but usually empty. can symlink/mount it to cheaper disk.
- txpool: pending transactions. safe to remove.
- nodes: p2p peers. safe to remove.
@@ -141,7 +143,9 @@ int value specifying the highest output log level:
LvlTrace = 5
```
-To set an output dir for logs to be collected on disk, please set `--log.dir.path` If you want to change the filename prodiced from `erigon` you should also set the `--log.dir.prefix` flag to an alternate name. The flag `--log.dir.verbosity` is
+To set an output dir for logs to be collected on disk, please set `--log.dir.path` If you want to change the filename
+produced from `erigon` you should also set the `--log.dir.prefix` flag to an alternate name. The
+flag `--log.dir.verbosity` is
also available to control the verbosity of this logging, with the same int value as above, or the string value e.g. '
debug' or 'info'. Default verbosity is 'debug' (4), for disk logging.
@@ -190,13 +194,7 @@ Support only remote-miners.
, `--miner.gastarget`
* JSON-RPC supports methods: eth_coinbase , eth_hashrate, eth_mining, eth_getWork, eth_submitWork, eth_submitHashrate
* JSON-RPC supports websocket methods: newPendingTransaction
-* TODO:
- + we don't broadcast mined blocks to p2p-network
- yet, [but it's easy to accomplish](https://github.com/ledgerwatch/erigon/blob/9b8cdc0f2289a7cef78218a15043de5bdff4465e/eth/downloader/downloader.go#L673)
- + eth_newPendingTransactionFilter
- + eth_newBlockFilter
- + eth_newFilter
- + websocket Logs
+
🔬 Detailed explanation is [here](/docs/mining.md).
@@ -210,8 +208,8 @@ Windows users may run erigon in 3 possible ways:
build on windows :
* [Git](https://git-scm.com/downloads) for Windows must be installed. If you're cloning this repository is very
likely you already have it
- * [GO Programming Language](https://golang.org/dl/) must be installed. Minimum required version is 1.19
- * GNU CC Compiler at least version 10 (is highly suggested that you install `chocolatey` package manager - see
+ * [GO Programming Language](https://golang.org/dl/) must be installed. Minimum required version is 1.20
+ * GNU CC Compiler at least version 13 (is highly suggested that you install `chocolatey` package manager - see
following point)
* If you need to build MDBX tools (i.e. `.\wmake.ps1 db-tools`)
then [Chocolatey package manager](https://chocolatey.org/) for Windows must be installed. By Chocolatey you need
@@ -260,7 +258,7 @@ of the yaml configuration file and sets the chain to goerli
Example of setting up TOML config file
```
-`datadir = 'your datadir'
+datadir = 'your datadir'
port = 1111
chain = "mainnet"
http = true
@@ -501,7 +499,7 @@ Windows support for docker-compose is not ready yet. Please help us with .ps1 po
`docker compose up prometheus grafana`, [detailed docs](./cmd/prometheus/Readme.md).
-###
+###
old data
@@ -532,58 +530,51 @@ Detailed explanation: [./docs/programmers_guide/db_faq.md](./docs/programmers_gu
#### `erigon` ports
-| Port | Protocol | Purpose | Expose |
-|:-----:|:---------:|:----------------------:|:-------:|
-| 30303 | TCP & UDP | eth/66 peering | Public |
-| 30304 | TCP & UDP | eth/67 peering | Public |
-| 9090 | TCP | gRPC Connections | Private |
-| 42069 | TCP & UDP | Snap sync (Bittorrent) | Public |
-| 6060 | TCP | Metrics or Pprof | Private |
-| 8551 | TCP | Engine API (JWT auth) | Private |
+| Component | Port | Protocol | Purpose | Should Expose |
+|-----------|-------|-----------|-----------------------------|---------------|
+| engine | 9090 | TCP | gRPC Server | Private |
+| engine | 42069 | TCP & UDP | Snap sync (Bittorrent) | Public |
+| engine | 8551 | TCP | Engine API (JWT auth) | Private |
+| sentry | 30303 | TCP & UDP | eth/68 peering | Public |
+| sentry | 30304 | TCP & UDP | eth/67 peering | Public |
+| sentry | 9091 | TCP | incoming gRPC Connections | Private |
+| rpcdaemon | 8545 | TCP | HTTP & WebSockets & GraphQL | Private |
Typically, 30303 and 30304 are exposed to the internet to allow incoming peering connections. 9090 is exposed only
internally for rpcdaemon or other connections, (e.g. rpcdaemon -> erigon).
Port 8551 (JWT authenticated) is exposed only internally for [Engine API] JSON-RPC queries from the Consensus Layer
node.
-#### `RPC` ports
-
-| Port | Protocol | Purpose | Expose |
-|:----:|:--------:|:---------------------------:|:-------:|
-| 8545 | TCP | HTTP & WebSockets & GraphQL | Private |
-
-Typically, 8545 is exposed only internally for JSON-RPC queries. Both HTTP and WebSocket and GraphQL are on the same
-port.
+#### `caplin` ports
-#### `sentry` ports
+| Component | Port | Protocol | Purpose | Should Expose |
+|-----------|------|----------|------------------|---------------|
+| sentinel | 4000 | UDP | Peering | Public |
+| sentinel | 4001 | TCP | Peering | Public |
-| Port | Protocol | Purpose | Expose |
-|:-----:|:---------:|:----------------:|:-------:|
-| 30303 | TCP & UDP | Peering | Public |
-| 9091 | TCP | gRPC Connections | Private |
+If you are using `--internalcl` aka `caplin` as your consensus client, then also look at the chart above
-Typically, a sentry process will run one eth/xx protocol (e.g. eth/66) and will be exposed to the internet on 30303.
-Port
-9091 is for internal gRCP connections (e.g erigon -> sentry).
+#### `beaconAPI` ports
-#### `sentinel` ports
+| Component | Port | Protocol | Purpose | Should Expose |
+|-----------|------|----------|------------------|---------------|
+| REST | 5555 | TCP | REST | Public |
-| Port | Protocol | Purpose | Expose |
-|:----:|:--------:|:----------------:|:-------:|
-| 4000 | UDP | Peering | Public |
-| 4001 | TCP | Peering | Public |
-| 7777 | TCP | gRPC Connections | Private |
+If you are using `--internalcl` aka `caplin` as your consensus client and `--beacon.api` then also look at the chart above
-#### Other ports
+#### `shared` ports
-| Port | Protocol | Purpose | Expose |
-|:----:|:--------:|:-------:|:-------:|
-| 6060 | TCP | pprof | Private |
-| 6060 | TCP | metrics | Private |
+| Component | Port | Protocol | Purpose | Should Expose |
+|-----------|------|----------|---------|---------------|
+| all | 6060 | TCP | pprof | Private |
+| all | 6060 | TCP | metrics | Private |
Optional flags can be enabled that enable pprof or metrics (or both) - however, they both run on 6060 by default, so
+
you'll have to change one if you want to run both at the same time. use `--help` with the binary for more info.
+#### `other` ports
+
Reserved for future use: **gRPC ports**: `9092` consensus engine, `9093` snapshot downloader, `9094` TxPool
#### Hetzner expecting strict firewall rules
@@ -592,7 +583,7 @@ Reserved for future use: **gRPC ports**: `9092` consensus engine, `9093` snapsho
0.0.0.0/8 "This" Network RFC 1122, Section 3.2.1.3
10.0.0.0/8 Private-Use Networks RFC 1918
100.64.0.0/10 Carrier-Grade NAT (CGN) RFC 6598, Section 7
-127.16.0.0/12 Private-Use Networks RFC 1918
+127.16.0.0/12 Private-Use Networks RFC 1918
169.254.0.0/16 Link Local RFC 3927
172.16.0.0/12 Private-Use Networks RFC 1918
192.0.0.0/24 IETF Protocol Assignments RFC 5736
@@ -600,16 +591,17 @@ Reserved for future use: **gRPC ports**: `9092` consensus engine, `9093` snapsho
192.88.99.0/24 6to4 Relay Anycast RFC 3068
192.168.0.0/16 Private-Use Networks RFC 1918
198.18.0.0/15 Network Interconnect
- Device Benchmark Testing RFC 2544
+Device Benchmark Testing RFC 2544
198.51.100.0/24 TEST-NET-2 RFC 5737
203.0.113.0/24 TEST-NET-3 RFC 5737
224.0.0.0/4 Multicast RFC 3171
240.0.0.0/4 Reserved for Future Use RFC 1112, Section 4
255.255.255.255/32 Limited Broadcast RFC 919, Section 7
- RFC 922, Section 7
+RFC 922, Section 7
```
-Same in [IpTables syntax](https://ethereum.stackexchange.com/questions/6386/how-to-prevent-being-blacklisted-for-running-an-ethereum-client/13068#13068)
+Same
+in [IpTables syntax](https://ethereum.stackexchange.com/questions/6386/how-to-prevent-being-blacklisted-for-running-an-ethereum-client/13068#13068)
### How to get diagnostic for bug report?
@@ -631,16 +623,29 @@ UID/GID (1000).
More details
in [post](https://www.fullstaq.com/knowledge-hub/blogs/docker-and-the-host-filesystem-owner-matching-problem)
+### How to run public RPC api
+
+- `--txpool.nolocals=true`
+- don't add `admin` in `--http.api` list
+- to increase throughput may need
+ increase/decrease: `--db.read.concurrency`, `--rpc.batch.concurrency`, `--rpc.batch.limit`
+
### Run RaspberyPI
https://github.com/mathMakesArt/Erigon-on-RPi-4
+### How to change db pagesize
+
+[post](https://github.com/ledgerwatch/erigon/blob/devel/cmd/integration/Readme.md#copy-data-to-another-db)
+
+
Getting in touch
================
### Erigon Discord Server
-The main discussions are happening on our Discord server. To get an invite, send an email to `bloxster [at] proton.me` with
+The main discussions are happening on our Discord server. To get an invite, send an email to `bloxster [at] proton.me`
+with
your name, occupation, a brief explanation of why you want to join the Discord, and how you heard about Erigon.
### Reporting security issues/concerns
@@ -672,15 +677,17 @@ Next tools show correct memory usage of Erigon:
browser `localhost:3000`, credentials `admin/admin`)
- `cat /proc//smaps`
-Erigon uses ~4Gb of RAM during genesis sync and ~1Gb during normal work. OS pages cache can utilize unlimited amount of
-memory.
-
-**Warning:** Multiple instances of Erigon on same machine will touch Disk concurrently, it impacts performance - one of
-main Erigon optimisations: "reduce Disk random access".
-"Blocks Execution stage" still does many random reads - this is reason why it's slowest stage. We do not recommend
-running
-multiple genesis syncs on same Disk. If genesis sync passed, then it's fine to run multiple Erigon instances on same
-Disk.
+ Erigon uses ~4Gb of RAM during genesis sync and ~1Gb during normal work. OS pages cache can utilize unlimited amount
+ of
+ memory.
+
+ **Warning:** Multiple instances of Erigon on same machine will touch Disk concurrently, it impacts performance - one
+ of
+ main Erigon optimisations: "reduce Disk random access".
+ "Blocks Execution stage" still does many random reads - this is reason why it's slowest stage. We do not recommend
+ running
+ multiple genesis syncs on same Disk. If genesis sync passed, then it's fine to run multiple Erigon instances on same
+ Disk.
### Blocks Execution is slow on cloud-network-drives
diff --git a/TESTING.md b/TESTING.md
index e57163f5473..9302b7772d1 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -2,7 +2,7 @@ Testing of new releases of Erigon should ideally include these checks.
## Incremental Sync
-This check requires having the Erigon database synced previously. Lets assume (for command line examples) it is in the
+This check requires having the Erigon database synced previously. Let's assume (for command line examples) it is in the
directory `~/mainnet/erigon/chaindata`.
Using `git pull` or `git checkout`, update the code to the version that is to be released (or very close to it). Then,
build erigon executable:
@@ -306,7 +306,7 @@ Example of recording command:
./build/bin/rpctest bench8 --erigonUrl http://192.168.1.2:8545 --gethUrl http://192.168.1.1:8545 --needCompare --blockFrom 9000000 --blockTo 9000100 --recordFile req.txt
```
-The file format is plain text, with requests and responses are written in separate lines, and delimited by the tripple
+The file format is plain text, with requests and responses are written in separate lines, and delimited by the triple
line breaks, like this:
```
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 56414a88050..2ca829d7759 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -1137,7 +1137,7 @@ func TestUnpackRevert(t *testing.T) {
}
for index, c := range cases {
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
- got, err := UnpackRevert(common.Hex2Bytes(c.input))
+ got, err := UnpackRevert(libcommon.Hex2Bytes(c.input))
if c.expectErr != nil {
if err == nil {
t.Fatalf("Expected non-nil error")
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 9ec89a6fb63..e1b8a639036 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -30,7 +30,6 @@ import (
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/fixedgas"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
state2 "github.com/ledgerwatch/erigon-lib/state"
@@ -148,7 +147,7 @@ func (b *SimulatedBackend) Commit() {
Headers: []*types.Header{b.pendingHeader},
Blocks: []*types.Block{b.pendingBlock},
TopBlock: b.pendingBlock,
- }, nil); err != nil {
+ }); err != nil {
panic(err)
}
//nolint:prealloc
@@ -174,11 +173,11 @@ func (b *SimulatedBackend) emptyPendingBlock() {
b.pendingBlock = blockChain.Blocks[0]
b.pendingReceipts = blockChain.Receipts[0]
b.pendingHeader = blockChain.Headers[0]
- b.gasPool = new(core.GasPool).AddGas(b.pendingHeader.GasLimit).AddBlobGas(fixedgas.MaxBlobGasPerBlock)
+ b.gasPool = new(core.GasPool).AddGas(b.pendingHeader.GasLimit).AddBlobGas(b.m.ChainConfig.GetMaxBlobGasPerBlock())
if b.pendingReaderTx != nil {
b.pendingReaderTx.Rollback()
}
- tx, err := b.m.DB.BeginRo(context.Background())
+ tx, err := b.m.DB.BeginRo(context.Background()) //nolint:gocritic
if err != nil {
panic(err)
}
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 0d33ca281b9..0f14ad4703a 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -467,7 +467,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) {
Gas: 0,
GasPrice: u256.Num0,
Value: nil,
- Data: common.Hex2Bytes("d8b98391"),
+ Data: libcommon.Hex2Bytes("d8b98391"),
}, 0, errors.New("execution reverted: revert reason"), "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000"},
{"PureRevert", ethereum.CallMsg{
@@ -476,7 +476,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) {
Gas: 0,
GasPrice: u256.Num0,
Value: nil,
- Data: common.Hex2Bytes("aa8b1d30"),
+ Data: libcommon.Hex2Bytes("aa8b1d30"),
}, 0, errors.New("execution reverted"), nil},
{"OOG", ethereum.CallMsg{
@@ -485,7 +485,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) {
Gas: 100000,
GasPrice: u256.Num0,
Value: nil,
- Data: common.Hex2Bytes("50f6fe34"),
+ Data: libcommon.Hex2Bytes("50f6fe34"),
}, 0, errors.New("gas required exceeds allowance (100000)"), nil},
{"Assert", ethereum.CallMsg{
@@ -494,7 +494,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) {
Gas: 100000,
GasPrice: u256.Num0,
Value: nil,
- Data: common.Hex2Bytes("b9b046f9"),
+ Data: libcommon.Hex2Bytes("b9b046f9"),
}, 0, errors.New("invalid opcode: INVALID"), nil},
{"Valid", ethereum.CallMsg{
@@ -503,7 +503,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) {
Gas: 100000,
GasPrice: u256.Num0,
Value: nil,
- Data: common.Hex2Bytes("e09fface"),
+ Data: libcommon.Hex2Bytes("e09fface"),
}, 21275, nil, nil},
}
for _, c := range cases {
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index ca2d4e19e31..e16c4ad0485 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -18,6 +18,7 @@ package bind_test
import (
"context"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/big"
"reflect"
"strings"
@@ -28,7 +29,6 @@ import (
ethereum "github.com/ledgerwatch/erigon"
"github.com/ledgerwatch/erigon/accounts/abi"
"github.com/ledgerwatch/erigon/accounts/abi/bind"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/rlp"
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 0c745016f9e..49f576137c4 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -185,25 +185,25 @@ func TestPackNumber(t *testing.T) {
packed []byte
}{
// Protocol limits
- {reflect.ValueOf(0), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")},
- {reflect.ValueOf(1), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")},
- {reflect.ValueOf(-1), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
+ {reflect.ValueOf(0), libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")},
+ {reflect.ValueOf(1), libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")},
+ {reflect.ValueOf(-1), libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
// Type corner cases
- {reflect.ValueOf(uint8(math.MaxUint8)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000ff")},
- {reflect.ValueOf(uint16(math.MaxUint16)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000ffff")},
- {reflect.ValueOf(uint32(math.MaxUint32)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000ffffffff")},
- {reflect.ValueOf(uint64(math.MaxUint64)), common.Hex2Bytes("000000000000000000000000000000000000000000000000ffffffffffffffff")},
-
- {reflect.ValueOf(int8(math.MaxInt8)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000007f")},
- {reflect.ValueOf(int16(math.MaxInt16)), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000007fff")},
- {reflect.ValueOf(int32(math.MaxInt32)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000007fffffff")},
- {reflect.ValueOf(int64(math.MaxInt64)), common.Hex2Bytes("0000000000000000000000000000000000000000000000007fffffffffffffff")},
-
- {reflect.ValueOf(int8(math.MinInt8)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80")},
- {reflect.ValueOf(int16(math.MinInt16)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000")},
- {reflect.ValueOf(int32(math.MinInt32)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")},
- {reflect.ValueOf(int64(math.MinInt64)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
+ {reflect.ValueOf(uint8(math.MaxUint8)), libcommon.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000ff")},
+ {reflect.ValueOf(uint16(math.MaxUint16)), libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000ffff")},
+ {reflect.ValueOf(uint32(math.MaxUint32)), libcommon.Hex2Bytes("00000000000000000000000000000000000000000000000000000000ffffffff")},
+ {reflect.ValueOf(uint64(math.MaxUint64)), libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000ffffffffffffffff")},
+
+ {reflect.ValueOf(int8(math.MaxInt8)), libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000007f")},
+ {reflect.ValueOf(int16(math.MaxInt16)), libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000007fff")},
+ {reflect.ValueOf(int32(math.MaxInt32)), libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000007fffffff")},
+ {reflect.ValueOf(int64(math.MaxInt64)), libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000007fffffffffffffff")},
+
+ {reflect.ValueOf(int8(math.MinInt8)), libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80")},
+ {reflect.ValueOf(int16(math.MinInt16)), libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000")},
+ {reflect.ValueOf(int32(math.MinInt32)), libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")},
+ {reflect.ValueOf(int64(math.MinInt64)), libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
}
for i, tt := range tests {
packed := packNum(tt.value)
diff --git a/accounts/abi/packing_test.go b/accounts/abi/packing_test.go
index d960223f897..4248596079f 100644
--- a/accounts/abi/packing_test.go
+++ b/accounts/abi/packing_test.go
@@ -20,8 +20,6 @@ import (
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
-
- "github.com/ledgerwatch/erigon/common"
)
type packUnpackTest struct {
@@ -378,7 +376,7 @@ var packUnpackTests = []packUnpackTest{
packed: "0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0100000000000000000000000000000000000000000000000000000000000000",
- unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
+ unpacked: libcommon.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
def: `[{"type": "bytes32"}]`,
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 880fb589335..083f329b75e 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -258,8 +258,8 @@ func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
}
var (
- marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
- marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
+ marshalledReturn32 = libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
+ marshalledReturn15 = libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
out32 [][32]byte
out15 [][15]byte
@@ -273,11 +273,11 @@ func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
if len(out32) != 2 {
t.Fatalf("expected array with 2 values, got %d", len(out32))
}
- expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
+ expected := libcommon.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
if !bytes.Equal(out32[0][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out32[0])
}
- expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
+ expected = libcommon.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
if !bytes.Equal(out32[1][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out32[1])
}
@@ -290,11 +290,11 @@ func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
if len(out15) != 2 {
t.Fatalf("expected array with 2 values, got %d", len(out15))
}
- expected = common.Hex2Bytes("307830313233343500000000000000")
+ expected = libcommon.Hex2Bytes("307830313233343500000000000000")
if !bytes.Equal(out15[0][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out15[0])
}
- expected = common.Hex2Bytes("307839383736353400000000000000")
+ expected = libcommon.Hex2Bytes("307839383736353400000000000000")
if !bytes.Equal(out15[1][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out15[1])
}
@@ -314,9 +314,9 @@ func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOut
require.NoError(err)
// using buff to make the code readable
buff := new(bytes.Buffer)
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
buff.Write(common.RightPadBytes([]byte(expected.String), 32))
return abi, buff.Bytes(), expected
}
@@ -402,8 +402,8 @@ func TestMultiReturnWithArray(t *testing.T) {
t.Fatal(err)
}
buff := new(bytes.Buffer)
- buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008"))
+ buff.Write(libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008"))
ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9}
ret2, ret2Exp := new(uint64), uint64(8)
@@ -425,7 +425,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
t.Fatal(err)
}
buff := new(bytes.Buffer)
- buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
ret2, ret2Exp := new(libcommon.Address), libcommon.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
@@ -455,18 +455,18 @@ func TestMultiReturnWithStringSlice(t *testing.T) {
t.Fatal(err)
}
buff := new(bytes.Buffer)
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
- buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
- buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
- buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
+ buff.Write(libcommon.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
+ buff.Write(libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
+ buff.Write(libcommon.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
@@ -493,14 +493,14 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
buff := new(bytes.Buffer)
// construct the test array, each 3 char element is joined with 61 '0' chars,
// to from the ((3 + 61) * 0.5) = 32 byte elements in the array.
- buff.Write(common.Hex2Bytes(strings.Join([]string{
+ buff.Write(libcommon.Hex2Bytes(strings.Join([]string{
"", //empty, to apply the 61-char separator to the first element as well.
"111", "112", "113", "121", "122", "123",
"211", "212", "213", "221", "222", "223",
"311", "312", "313", "321", "322", "323",
"411", "412", "413", "421", "422", "423",
}, "0000000000000000000000000000000000000000000000000000000000000")))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000009876"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000009876"))
ret1, ret1Exp := new([4][2][3]uint64), [4][2][3]uint64{
{{0x111, 0x112, 0x113}, {0x121, 0x122, 0x123}},
@@ -539,14 +539,14 @@ func TestUnmarshal(t *testing.T) {
buff := new(bytes.Buffer)
// marshall mixed bytes (mixedBytes)
- p0, p0Exp := []byte{}, common.Hex2Bytes("01020000000000000000")
- p1, p1Exp := [32]byte{}, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff")
+ p0, p0Exp := []byte{}, libcommon.Hex2Bytes("01020000000000000000")
+ p1, p1Exp := [32]byte{}, libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff")
mixedBytes := []interface{}{&p0, &p1}
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff"))
- buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a"))
- buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000ddeeff"))
+ buff.Write(libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a"))
+ buff.Write(libcommon.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
err = abi.UnpackIntoInterface(&mixedBytes, "mixedBytes", buff.Bytes())
if err != nil {
@@ -563,7 +563,7 @@ func TestUnmarshal(t *testing.T) {
// marshal int
var Int *big.Int
- err = abi.UnpackIntoInterface(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ err = abi.UnpackIntoInterface(&Int, "int", libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
if err != nil {
t.Error(err)
}
@@ -574,7 +574,7 @@ func TestUnmarshal(t *testing.T) {
// marshal bool
var Bool bool
- err = abi.UnpackIntoInterface(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ err = abi.UnpackIntoInterface(&Bool, "bool", libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
if err != nil {
t.Error(err)
}
@@ -585,8 +585,8 @@ func TestUnmarshal(t *testing.T) {
// marshal dynamic bytes max length 32
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
bytesOut := common.RightPadBytes([]byte("hello"), 32)
buff.Write(bytesOut)
@@ -602,8 +602,8 @@ func TestUnmarshal(t *testing.T) {
// marshall dynamic bytes max length 64
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
bytesOut = common.RightPadBytes([]byte("hello"), 64)
buff.Write(bytesOut)
@@ -618,8 +618,8 @@ func TestUnmarshal(t *testing.T) {
// marshall dynamic bytes max length 64
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
- buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
bytesOut = common.RightPadBytes([]byte("hello"), 64)
buff.Write(bytesOut)
@@ -640,8 +640,8 @@ func TestUnmarshal(t *testing.T) {
// marshal dynamic bytes length 5
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
buff.Write(common.RightPadBytes([]byte("hello"), 32))
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
@@ -670,7 +670,7 @@ func TestUnmarshal(t *testing.T) {
// marshal error
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes())
if err == nil {
t.Error("expected error")
@@ -682,9 +682,9 @@ func TestUnmarshal(t *testing.T) {
}
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
// marshal int array
var intArray [3]*big.Int
err = abi.UnpackIntoInterface(&intArray, "intArraySingle", buff.Bytes())
@@ -703,9 +703,9 @@ func TestUnmarshal(t *testing.T) {
}
// marshal address slice
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
- buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) // offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
var outAddr []libcommon.Address
err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes())
@@ -723,13 +723,13 @@ func TestUnmarshal(t *testing.T) {
// marshal multiple address slice
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
- buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
- buff.Write(common.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
- buff.Write(common.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // size
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // size
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000200000000000000000000000000000000000000"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000300000000000000000000000000000000000000"))
var outAddrStruct struct {
A []libcommon.Address
@@ -761,7 +761,7 @@ func TestUnmarshal(t *testing.T) {
// marshal invalid address slice
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100"))
err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes())
if err == nil {
@@ -777,8 +777,8 @@ func TestUnpackTuple(t *testing.T) {
}
buff := new(bytes.Buffer)
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
- buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
+ buff.Write(libcommon.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
// If the result is single tuple, use struct as return value container directly.
v := struct {
@@ -810,21 +810,21 @@ func TestUnpackTuple(t *testing.T) {
t.Fatal(err)
}
buff.Reset()
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
- buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[0] = 2
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
- buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
+ buff.Write(libcommon.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[0] = 2
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
+ buff.Write(libcommon.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
type T struct {
X *big.Int `abi:"x"`
diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go
index da949eed384..a4d098e3206 100644
--- a/cl/abstract/beacon_state.go
+++ b/cl/abstract/beacon_state.go
@@ -32,7 +32,7 @@ type BeaconStateExtension interface {
BaseReward(index uint64) (uint64, error)
SyncRewards() (proposerReward, participantReward uint64, err error)
CommitteeCount(epoch uint64) uint64
- GetAttestationParticipationFlagIndicies(data solid.AttestationData, inclusionDelay uint64) ([]uint8, error)
+ GetAttestationParticipationFlagIndicies(data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error)
GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error)
ComputeNextSyncCommittee() (*solid.SyncCommittee, error)
GetAttestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool) ([]uint64, error)
@@ -43,6 +43,24 @@ type BeaconStateExtension interface {
}
type BeaconStateBasic interface {
+ BeaconStateMinimal
+ BeaconStateExtra
+ BeaconStateMutator
+ BeaconStateSSZ
+
+ Clone() clonable.Clonable
+ DebugPrint(prefix string)
+}
+
+type BeaconStateSSZ interface {
+ BlockRoot() ([32]byte, error)
+ EncodeSSZ(buf []byte) ([]byte, error)
+ DecodeSSZ(buf []byte, version int) error
+ EncodingSizeSSZ() (size int)
+ HashSSZ() (out [32]byte, err error)
+}
+
+type BeaconStateMutator interface {
SetVersion(version clparams.StateVersion)
SetSlot(slot uint64)
SetFork(fork *cltypes.Fork)
@@ -56,8 +74,6 @@ type BeaconStateBasic interface {
SetActivationEpochForValidatorAtIndex(index int, epoch uint64)
SetActivationEligibilityEpochForValidatorAtIndex(index int, epoch uint64)
SetEth1Data(eth1Data *cltypes.Eth1Data)
- AddEth1DataVote(vote *cltypes.Eth1Data)
- ResetEth1DataVotes()
SetEth1DepositIndex(eth1DepositIndex uint64)
SetValidatorSlashed(index int, slashed bool) error
SetValidatorMinCurrentInclusionDelayAttestation(index int, value *solid.PendingAttestation) error
@@ -69,13 +85,11 @@ type BeaconStateBasic interface {
SetValidatorIsPreviousMatchingTargetAttester(index int, value bool) error
SetValidatorIsPreviousMatchingHeadAttester(index int, value bool) error
SetValidatorBalance(index int, balance uint64) error
- AddValidator(validator solid.Validator, balance uint64)
SetRandaoMixAt(index int, mix common.Hash)
SetSlashingSegmentAt(index int, segment uint64)
- IncrementSlashingSegmentAt(index int, delta uint64)
SetEpochParticipationForValidatorIndex(isCurrentEpoch bool, index int, flags cltypes.ParticipationFlags)
SetValidatorAtIndex(index int, validator solid.Validator)
- ResetEpochParticipation()
+
SetJustificationBits(justificationBits cltypes.JustificationBits)
SetPreviousJustifiedCheckpoint(previousJustifiedCheckpoint solid.Checkpoint)
SetCurrentJustifiedCheckpoint(currentJustifiedCheckpoint solid.Checkpoint)
@@ -85,41 +99,37 @@ type BeaconStateBasic interface {
SetLatestExecutionPayloadHeader(header *cltypes.Eth1Header)
SetNextWithdrawalIndex(index uint64)
SetNextWithdrawalValidatorIndex(index uint64)
- ResetHistoricalSummaries()
- AddHistoricalSummary(summary *cltypes.HistoricalSummary)
- AddHistoricalRoot(root common.Hash)
SetInactivityScores(scores []uint64)
- AddInactivityScore(score uint64)
SetValidatorInactivityScore(index int, score uint64) error
SetCurrentEpochParticipationFlags(flags []cltypes.ParticipationFlags)
SetPreviousEpochParticipationFlags(flags []cltypes.ParticipationFlags)
+ SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation])
+
+ AddEth1DataVote(vote *cltypes.Eth1Data)
+ AddValidator(validator solid.Validator, balance uint64)
+ AddHistoricalSummary(summary *cltypes.HistoricalSummary)
+ AddHistoricalRoot(root common.Hash)
+ AddInactivityScore(score uint64)
AddCurrentEpochParticipationFlags(flags cltypes.ParticipationFlags)
AddPreviousEpochParticipationFlags(flags cltypes.ParticipationFlags)
AddPreviousEpochParticipationAt(index int, delta byte)
AddCurrentEpochAtteastation(attestation *solid.PendingAttestation)
AddPreviousEpochAttestation(attestation *solid.PendingAttestation)
+
+ AppendValidator(in solid.Validator)
+
+ ResetEth1DataVotes()
+ ResetEpochParticipation()
+ ResetHistoricalSummaries()
ResetCurrentEpochAttestations()
- SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation])
ResetPreviousEpochAttestations()
- BeaconConfig() *clparams.BeaconChainConfig
- Version() clparams.StateVersion
- GenesisTime() uint64
- GenesisValidatorsRoot() common.Hash
- Slot() uint64
- PreviousSlot() uint64
- Fork() *cltypes.Fork
- LatestBlockHeader() cltypes.BeaconBlockHeader
- BlockRoots() solid.HashVectorSSZ
- StateRoots() solid.HashVectorSSZ
- Eth1Data() *cltypes.Eth1Data
- Eth1DataVotes() *solid.ListSSZ[*cltypes.Eth1Data]
- Eth1DepositIndex() uint64
+}
+
+type BeaconStateExtra interface {
ValidatorLength() int
- AppendValidator(in solid.Validator)
- ForEachValidator(fn func(v solid.Validator, idx int, total int) bool)
- ValidatorForValidatorIndex(index int) (solid.Validator, error)
- ForEachBalance(fn func(v uint64, idx int, total int) bool)
ValidatorBalance(index int) (uint64, error)
+ RandaoMixes() solid.HashVectorSSZ
+ ForEachBalance(fn func(v uint64, idx int, total int) bool)
ValidatorExitEpoch(index int) (uint64, error)
ValidatorWithdrawableEpoch(index int) (uint64, error)
ValidatorEffectiveBalance(index int) (uint64, error)
@@ -131,36 +141,52 @@ type BeaconStateBasic interface {
ValidatorIsPreviousMatchingSourceAttester(idx int) (bool, error)
ValidatorIsPreviousMatchingTargetAttester(idx int) (bool, error)
ValidatorIsPreviousMatchingHeadAttester(idx int) (bool, error)
- RandaoMixes() solid.HashVectorSSZ
GetRandaoMixes(epoch uint64) [32]byte
GetRandaoMix(index int) [32]byte
+ EpochParticipationForValidatorIndex(isCurrentEpoch bool, index int) cltypes.ParticipationFlags
+ GetBlockRootAtSlot(slot uint64) (common.Hash, error)
+ GetDomain(domainType [4]byte, epoch uint64) ([]byte, error)
+}
+
+type BeaconStateMinimal interface {
+ BeaconConfig() *clparams.BeaconChainConfig
+ Version() clparams.StateVersion
+ GenesisTime() uint64
+ GenesisValidatorsRoot() common.Hash
+ Slot() uint64
+ PreviousSlot() uint64
+ Fork() *cltypes.Fork
+ LatestBlockHeader() cltypes.BeaconBlockHeader
+ BlockRoots() solid.HashVectorSSZ
+ StateRoots() solid.HashVectorSSZ
+ Eth1Data() *cltypes.Eth1Data
+ Eth1DataVotes() *solid.ListSSZ[*cltypes.Eth1Data]
+ Eth1DepositIndex() uint64
+
+ ForEachValidator(fn func(v solid.Validator, idx int, total int) bool)
+ ValidatorForValidatorIndex(index int) (solid.Validator, error)
+
ForEachSlashingSegment(fn func(idx int, v uint64, total int) bool)
SlashingSegmentAt(pos int) uint64
+
EpochParticipation(currentEpoch bool) *solid.BitList
JustificationBits() cltypes.JustificationBits
- EpochParticipationForValidatorIndex(isCurrentEpoch bool, index int) cltypes.ParticipationFlags
+
PreviousJustifiedCheckpoint() solid.Checkpoint
CurrentJustifiedCheckpoint() solid.Checkpoint
- ValidatorInactivityScore(index int) (uint64, error)
FinalizedCheckpoint() solid.Checkpoint
+ ValidatorInactivityScore(index int) (uint64, error)
CurrentSyncCommittee() *solid.SyncCommittee
NextSyncCommittee() *solid.SyncCommittee
LatestExecutionPayloadHeader() *cltypes.Eth1Header
NextWithdrawalIndex() uint64
+ NextWithdrawalValidatorIndex() uint64
+ // HistoricalSummary has no accessor yet.
+
CurrentEpochAttestations() *solid.ListSSZ[*solid.PendingAttestation]
CurrentEpochAttestationsLength() int
PreviousEpochAttestations() *solid.ListSSZ[*solid.PendingAttestation]
PreviousEpochAttestationsLength() int
- NextWithdrawalValidatorIndex() uint64
- GetBlockRootAtSlot(slot uint64) (common.Hash, error)
- GetDomain(domainType [4]byte, epoch uint64) ([]byte, error)
- DebugPrint(prefix string)
- BlockRoot() ([32]byte, error)
- EncodeSSZ(buf []byte) ([]byte, error)
- DecodeSSZ(buf []byte, version int) error
- EncodingSizeSSZ() (size int)
- Clone() clonable.Clonable
- HashSSZ() (out [32]byte, err error)
}
// TODO figure this out
diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go
new file mode 100644
index 00000000000..2042d78b5ca
--- /dev/null
+++ b/cl/antiquary/antiquary.go
@@ -0,0 +1,269 @@
+package antiquary
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/spf13/afero"
+)
+
+const safetyMargin = 10_000 // We retire snapshots 10k blocks after the finalized head
+
+// Antiquary is where the snapshots go, aka old history, it is what keep track of the oldest records.
+type Antiquary struct {
+ mainDB kv.RwDB // this is the main DB
+ dirs datadir.Dirs
+ downloader proto_downloader.DownloaderClient
+ logger log.Logger
+ sn *freezeblocks.CaplinSnapshots
+ snReader freezeblocks.BeaconSnapshotReader
+ ctx context.Context
+ beaconDB persistence.BlockSource
+ backfilled *atomic.Bool
+ cfg *clparams.BeaconChainConfig
+ states bool
+ fs afero.Fs
+ validatorsTable *state_accessors.StaticValidatorTable
+ genesisState *state.CachingBeaconState
+ // set to nil
+ currentState *state.CachingBeaconState
+ balances32 []byte
+}
+
+func NewAntiquary(ctx context.Context, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, beaconDB persistence.BlockSource, logger log.Logger, states bool, fs afero.Fs) *Antiquary {
+ backfilled := &atomic.Bool{}
+ backfilled.Store(false)
+ return &Antiquary{
+ mainDB: mainDB,
+ dirs: dirs,
+ downloader: downloader,
+ logger: logger,
+ sn: sn,
+ beaconDB: beaconDB,
+ ctx: ctx,
+ backfilled: backfilled,
+ cfg: cfg,
+ states: states,
+ snReader: reader,
+ fs: fs,
+ validatorsTable: validatorsTable,
+ genesisState: genesisState,
+ }
+}
+
+// Antiquate is the function that starts transactions seeding and shit, very cool but very shit too as a name.
+func (a *Antiquary) Loop() error {
+ if a.downloader == nil {
+ return nil // Just skip if we don't have a downloader
+ }
+ // Skip if we dont support backfilling for the current network
+ if !clparams.SupportBackfilling(a.cfg.DepositNetworkID) {
+ return nil
+ }
+ statsReply, err := a.downloader.Stats(a.ctx, &proto_downloader.StatsRequest{})
+ if err != nil {
+ return err
+ }
+ reCheckTicker := time.NewTicker(3 * time.Second)
+ defer reCheckTicker.Stop()
+ // Fist part of the antiquate is to download caplin snapshots
+ for !statsReply.Completed {
+ select {
+ case <-reCheckTicker.C:
+ statsReply, err = a.downloader.Stats(a.ctx, &proto_downloader.StatsRequest{})
+ if err != nil {
+ return err
+ }
+ case <-a.ctx.Done():
+ }
+ }
+ if err := a.sn.BuildMissingIndices(a.ctx, a.logger, log.LvlDebug); err != nil {
+ return err
+ }
+ // Here we need to start mdbx transaction and lock the thread
+ log.Info("[Antiquary]: Stopping Caplin to process historical indicies")
+ tx, err := a.mainDB.BeginRw(a.ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // read the last beacon snapshots
+ from, err := beacon_indicies.ReadLastBeaconSnapshot(tx)
+ if err != nil {
+ return err
+ }
+ logInterval := time.NewTicker(30 * time.Second)
+ if err := a.sn.ReopenFolder(); err != nil {
+ return err
+ }
+ defer logInterval.Stop()
+
+ // Now write the snapshots as indicies
+ for i := from; i < a.sn.BlocksAvailable(); i++ {
+ // read the snapshot
+ header, elBlockNumber, elBlockHash, err := a.sn.ReadHeader(i)
+ if err != nil {
+ return err
+ }
+ if header == nil {
+ continue
+ }
+ blockRoot, err := header.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ if err := beacon_indicies.MarkRootCanonical(a.ctx, tx, header.Header.Slot, blockRoot); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteHeaderSlot(tx, blockRoot, header.Header.Slot); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteStateRoot(tx, blockRoot, header.Header.Root); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteParentBlockRoot(a.ctx, tx, blockRoot, header.Header.ParentRoot); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteExecutionBlockNumber(tx, blockRoot, elBlockNumber); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteExecutionBlockHash(tx, blockRoot, elBlockHash); err != nil {
+ return err
+ }
+ select {
+ case <-logInterval.C:
+ log.Info("[Antiquary]: Processed snapshots", "progress", i, "target", a.sn.BlocksAvailable())
+ case <-a.ctx.Done():
+ default:
+ }
+ }
+
+ frozenSlots := a.sn.BlocksAvailable()
+ if frozenSlots != 0 {
+ if err := a.beaconDB.PurgeRange(a.ctx, tx, 0, frozenSlots); err != nil {
+ return err
+ }
+ }
+
+ if a.states {
+ go a.loopStates(a.ctx)
+ }
+
+ // write the indicies
+ if err := beacon_indicies.WriteLastBeaconSnapshot(tx, frozenSlots); err != nil {
+ return err
+ }
+ log.Info("[Antiquary]: Restarting Caplin")
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ // Check for snapshots retirement every 3 minutes
+ retirementTicker := time.NewTicker(3 * time.Minute)
+ defer retirementTicker.Stop()
+ for {
+ select {
+ case <-retirementTicker.C:
+ if !a.backfilled.Load() {
+ continue
+ }
+ var (
+ from uint64
+ to uint64
+ )
+ if err := a.mainDB.View(a.ctx, func(roTx kv.Tx) error {
+ // read the last beacon snapshots
+ from, err = beacon_indicies.ReadLastBeaconSnapshot(roTx)
+ if err != nil {
+ return err
+ }
+ from += 1
+ // read the finalized head
+ to, err = beacon_indicies.ReadHighestFinalized(roTx)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ // Sanity checks just to be safe.
+ if from >= to {
+ continue
+ }
+ to = utils.Min64(to, to-safetyMargin) // We don't want to retire snapshots that are too close to the finalized head
+ to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit
+ if to-from < snaptype.Erigon2RecentMergeLimit {
+ continue
+ }
+ if err := a.antiquate(from, to); err != nil {
+ return err
+ }
+ case <-a.ctx.Done():
+ }
+ }
+}
+
+// Antiquate will antiquate a specific block range (aka. retire snapshots), this should be ran in the background.
+func (a *Antiquary) antiquate(from, to uint64) error {
+ if a.downloader == nil {
+ return nil // Just skip if we don't have a downloader
+ }
+ log.Info("[Antiquary]: Antiquating", "from", from, "to", to)
+ if err := freezeblocks.DumpBeaconBlocks(a.ctx, a.mainDB, a.beaconDB, from, to, snaptype.Erigon2RecentMergeLimit, a.dirs.Tmp, a.dirs.Snap, 1, log.LvlDebug, a.logger); err != nil {
+ return err
+ }
+
+ roTx, err := a.mainDB.BeginRo(a.ctx)
+ if err != nil {
+ return err
+ }
+ defer roTx.Rollback()
+ if err := a.beaconDB.PurgeRange(a.ctx, roTx, from, to-from-1); err != nil {
+ return err
+ }
+ roTx.Rollback()
+ if err := a.sn.ReopenFolder(); err != nil {
+ return err
+ }
+
+ paths := a.sn.SegFilePaths(from, to)
+ downloadItems := make([]*proto_downloader.AddItem, len(paths))
+ for i, path := range paths {
+ downloadItems[i] = &proto_downloader.AddItem{
+ Path: path,
+ }
+ }
+ // Notify bittorent to seed the new snapshots
+ if _, err := a.downloader.Add(a.ctx, &proto_downloader.AddRequest{Items: downloadItems}); err != nil {
+ return err
+ }
+
+ tx, err := a.mainDB.BeginRw(a.ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ a.validatorsTable.SetSlot(to)
+ if err := beacon_indicies.WriteLastBeaconSnapshot(tx, to-1); err != nil {
+ return err
+ }
+ return tx.Commit()
+}
+
+func (a *Antiquary) NotifyBackfilled() {
+ // we set up the range for [lowestRawSlot, finalized]
+ a.backfilled.Store(true) // this is the lowest slot not in snapshots
+}
diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go
new file mode 100644
index 00000000000..3ae2a1dd3dc
--- /dev/null
+++ b/cl/antiquary/state_antiquary.go
@@ -0,0 +1,804 @@
+package antiquary
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/ledgerwatch/erigon-lib/common"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/etl"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/clparams/initial_state"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
+ "github.com/ledgerwatch/erigon/cl/transition"
+ "github.com/ledgerwatch/log/v3"
+)
+
+// pool for buffers
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+func excludeDuplicatesIdentity() etl.LoadFunc {
+ var prevKey, prevValue []byte
+ prevValue = []byte{}
+ return func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
+ if len(prevKey) == 0 {
+ prevKey = common.Copy(k)
+ prevValue = common.Copy(v)
+ return nil
+ }
+ if bytes.Equal(k, prevKey) {
+ prevValue = common.Copy(v)
+ return nil
+ }
+ if err := next(prevKey, prevKey, prevValue); err != nil {
+ return err
+ }
+ prevKey = common.Copy(k)
+ prevValue = common.Copy(v)
+ return nil
+ }
+}
+
+func (s *Antiquary) loopStates(ctx context.Context) {
+ // Execute this each second
+ reqRetryTimer := time.NewTicker(100 * time.Millisecond)
+ defer reqRetryTimer.Stop()
+ if !initial_state.IsGenesisStateSupported(clparams.NetworkType(s.cfg.DepositNetworkID)) {
+ s.logger.Warn("Genesis state is not supported for this network, no historical states data will be available")
+ return
+ }
+
+ for {
+ select {
+ // Check if we are behind finalized
+ case <-reqRetryTimer.C:
+ if !s.backfilled.Load() {
+ continue
+ }
+ // Check if we are behind finalized
+ progress, finalized, err := s.readHistoricalProcessingProgress(ctx)
+ if err != nil {
+ s.logger.Error("Failed to read historical processing progress", "err", err)
+ continue
+ }
+ // Stay behind a little bit we rely on forkchoice up until (finalized - 2*slotsPerEpoch)
+ if progress+s.cfg.SlotsPerEpoch/2 >= finalized {
+ continue
+ }
+ if err := s.IncrementBeaconState(ctx, finalized); err != nil {
+ slot := uint64(0)
+ if s.currentState != nil {
+ slot = s.currentState.Slot()
+ }
+ s.logger.Error("Failed to increment beacon state", "err", err, "slot", slot)
+ return
+ }
+
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (s *Antiquary) readHistoricalProcessingProgress(ctx context.Context) (progress, finalized uint64, err error) {
+ var tx kv.Tx
+ tx, err = s.mainDB.BeginRo(ctx)
+ if err != nil {
+ return
+ }
+ defer tx.Rollback()
+ progress, err = state_accessors.GetStateProcessingProgress(tx)
+ if err != nil {
+ return
+ }
+
+ finalized, err = beacon_indicies.ReadHighestFinalized(tx)
+ if err != nil {
+ return
+ }
+ return
+}
+
+func uint64BalancesList(s *state.CachingBeaconState, out []uint64) []uint64 {
+ if len(out) < s.ValidatorLength() {
+ out = make([]uint64, s.ValidatorLength())
+ }
+ out = out[:s.ValidatorLength()]
+
+ s.ForEachBalance(func(v uint64, index int, total int) bool {
+ out[index] = v
+ return true
+ })
+ return out
+}
+
+func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error {
+ var tx kv.Tx
+
+ tx, err := s.mainDB.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // maps which validators changes
+ changedValidators := make(map[uint64]struct{})
+
+ loadfunc := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
+ return next(k, k, v)
+ }
+
+ effectiveBalance := etl.NewCollector(kv.ValidatorEffectiveBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer effectiveBalance.Close()
+ balances := etl.NewCollector(kv.ValidatorBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer balances.Close()
+ randaoMixes := etl.NewCollector(kv.RandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer randaoMixes.Close()
+ intraRandaoMixes := etl.NewCollector(kv.IntraRandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer intraRandaoMixes.Close()
+ proposers := etl.NewCollector(kv.Proposers, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer proposers.Close()
+ slashings := etl.NewCollector(kv.ValidatorSlashings, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer slashings.Close()
+ blockRoots := etl.NewCollector(kv.BlockRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer blockRoots.Close()
+ stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer stateRoots.Close()
+ minimalBeaconStates := etl.NewCollector(kv.MinimalBeaconState, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer minimalBeaconStates.Close()
+ inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer inactivityScoresC.Close()
+ checkpoints := etl.NewCollector(kv.Checkpoints, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer checkpoints.Close()
+ nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer nextSyncCommittee.Close()
+ currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer currentSyncCommittee.Close()
+ currentEpochAttestations := etl.NewCollector(kv.CurrentEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer currentEpochAttestations.Close()
+ previousEpochAttestations := etl.NewCollector(kv.PreviousEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer previousEpochAttestations.Close()
+ eth1DataVotes := etl.NewCollector(kv.Eth1DataVotes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer eth1DataVotes.Close()
+ stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ defer stateEvents.Close()
+
+ progress, err := state_accessors.GetStateProcessingProgress(tx)
+ if err != nil {
+ return err
+ }
+ // Go back a little bit
+ if progress > s.cfg.SlotsPerEpoch*2 {
+ progress -= s.cfg.SlotsPerEpoch * 2
+ } else {
+ progress = 0
+ }
+ progress, err = findNearestSlotBackwards(tx, s.cfg, progress) // Maybe the guess was a missed slot.
+ if err != nil {
+ return err
+ }
+ // buffers
+ commonBuffer := &bytes.Buffer{}
+ compressedWriter, err := zstd.NewWriter(commonBuffer, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
+ if err != nil {
+ return err
+ }
+ defer compressedWriter.Close()
+ // TODO(Giulio2002): also store genesis information and resume from state.
+ if s.currentState == nil {
+ // progress is 0 when we are at genesis
+ if progress == 0 {
+ s.currentState, err = s.genesisState.Copy()
+ if err != nil {
+ return err
+ }
+ // Collect genesis state if we are at genesis
+ if err := s.collectGenesisState(ctx, compressedWriter, s.currentState, slashings, inactivityScoresC, proposers, minimalBeaconStates, stateEvents, changedValidators); err != nil {
+ return err
+ }
+ s.balances32 = append(s.balances32, s.currentState.RawBalances()...)
+ } else {
+ start := time.Now()
+ // progress not 0? we need to load the state from the DB
+ historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.fs, s.genesisState)
+ s.currentState, err = historicalReader.ReadHistoricalState(ctx, tx, progress)
+ if err != nil {
+ return fmt.Errorf("failed to read historical state at slot %d: %w", progress, err)
+ }
+ end := time.Since(start)
+ hashRoot, err := s.currentState.HashSSZ()
+ if err != nil {
+ return err
+ }
+ log.Info("Recovered Beacon State", "slot", s.currentState.Slot(), "elapsed", end, "root", libcommon.Hash(hashRoot).String())
+ s.balances32 = append(s.balances32, s.currentState.RawBalances()...)
+ }
+ }
+
+ logLvl := log.LvlInfo
+ if to-s.currentState.Slot() < 96 {
+ logLvl = log.LvlDebug
+ }
+ start := time.Now()
+
+ // Use this as the event slot (it will be incremented by 1 each time we process a block)
+ slot := s.currentState.Slot() + 1
+
+ var prevBalances, prevValSet []byte
+ events := state_accessors.NewStateEvents()
+ slashingOccured := false
+ // var validatorStaticState
+ // var validatorStaticState map[uint64]*state.ValidatorStatic
+ // Setup state events handlers
+ s.currentState.SetEvents(raw.Events{
+ OnNewSlashingSegment: func(index int, segment uint64) error {
+ slashingOccured = true
+ return nil
+ },
+ OnRandaoMixChange: func(index int, mix [32]byte) error {
+ return intraRandaoMixes.Collect(base_encoding.Encode64ToBytes4(slot), mix[:])
+ },
+ OnNewValidator: func(index int, v solid.Validator, balance uint64) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.AddValidator(uint64(index), v)
+ return s.validatorsTable.AddValidator(v, uint64(index), slot)
+ },
+ OnNewValidatorActivationEpoch: func(index int, epoch uint64) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeActivationEpoch(uint64(index), epoch)
+ return s.validatorsTable.AddActivationEpoch(uint64(index), slot, epoch)
+ },
+ OnNewValidatorExitEpoch: func(index int, epoch uint64) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeExitEpoch(uint64(index), epoch)
+ return s.validatorsTable.AddExitEpoch(uint64(index), slot, epoch)
+ },
+ OnNewValidatorWithdrawableEpoch: func(index int, epoch uint64) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeWithdrawableEpoch(uint64(index), epoch)
+ return s.validatorsTable.AddWithdrawableEpoch(uint64(index), slot, epoch)
+ },
+ OnNewValidatorSlashed: func(index int, newSlashed bool) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeSlashed(uint64(index), newSlashed)
+ return s.validatorsTable.AddSlashed(uint64(index), slot, newSlashed)
+ },
+ OnNewValidatorActivationEligibilityEpoch: func(index int, epoch uint64) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeActivationEligibilityEpoch(uint64(index), epoch)
+ return s.validatorsTable.AddActivationEligibility(uint64(index), slot, epoch)
+ },
+ OnNewValidatorWithdrawalCredentials: func(index int, wc []byte) error {
+ changedValidators[uint64(index)] = struct{}{}
+ events.ChangeWithdrawalCredentials(uint64(index), libcommon.BytesToHash(wc))
+ return s.validatorsTable.AddWithdrawalCredentials(uint64(index), slot, libcommon.BytesToHash(wc))
+ },
+ OnEpochBoundary: func(epoch uint64) error {
+ k := base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot))
+ v := make([]byte, solid.CheckpointSize*3)
+ copy(v, s.currentState.CurrentJustifiedCheckpoint())
+ copy(v[solid.CheckpointSize:], s.currentState.PreviousJustifiedCheckpoint())
+ copy(v[solid.CheckpointSize*2:], s.currentState.FinalizedCheckpoint())
+ if err := checkpoints.Collect(k, v); err != nil {
+ return err
+ }
+ prevEpoch := epoch - 1
+ mix := s.currentState.GetRandaoMixes(prevEpoch)
+ if err := randaoMixes.Collect(base_encoding.Encode64ToBytes4(prevEpoch*s.cfg.SlotsPerEpoch), mix[:]); err != nil {
+ return err
+ }
+ // truncate the file
+ return proposers.Collect(base_encoding.Encode64ToBytes4(epoch), getProposerDutiesValue(s.currentState))
+ },
+ OnNewBlockRoot: func(index int, root common.Hash) error {
+ return blockRoots.Collect(base_encoding.Encode64ToBytes4(s.currentState.Slot()), root[:])
+ },
+ OnNewStateRoot: func(index int, root common.Hash) error {
+ return stateRoots.Collect(base_encoding.Encode64ToBytes4(s.currentState.Slot()), root[:])
+ },
+ OnNewNextSyncCommittee: func(committee *solid.SyncCommittee) error {
+ roundedSlot := s.cfg.RoundSlotToSyncCommitteePeriod(slot)
+ return nextSyncCommittee.Collect(base_encoding.Encode64ToBytes4(roundedSlot), committee[:])
+ },
+ OnNewCurrentSyncCommittee: func(committee *solid.SyncCommittee) error {
+ roundedSlot := s.cfg.RoundSlotToSyncCommitteePeriod(slot)
+ return currentSyncCommittee.Collect(base_encoding.Encode64ToBytes4(roundedSlot), committee[:])
+ },
+ OnAppendEth1Data: func(data *cltypes.Eth1Data) error {
+ vote, err := data.EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ return eth1DataVotes.Collect(base_encoding.Encode64ToBytes4(slot), vote)
+ },
+ })
+ log.Log(logLvl, "Starting state processing", "from", slot, "to", to)
+ // Set up a timer to log progress
+ progressTimer := time.NewTicker(1 * time.Minute)
+ defer progressTimer.Stop()
+ prevSlot := slot
+ first := false
+ // This tells us that transition and operations do not happen concurrently and access is safe, so we can optimize for GC.
+ // there is optimized custom cache to recycle big GC overhead.
+ for ; slot < to; slot++ {
+ slashingOccured = false // Set this to false at the beginning of each slot.
+ key := base_encoding.Encode64ToBytes4(slot)
+
+ isDumpSlot := slot%clparams.SlotsPerDump == 0
+ block, err := s.snReader.ReadBlockBySlot(ctx, tx, slot)
+ if err != nil {
+ return err
+ }
+ prevValidatorSetLength := s.currentState.ValidatorLength()
+ prevEpoch := state.Epoch(s.currentState)
+
+ if slot%s.cfg.SlotsPerEpoch == 0 && s.currentState.Version() == clparams.Phase0Version {
+ encoded, err := s.currentState.CurrentEpochAttestations().EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ if err := s.dumpPayload(base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot-1)), encoded, currentEpochAttestations, commonBuffer, compressedWriter); err != nil {
+ return err
+ }
+ encoded, err = s.currentState.PreviousEpochAttestations().EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ if err := s.dumpPayload(base_encoding.Encode64ToBytes4(s.cfg.RoundSlotToEpoch(slot-1)), encoded, previousEpochAttestations, commonBuffer, compressedWriter); err != nil {
+ return err
+ }
+ }
+
+ // If we have a missed block, we just skip it.
+ if block == nil {
+ if isDumpSlot {
+ if err := s.antiquateField(ctx, slot, s.currentState.RawBalances(), compressedWriter, "balances"); err != nil {
+ return err
+ }
+ if err := s.antiquateEffectiveBalances(ctx, slot, s.currentState.RawValidatorSet(), compressedWriter); err != nil {
+ return err
+ }
+ } else if slot%s.cfg.SlotsPerEpoch == 0 {
+ if err := s.antiquateBytesListDiff(ctx, key, s.balances32, s.currentState.RawBalances(), balances, base_encoding.ComputeCompressedSerializedUint64ListDiff); err != nil {
+ return err
+ }
+ s.balances32 = s.balances32[:0]
+ s.balances32 = append(s.balances32, s.currentState.RawBalances()...)
+ }
+
+ continue
+ }
+ // We now compute the difference between the two balances.
+ prevBalances = prevBalances[:0]
+ prevBalances = append(prevBalances, s.currentState.RawBalances()...)
+ prevValSet = prevValSet[:0]
+ prevValSet = append(prevValSet, s.currentState.RawValidatorSet()...)
+
+ fullValidation := slot%100_000 == 0 || first
+ // We sanity check the state every 100k slots or when we start.
+ if err := transition.TransitionState(s.currentState, block, fullValidation); err != nil {
+ return err
+ }
+ first = false
+
+ // dump the whole sla
+ if slashingOccured {
+ if err := s.antiquateFullUint64List(slashings, slot, s.currentState.RawSlashings(), commonBuffer, compressedWriter); err != nil {
+ return err
+ }
+ }
+
+ if err := s.storeMinimalState(commonBuffer, s.currentState, minimalBeaconStates); err != nil {
+ return err
+ }
+ if err := stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes()); err != nil {
+ return err
+ }
+ events.Reset()
+
+ if isDumpSlot {
+ if err := s.antiquateField(ctx, slot, s.currentState.RawBalances(), compressedWriter, "balances"); err != nil {
+ return err
+ }
+ if err := s.antiquateEffectiveBalances(ctx, slot, s.currentState.RawValidatorSet(), compressedWriter); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // antiquate diffs
+ isEpochCrossed := prevEpoch != state.Epoch(s.currentState)
+ if slot%s.cfg.SlotsPerEpoch == 0 {
+ if err := s.antiquateBytesListDiff(ctx, key, s.balances32, s.currentState.RawBalances(), balances, base_encoding.ComputeCompressedSerializedUint64ListDiff); err != nil {
+ return err
+ }
+ s.balances32 = s.balances32[:0]
+ s.balances32 = append(s.balances32, s.currentState.RawBalances()...)
+ } else if err := s.antiquateBytesListDiff(ctx, key, prevBalances, s.currentState.RawBalances(), balances, base_encoding.ComputeCompressedSerializedUint64ListDiff); err != nil {
+ return err
+ }
+ if prevValidatorSetLength != s.currentState.ValidatorLength() || isEpochCrossed {
+ if err := s.antiquateBytesListDiff(ctx, key, prevValSet, s.currentState.RawValidatorSet(), effectiveBalance, base_encoding.ComputeCompressedSerializedEffectiveBalancesDiff); err != nil {
+ return err
+ }
+ if s.currentState.Version() >= clparams.AltairVersion {
+ if err := s.antiquateFullUint64List(inactivityScoresC, slot, s.currentState.RawInactivityScores(), commonBuffer, compressedWriter); err != nil {
+ return err
+ }
+ }
+ }
+ // We now do some post-processing on the state.
+ select {
+ case <-progressTimer.C:
+ log.Log(logLvl, "State processing progress", "slot", slot, "blk/sec", fmt.Sprintf("%.2f", float64(slot-prevSlot)/60))
+ prevSlot = slot
+ default:
+ }
+ }
+ tx.Rollback()
+
+ log.Debug("Finished beacon state iteration", "elapsed", time.Since(start))
+
+ rwTx, err := s.mainDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer rwTx.Rollback()
+ start = time.Now()
+ log.Log(logLvl, "Stopped Caplin to load states")
+ // Now load.
+ if err := effectiveBalance.Load(rwTx, kv.ValidatorEffectiveBalance, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := randaoMixes.Load(rwTx, kv.RandaoMixes, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := balances.Load(rwTx, kv.ValidatorBalance, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := proposers.Load(rwTx, kv.Proposers, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := slashings.Load(rwTx, kv.ValidatorSlashings, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := blockRoots.Load(rwTx, kv.BlockRoot, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := stateRoots.Load(rwTx, kv.StateRoot, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := minimalBeaconStates.Load(rwTx, kv.MinimalBeaconState, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := inactivityScoresC.Load(rwTx, kv.InactivityScores, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := intraRandaoMixes.Load(rwTx, kv.IntraRandaoMixes, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := checkpoints.Load(rwTx, kv.Checkpoints, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := nextSyncCommittee.Load(rwTx, kv.NextSyncCommittee, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := currentSyncCommittee.Load(rwTx, kv.CurrentSyncCommittee, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := currentEpochAttestations.Load(rwTx, kv.CurrentEpochAttestations, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := previousEpochAttestations.Load(rwTx, kv.PreviousEpochAttestations, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+
+ if err := eth1DataVotes.Load(rwTx, kv.Eth1DataVotes, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+ if err := stateEvents.Load(rwTx, kv.StateEvents, loadfunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+ if err := state_accessors.SetStateProcessingProgress(rwTx, s.currentState.Slot()); err != nil {
+ return err
+ }
+ s.validatorsTable.SetSlot(s.currentState.Slot())
+
+ s.validatorsTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool {
+ if _, ok := changedValidators[validatorIndex]; !ok {
+ return true
+ }
+ commonBuffer.Reset()
+ if err = validator.WriteTo(commonBuffer); err != nil {
+ return false
+ }
+ if err = rwTx.Put(kv.StaticValidators, base_encoding.Encode64ToBytes4(validatorIndex), common.Copy(commonBuffer.Bytes())); err != nil {
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+ if err := rwTx.Commit(); err != nil {
+ return err
+ }
+ endTime := time.Since(start)
+ stateRoot, err := s.currentState.HashSSZ()
+ if err != nil {
+ return err
+ }
+ log.Info("Historical states antiquated", "slot", s.currentState.Slot(), "root", libcommon.Hash(stateRoot), "latency", endTime)
+ return nil
+}
+
+func (s *Antiquary) antiquateField(ctx context.Context, slot uint64, uncompressed []byte, compressor *zstd.Encoder, name string) error {
+ folderPath, filePath := clparams.EpochToPaths(slot, s.cfg, name)
+ _ = s.fs.MkdirAll(folderPath, 0o755)
+
+ balancesFile, err := s.fs.OpenFile(filePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return err
+ }
+ defer balancesFile.Close()
+ compressor.Reset(balancesFile)
+
+ if err := binary.Write(balancesFile, binary.LittleEndian, uint64(len(uncompressed))); err != nil {
+ return err
+ }
+
+ if _, err := compressor.Write(uncompressed); err != nil {
+ return err
+ }
+
+ if err := compressor.Close(); err != nil {
+ return err
+ }
+ return balancesFile.Sync()
+}
+
+func (s *Antiquary) antiquateEffectiveBalances(ctx context.Context, slot uint64, uncompressed []byte, compressor *zstd.Encoder) error {
+ folderPath, filePath := clparams.EpochToPaths(slot, s.cfg, "effective_balances")
+ _ = s.fs.MkdirAll(folderPath, 0o755)
+
+ balancesFile, err := s.fs.OpenFile(filePath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return err
+ }
+ defer balancesFile.Close()
+ compressor.Reset(balancesFile)
+ validatorSetSize := 121
+
+ if err := binary.Write(balancesFile, binary.LittleEndian, uint64((len(uncompressed)/validatorSetSize)*8)); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(uncompressed)/validatorSetSize; i++ {
+ // 80:88
+ if _, err := compressor.Write(uncompressed[i*validatorSetSize+80 : i*validatorSetSize+88]); err != nil {
+ return err
+ }
+ }
+
+ if err := compressor.Close(); err != nil {
+ return err
+ }
+ return balancesFile.Sync()
+}
+
+func (s *Antiquary) antiquateBytesListDiff(ctx context.Context, key []byte, old, new []byte, collector *etl.Collector, diffFn func(w io.Writer, old, new []byte) error) error {
+ // create a diff
+ diffBuffer := bufferPool.Get().(*bytes.Buffer)
+ defer bufferPool.Put(diffBuffer)
+ diffBuffer.Reset()
+
+ if err := diffFn(diffBuffer, old, new); err != nil {
+ return err
+ }
+
+ return collector.Collect(key, common.Copy(diffBuffer.Bytes()))
+}
+
+func getProposerDutiesValue(s *state.CachingBeaconState) []byte {
+ epoch := state.Epoch(s)
+ var wg sync.WaitGroup
+ list := make([]byte, s.BeaconConfig().SlotsPerEpoch*4)
+ for slot := s.Slot(); slot < s.Slot()+s.BeaconConfig().SlotsPerEpoch; slot++ {
+ var proposerIndex uint64
+ // Lets do proposer index computation
+ mixPosition := (epoch + s.BeaconConfig().EpochsPerHistoricalVector - s.BeaconConfig().MinSeedLookahead - 1) %
+ s.BeaconConfig().EpochsPerHistoricalVector
+ // Input for the seed hash.
+ mix := s.GetRandaoMix(int(mixPosition))
+ input := shuffling.GetSeed(s.BeaconConfig(), mix, epoch, s.BeaconConfig().DomainBeaconProposer)
+ slotByteArray := make([]byte, 8)
+ binary.LittleEndian.PutUint64(slotByteArray, slot)
+
+ // Add slot to the end of the input.
+ inputWithSlot := append(input[:], slotByteArray...)
+ hash := sha256.New()
+
+ // Calculate the hash.
+ hash.Write(inputWithSlot)
+ seed := hash.Sum(nil)
+
+ indices := s.GetActiveValidatorsIndices(epoch)
+
+ // Write the seed to an array.
+ seedArray := [32]byte{}
+ copy(seedArray[:], seed)
+ wg.Add(1)
+
+ // Do it in parallel
+ go func(i, slot uint64, indicies []uint64, seedArray [32]byte) {
+ defer wg.Done()
+ var err error
+ proposerIndex, err = shuffling.ComputeProposerIndex(s.BeaconState, indices, seedArray)
+ if err != nil {
+ panic(err)
+ }
+ binary.BigEndian.PutUint32(list[i*4:(i+1)*4], uint32(proposerIndex))
+ }(slot-s.Slot(), slot, indices, seedArray)
+ }
+ wg.Wait()
+ return list
+}
+
+func (s *Antiquary) collectGenesisState(ctx context.Context, compressor *zstd.Encoder, state *state.CachingBeaconState, slashings, inactivities, proposersCollector, minimalBeaconStateCollector, stateEvents *etl.Collector, changedValidators map[uint64]struct{}) error {
+ var err error
+ slot := state.Slot()
+ epoch := slot / s.cfg.SlotsPerEpoch
+ // Setup state events handlers
+ if err := proposersCollector.Collect(base_encoding.Encode64ToBytes4(epoch), getProposerDutiesValue(s.currentState)); err != nil {
+ return err
+ }
+
+ events := state_accessors.NewStateEvents()
+
+ state.ForEachValidator(func(v solid.Validator, index, total int) bool {
+ changedValidators[uint64(index)] = struct{}{}
+ if err = s.validatorsTable.AddValidator(v, uint64(index), 0); err != nil {
+ return false
+ }
+ events.AddValidator(uint64(index), v)
+ return true
+ })
+ if err != nil {
+ return err
+ }
+ roundedSlotToDump := slot - (slot % clparams.SlotsPerDump)
+ if err := s.antiquateField(ctx, roundedSlotToDump, s.currentState.RawBalances(), compressor, "balances"); err != nil {
+ return err
+ }
+ if err := s.antiquateEffectiveBalances(ctx, roundedSlotToDump, s.currentState.RawValidatorSet(), compressor); err != nil {
+ return err
+ }
+ var commonBuffer bytes.Buffer
+ if err := s.antiquateFullUint64List(slashings, roundedSlotToDump, s.currentState.RawSlashings(), &commonBuffer, compressor); err != nil {
+ return err
+ }
+
+ if state.Version() >= clparams.AltairVersion {
+ // dump inactivity scores
+ if err := s.antiquateFullUint64List(inactivities, slot, state.RawInactivityScores(), &commonBuffer, compressor); err != nil {
+ return err
+ }
+ }
+
+ var b bytes.Buffer
+ if err := s.storeMinimalState(&b, state, minimalBeaconStateCollector); err != nil {
+ return err
+ }
+
+ return stateEvents.Collect(base_encoding.Encode64ToBytes4(slot), events.CopyBytes())
+}
+
+func (s *Antiquary) storeMinimalState(buffer *bytes.Buffer, st *state.CachingBeaconState, collector *etl.Collector) error {
+ buffer.Reset()
+ minimalBeaconState := state_accessors.MinimalBeaconStateFromBeaconState(st.BeaconState)
+
+ if err := minimalBeaconState.WriteTo(buffer); err != nil {
+ return err
+ }
+ return collector.Collect(base_encoding.Encode64ToBytes4(st.Slot()), buffer.Bytes())
+}
+
+func (s *Antiquary) dumpPayload(k []byte, v []byte, c *etl.Collector, b *bytes.Buffer, compressor *zstd.Encoder) error {
+ if compressor == nil {
+ return c.Collect(k, v)
+ }
+ b.Reset()
+ compressor.Reset(b)
+
+ if _, err := compressor.Write(v); err != nil {
+ return err
+ }
+ if err := compressor.Close(); err != nil {
+ return err
+ }
+ return c.Collect(k, common.Copy(b.Bytes()))
+}
+
+// func (s *Antiquary) dumpFullBeaconState() {
+// b, err := s.currentState.EncodeSSZ(nil)
+// if err != nil {
+// s.logger.Error("Failed to encode full beacon state", "err", err)
+// return
+// }
+// // just dump it in a.txt like an idiot without afero
+// if err := os.WriteFile("b.txt", b, 0644); err != nil {
+// s.logger.Error("Failed to write full beacon state", "err", err)
+// }
+
+// }
+
+func flattenRandaoMixes(hashes []libcommon.Hash) []byte {
+ out := make([]byte, len(hashes)*32)
+ for i, h := range hashes {
+ copy(out[i*32:(i+1)*32], h[:])
+ }
+ return out
+}
+
+// antiquateFullSlashings goes on mdbx as it is full of common repeated patter always and thus fits with 16KB pages.
+func (s *Antiquary) antiquateFullUint64List(collector *etl.Collector, slot uint64, raw []byte, buffer *bytes.Buffer, compressor *zstd.Encoder) error {
+ buffer.Reset()
+ compressor.Reset(buffer)
+ if _, err := compressor.Write(raw); err != nil {
+ return err
+ }
+ if err := compressor.Close(); err != nil {
+ return err
+ }
+ return collector.Collect(base_encoding.Encode64ToBytes4(slot), common.Copy(buffer.Bytes()))
+}
+
+func findNearestSlotBackwards(tx kv.Tx, cfg *clparams.BeaconChainConfig, slot uint64) (uint64, error) {
+ canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, slot)
+ if err != nil {
+ return 0, err
+ }
+ for (canonicalRoot == (common.Hash{}) && slot > 0) || slot%cfg.SlotsPerEpoch != 0 {
+ slot--
+ canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, slot)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return slot, nil
+}
diff --git a/cl/antiquary/state_antiquary_test.go b/cl/antiquary/state_antiquary_test.go
new file mode 100644
index 00000000000..52805c12fe2
--- /dev/null
+++ b/cl/antiquary/state_antiquary_test.go
@@ -0,0 +1,50 @@
+package antiquary
+
+import (
+ "context"
+ _ "embed"
+ "fmt"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/cl/antiquary/tests"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/spf13/afero"
+ "github.com/stretchr/testify/require"
+)
+
+func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) {
+ db := memdb.NewTestDB(t)
+ reader := tests.LoadChain(blocks, db)
+
+ ctx := context.Background()
+ vt := state_accessors.NewStaticValidatorTable()
+ f := afero.NewMemMapFs()
+ a := NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f)
+ require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33))
+ // TODO: add more meaning here, like checking db values, will do so once i see some bugs
+}
+
+func TestStateAntiquaryCapella(t *testing.T) {
+ t.Skip()
+ blocks, preState, postState := tests.GetCapellaRandom()
+ runTest(t, blocks, preState, postState)
+}
+
+func TestStateAntiquaryBellatrix(t *testing.T) {
+ t.Skip()
+ blocks, preState, postState := tests.GetBellatrixRandom()
+ fmt.Println(len(blocks))
+ runTest(t, blocks, preState, postState)
+}
+
+func TestStateAntiquaryPhase0(t *testing.T) {
+ t.Skip()
+ blocks, preState, postState := tests.GetPhase0Random()
+ runTest(t, blocks, preState, postState)
+}
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_0.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_0.ssz_snappy
new file mode 100644
index 00000000000..ddb86265417
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_0.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_1.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_1.ssz_snappy
new file mode 100644
index 00000000000..338d0aa9f5c
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_1.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_10.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_10.ssz_snappy
new file mode 100644
index 00000000000..f15263647c4
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_10.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_11.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_11.ssz_snappy
new file mode 100644
index 00000000000..2480b5d7ed3
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_11.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_12.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_12.ssz_snappy
new file mode 100644
index 00000000000..0ff0fabc2ad
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_12.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_13.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_13.ssz_snappy
new file mode 100644
index 00000000000..802619428d7
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_13.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_14.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_14.ssz_snappy
new file mode 100644
index 00000000000..e7f70750845
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_14.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_15.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_15.ssz_snappy
new file mode 100644
index 00000000000..b9d5ef35308
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_15.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_16.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_16.ssz_snappy
new file mode 100644
index 00000000000..29d509c9a61
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_16.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_17.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_17.ssz_snappy
new file mode 100644
index 00000000000..3f976bd89e9
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_17.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_18.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_18.ssz_snappy
new file mode 100644
index 00000000000..39f66b4d87f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_18.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_19.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_19.ssz_snappy
new file mode 100644
index 00000000000..5ee1b7ae12b
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_19.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_2.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_2.ssz_snappy
new file mode 100644
index 00000000000..e5d9c477572
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_2.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_20.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_20.ssz_snappy
new file mode 100644
index 00000000000..09979630358
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_20.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_21.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_21.ssz_snappy
new file mode 100644
index 00000000000..b611fe260ee
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_21.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_22.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_22.ssz_snappy
new file mode 100644
index 00000000000..7f97dafd851
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_22.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_23.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_23.ssz_snappy
new file mode 100644
index 00000000000..74f348df968
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_23.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_24.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_24.ssz_snappy
new file mode 100644
index 00000000000..c0a4e649b1c
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_24.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_25.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_25.ssz_snappy
new file mode 100644
index 00000000000..83ec1717069
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_25.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_26.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_26.ssz_snappy
new file mode 100644
index 00000000000..4299bb3cd17
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_26.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_27.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_27.ssz_snappy
new file mode 100644
index 00000000000..6635bb8ae6d
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_27.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_28.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_28.ssz_snappy
new file mode 100644
index 00000000000..5b31157fd7b
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_28.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_29.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_29.ssz_snappy
new file mode 100644
index 00000000000..b9cb2b91feb
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_29.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_3.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_3.ssz_snappy
new file mode 100644
index 00000000000..93448ab0b95
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_3.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_30.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_30.ssz_snappy
new file mode 100644
index 00000000000..ef1c4a4f625
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_30.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_31.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_31.ssz_snappy
new file mode 100644
index 00000000000..aaf877bb3ec
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_31.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_32.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_32.ssz_snappy
new file mode 100644
index 00000000000..ee19e4ef320
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_32.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_33.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_33.ssz_snappy
new file mode 100644
index 00000000000..cb6b0514d2f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_33.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_34.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_34.ssz_snappy
new file mode 100644
index 00000000000..9f7d82de01f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_34.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_35.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_35.ssz_snappy
new file mode 100644
index 00000000000..942d790bcab
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_35.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_36.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_36.ssz_snappy
new file mode 100644
index 00000000000..fe7bfd23487
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_36.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_37.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_37.ssz_snappy
new file mode 100644
index 00000000000..3de6efbf78e
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_37.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_38.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_38.ssz_snappy
new file mode 100644
index 00000000000..b79724bba01
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_38.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_39.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_39.ssz_snappy
new file mode 100644
index 00000000000..9cbebe9fef2
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_39.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_4.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_4.ssz_snappy
new file mode 100644
index 00000000000..ff1c6ebb6a9
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_4.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_40.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_40.ssz_snappy
new file mode 100644
index 00000000000..7a63b458fa0
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_40.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_41.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_41.ssz_snappy
new file mode 100644
index 00000000000..77fc5a1f5b9
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_41.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_42.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_42.ssz_snappy
new file mode 100644
index 00000000000..7231136bb3b
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_42.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_43.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_43.ssz_snappy
new file mode 100644
index 00000000000..3e8eb3d2aea
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_43.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_44.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_44.ssz_snappy
new file mode 100644
index 00000000000..fb6af616f2e
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_44.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_45.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_45.ssz_snappy
new file mode 100644
index 00000000000..fe544a77ae1
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_45.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_46.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_46.ssz_snappy
new file mode 100644
index 00000000000..9455c0c00bc
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_46.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_47.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_47.ssz_snappy
new file mode 100644
index 00000000000..998b1cdd4ab
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_47.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_48.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_48.ssz_snappy
new file mode 100644
index 00000000000..95dc1add11a
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_48.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_49.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_49.ssz_snappy
new file mode 100644
index 00000000000..b1b9f4a5320
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_49.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_5.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_5.ssz_snappy
new file mode 100644
index 00000000000..70322b97f8f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_5.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_50.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_50.ssz_snappy
new file mode 100644
index 00000000000..d1878140277
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_50.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_51.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_51.ssz_snappy
new file mode 100644
index 00000000000..bb247b4edcc
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_51.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_52.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_52.ssz_snappy
new file mode 100644
index 00000000000..4591a85f7ca
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_52.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_53.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_53.ssz_snappy
new file mode 100644
index 00000000000..16ae7cf07e6
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_53.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_54.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_54.ssz_snappy
new file mode 100644
index 00000000000..ce039121c6d
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_54.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_55.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_55.ssz_snappy
new file mode 100644
index 00000000000..cfd569604fb
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_55.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_56.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_56.ssz_snappy
new file mode 100644
index 00000000000..c3ff7830aae
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_56.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_57.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_57.ssz_snappy
new file mode 100644
index 00000000000..a96e3bc83d5
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_57.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_58.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_58.ssz_snappy
new file mode 100644
index 00000000000..e377414e3f8
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_58.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_59.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_59.ssz_snappy
new file mode 100644
index 00000000000..30eb999215f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_59.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_6.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_6.ssz_snappy
new file mode 100644
index 00000000000..443eb6f6852
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_6.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_60.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_60.ssz_snappy
new file mode 100644
index 00000000000..c5abd4e7052
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_60.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_61.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_61.ssz_snappy
new file mode 100644
index 00000000000..15f0c0eba5e
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_61.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_62.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_62.ssz_snappy
new file mode 100644
index 00000000000..6329d0b0b7f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_62.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_63.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_63.ssz_snappy
new file mode 100644
index 00000000000..9a788865bc8
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_63.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_64.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_64.ssz_snappy
new file mode 100644
index 00000000000..6211aef59c9
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_64.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_65.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_65.ssz_snappy
new file mode 100644
index 00000000000..9f9fe354302
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_65.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_66.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_66.ssz_snappy
new file mode 100644
index 00000000000..2d8d6410aa1
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_66.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_67.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_67.ssz_snappy
new file mode 100644
index 00000000000..a7d59ebd37d
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_67.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_68.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_68.ssz_snappy
new file mode 100644
index 00000000000..ba24c242ba0
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_68.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_69.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_69.ssz_snappy
new file mode 100644
index 00000000000..d0ee1bd34d3
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_69.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_7.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_7.ssz_snappy
new file mode 100644
index 00000000000..d0270aab04d
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_7.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_70.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_70.ssz_snappy
new file mode 100644
index 00000000000..589aaa2bbe3
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_70.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_71.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_71.ssz_snappy
new file mode 100644
index 00000000000..0c0c3dac0c3
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_71.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_72.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_72.ssz_snappy
new file mode 100644
index 00000000000..31600d4849b
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_72.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_73.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_73.ssz_snappy
new file mode 100644
index 00000000000..5e2b0eea6af
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_73.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_74.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_74.ssz_snappy
new file mode 100644
index 00000000000..08404e98615
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_74.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_75.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_75.ssz_snappy
new file mode 100644
index 00000000000..31260532e91
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_75.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_76.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_76.ssz_snappy
new file mode 100644
index 00000000000..6f8e00d1a53
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_76.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_77.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_77.ssz_snappy
new file mode 100644
index 00000000000..cc9c4c6f043
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_77.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_78.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_78.ssz_snappy
new file mode 100644
index 00000000000..ab1a71ce711
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_78.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_79.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_79.ssz_snappy
new file mode 100644
index 00000000000..5c0aa21445d
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_79.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_8.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_8.ssz_snappy
new file mode 100644
index 00000000000..13fa263a79f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_8.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_80.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_80.ssz_snappy
new file mode 100644
index 00000000000..12f4c18084c
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_80.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_81.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_81.ssz_snappy
new file mode 100644
index 00000000000..0625f98e73c
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_81.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_82.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_82.ssz_snappy
new file mode 100644
index 00000000000..a73e8ba3abb
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_82.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_83.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_83.ssz_snappy
new file mode 100644
index 00000000000..c85e4aa1598
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_83.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_84.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_84.ssz_snappy
new file mode 100644
index 00000000000..a476b0929b6
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_84.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_85.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_85.ssz_snappy
new file mode 100644
index 00000000000..f097123ef54
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_85.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_86.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_86.ssz_snappy
new file mode 100644
index 00000000000..7b3eddf3f8c
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_86.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_87.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_87.ssz_snappy
new file mode 100644
index 00000000000..638bafe5d61
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_87.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_88.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_88.ssz_snappy
new file mode 100644
index 00000000000..ef553a34db2
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_88.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_89.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_89.ssz_snappy
new file mode 100644
index 00000000000..1591667ac26
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_89.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_9.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_9.ssz_snappy
new file mode 100644
index 00000000000..b4c3dcfd693
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_9.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_90.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_90.ssz_snappy
new file mode 100644
index 00000000000..373746aa808
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_90.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_91.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_91.ssz_snappy
new file mode 100644
index 00000000000..fc01404a9c6
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_91.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_92.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_92.ssz_snappy
new file mode 100644
index 00000000000..cd851715494
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_92.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_93.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_93.ssz_snappy
new file mode 100644
index 00000000000..636ade4791f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_93.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_94.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_94.ssz_snappy
new file mode 100644
index 00000000000..b07de428a13
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_94.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/blocks_95.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/blocks_95.ssz_snappy
new file mode 100644
index 00000000000..7cbf6df9079
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/blocks_95.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/meta.yaml b/cl/antiquary/tests/test_data/bellatrix/meta.yaml
new file mode 100644
index 00000000000..46caff2710c
--- /dev/null
+++ b/cl/antiquary/tests/test_data/bellatrix/meta.yaml
@@ -0,0 +1 @@
+{blocks_count: 96}
diff --git a/cl/antiquary/tests/test_data/bellatrix/post.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/post.ssz_snappy
new file mode 100644
index 00000000000..b3bc26c91c2
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/post.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/bellatrix/pre.ssz_snappy b/cl/antiquary/tests/test_data/bellatrix/pre.ssz_snappy
new file mode 100644
index 00000000000..ee9a847b53f
Binary files /dev/null and b/cl/antiquary/tests/test_data/bellatrix/pre.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/capella/blocks_0.ssz_snappy b/cl/antiquary/tests/test_data/capella/blocks_0.ssz_snappy
new file mode 100644
index 00000000000..15640e26fa4
Binary files /dev/null and b/cl/antiquary/tests/test_data/capella/blocks_0.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/capella/blocks_1.ssz_snappy b/cl/antiquary/tests/test_data/capella/blocks_1.ssz_snappy
new file mode 100644
index 00000000000..8d3683ef56a
Binary files /dev/null and b/cl/antiquary/tests/test_data/capella/blocks_1.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/capella/meta.yaml b/cl/antiquary/tests/test_data/capella/meta.yaml
new file mode 100644
index 00000000000..330980a92b8
--- /dev/null
+++ b/cl/antiquary/tests/test_data/capella/meta.yaml
@@ -0,0 +1 @@
+{blocks_count: 2, bls_setting: 1}
diff --git a/cl/antiquary/tests/test_data/capella/post.ssz_snappy b/cl/antiquary/tests/test_data/capella/post.ssz_snappy
new file mode 100644
index 00000000000..c51c8d06f28
Binary files /dev/null and b/cl/antiquary/tests/test_data/capella/post.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/capella/pre.ssz_snappy b/cl/antiquary/tests/test_data/capella/pre.ssz_snappy
new file mode 100644
index 00000000000..04b981bd0d8
Binary files /dev/null and b/cl/antiquary/tests/test_data/capella/pre.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/phase0/blocks_0.ssz_snappy b/cl/antiquary/tests/test_data/phase0/blocks_0.ssz_snappy
new file mode 100644
index 00000000000..716790ea589
Binary files /dev/null and b/cl/antiquary/tests/test_data/phase0/blocks_0.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/phase0/blocks_1.ssz_snappy b/cl/antiquary/tests/test_data/phase0/blocks_1.ssz_snappy
new file mode 100644
index 00000000000..f846f036879
Binary files /dev/null and b/cl/antiquary/tests/test_data/phase0/blocks_1.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/phase0/meta.yaml b/cl/antiquary/tests/test_data/phase0/meta.yaml
new file mode 100644
index 00000000000..330980a92b8
--- /dev/null
+++ b/cl/antiquary/tests/test_data/phase0/meta.yaml
@@ -0,0 +1 @@
+{blocks_count: 2, bls_setting: 1}
diff --git a/cl/antiquary/tests/test_data/phase0/post.ssz_snappy b/cl/antiquary/tests/test_data/phase0/post.ssz_snappy
new file mode 100644
index 00000000000..45758ffae1d
Binary files /dev/null and b/cl/antiquary/tests/test_data/phase0/post.ssz_snappy differ
diff --git a/cl/antiquary/tests/test_data/phase0/pre.ssz_snappy b/cl/antiquary/tests/test_data/phase0/pre.ssz_snappy
new file mode 100644
index 00000000000..ceb98ad7b34
Binary files /dev/null and b/cl/antiquary/tests/test_data/phase0/pre.ssz_snappy differ
diff --git a/cl/antiquary/tests/tests.go b/cl/antiquary/tests/tests.go
new file mode 100644
index 00000000000..1596e16d05b
--- /dev/null
+++ b/cl/antiquary/tests/tests.go
@@ -0,0 +1,174 @@
+package tests
+
+import (
+ "context"
+ "embed"
+ _ "embed"
+ "strconv"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+//go:embed test_data/capella/blocks_0.ssz_snappy
+var capella_blocks_0_ssz_snappy []byte
+
+//go:embed test_data/capella/blocks_1.ssz_snappy
+var capella_blocks_1_ssz_snappy []byte
+
+//go:embed test_data/capella/pre.ssz_snappy
+var capella_pre_state_ssz_snappy []byte
+
+//go:embed test_data/capella/post.ssz_snappy
+var capella_post_state_ssz_snappy []byte
+
+//go:embed test_data/phase0/blocks_0.ssz_snappy
+var phase0_blocks_0_ssz_snappy []byte
+
+//go:embed test_data/phase0/blocks_1.ssz_snappy
+var phase0_blocks_1_ssz_snappy []byte
+
+//go:embed test_data/phase0/pre.ssz_snappy
+var phase0_pre_state_ssz_snappy []byte
+
+//go:embed test_data/phase0/post.ssz_snappy
+var phase0_post_state_ssz_snappy []byte
+
+// bellatrix is long
+
+//go:embed test_data/bellatrix
+var bellatrixFS embed.FS
+
+type MockBlockReader struct {
+ u map[uint64]*cltypes.SignedBeaconBlock
+}
+
+func NewMockBlockReader() *MockBlockReader {
+ return &MockBlockReader{u: make(map[uint64]*cltypes.SignedBeaconBlock)}
+}
+
+func (m *MockBlockReader) ReadBlockBySlot(ctx context.Context, tx kv.Tx, slot uint64) (*cltypes.SignedBeaconBlock, error) {
+ return m.u[slot], nil
+}
+
+func (m *MockBlockReader) ReadBlockByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlock, error) {
+ panic("implement me")
+}
+func (m *MockBlockReader) ReadHeaderByRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlockHeader, error) {
+ panic("implement me")
+}
+
+func (m *MockBlockReader) FrozenSlots() uint64 {
+ panic("implement me")
+}
+
+func LoadChain(blocks []*cltypes.SignedBeaconBlock, db kv.RwDB) *MockBlockReader {
+ tx, err := db.BeginRw(context.Background())
+ if err != nil {
+ panic(err)
+ }
+ defer tx.Rollback()
+
+ m := NewMockBlockReader()
+ for _, block := range blocks {
+ m.u[block.Block.Slot] = block
+ h := block.SignedBeaconBlockHeader()
+ if err := beacon_indicies.WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, h, true); err != nil {
+ panic(err)
+ }
+ if err := beacon_indicies.WriteHighestFinalized(tx, block.Block.Slot+64); err != nil {
+ panic(err)
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ panic(err)
+ }
+ return m
+}
+
+func GetCapellaRandom() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconState, *state.CachingBeaconState) {
+ block1 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block2 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+
+ // Lets do te
+ if err := utils.DecodeSSZSnappy(block1, capella_blocks_0_ssz_snappy, int(clparams.CapellaVersion)); err != nil {
+ panic(err)
+ }
+ if err := utils.DecodeSSZSnappy(block2, capella_blocks_1_ssz_snappy, int(clparams.CapellaVersion)); err != nil {
+ panic(err)
+ }
+
+ preState := state.New(&clparams.MainnetBeaconConfig)
+ if err := utils.DecodeSSZSnappy(preState, capella_pre_state_ssz_snappy, int(clparams.CapellaVersion)); err != nil {
+ panic(err)
+
+ }
+ postState := state.New(&clparams.MainnetBeaconConfig)
+ if err := utils.DecodeSSZSnappy(postState, capella_post_state_ssz_snappy, int(clparams.CapellaVersion)); err != nil {
+ panic(err)
+ }
+ return []*cltypes.SignedBeaconBlock{block1, block2}, preState, postState
+}
+
+func GetPhase0Random() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconState, *state.CachingBeaconState) {
+ block1 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block2 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+
+ // Lets do te
+ if err := utils.DecodeSSZSnappy(block1, phase0_blocks_0_ssz_snappy, int(clparams.Phase0Version)); err != nil {
+ panic(err)
+ }
+ if err := utils.DecodeSSZSnappy(block2, phase0_blocks_1_ssz_snappy, int(clparams.Phase0Version)); err != nil {
+ panic(err)
+ }
+
+ preState := state.New(&clparams.MainnetBeaconConfig)
+ if err := utils.DecodeSSZSnappy(preState, phase0_pre_state_ssz_snappy, int(clparams.Phase0Version)); err != nil {
+ panic(err)
+ }
+ postState := state.New(&clparams.MainnetBeaconConfig)
+ if err := utils.DecodeSSZSnappy(postState, phase0_post_state_ssz_snappy, int(clparams.Phase0Version)); err != nil {
+ panic(err)
+ }
+ return []*cltypes.SignedBeaconBlock{block1, block2}, preState, postState
+}
+
+func GetBellatrixRandom() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconState, *state.CachingBeaconState) {
+ ret := make([]*cltypes.SignedBeaconBlock, 0, 96)
+ // format for blocks is blocks_{i}.ssz_snappy where i is the index of the block, starting from 0 to 95 included.
+ for i := 0; i < 96; i++ {
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ // Lets do te
+ b, err := bellatrixFS.ReadFile("test_data/bellatrix/blocks_" + strconv.FormatInt(int64(i), 10) + ".ssz_snappy")
+ if err != nil {
+ panic(err)
+ }
+ if err := utils.DecodeSSZSnappy(block, b, int(clparams.BellatrixVersion)); err != nil {
+ panic(err)
+ }
+ ret = append(ret, block)
+ }
+ preState := state.New(&clparams.MainnetBeaconConfig)
+ b, err := bellatrixFS.ReadFile("test_data/bellatrix/pre.ssz_snappy")
+ if err != nil {
+ panic(err)
+ }
+ if err := utils.DecodeSSZSnappy(preState, b, int(clparams.BellatrixVersion)); err != nil {
+ panic(err)
+ }
+ postState := state.New(&clparams.MainnetBeaconConfig)
+ b, err = bellatrixFS.ReadFile("test_data/bellatrix/post.ssz_snappy")
+ if err != nil {
+ panic(err)
+ }
+ if err := utils.DecodeSSZSnappy(postState, b, int(clparams.BellatrixVersion)); err != nil {
+ panic(err)
+ }
+ return ret, preState, postState
+
+}
diff --git a/cl/beacon/beacon_router_configuration/cfg.go b/cl/beacon/beacon_router_configuration/cfg.go
new file mode 100644
index 00000000000..1a3307b0a1f
--- /dev/null
+++ b/cl/beacon/beacon_router_configuration/cfg.go
@@ -0,0 +1,14 @@
+package beacon_router_configuration
+
+import "time"
+
+// TODO(enriavil1): Make this configurable via flags
+type RouterConfiguration struct {
+ Active bool
+ Protocol string
+ Address string
+
+ ReadTimeTimeout time.Duration
+ IdleTimeout time.Duration
+ WriteTimeout time.Duration
+}
diff --git a/cl/beacon/beaconhttp/api.go b/cl/beacon/beaconhttp/api.go
new file mode 100644
index 00000000000..b0c3d94c385
--- /dev/null
+++ b/cl/beacon/beaconhttp/api.go
@@ -0,0 +1,105 @@
+package beaconhttp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/types/ssz"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
+ "github.com/ledgerwatch/log/v3"
+)
+
+var _ error = EndpointError{}
+var _ error = (*EndpointError)(nil)
+
+type EndpointError struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+}
+
+func WrapEndpointError(err error) *EndpointError {
+ e := &EndpointError{}
+ if errors.As(err, e) {
+ return e
+ }
+ if errors.Is(err, fork_graph.ErrStateNotFound) {
+ return NewEndpointError(http.StatusNotFound, "Could not find beacon state")
+ }
+ return NewEndpointError(http.StatusInternalServerError, err.Error())
+}
+
+func NewEndpointError(code int, message string) *EndpointError {
+ return &EndpointError{
+ Code: code,
+ Message: message,
+ }
+}
+
+func (e EndpointError) Error() string {
+ return fmt.Sprintf("Code %d: %s", e.Code, e.Message)
+}
+
+func (e *EndpointError) WriteTo(w http.ResponseWriter) {
+ w.WriteHeader(e.Code)
+ encErr := json.NewEncoder(w).Encode(e)
+ if encErr != nil {
+ log.Error("beaconapi failed to write json error", "err", encErr)
+ }
+}
+
+type EndpointHandler[T any] interface {
+ Handle(r *http.Request) (T, error)
+}
+
+type EndpointHandlerFunc[T any] func(r *http.Request) (T, error)
+
+func (e EndpointHandlerFunc[T]) Handle(r *http.Request) (T, error) {
+ return e(r)
+}
+
+func HandleEndpointFunc[T any](h EndpointHandlerFunc[T]) http.HandlerFunc {
+ return HandleEndpoint[T](h)
+}
+
+func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ans, err := h.Handle(r)
+ if err != nil {
+ log.Error("beacon api request error", "err", err)
+ endpointError := WrapEndpointError(err)
+ endpointError.WriteTo(w)
+ return
+ }
+ // TODO: ssz handler
+ // TODO: potentially add a context option to buffer these
+ contentType := r.Header.Get("Accept")
+ switch contentType {
+ case "application/octet-stream":
+ sszMarshaler, ok := any(ans).(ssz.Marshaler)
+ if !ok {
+ NewEndpointError(http.StatusBadRequest, "This endpoint does not support SSZ response").WriteTo(w)
+ return
+ }
+ // TODO: we should probably figure out some way to stream this in the future :)
+ encoded, err := sszMarshaler.EncodeSSZ(nil)
+ if err != nil {
+ WrapEndpointError(err).WriteTo(w)
+ return
+ }
+ w.Write(encoded)
+ case "application/json", "":
+ w.Header().Add("content-type", "application/json")
+ err := json.NewEncoder(w).Encode(ans)
+ if err != nil {
+ // this error is fatal, log to console
+ log.Error("beaconapi failed to encode json", "type", reflect.TypeOf(ans), "err", err)
+ }
+ default:
+ http.Error(w, "content type must be application/json or application/octet-stream", http.StatusBadRequest)
+
+ }
+ })
+}
diff --git a/cl/beacon/handler/blocks.go b/cl/beacon/handler/blocks.go
new file mode 100644
index 00000000000..cabe88addca
--- /dev/null
+++ b/cl/beacon/handler/blocks.go
@@ -0,0 +1,158 @@
+package handler
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+)
+
+type headerResponse struct {
+ Root libcommon.Hash `json:"root"`
+ Canonical bool `json:"canonical"`
+ Header *cltypes.SignedBeaconBlockHeader `json:"header"`
+}
+
+type getHeadersRequest struct {
+ Slot *uint64 `json:"slot,omitempty"`
+ ParentRoot *libcommon.Hash `json:"root,omitempty"`
+}
+
+func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *segmentID) (root libcommon.Hash, err error) {
+ switch {
+ case blockId.head():
+ root, _, err = a.forkchoiceStore.GetHead()
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+ case blockId.finalized():
+ root = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot()
+ case blockId.justified():
+ root = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot()
+ case blockId.genesis():
+ root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0)
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+ if root == (libcommon.Hash{}) {
+ return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found")
+ }
+ case blockId.getSlot() != nil:
+ root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *blockId.getSlot())
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+ if root == (libcommon.Hash{}) {
+ return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %d", *blockId.getSlot()))
+ }
+ case blockId.getRoot() != nil:
+ // first check if it exists
+ root = *blockId.getRoot()
+ default:
+ return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, "cannot parse block id")
+ }
+ return
+}
+
+func (a *ApiHandler) getBlock(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ blockId, err := blockIdFromRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ root, err := a.rootFromBlockId(ctx, tx, blockId)
+ if err != nil {
+ return nil, err
+ }
+
+ blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if blk == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %x", root))
+ }
+ // Check if the block is canonical
+ var canonicalRoot libcommon.Hash
+ canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot)
+ if err != nil {
+ return nil, beaconhttp.WrapEndpointError(err)
+ }
+ return newBeaconResponse(blk).
+ withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()).
+ withVersion(blk.Version()), nil
+}
+
+func (a *ApiHandler) getBlockAttestations(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+ blockId, err := blockIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ root, err := a.rootFromBlockId(ctx, tx, blockId)
+ if err != nil {
+ return nil, err
+ }
+ blk, err := a.blockReader.ReadBlockByRoot(ctx, tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if blk == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %x", root))
+ }
+ // Check if the block is canonical
+ canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, blk.Block.Slot)
+ if err != nil {
+ return nil, err
+ }
+ return newBeaconResponse(blk.Block.Body.Attestations).withFinalized(root == canonicalRoot && blk.Block.Slot <= a.forkchoiceStore.FinalizedSlot()).
+ withVersion(blk.Version()), nil
+}
+
+func (a *ApiHandler) getBlockRoot(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+ blockId, err := blockIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ root, err := a.rootFromBlockId(ctx, tx, blockId)
+ if err != nil {
+ return nil, err
+ }
+ // check if the root exist
+ slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if slot == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %x", root))
+ }
+ // Check if the block is canonical
+ var canonicalRoot libcommon.Hash
+ canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *slot)
+ if err != nil {
+ return nil, err
+ }
+ return newBeaconResponse(struct{ Root libcommon.Hash }{Root: root}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+}
diff --git a/cl/beacon/handler/config.go b/cl/beacon/handler/config.go
new file mode 100644
index 00000000000..b0e8972c2d8
--- /dev/null
+++ b/cl/beacon/handler/config.go
@@ -0,0 +1,47 @@
+package handler
+
+import (
+ "bytes"
+ "net/http"
+ "sort"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+)
+
+func (a *ApiHandler) getSpec(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.beaconChainCfg), nil
+}
+
+func (a *ApiHandler) getDepositContract(r *http.Request) (*beaconResponse, error) {
+
+ return newBeaconResponse(struct {
+ ChainId uint64 `json:"chain_id"`
+ DepositContract string `json:"address"`
+ }{ChainId: a.beaconChainCfg.DepositChainID, DepositContract: a.beaconChainCfg.DepositContractAddress}), nil
+
+}
+
+func (a *ApiHandler) getForkSchedule(r *http.Request) (*beaconResponse, error) {
+ response := []cltypes.Fork{}
+ // create first response (unordered and incomplete)
+ for currentVersion, epoch := range a.beaconChainCfg.ForkVersionSchedule {
+ response = append(response, cltypes.Fork{
+ CurrentVersion: currentVersion,
+ Epoch: epoch,
+ })
+ }
+ // Sort the respnses by epoch
+ sort.Slice(response, func(i, j int) bool {
+ if response[i].Epoch == response[j].Epoch {
+ return bytes.Compare(response[i].CurrentVersion[:], response[j].CurrentVersion[:]) < 0
+ }
+ return response[i].Epoch < response[j].Epoch
+ })
+ var previousVersion libcommon.Bytes4
+ for i := range response {
+ response[i].PreviousVersion = previousVersion
+ previousVersion = response[i].CurrentVersion
+ }
+ return newBeaconResponse(response), nil
+}
diff --git a/cl/beacon/handler/duties_proposer.go b/cl/beacon/handler/duties_proposer.go
new file mode 100644
index 00000000000..609a8292c41
--- /dev/null
+++ b/cl/beacon/handler/duties_proposer.go
@@ -0,0 +1,94 @@
+package handler
+
+import (
+ "crypto/sha256"
+ "encoding/binary"
+ "net/http"
+ "sync"
+
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ shuffling2 "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+)
+
+type proposerDuties struct {
+ Pubkey libcommon.Bytes48 `json:"pubkey"`
+ ValidatorIndex uint64 `json:"validator_index"`
+ Slot uint64 `json:"slot"`
+}
+
+func (a *ApiHandler) getDutiesProposer(r *http.Request) (*beaconResponse, error) {
+
+ epoch, err := epochFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+
+ if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch() {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "invalid epoch")
+ }
+
+ // We need to compute our duties
+ state, cancel := a.syncedData.HeadState()
+ defer cancel()
+ if state == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, "beacon node is syncing")
+
+ }
+
+ expectedSlot := epoch * a.beaconChainCfg.SlotsPerEpoch
+
+ duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch)
+ wg := sync.WaitGroup{}
+
+ for slot := expectedSlot; slot < expectedSlot+a.beaconChainCfg.SlotsPerEpoch; slot++ {
+ var proposerIndex uint64
+ // Lets do proposer index computation
+ mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) %
+ a.beaconChainCfg.EpochsPerHistoricalVector
+ // Input for the seed hash.
+ mix := state.GetRandaoMix(int(mixPosition))
+ input := shuffling2.GetSeed(a.beaconChainCfg, mix, epoch, a.beaconChainCfg.DomainBeaconProposer)
+ slotByteArray := make([]byte, 8)
+ binary.LittleEndian.PutUint64(slotByteArray, slot)
+
+ // Add slot to the end of the input.
+ inputWithSlot := append(input[:], slotByteArray...)
+ hash := sha256.New()
+
+ // Calculate the hash.
+ hash.Write(inputWithSlot)
+ seed := hash.Sum(nil)
+
+ indices := state.GetActiveValidatorsIndices(epoch)
+
+ // Write the seed to an array.
+ seedArray := [32]byte{}
+ copy(seedArray[:], seed)
+ wg.Add(1)
+
+ // Do it in parallel
+ go func(i, slot uint64, indicies []uint64, seedArray [32]byte) {
+ defer wg.Done()
+ proposerIndex, err = shuffling2.ComputeProposerIndex(state.BeaconState, indices, seedArray)
+ if err != nil {
+ panic(err)
+ }
+ var pk libcommon.Bytes48
+ pk, err = state.ValidatorPublicKey(int(proposerIndex))
+ if err != nil {
+ panic(err)
+ }
+ duties[i] = proposerDuties{
+ Pubkey: pk,
+ ValidatorIndex: proposerIndex,
+ Slot: slot,
+ }
+ }(slot-expectedSlot, slot, indices, seedArray)
+ }
+ wg.Wait()
+
+ return newBeaconResponse(duties).withFinalized(false).withVersion(a.beaconChainCfg.GetCurrentStateVersion(epoch)), nil
+
+}
diff --git a/cl/beacon/handler/format.go b/cl/beacon/handler/format.go
new file mode 100644
index 00000000000..f2ea28495cb
--- /dev/null
+++ b/cl/beacon/handler/format.go
@@ -0,0 +1,262 @@
+package handler
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+
+ "github.com/go-chi/chi/v5"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/types/ssz"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+)
+
+type apiError struct {
+ code int
+ err error
+}
+
+type beaconResponse struct {
+ Data any `json:"data,omitempty"`
+ Finalized *bool `json:"finalized,omitempty"`
+ Version *clparams.StateVersion `json:"version,omitempty"`
+ ExecutionOptimistic *bool `json:"execution_optimistic,omitempty"`
+}
+
+func (b *beaconResponse) EncodeSSZ(xs []byte) ([]byte, error) {
+ marshaler, ok := b.Data.(ssz.Marshaler)
+ if !ok {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, "This endpoint does not support SSZ response")
+ }
+ encoded, err := marshaler.EncodeSSZ(nil)
+ if err != nil {
+ return nil, err
+ }
+ return encoded, nil
+}
+
+func (b *beaconResponse) EncodingSizeSSZ() int {
+ marshaler, ok := b.Data.(ssz.Marshaler)
+ if !ok {
+ return 9
+ }
+ return marshaler.EncodingSizeSSZ()
+}
+
+func newBeaconResponse(data any) *beaconResponse {
+ return &beaconResponse{
+ Data: data,
+ }
+}
+
+func (r *beaconResponse) withFinalized(finalized bool) (out *beaconResponse) {
+ out = new(beaconResponse)
+ *out = *r
+ out.Finalized = new(bool)
+ out.ExecutionOptimistic = new(bool)
+ out.Finalized = &finalized
+ return r
+}
+
+func (r *beaconResponse) withVersion(version clparams.StateVersion) (out *beaconResponse) {
+ out = new(beaconResponse)
+ *out = *r
+ out.Version = new(clparams.StateVersion)
+ out.Version = &version
+ return r
+}
+
+//// In case of it being a json we need to also expose finalization, version, etc...
+//type beaconHandlerFn func(r *http.Request) *beaconResponse
+//
+//func beaconHandlerWrapper(fn beaconHandlerFn, supportSSZ bool) func(w http.ResponseWriter, r *http.Request) {
+// return func(w http.ResponseWriter, r *http.Request) {
+// accept := r.Header.Get("Accept")
+// isSSZ := !strings.Contains(accept, "application/json") && strings.Contains(accept, "application/stream-octect")
+// start := time.Now()
+// defer func() {
+// log.Debug("[Beacon API] finished", "method", r.Method, "path", r.URL.Path, "duration", time.Since(start))
+// }()
+//
+// resp := fn(r)
+// if resp.internalError != nil {
+// http.Error(w, resp.internalError.Error(), http.StatusInternalServerError)
+// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.internalError.Error(), "ssz", isSSZ)
+// return
+// }
+//
+// if resp.apiError != nil {
+// http.Error(w, resp.apiError.err.Error(), resp.apiError.code)
+// log.Debug("[Beacon API] failed", "method", r.Method, "err", resp.apiError.err.Error(), "ssz", isSSZ)
+// return
+// }
+//
+// if isSSZ && supportSSZ {
+// data := resp.Data
+// // SSZ encoding
+// encoded, err := data.(ssz.Marshaler).EncodeSSZ(nil)
+// if err != nil {
+// http.Error(w, err.Error(), http.StatusInternalServerError)
+// log.Debug("[Beacon API] failed", "method", r.Method, "err", err, "accepted", accept)
+// return
+// }
+// w.Header().Set("Content-Type", "application/octet-stream")
+// w.Write(encoded)
+// return
+// }
+// w.Header().Set("Content-Type", "application/json")
+// if err := json.NewEncoder(w).Encode(resp); err != nil {
+// log.Warn("[Beacon API] failed", "method", r.Method, "err", err, "ssz", isSSZ)
+// }
+// }
+//}
+
+type chainTag int
+
+var (
+ Head chainTag = 0
+ Finalized chainTag = 1
+ Justified chainTag = 2
+ Genesis chainTag = 3
+)
+
+// Represent either state id or block id
+type segmentID struct {
+ tag chainTag
+ slot *uint64
+ root *libcommon.Hash
+}
+
+func (c *segmentID) head() bool {
+ return c.tag == Head && c.slot == nil && c.root == nil
+}
+
+func (c *segmentID) finalized() bool {
+ return c.tag == Finalized
+}
+
+func (c *segmentID) justified() bool {
+ return c.tag == Justified
+}
+
+func (c *segmentID) genesis() bool {
+ return c.tag == Genesis
+}
+
+func (c *segmentID) getSlot() *uint64 {
+ return c.slot
+}
+
+func (c *segmentID) getRoot() *libcommon.Hash {
+ return c.root
+}
+
+func epochFromRequest(r *http.Request) (uint64, error) {
+ // Must only be a number
+ regex := regexp.MustCompile(`^\d+$`)
+ epoch := chi.URLParam(r, "epoch")
+ if !regex.MatchString(epoch) {
+ return 0, fmt.Errorf("invalid path variable: {epoch}")
+ }
+ epochMaybe, err := strconv.ParseUint(epoch, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return epochMaybe, nil
+}
+
+func blockIdFromRequest(r *http.Request) (*segmentID, error) {
+ regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|\d+)$`)
+
+ blockId := chi.URLParam(r, "block_id")
+ if !regex.MatchString(blockId) {
+ return nil, fmt.Errorf("invalid path variable: {block_id}")
+ }
+
+ if blockId == "head" {
+ return &segmentID{tag: Head}, nil
+ }
+ if blockId == "finalized" {
+ return &segmentID{tag: Finalized}, nil
+ }
+ if blockId == "genesis" {
+ return &segmentID{tag: Genesis}, nil
+ }
+ slotMaybe, err := strconv.ParseUint(blockId, 10, 64)
+ if err == nil {
+ return &segmentID{slot: &slotMaybe}, nil
+ }
+ root := libcommon.HexToHash(blockId)
+ return &segmentID{
+ root: &root,
+ }, nil
+}
+
+func stateIdFromRequest(r *http.Request) (*segmentID, error) {
+ regex := regexp.MustCompile(`^(?:0x[0-9a-fA-F]{64}|head|finalized|genesis|justified|\d+)$`)
+
+ stateId := chi.URLParam(r, "state_id")
+ if !regex.MatchString(stateId) {
+ return nil, fmt.Errorf("invalid path variable: {block_id}")
+ }
+
+ if stateId == "head" {
+ return &segmentID{tag: Head}, nil
+ }
+ if stateId == "finalized" {
+ return &segmentID{tag: Finalized}, nil
+ }
+ if stateId == "genesis" {
+ return &segmentID{tag: Genesis}, nil
+ }
+ if stateId == "justified" {
+ return &segmentID{tag: Justified}, nil
+ }
+ slotMaybe, err := strconv.ParseUint(stateId, 10, 64)
+ if err == nil {
+ return &segmentID{slot: &slotMaybe}, nil
+ }
+ root := libcommon.HexToHash(stateId)
+ return &segmentID{
+ root: &root,
+ }, nil
+}
+
+func hashFromQueryParams(r *http.Request, name string) (*libcommon.Hash, error) {
+ hashStr := r.URL.Query().Get(name)
+ if hashStr == "" {
+ return nil, nil
+ }
+ // check if hashstr is an hex string
+ if len(hashStr) != 2+2*32 {
+ return nil, fmt.Errorf("invalid hash length")
+ }
+ if hashStr[:2] != "0x" {
+ return nil, fmt.Errorf("invalid hash prefix")
+ }
+ notHex, err := regexp.MatchString("[^0-9A-Fa-f]", hashStr[2:])
+ if err != nil {
+ return nil, err
+ }
+ if notHex {
+ return nil, fmt.Errorf("invalid hash characters")
+ }
+
+ hash := libcommon.HexToHash(hashStr)
+ return &hash, nil
+}
+
+// uint64FromQueryParams retrieves a number from the query params, in base 10.
+func uint64FromQueryParams(r *http.Request, name string) (*uint64, error) {
+ str := r.URL.Query().Get(name)
+ if str == "" {
+ return nil, nil
+ }
+ num, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return &num, nil
+}
diff --git a/cl/beacon/handler/genesis.go b/cl/beacon/handler/genesis.go
index 0e4bf61deb2..05af01dd8b5 100644
--- a/cl/beacon/handler/genesis.go
+++ b/cl/beacon/handler/genesis.go
@@ -1,42 +1,33 @@
package handler
import (
- "encoding/json"
- "io"
"net/http"
"github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/cl/beacon/types"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
"github.com/ledgerwatch/erigon/cl/fork"
- "github.com/ledgerwatch/log/v3"
)
-type genesisReponse struct {
- GenesisTime uint64 `json:"genesis_time,omitempty"`
- GenesisValidatorRoot common.Hash `json:"genesis_validator_root,omitempty"`
- GenesisForkVersion types.Bytes4 `json:"genesis_fork_version,omitempty"`
+type genesisResponse struct {
+ GenesisTime uint64 `json:"genesis_time,omitempty"`
+ GenesisValidatorRoot common.Hash `json:"genesis_validator_root,omitempty"`
+ GenesisForkVersion libcommon.Bytes4 `json:"genesis_fork_version,omitempty"`
}
-func (a *ApiHandler) getGenesis(w http.ResponseWriter, _ *http.Request) {
+func (a *ApiHandler) getGenesis(r *http.Request) (*beaconResponse, error) {
if a.genesisCfg == nil {
- w.WriteHeader(http.StatusNotFound)
- io.WriteString(w, "Genesis Config is missing")
- return
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "Genesis Config is missing")
}
digest, err := fork.ComputeForkDigest(a.beaconChainCfg, a.genesisCfg)
if err != nil {
- w.WriteHeader(http.StatusInternalServerError)
- io.WriteString(w, "Failed to compute fork digest")
- log.Error("[Beacon API] genesis handler failed", err)
- return
+ return nil, err
}
- w.Header().Set("Content-Type", "Application/json")
- w.WriteHeader(http.StatusAccepted)
- json.NewEncoder(w).Encode(genesisReponse{
+ return newBeaconResponse(&genesisResponse{
GenesisTime: a.genesisCfg.GenesisTime,
GenesisValidatorRoot: a.genesisCfg.GenesisValidatorRoot,
- GenesisForkVersion: types.Bytes4(digest),
- })
+ GenesisForkVersion: digest,
+ }), nil
}
diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go
index 4a4012b557a..b6703bb7b88 100644
--- a/cl/beacon/handler/handler.go
+++ b/cl/beacon/handler/handler.go
@@ -5,18 +5,31 @@ import (
"sync"
"github.com/go-chi/chi/v5"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/beacon/synced_data"
"github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
)
type ApiHandler struct {
- o sync.Once
- mux chi.Router
- genesisCfg *clparams.GenesisConfig
- beaconChainCfg *clparams.BeaconChainConfig
+ o sync.Once
+ mux chi.Router
+
+ blockReader freezeblocks.BeaconSnapshotReader
+ indiciesDB kv.RoDB
+ genesisCfg *clparams.GenesisConfig
+ beaconChainCfg *clparams.BeaconChainConfig
+ forkchoiceStore forkchoice.ForkChoiceStorage
+ operationsPool pool.OperationsPool
+ syncedData *synced_data.SyncedDataManager
}
-func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig) *ApiHandler {
- return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig}
+func NewApiHandler(genesisConfig *clparams.GenesisConfig, beaconChainConfig *clparams.BeaconChainConfig, source persistence.RawBeaconBlockChain, indiciesDB kv.RoDB, forkchoiceStore forkchoice.ForkChoiceStorage, operationsPool pool.OperationsPool, rcsn freezeblocks.BeaconSnapshotReader, syncedData *synced_data.SyncedDataManager) *ApiHandler {
+ return &ApiHandler{o: sync.Once{}, genesisCfg: genesisConfig, beaconChainCfg: beaconChainConfig, indiciesDB: indiciesDB, forkchoiceStore: forkchoiceStore, operationsPool: operationsPool, blockReader: rcsn, syncedData: syncedData}
}
func (a *ApiHandler) init() {
@@ -26,52 +39,74 @@ func (a *ApiHandler) init() {
// otterscn specific ones are commented as such
r.Route("/eth", func(r chi.Router) {
r.Route("/v1", func(r chi.Router) {
- r.Get("/events", nil)
+ r.Get("/events", http.NotFound)
+ r.Route("/config", func(r chi.Router) {
+ r.Get("/spec", beaconhttp.HandleEndpointFunc(a.getSpec))
+ r.Get("/deposit_contract", beaconhttp.HandleEndpointFunc(a.getDepositContract))
+ r.Get("/fork_schedule", beaconhttp.HandleEndpointFunc(a.getForkSchedule))
+ })
r.Route("/beacon", func(r chi.Router) {
- r.Get("/headers/{tag}", nil) // otterscan
- r.Get("/blocks/{slot}/root", nil) //otterscan
- r.Get("/genesis", a.getGenesis)
- r.Post("/binded_blocks", nil)
- r.Post("/blocks", nil)
+ r.Route("/headers", func(r chi.Router) {
+ r.Get("/", beaconhttp.HandleEndpointFunc(a.getHeaders))
+ r.Get("/{block_id}", beaconhttp.HandleEndpointFunc(a.getHeader))
+ })
+ r.Route("/blocks", func(r chi.Router) {
+ r.Post("/", http.NotFound)
+ r.Get("/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock))
+ r.Get("/{block_id}/attestations", beaconhttp.HandleEndpointFunc(a.getBlockAttestations))
+ r.Get("/{block_id}/root", beaconhttp.HandleEndpointFunc(a.getBlockRoot))
+ })
+ r.Get("/genesis", beaconhttp.HandleEndpointFunc(a.getGenesis))
+ r.Post("/binded_blocks", http.NotFound)
r.Route("/pool", func(r chi.Router) {
- r.Post("/attestations", nil)
- r.Post("/sync_committees", nil)
+ r.Post("/attestations", http.NotFound)
+ r.Get("/voluntary_exits", beaconhttp.HandleEndpointFunc(a.poolVoluntaryExits))
+ r.Get("/attester_slashings", beaconhttp.HandleEndpointFunc(a.poolAttesterSlashings))
+ r.Get("/proposer_slashings", beaconhttp.HandleEndpointFunc(a.poolProposerSlashings))
+ r.Get("/bls_to_execution_changes", beaconhttp.HandleEndpointFunc(a.poolBlsToExecutionChanges))
+ r.Get("/attestations", beaconhttp.HandleEndpointFunc(a.poolAttestations))
+ r.Post("/sync_committees", http.NotFound)
})
- r.Get("/node/syncing", nil)
- r.Get("/config/spec", nil)
+ r.Get("/node/syncing", http.NotFound)
r.Route("/states", func(r chi.Router) {
- r.Get("/head/validators/{index}", nil) // otterscan
- r.Get("/head/committees", nil) // otterscan
+ r.Get("/head/validators/{index}", http.NotFound) // otterscan
+ r.Get("/head/committees", http.NotFound) // otterscan
r.Route("/{state_id}", func(r chi.Router) {
- r.Get("/validators", nil)
- r.Get("/fork", nil)
- r.Get("/validators/{id}", nil)
+ r.Get("/validators", http.NotFound)
+ r.Get("/root", beaconhttp.HandleEndpointFunc(a.getStateRoot))
+ r.Get("/fork", beaconhttp.HandleEndpointFunc(a.getStateFork))
+ r.Get("/validators/{id}", http.NotFound)
})
})
})
r.Route("/validator", func(r chi.Router) {
r.Route("/duties", func(r chi.Router) {
- r.Post("/attester/{epoch}", nil)
- r.Get("/proposer/{epoch}", nil)
- r.Post("/sync/{epoch}", nil)
+ r.Post("/attester/{epoch}", http.NotFound)
+ r.Get("/proposer/{epoch}", beaconhttp.HandleEndpointFunc(a.getDutiesProposer))
+ r.Post("/sync/{epoch}", http.NotFound)
})
- r.Get("/blinded_blocks/{slot}", nil)
- r.Get("/attestation_data", nil)
- r.Get("/aggregate_attestation", nil)
- r.Post("/aggregate_and_proofs", nil)
- r.Post("/beacon_committee_subscriptions", nil)
- r.Post("/sync_committee_subscriptions", nil)
- r.Get("/sync_committee_contribution", nil)
- r.Post("/contribution_and_proofs", nil)
- r.Post("/prepare_beacon_proposer", nil)
+ r.Get("/blinded_blocks/{slot}", http.NotFound)
+ r.Get("/attestation_data", http.NotFound)
+ r.Get("/aggregate_attestation", http.NotFound)
+ r.Post("/aggregate_and_proofs", http.NotFound)
+ r.Post("/beacon_committee_subscriptions", http.NotFound)
+ r.Post("/sync_committee_subscriptions", http.NotFound)
+ r.Get("/sync_committee_contribution", http.NotFound)
+ r.Post("/contribution_and_proofs", http.NotFound)
+ r.Post("/prepare_beacon_proposer", http.NotFound)
})
})
r.Route("/v2", func(r chi.Router) {
+ r.Route("/debug", func(r chi.Router) {
+ r.Route("/beacon", func(r chi.Router) {
+ r.Get("/states/{state_id}", beaconhttp.HandleEndpointFunc(a.getFullState))
+ })
+ })
r.Route("/beacon", func(r chi.Router) {
- r.Post("/blocks/{slot}", nil) //otterscan
+ r.Get("/blocks/{block_id}", beaconhttp.HandleEndpointFunc(a.getBlock)) //otterscan
})
r.Route("/validator", func(r chi.Router) {
- r.Post("/blocks/{slot}", nil)
+ r.Post("/blocks/{slot}", http.NotFound)
})
})
})
diff --git a/cl/beacon/handler/headers.go b/cl/beacon/handler/headers.go
new file mode 100644
index 00000000000..e6b18607115
--- /dev/null
+++ b/cl/beacon/handler/headers.go
@@ -0,0 +1,129 @@
+package handler
+
+import (
+ "fmt"
+ "net/http"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+)
+
+func (a *ApiHandler) getHeaders(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+
+ querySlot, err := uint64FromQueryParams(r, "slot")
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ queryParentHash, err := hashFromQueryParams(r, "parent_root")
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+ var candidates []libcommon.Hash
+ var slot *uint64
+ var potentialRoot libcommon.Hash
+ // First lets find some good candidates for the query. TODO(Giulio2002): this does not give all the headers.
+ switch {
+ case queryParentHash != nil && querySlot != nil:
+ // get all blocks with this parent
+ slot, err = beacon_indicies.ReadBlockSlotByBlockRoot(tx, *queryParentHash)
+ if err != nil {
+ return nil, err
+ }
+ if slot == nil {
+ break
+ }
+ if *slot+1 != *querySlot {
+ break
+ }
+ potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *slot+1)
+ if err != nil {
+ return nil, err
+ }
+ candidates = append(candidates, potentialRoot)
+ case queryParentHash == nil && querySlot != nil:
+ potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *querySlot)
+ if err != nil {
+ return nil, err
+ }
+ candidates = append(candidates, potentialRoot)
+ case queryParentHash == nil && querySlot == nil:
+ headSlot := a.syncedData.HeadSlot()
+ if headSlot == 0 {
+ break
+ }
+ potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, headSlot)
+ if err != nil {
+ return nil, err
+ }
+ candidates = append(candidates, potentialRoot)
+ }
+ // Now we assemble the response
+ headers := make([]*headerResponse, 0, len(candidates))
+ for _, root := range candidates {
+ signedHeader, err := a.blockReader.ReadHeaderByRoot(ctx, tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if signedHeader == nil || (queryParentHash != nil && signedHeader.Header.ParentRoot != *queryParentHash) || (querySlot != nil && signedHeader.Header.Slot != *querySlot) {
+ continue
+ }
+
+ canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, signedHeader.Header.Slot)
+ if err != nil {
+ return nil, err
+ }
+ headers = append(headers, &headerResponse{
+ Root: root,
+ Canonical: canonicalRoot == root,
+ Header: signedHeader,
+ })
+ }
+ return newBeaconResponse(headers), nil
+}
+
+func (a *ApiHandler) getHeader(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+ blockId, err := blockIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ root, err := a.rootFromBlockId(ctx, tx, blockId)
+ if err != nil {
+ return nil, err
+ }
+
+ signedHeader, err := a.blockReader.ReadHeaderByRoot(ctx, tx, root)
+ if err != nil {
+ return nil, err
+ }
+
+ if signedHeader == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("block not found %x", root))
+ }
+ var canonicalRoot libcommon.Hash
+ canonicalRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, signedHeader.Header.Slot)
+ if err != nil {
+ return nil, err
+ }
+
+ version := a.beaconChainCfg.GetCurrentStateVersion(signedHeader.Header.Slot / a.beaconChainCfg.SlotsPerEpoch)
+
+ return newBeaconResponse(&headerResponse{
+ Root: root,
+ Canonical: canonicalRoot == root,
+ Header: signedHeader,
+ }).withFinalized(canonicalRoot == root && signedHeader.Header.Slot <= a.forkchoiceStore.FinalizedSlot()).withVersion(version), nil
+}
diff --git a/cl/beacon/handler/pool.go b/cl/beacon/handler/pool.go
new file mode 100644
index 00000000000..66614f904f2
--- /dev/null
+++ b/cl/beacon/handler/pool.go
@@ -0,0 +1,25 @@
+package handler
+
+import (
+ "net/http"
+)
+
+func (a *ApiHandler) poolVoluntaryExits(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.operationsPool.VoluntaryExistsPool.Raw()), nil
+}
+
+func (a *ApiHandler) poolAttesterSlashings(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.operationsPool.AttesterSlashingsPool.Raw()), nil
+}
+
+func (a *ApiHandler) poolProposerSlashings(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.operationsPool.ProposerSlashingsPool.Raw()), nil
+}
+
+func (a *ApiHandler) poolBlsToExecutionChanges(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.operationsPool.BLSToExecutionChangesPool.Raw()), nil
+}
+
+func (a *ApiHandler) poolAttestations(r *http.Request) (*beaconResponse, error) {
+ return newBeaconResponse(a.operationsPool.AttestationsPool.Raw()), nil
+}
diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go
new file mode 100644
index 00000000000..0d0c75d9573
--- /dev/null
+++ b/cl/beacon/handler/states.go
@@ -0,0 +1,182 @@
+package handler
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+func (a *ApiHandler) rootFromStateId(ctx context.Context, tx kv.Tx, stateId *segmentID) (root libcommon.Hash, httpStatusErr int, err error) {
+ var blockRoot libcommon.Hash
+ switch {
+ case stateId.head():
+ blockRoot, _, err = a.forkchoiceStore.GetHead()
+ if err != nil {
+ return libcommon.Hash{}, http.StatusInternalServerError, err
+ }
+ case stateId.finalized():
+ blockRoot = a.forkchoiceStore.FinalizedCheckpoint().BlockRoot()
+ case stateId.justified():
+ blockRoot = a.forkchoiceStore.JustifiedCheckpoint().BlockRoot()
+ case stateId.genesis():
+ blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, 0)
+ if err != nil {
+ return libcommon.Hash{}, http.StatusInternalServerError, err
+ }
+ if blockRoot == (libcommon.Hash{}) {
+ return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found")
+ }
+ case stateId.getSlot() != nil:
+ blockRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *stateId.getSlot())
+ if err != nil {
+ return libcommon.Hash{}, http.StatusInternalServerError, err
+ }
+ if blockRoot == (libcommon.Hash{}) {
+ return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found %d", *stateId.getSlot())
+ }
+ case stateId.getRoot() != nil:
+ root = *stateId.getRoot()
+ return
+ default:
+ return libcommon.Hash{}, http.StatusInternalServerError, fmt.Errorf("cannot parse state id")
+ }
+ root, err = beacon_indicies.ReadStateRootByBlockRoot(ctx, tx, blockRoot)
+ if err != nil {
+ return libcommon.Hash{}, http.StatusInternalServerError, err
+ }
+ if root == (libcommon.Hash{}) {
+ return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("block not found")
+ }
+ return
+}
+
+type rootResponse struct {
+ Root libcommon.Hash `json:"root"`
+}
+
+func previousVersion(v clparams.StateVersion) clparams.StateVersion {
+ if v == clparams.Phase0Version {
+ return clparams.Phase0Version
+ }
+ return v - 1
+}
+
+func (a *ApiHandler) getStateFork(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ blockId, err := stateIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+ }
+
+ slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if slot == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, err.Error())
+ }
+ epoch := *slot / a.beaconChainCfg.SlotsPerEpoch
+
+ stateVersion := a.beaconChainCfg.GetCurrentStateVersion(epoch)
+ forkEpoch := a.beaconChainCfg.GetForkEpochByVersion(stateVersion)
+ currentVersion := a.beaconChainCfg.GetForkVersionByVersion(stateVersion)
+ previousVersion := a.beaconChainCfg.GetForkVersionByVersion(previousVersion(stateVersion))
+
+ return newBeaconResponse(&cltypes.Fork{
+ PreviousVersion: utils.Uint32ToBytes4(previousVersion),
+ CurrentVersion: utils.Uint32ToBytes4(currentVersion),
+ Epoch: forkEpoch,
+ }), nil
+}
+
+func (a *ApiHandler) getStateRoot(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ blockId, err := stateIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+ root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+ }
+
+ stateRoot, err := beacon_indicies.ReadStateRootByBlockRoot(ctx, tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if stateRoot == (libcommon.Hash{}) {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block header: %x", root))
+ }
+
+ slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, root)
+ if err != nil {
+ return nil, err
+ }
+ if slot == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("could not read block header: %x", root))
+ }
+ canonicalRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, *slot)
+ if err != nil {
+ return nil, err
+ }
+
+ return newBeaconResponse(&rootResponse{Root: stateRoot}).withFinalized(canonicalRoot == root && *slot <= a.forkchoiceStore.FinalizedSlot()), nil
+}
+
+func (a *ApiHandler) getFullState(r *http.Request) (*beaconResponse, error) {
+ ctx := r.Context()
+
+ tx, err := a.indiciesDB.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ blockId, err := stateIdFromRequest(r)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+
+ root, httpStatus, err := a.rootFromStateId(ctx, tx, blockId)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(httpStatus, err.Error())
+ }
+
+ blockRoot, err := beacon_indicies.ReadBlockRootByStateRoot(tx, root)
+ if err != nil {
+ return nil, err
+ }
+
+ state, err := a.forkchoiceStore.GetStateAtBlockRoot(blockRoot, true)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err.Error())
+ }
+
+ return newBeaconResponse(state).withFinalized(false).withVersion(state.Version()), nil
+}
diff --git a/cl/beacon/middleware.go b/cl/beacon/middleware.go
new file mode 100644
index 00000000000..519aebf0527
--- /dev/null
+++ b/cl/beacon/middleware.go
@@ -0,0 +1 @@
+package beacon
diff --git a/cl/beacon/router.go b/cl/beacon/router.go
index 5d87b248643..3fb927f0d33 100644
--- a/cl/beacon/router.go
+++ b/cl/beacon/router.go
@@ -3,26 +3,54 @@ package beacon
import (
"net"
"net/http"
- "time"
+ "strings"
+ "github.com/go-chi/chi/v5"
+ "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration"
"github.com/ledgerwatch/erigon/cl/beacon/handler"
+ "github.com/ledgerwatch/erigon/cl/beacon/validatorapi"
"github.com/ledgerwatch/log/v3"
)
-// TODO(enriavil1): Make this configurable via flags
-type RouterConfiguration struct {
- Protocol string
- Address string
-
- ReadTimeTimeout time.Duration
- IdleTimeout time.Duration
- WriteTimeout time.Duration
+type LayeredBeaconHandler struct {
+ ValidatorApi *validatorapi.ValidatorApiHandler
+ ArchiveApi *handler.ApiHandler
}
-func ListenAndServe(api *handler.ApiHandler, routerCfg *RouterConfiguration) {
+func ListenAndServe(beaconHandler *LayeredBeaconHandler, routerCfg beacon_router_configuration.RouterConfiguration) error {
listener, err := net.Listen(routerCfg.Protocol, routerCfg.Address)
+ if err != nil {
+ return err
+ }
+ defer listener.Close()
+ mux := chi.NewRouter()
+ // enforce json content type
+ mux.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ contentType := r.Header.Get("Content-Type")
+ if len(contentType) > 0 && !strings.EqualFold(contentType, "application/json") {
+ http.Error(w, "Content-Type header must be application/json", http.StatusUnsupportedMediaType)
+ return
+ }
+ h.ServeHTTP(w, r)
+ })
+ })
+ // layered handling - 404 on first handler falls back to the second
+ mux.HandleFunc("/eth/*", func(w http.ResponseWriter, r *http.Request) {
+ nfw := ¬FoundNoWriter{rw: w}
+ beaconHandler.ValidatorApi.ServeHTTP(nfw, r)
+ if nfw.code == 404 || nfw.code == 0 {
+ beaconHandler.ArchiveApi.ServeHTTP(w, r)
+ }
+ })
+ mux.HandleFunc("/validator/*", func(w http.ResponseWriter, r *http.Request) {
+ http.StripPrefix("/validator", beaconHandler.ValidatorApi).ServeHTTP(w, r)
+ })
+ mux.HandleFunc("/archive/*", func(w http.ResponseWriter, r *http.Request) {
+ http.StripPrefix("/archive", beaconHandler.ArchiveApi).ServeHTTP(w, r)
+ })
server := &http.Server{
- Handler: api,
+ Handler: mux,
ReadTimeout: routerCfg.ReadTimeTimeout,
IdleTimeout: routerCfg.IdleTimeout,
WriteTimeout: routerCfg.IdleTimeout,
@@ -33,5 +61,18 @@ func ListenAndServe(api *handler.ApiHandler, routerCfg *RouterConfiguration) {
if err := server.Serve(listener); err != nil {
log.Warn("[Beacon API] failed to start serving", "addr", routerCfg.Address, "err", err)
+ return err
}
+ return nil
+}
+
+func newBeaconMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ contentType := r.Header.Get("Content-Type")
+ if contentType != "application/json" && contentType != "" {
+ http.Error(w, "Content-Type header must be application/json", http.StatusUnsupportedMediaType)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
}
diff --git a/cl/beacon/rw.go b/cl/beacon/rw.go
new file mode 100644
index 00000000000..33a74b2fb7e
--- /dev/null
+++ b/cl/beacon/rw.go
@@ -0,0 +1,42 @@
+package beacon
+
+import (
+ "net/http"
+)
+
+type notFoundNoWriter struct {
+ rw http.ResponseWriter
+
+ code int
+ headers http.Header
+}
+
+func (f *notFoundNoWriter) Header() http.Header {
+ if f.code == 404 {
+ return make(http.Header)
+ }
+ return f.rw.Header()
+}
+
+func (f *notFoundNoWriter) Write(xs []byte) (int, error) {
+ // write code 200 if code not written yet
+ if f.code == 0 {
+ f.WriteHeader(200)
+ }
+ if f.code == 404 {
+ return 0, nil
+ }
+ // pass on the write
+ return f.rw.Write(xs)
+}
+
+func (f *notFoundNoWriter) WriteHeader(statusCode int) {
+ if f.code != 0 {
+ return
+ }
+ if f.code != 404 {
+ f.rw.WriteHeader(statusCode)
+ }
+ // if it's a 404 and we are not at our last handler, set the target to an io.Discard
+ f.code = statusCode
+}
diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go
new file mode 100644
index 00000000000..abc04251670
--- /dev/null
+++ b/cl/beacon/synced_data/synced_data.go
@@ -0,0 +1,81 @@
+package synced_data
+
+import (
+ "sync"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type SyncedDataManager struct {
+ enabled bool
+ cfg *clparams.BeaconChainConfig
+ headState *state.CachingBeaconState
+
+ mu sync.RWMutex
+}
+
+func NewSyncedDataManager(enabled bool, cfg *clparams.BeaconChainConfig) *SyncedDataManager {
+ return &SyncedDataManager{
+ enabled: enabled,
+ cfg: cfg,
+ }
+}
+
+func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err error) {
+ if !s.enabled {
+ return
+ }
+ // Schedule update.
+ go func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.headState == nil {
+ s.headState, err = newState.Copy()
+ }
+ err = newState.CopyInto(s.headState)
+ if err != nil {
+ log.Error("failed to copy head state", "err", err)
+ }
+ }()
+
+ return
+}
+
+func (s *SyncedDataManager) HeadState() (state *state.CachingBeaconState, cancel func()) {
+ if !s.enabled {
+ return nil, func() {}
+ }
+ s.mu.RLock()
+ return s.headState, s.mu.RUnlock
+}
+
+func (s *SyncedDataManager) Syncing() bool {
+ if !s.enabled {
+ return false
+ }
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ if s.headState == nil {
+ return false
+ }
+
+ headEpoch := utils.GetCurrentEpoch(s.headState.GenesisTime(), s.cfg.SecondsPerSlot, s.cfg.SlotsPerEpoch)
+ // surplusMargin, give it a go if we are within 2 epochs of the head
+ surplusMargin := s.cfg.SlotsPerEpoch * 2
+ return s.headState.Slot()+surplusMargin < headEpoch
+}
+
+func (s *SyncedDataManager) HeadSlot() uint64 {
+ if !s.enabled {
+ return 0
+ }
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ if s.headState == nil {
+ return 0
+ }
+ return s.headState.Slot()
+}
diff --git a/cl/beacon/types/primitives.go b/cl/beacon/types/primitives.go
deleted file mode 100644
index 2e4681d0885..00000000000
--- a/cl/beacon/types/primitives.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package types
-
-import (
- "encoding/hex"
- "encoding/json"
-)
-
-type Bytes4 [4]byte
-
-func (b Bytes4) MarshalJSON() ([]byte, error) {
- return json.Marshal("0x" + hex.EncodeToString(b[:]))
-}
diff --git a/cl/beacon/validatorapi/endpoints.go b/cl/beacon/validatorapi/endpoints.go
new file mode 100644
index 00000000000..ed06d471a2f
--- /dev/null
+++ b/cl/beacon/validatorapi/endpoints.go
@@ -0,0 +1,249 @@
+package validatorapi
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/fork"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+func (v *ValidatorApiHandler) GetEthV1NodeSyncing(r *http.Request) (any, error) {
+ _, slot, err := v.FC.GetHead()
+ if err != nil {
+ return nil, err
+ }
+
+ realHead := utils.GetCurrentSlot(v.GenesisCfg.GenesisTime, v.BeaconChainCfg.SecondsPerSlot)
+
+ isSyncing := realHead > slot
+
+ syncDistance := 0
+ if isSyncing {
+ syncDistance = int(realHead) - int(slot)
+ }
+
+ elOffline := true
+ if v.FC.Engine() != nil {
+ val, err := v.FC.Engine().Ready()
+ if err == nil {
+ elOffline = !val
+ }
+ }
+
+ return map[string]any{
+ "head_slot": strconv.FormatUint(slot, 10),
+ "sync_distance": syncDistance,
+ "is_syncing": isSyncing,
+ "el_offline": elOffline,
+ // TODO: figure out how to populat this field
+ "is_optimistic": true,
+ }, nil
+}
+
+func (v *ValidatorApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
+}
+
+func (v *ValidatorApiHandler) GetEthV1ConfigSpec(r *http.Request) (*clparams.BeaconChainConfig, error) {
+ if v.BeaconChainCfg == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "beacon config not found")
+ }
+ return v.BeaconChainCfg, nil
+}
+
+func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(r *http.Request) (any, error) {
+ if v.GenesisCfg == nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis config not found")
+ }
+ digest, err := fork.ComputeForkDigest(v.BeaconChainCfg, v.GenesisCfg)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err.Error())
+ }
+ return map[string]any{
+ "genesis_time": v.GenesisCfg.GenesisTime,
+ "genesis_validator_root": v.GenesisCfg.GenesisValidatorRoot,
+ "genesis_fork_version": hexutility.Bytes(digest[:]),
+ }, nil
+}
+
+func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(r *http.Request) (any, error) {
+ stateId := chi.URLParam(r, "state_id")
+ state, err := v.privateGetStateFromStateId(stateId)
+ if err != nil {
+ return nil, err
+ }
+ isFinalized := state.Slot() <= v.FC.FinalizedSlot()
+ forkData := state.BeaconState.Fork()
+ return map[string]any{
+ // TODO: this "True if the response references an unverified execution payload. "
+ // figure out the condition where this happens
+ "execution_optimistic": false,
+ "finalized": isFinalized,
+ "data": map[string]any{
+ "previous_version": hexutility.Bytes(forkData.PreviousVersion[:]),
+ "current_version": hexutility.Bytes(forkData.CurrentVersion[:]),
+ "epoch": strconv.Itoa(int(forkData.Epoch)),
+ },
+ }, nil
+}
+func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdValidatorsValidatorId(r *http.Request) (any, error) {
+ stateId := chi.URLParam(r, "state_id")
+ // grab the correct state for the given state id
+ beaconState, err := v.privateGetStateFromStateId(stateId)
+ if err != nil {
+ return nil, err
+ }
+
+ var validatorIndex uint64
+ validatorId := chi.URLParam(r, "validator_id")
+ switch {
+ case strings.HasPrefix(validatorId, "0x"):
+ // assume is hex has, so try to parse
+ hsh := common.Bytes48{}
+ err := hsh.UnmarshalText([]byte(stateId))
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid validator ID: %s", validatorId))
+ }
+ val, ok := beaconState.ValidatorIndexByPubkey(hsh)
+ if !ok {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("validator not found: %s", validatorId))
+ }
+ validatorIndex = val
+ case isInt(validatorId):
+ val, err := strconv.ParseUint(validatorId, 10, 64)
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid validator ID: %s", validatorId))
+ }
+ validatorIndex = val
+ default:
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid validator ID: %s", validatorId))
+ }
+ // at this point validatorIndex is neccesarily assigned, so we can trust the zero value
+ validator, err := beaconState.ValidatorForValidatorIndex(int(validatorIndex))
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("validator not found at %s: %s ", stateId, validatorId))
+ }
+ validatorBalance, err := beaconState.ValidatorBalance(int(validatorIndex))
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Sprintf("balance not found at %s: %s ", stateId, validatorId))
+ }
+
+ //pending_initialized - When the first deposit is processed, but not enough funds are available (or not yet the end of the first epoch) to get validator into the activation queue.
+ //pending_queued - When validator is waiting to get activated, and have enough funds etc. while in the queue, validator activation epoch keeps changing until it gets to the front and make it through (finalization is a requirement here too).
+ //active_ongoing - When validator must be attesting, and have not initiated any exit.
+ //active_exiting - When validator is still active, but filed a voluntary request to exit.
+ //active_slashed - When validator is still active, but have a slashed status and is scheduled to exit.
+ //exited_unslashed - When validator has reached regular exit epoch, not being slashed, and doesn't have to attest any more, but cannot withdraw yet.
+ //exited_slashed - When validator has reached regular exit epoch, but was slashed, have to wait for a longer withdrawal period.
+ //withdrawal_possible - After validator has exited, a while later is permitted to move funds, and is truly out of the system.
+ //withdrawal_done - (not possible in phase0, except slashing full balance) - actually having moved funds away
+
+ epoch := state.GetEpochAtSlot(v.BeaconChainCfg, beaconState.Slot())
+ // TODO: figure out what is wrong and missing here
+ validator_status := func() string {
+ // see if validator has exited
+ if validator.ExitEpoch() >= epoch {
+ if validator.WithdrawableEpoch() >= epoch {
+ // TODO: is this right? not sure if correct way to check for withdrawal_done
+ if validatorBalance == 0 {
+ return "withdrawal_done"
+ }
+ return "withdrawal_possible"
+ }
+ if validator.Slashed() {
+ return "exited_slashed"
+ }
+ return "exited_unslashed"
+ }
+ // at this point we know they have not exited, so they are either active or pending
+ if validator.Active(epoch) {
+ // if active, figure out if they are slashed
+ if validator.Slashed() {
+ return "active_slashed"
+ }
+ if validator.ExitEpoch() != v.BeaconChainCfg.FarFutureEpoch {
+ return "active_exiting"
+ }
+ return "active_ongoing"
+ }
+ // check if enough funds (TODO: or end of first epoch??)
+ if validatorBalance >= v.BeaconChainCfg.MinDepositAmount {
+ return "pending_initialized"
+ }
+ return "pending_queued"
+ }()
+
+ isFinalized := beaconState.Slot() <= v.FC.FinalizedSlot()
+ return map[string]any{
+ // TODO: this "True if the response references an unverified execution payload. "
+ // figure out the condition where this happens
+ "execution_optimistic": false,
+ "finalized": isFinalized,
+ "data": map[string]any{
+ "index": strconv.FormatUint(validatorIndex, 10),
+ "balance": strconv.FormatUint(validatorBalance, 10),
+ "status": validator_status,
+ "data": map[string]any{
+ "pubkey": hexutility.Bytes(validator.PublicKeyBytes()),
+ "withdraw_credentials": hexutility.Bytes(validator.WithdrawalCredentials().Bytes()),
+ "effective_balance": strconv.FormatUint(validator.EffectiveBalance(), 10),
+ "slashed": validator.Slashed(),
+ "activation_eligibility_epoch": strconv.FormatUint(validator.ActivationEligibilityEpoch(), 10),
+ "activation_epoch": strconv.FormatUint(validator.ActivationEpoch(), 10),
+ "exit_epoch": strconv.FormatUint(validator.ActivationEpoch(), 10),
+ "withdrawable_epoch": strconv.FormatUint(validator.WithdrawableEpoch(), 10),
+ },
+ },
+ }, nil
+}
+
+func (v *ValidatorApiHandler) privateGetStateFromStateId(stateId string) (*state.CachingBeaconState, error) {
+ switch {
+ case stateId == "head":
+ // Now check the head
+ headRoot, _, err := v.FC.GetHead()
+ if err != nil {
+ return nil, err
+ }
+ return v.FC.GetStateAtBlockRoot(headRoot, true)
+ case stateId == "genesis":
+ // not supported
+ return nil, beaconhttp.NewEndpointError(http.StatusNotFound, "genesis block not found")
+ case stateId == "finalized":
+ return v.FC.GetStateAtBlockRoot(v.FC.FinalizedCheckpoint().BlockRoot(), true)
+ case stateId == "justified":
+ return v.FC.GetStateAtBlockRoot(v.FC.JustifiedCheckpoint().BlockRoot(), true)
+ case strings.HasPrefix(stateId, "0x"):
+ // assume is hex has, so try to parse
+ hsh := common.Hash{}
+ err := hsh.UnmarshalText([]byte(stateId))
+ if err != nil {
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+ }
+ return v.FC.GetStateAtStateRoot(hsh, true)
+ case isInt(stateId):
+ // ignore the error bc isInt check succeeded. yes this doesn't protect for overflow, they will request slot 0 and it will fail. good
+ val, _ := strconv.ParseUint(stateId, 10, 64)
+ return v.FC.GetStateAtSlot(val, true)
+ default:
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Sprintf("Invalid state ID: %s", stateId))
+ }
+}
+
+func isInt(s string) bool {
+ for _, c := range s {
+ if !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/cl/beacon/validatorapi/handler.go b/cl/beacon/validatorapi/handler.go
new file mode 100644
index 00000000000..838ef398240
--- /dev/null
+++ b/cl/beacon/validatorapi/handler.go
@@ -0,0 +1,89 @@
+package validatorapi
+
+import (
+ "net/http"
+ "sync"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+)
+
+type ValidatorApiHandler struct {
+ FC forkchoice.ForkChoiceStorage
+
+ BeaconChainCfg *clparams.BeaconChainConfig
+ GenesisCfg *clparams.GenesisConfig
+
+ o sync.Once
+ mux chi.Router
+}
+
+func (v *ValidatorApiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ v.o.Do(func() {
+ v.mux = chi.NewRouter()
+ v.init(v.mux)
+ })
+ v.mux.ServeHTTP(w, r)
+}
+
+func (v *ValidatorApiHandler) init(r chi.Router) {
+ r.Route("/eth", func(r chi.Router) {
+ r.Route("/v1", func(r chi.Router) {
+ r.Route("/beacon", func(r chi.Router) {
+ r.Get("/genesis", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconGenesis))
+ r.Route("/states", func(r chi.Router) {
+ r.Route("/{state_id}", func(r chi.Router) {
+ r.Get("/fork", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdFork))
+ r.Get("/validators/{validator_id}", beaconhttp.HandleEndpointFunc(v.GetEthV1BeaconStatesStateIdValidatorsValidatorId))
+ })
+ })
+ r.Post("/binded_blocks", http.NotFound)
+ r.Post("/blocks", http.NotFound)
+ r.Route("/pool", func(r chi.Router) {
+ r.Post("/attestations", http.NotFound)
+ r.Post("/sync_committees", http.NotFound)
+ })
+ r.Get("/node/syncing", beaconhttp.HandleEndpointFunc(v.GetEthV1NodeSyncing))
+ })
+ r.Get("/config/spec", beaconhttp.HandleEndpointFunc(v.GetEthV1ConfigSpec))
+ r.Get("/events", http.NotFound)
+ r.Route("/validator", func(r chi.Router) {
+ r.Route("/duties", func(r chi.Router) {
+ r.Post("/attester/{epoch}", http.NotFound)
+ r.Get("/proposer/{epoch}", http.NotFound)
+ r.Post("/sync/{epoch}", http.NotFound)
+ })
+ // r.Get("/blinded_blocks/{slot}", http.NotFound) - deprecated
+ r.Get("/attestation_data", http.NotFound)
+ r.Get("/aggregate_attestation", http.NotFound)
+ r.Post("/aggregate_and_proofs", http.NotFound)
+ r.Post("/beacon_committee_subscriptions", http.NotFound)
+ r.Post("/sync_committee_subscriptions", http.NotFound)
+ r.Get("/sync_committee_contribution", http.NotFound)
+ r.Post("/contribution_and_proofs", http.NotFound)
+ r.Post("/prepare_beacon_proposer", http.NotFound)
+ })
+ })
+ r.Route("/v2", func(r chi.Router) {
+ r.Route("/debug", func(r chi.Router) {
+ r.Route("/beacon", func(r chi.Router) {
+ r.Get("/states/{state_id}", http.NotFound)
+ })
+ })
+ r.Route("/beacon", func(r chi.Router) {
+ r.Post("/blocks/{block_id}", http.NotFound)
+ })
+ r.Route("/validator", func(r chi.Router) {
+ r.Post("/blocks/{slot}", http.NotFound)
+ })
+ })
+ r.Route("/v3", func(r chi.Router) {
+ r.Route("/beacon", func(r chi.Router) {
+ r.Get("/blocks/{block_id}", http.NotFound)
+ })
+ })
+ })
+
+}
diff --git a/cl/clparams/config.go b/cl/clparams/config.go
index 67b64ee4c0d..8b58db47b3d 100644
--- a/cl/clparams/config.go
+++ b/cl/clparams/config.go
@@ -19,16 +19,22 @@ import (
"math"
"math/big"
"os"
+ "path"
"time"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"gopkg.in/yaml.v2"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/params/networkname"
)
+type CaplinConfig struct {
+ Backfilling bool
+ Archive bool
+}
+
type NetworkType int
const (
@@ -47,6 +53,11 @@ const (
RespTimeout time.Duration = 15 * time.Second
)
+const (
+ SubDivisionFolderSize = 10_000
+ SlotsPerDump = 1024
+)
+
var (
MainnetBootstrapNodes = []string{
// Teku team's bootnode
@@ -68,6 +79,11 @@ var (
"enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM",
"enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM",
}
+ SepoliaBootstrapNodes = append(MainnetBootstrapNodes,
+ "enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk",
+ "enr:-Ly4QFoZTWR8ulxGVsWydTNGdwEESueIdj-wB6UmmjUcm-AOPxnQi7wprzwcdo7-1jBW_JxELlUKJdJES8TDsbl1EdNlh2F0dG5ldHOI__78_v2bsV-EZXRoMpA2-lATkAAAcf__________gmlkgnY0gmlwhBLYJjGJc2VjcDI1NmsxoQI0gujXac9rMAb48NtMqtSTyHIeNYlpjkbYpWJw46PmYYhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA",
+ "enr:-KG4QE5OIg5ThTjkzrlVF32WT_-XT14WeJtIz2zoTqLLjQhYAmJlnk4ItSoH41_2x0RX0wTFIe5GgjRzU2u7Q1fN4vADhGV0aDKQqP7o7pAAAHAyAAAAAAAAAIJpZIJ2NIJpcISlFsStiXNlY3AyNTZrMaEC-Rrd_bBZwhKpXzFCrStKp1q_HmGOewxY3KwM8ofAj_ODdGNwgiMog3VkcIIjKA",
+ "enr:-L64QC9Hhov4DhQ7mRukTOz4_jHm4DHlGL726NWH4ojH1wFgEwSin_6H95Gs6nW2fktTWbPachHJ6rUFu0iJNgA0SB2CARqHYXR0bmV0c4j__________4RldGgykDb6UBOQAABx__________-CaWSCdjSCaXCEA-2vzolzZWNwMjU2azGhA17lsUg60R776rauYMdrAz383UUgESoaHEzMkvm4K6k6iHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo")
GnosisBootstrapNodes = append(MainnetBootstrapNodes, []string{
"enr:-Ly4QMU1y81COwm1VZgxGF4_eZ21ub9-GHF6dXZ29aEJ0oZpcV2Rysw-viaEKfpcpu9ZarILJLxFZjcKOjE0Sybs3MQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANLnx-Jc2VjcDI1NmsxoQKoaYT8I-wf2I_f_ii6EgoSSXj5T3bhiDyW-7ZLsY3T64hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA",
@@ -95,7 +111,6 @@ type NetworkConfig struct {
GossipMaxSize uint64 `json:"gossip_max_size"` // The maximum allowed size of uncompressed gossip messages.
GossipMaxSizeBellatrix uint64 `json:"gossip_max_size_bellatrix"` // The maximum allowed size of bellatrix uncompressed gossip messages.
MaxRequestBlocks uint64 `json:"max_request_blocks"` // Maximum number of blocks in a single request
- MinEpochsForBlockRequests uint64 `json:"min_epochs_for_block_requests"` // The minimum epoch range over which a node must serve blocks
MaxChunkSize uint64 `json:"max_chunk_size"` // The maximum allowed size of uncompressed req/resp chunked responses.
AttestationSubnetCount uint64 `json:"attestation_subnet_count"` // The number of attestation subnets used in the gossipsub protocol.
TtfbTimeout time.Duration `json:"ttfbt_timeout"` // The maximum time to wait for first byte of request response (time-to-first-byte).
@@ -159,7 +174,7 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
SyncCommsSubnetKey: "syncnets",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 1273020,
- BootNodes: MainnetBootstrapNodes,
+ BootNodes: SepoliaBootstrapNodes,
},
GoerliNetwork: {
@@ -262,9 +277,9 @@ var CheckpointSyncEndpoints = map[NetworkType][]string{
"https://prater-checkpoint-sync.stakely.io/eth/v2/debug/beacon/states/finalized",
},
SepoliaNetwork: {
- "https://beaconstate-sepolia.chainsafe.io/eth/v2/debug/beacon/states/finalized",
- // "https://sepolia.beaconstate.info/eth/v2/debug/beacon/states/finalized",
- // "https://checkpoint-sync.sepolia.ethpandaops.io/eth/v2/debug/beacon/states/finalized",
+ //"https://beaconstate-sepolia.chainsafe.io/eth/v2/debug/beacon/states/finalized",
+ "https://sepolia.beaconstate.info/eth/v2/debug/beacon/states/finalized",
+ "https://checkpoint-sync.sepolia.ethpandaops.io/eth/v2/debug/beacon/states/finalized",
},
GnosisNetwork: {
"https://checkpoint.gnosis.gateway.fm/eth/v2/debug/beacon/states/finalized",
@@ -275,6 +290,12 @@ var CheckpointSyncEndpoints = map[NetworkType][]string{
},
}
+// MinEpochsForBlockRequests equal to MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT / 2
+func (b *BeaconChainConfig) MinEpochsForBlockRequests() uint64 {
+ return b.MinValidatorWithdrawabilityDelay + (b.ChurnLimitQuotient)/2
+
+}
+
// BeaconChainConfig contains constant configs for node to participate in beacon chain.
type BeaconChainConfig struct {
// Constants (non-configurable)
@@ -309,9 +330,9 @@ type BeaconChainConfig struct {
EffectiveBalanceIncrement uint64 `yaml:"EFFECTIVE_BALANCE_INCREMENT" spec:"true"` // EffectiveBalanceIncrement is used for converting the high balance into the low balance for validators.
// Initial value constants.
- BLSWithdrawalPrefixByte byte `yaml:"BLS_WITHDRAWAL_PREFIX" spec:"true"` // BLSWithdrawalPrefixByte is used for BLS withdrawal and it's the first byte.
- ETH1AddressWithdrawalPrefixByte byte `yaml:"ETH1_ADDRESS_WITHDRAWAL_PREFIX" spec:"true"` // ETH1AddressWithdrawalPrefixByte is used for withdrawals and it's the first byte.
- ZeroHash [32]byte // ZeroHash is used to represent a zeroed out 32 byte array.
+ BLSWithdrawalPrefixByte byte `yaml:"BLS_WITHDRAWAL_PREFIX" spec:"true"` // BLSWithdrawalPrefixByte is used for BLS withdrawal and it's the first byte.
+ ETH1AddressWithdrawalPrefixByte byte `yaml:"ETH1_ADDRESS_WITHDRAWAL_PREFIX" spec:"true"` // ETH1AddressWithdrawalPrefixByte is used for withdrawals and it's the first byte.
+ ZeroHash libcommon.Hash // ZeroHash is used to represent a zeroed out 32 byte array.
// Time parameters constants.
GenesisDelay uint64 `yaml:"GENESIS_DELAY" spec:"true"` // GenesisDelay is the minimum number of seconds to delay starting the Ethereum Beacon Chain genesis. Must be at least 1 second.
@@ -369,37 +390,37 @@ type BeaconChainConfig struct {
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` //MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
// BLS domain values.
- DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
- DomainRandao [4]byte `yaml:"DOMAIN_RANDAO" spec:"true"` // DomainRandao defines the BLS signature domain for randao verification.
- DomainBeaconAttester [4]byte `yaml:"DOMAIN_BEACON_ATTESTER" spec:"true"` // DomainBeaconAttester defines the BLS signature domain for attestation verification.
- DomainDeposit [4]byte `yaml:"DOMAIN_DEPOSIT" spec:"true"` // DomainDeposit defines the BLS signature domain for deposit verification.
- DomainVoluntaryExit [4]byte `yaml:"DOMAIN_VOLUNTARY_EXIT" spec:"true"` // DomainVoluntaryExit defines the BLS signature domain for exit verification.
- DomainSelectionProof [4]byte `yaml:"DOMAIN_SELECTION_PROOF" spec:"true"` // DomainSelectionProof defines the BLS signature domain for selection proof.
- DomainAggregateAndProof [4]byte `yaml:"DOMAIN_AGGREGATE_AND_PROOF" spec:"true"` // DomainAggregateAndProof defines the BLS signature domain for aggregate and proof.
- DomainSyncCommittee [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE" spec:"true"` // DomainVoluntaryExit defines the BLS signature domain for sync committee.
- DomainSyncCommitteeSelectionProof [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF" spec:"true"` // DomainSelectionProof defines the BLS signature domain for sync committee selection proof.
- DomainContributionAndProof [4]byte `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF" spec:"true"` // DomainAggregateAndProof defines the BLS signature domain for contribution and proof.
- DomainApplicationMask [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
- DomainApplicationBuilder [4]byte // DomainApplicationBuilder defines the BLS signature domain for application builder.
- DomainBLSToExecutionChange [4]byte // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
- DomainBlobSideCar [4]byte `yaml:"DOMAIN_BLOB_SIDECAR" spec:"true"` // DomainBlobSideCar defines the BLS signature domain for blob sidecar verification
+ DomainBeaconProposer libcommon.Bytes4 `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
+ DomainRandao libcommon.Bytes4 `yaml:"DOMAIN_RANDAO" spec:"true"` // DomainRandao defines the BLS signature domain for randao verification.
+ DomainBeaconAttester libcommon.Bytes4 `yaml:"DOMAIN_BEACON_ATTESTER" spec:"true"` // DomainBeaconAttester defines the BLS signature domain for attestation verification.
+ DomainDeposit libcommon.Bytes4 `yaml:"DOMAIN_DEPOSIT" spec:"true"` // DomainDeposit defines the BLS signature domain for deposit verification.
+ DomainVoluntaryExit libcommon.Bytes4 `yaml:"DOMAIN_VOLUNTARY_EXIT" spec:"true"` // DomainVoluntaryExit defines the BLS signature domain for exit verification.
+ DomainSelectionProof libcommon.Bytes4 `yaml:"DOMAIN_SELECTION_PROOF" spec:"true"` // DomainSelectionProof defines the BLS signature domain for selection proof.
+ DomainAggregateAndProof libcommon.Bytes4 `yaml:"DOMAIN_AGGREGATE_AND_PROOF" spec:"true"` // DomainAggregateAndProof defines the BLS signature domain for aggregate and proof.
+ DomainSyncCommittee libcommon.Bytes4 `yaml:"DOMAIN_SYNC_COMMITTEE" spec:"true"` // DomainVoluntaryExit defines the BLS signature domain for sync committee.
+ DomainSyncCommitteeSelectionProof libcommon.Bytes4 `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF" spec:"true"` // DomainSelectionProof defines the BLS signature domain for sync committee selection proof.
+ DomainContributionAndProof libcommon.Bytes4 `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF" spec:"true"` // DomainAggregateAndProof defines the BLS signature domain for contribution and proof.
+ DomainApplicationMask libcommon.Bytes4 `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
+ DomainApplicationBuilder libcommon.Bytes4 // DomainApplicationBuilder defines the BLS signature domain for application builder.
+ DomainBLSToExecutionChange libcommon.Bytes4 // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix
+ DomainBlobSideCar libcommon.Bytes4 `yaml:"DOMAIN_BLOB_SIDECAR" spec:"true"` // DomainBlobSideCar defines the BLS signature domain for blob sidecar verification
// Prysm constants.
- GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.
- BLSSecretKeyLength int // BLSSecretKeyLength defines the expected length of BLS secret keys in bytes.
- BLSPubkeyLength int // BLSPubkeyLength defines the expected length of BLS public keys in bytes.
- DefaultBufferSize int // DefaultBufferSize for channels across the Prysm repository.
- ValidatorPrivkeyFileName string // ValidatorPrivKeyFileName specifies the string name of a validator private key file.
- WithdrawalPrivkeyFileName string // WithdrawalPrivKeyFileName specifies the string name of a withdrawal private key file.
- RPCSyncCheck time.Duration // Number of seconds to query the sync service, to find out if the node is synced or not.
- EmptySignature [96]byte // EmptySignature is used to represent a zeroed out BLS Signature.
- DefaultPageSize int // DefaultPageSize defines the default page size for RPC server request.
- MaxPeersToSync int // MaxPeersToSync describes the limit for number of peers in round robin sync.
- SlotsPerArchivedPoint uint64 // SlotsPerArchivedPoint defines the number of slots per one archived point.
- GenesisCountdownInterval time.Duration // How often to log the countdown until the genesis time is reached.
- BeaconStateFieldCount int // BeaconStateFieldCount defines how many fields are in beacon state.
- BeaconStateAltairFieldCount int // BeaconStateAltairFieldCount defines how many fields are in beacon state hard fork 1.
- BeaconStateBellatrixFieldCount int // BeaconStateBellatrixFieldCount defines how many fields are in beacon state post upgrade to the Bellatrix.
+ GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.
+ BLSSecretKeyLength int // BLSSecretKeyLength defines the expected length of BLS secret keys in bytes.
+ BLSPubkeyLength int // BLSPubkeyLength defines the expected length of BLS public keys in bytes.
+ DefaultBufferSize int // DefaultBufferSize for channels across the Prysm repository.
+ ValidatorPrivkeyFileName string // ValidatorPrivKeyFileName specifies the string name of a validator private key file.
+ WithdrawalPrivkeyFileName string // WithdrawalPrivKeyFileName specifies the string name of a withdrawal private key file.
+ RPCSyncCheck time.Duration // Number of seconds to query the sync service, to find out if the node is synced or not.
+ EmptySignature libcommon.Bytes96 // EmptySignature is used to represent a zeroed out BLS Signature.
+ DefaultPageSize int // DefaultPageSize defines the default page size for RPC server request.
+ MaxPeersToSync int // MaxPeersToSync describes the limit for number of peers in round robin sync.
+ SlotsPerArchivedPoint uint64 // SlotsPerArchivedPoint defines the number of slots per one archived point.
+ GenesisCountdownInterval time.Duration // How often to log the countdown until the genesis time is reached.
+ BeaconStateFieldCount int // BeaconStateFieldCount defines how many fields are in beacon state.
+ BeaconStateAltairFieldCount int // BeaconStateAltairFieldCount defines how many fields are in beacon state hard fork 1.
+ BeaconStateBellatrixFieldCount int // BeaconStateBellatrixFieldCount defines how many fields are in beacon state post upgrade to the Bellatrix.
// Slasher constants.
WeakSubjectivityPeriod uint64 // WeakSubjectivityPeriod defines the time period expressed in number of epochs were proof of stake network should validate block headers and attestations for slashable events.
@@ -419,8 +440,8 @@ type BeaconChainConfig struct {
DenebForkVersion uint32 `yaml:"DENEB_FORK_VERSION" spec:"true"` // DenebForkVersion is used to represent the fork version for Deneb.
DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH" spec:"true"` // DenebForkEpoch is used to represent the assigned fork epoch for Deneb.
- ForkVersionSchedule map[[VersionLength]byte]uint64 // Schedule of fork epochs by version.
- ForkVersionNames map[[VersionLength]byte]string // Human-readable names of fork versions.
+ ForkVersionSchedule map[libcommon.Bytes4]uint64 // Schedule of fork epochs by version.
+ ForkVersionNames map[libcommon.Bytes4]string // Human-readable names of fork versions.
// Weak subjectivity values.
SafetyDecay uint64 // SafetyDecay is defined as the loss in the 1/3 consensus safety margin of the casper FFG mechanism.
@@ -474,6 +495,20 @@ type BeaconChainConfig struct {
MaxBuilderEpochMissedSlots uint64 // MaxBuilderEpochMissedSlots is defines the number of total skip slot (per epoch rolling windows) to fallback from using relay/builder to local execution engine for block construction.
}
+func (b *BeaconChainConfig) RoundSlotToEpoch(slot uint64) uint64 {
+ return slot - (slot % b.SlotsPerEpoch)
+}
+
+func (b *BeaconChainConfig) RoundSlotToSyncCommitteePeriod(slot uint64) uint64 {
+ slotsPerSyncCommitteePeriod := b.SlotsPerEpoch * b.EpochsPerSyncCommitteePeriod
+ return slot - (slot % slotsPerSyncCommitteePeriod)
+}
+
+func (b *BeaconChainConfig) RoundSlotToVotePeriod(slot uint64) uint64 {
+ p := b.SlotsPerEpoch * b.EpochsPerEth1VotingPeriod
+ return slot - (slot % p)
+}
+
func (b *BeaconChainConfig) GetCurrentStateVersion(epoch uint64) StateVersion {
forkEpochList := []uint64{b.AltairForkEpoch, b.BellatrixForkEpoch, b.CapellaForkEpoch, b.DenebForkEpoch}
stateVersion := Phase0Version
@@ -497,8 +532,8 @@ func toBytes4(in []byte) (ret [4]byte) {
return
}
-func configForkSchedule(b *BeaconChainConfig) map[[VersionLength]byte]uint64 {
- fvs := map[[VersionLength]byte]uint64{}
+func configForkSchedule(b *BeaconChainConfig) map[libcommon.Bytes4]uint64 {
+ fvs := map[libcommon.Bytes4]uint64{}
fvs[utils.Uint32ToBytes4(b.GenesisForkVersion)] = 0
fvs[utils.Uint32ToBytes4(b.AltairForkVersion)] = b.AltairForkEpoch
fvs[utils.Uint32ToBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch
@@ -507,8 +542,8 @@ func configForkSchedule(b *BeaconChainConfig) map[[VersionLength]byte]uint64 {
return fvs
}
-func configForkNames(b *BeaconChainConfig) map[[VersionLength]byte]string {
- fvn := map[[VersionLength]byte]string{}
+func configForkNames(b *BeaconChainConfig) map[libcommon.Bytes4]string {
+ fvn := map[libcommon.Bytes4]string{}
fvn[utils.Uint32ToBytes4(b.GenesisForkVersion)] = "phase0"
fvn[utils.Uint32ToBytes4(b.AltairForkVersion)] = "altair"
fvn[utils.Uint32ToBytes4(b.BellatrixForkVersion)] = "bellatrix"
@@ -809,14 +844,17 @@ func gnosisConfig() BeaconChainConfig {
cfg.AltairForkVersion = 0x01000064
cfg.BellatrixForkEpoch = 385536
cfg.BellatrixForkVersion = 0x02000064
+ cfg.CapellaForkEpoch = 648704
+ cfg.CapellaForkVersion = 0x03000064
cfg.TerminalTotalDifficulty = "8626000000000000000000058750000000000000000000"
cfg.DepositContractAddress = "0x0B98057eA310F4d31F2a452B414647007d1645d9"
cfg.BaseRewardFactor = 25
cfg.SlotsPerEpoch = 16
cfg.EpochsPerSyncCommitteePeriod = 512
- cfg.CapellaForkEpoch = 648704
- cfg.CapellaForkVersion = 0x03000064
cfg.DenebForkEpoch = math.MaxUint64
+ cfg.InactivityScoreRecoveryRate = 16
+ cfg.InactivityScoreBias = 4
+ cfg.MaxWithdrawalsPerPayload = 8
cfg.InitializeForkSchedule()
return cfg
}
@@ -912,6 +950,38 @@ func (b *BeaconChainConfig) CurrentEpochAttestationsLength() uint64 {
return b.SlotsPerEpoch * b.MaxAttestations
}
+func (b *BeaconChainConfig) GetForkVersionByVersion(v StateVersion) uint32 {
+ switch v {
+ case Phase0Version:
+ return b.GenesisForkVersion
+ case AltairVersion:
+ return b.AltairForkVersion
+ case BellatrixVersion:
+ return b.BellatrixForkVersion
+ case CapellaVersion:
+ return b.CapellaForkVersion
+ case DenebVersion:
+ return b.DenebForkVersion
+ }
+ panic("invalid version")
+}
+
+func (b *BeaconChainConfig) GetForkEpochByVersion(v StateVersion) uint64 {
+ switch v {
+ case Phase0Version:
+ return 0
+ case AltairVersion:
+ return b.AltairForkEpoch
+ case BellatrixVersion:
+ return b.BellatrixForkEpoch
+ case CapellaVersion:
+ return b.CapellaForkEpoch
+ case DenebVersion:
+ return b.DenebForkEpoch
+ }
+ panic("invalid version")
+}
+
func GetConfigsByNetwork(net NetworkType) (*GenesisConfig, *NetworkConfig, *BeaconChainConfig) {
networkConfig := NetworkConfigs[net]
genesisConfig := GenesisConfigs[net]
@@ -940,6 +1010,7 @@ func GetConfigsByNetworkName(net string) (*GenesisConfig, *NetworkConfig, *Beaco
return nil, nil, nil, MainnetNetwork, fmt.Errorf("chain not found")
}
}
+
func GetCheckpointSyncEndpoint(net NetworkType) string {
checkpoints, ok := CheckpointSyncEndpoints[net]
if !ok {
@@ -974,3 +1045,12 @@ func EmbeddedSupported(id uint64) bool {
func EmbeddedEnabledByDefault(id uint64) bool {
return id == 1 || id == 5 || id == 11155111
}
+
+func SupportBackfilling(networkId uint64) bool {
+ return networkId == uint64(MainnetNetwork) || networkId == uint64(SepoliaNetwork)
+}
+
+func EpochToPaths(slot uint64, config *BeaconChainConfig, suffix string) (string, string) {
+ folderPath := path.Clean(fmt.Sprintf("%d", slot/SubDivisionFolderSize))
+ return folderPath, path.Clean(fmt.Sprintf("%s/%d.%s.sz", folderPath, slot, suffix))
+}
diff --git a/cl/clparams/initial_state/goerli.state.ssz b/cl/clparams/initial_state/goerli.state.ssz
deleted file mode 100644
index 3f0386727f3..00000000000
Binary files a/cl/clparams/initial_state/goerli.state.ssz and /dev/null differ
diff --git a/cl/clparams/initial_state/initial_state.go b/cl/clparams/initial_state/initial_state.go
index 4ceaf6de9f9..3f15c23dc1f 100644
--- a/cl/clparams/initial_state/initial_state.go
+++ b/cl/clparams/initial_state/initial_state.go
@@ -2,7 +2,6 @@ package initial_state
import (
_ "embed"
- "fmt"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
@@ -15,9 +14,6 @@ var mainnetStateSSZ []byte
//go:embed sepolia.state.ssz
var sepoliaStateSSZ []byte
-//go:embed goerli.state.ssz
-var goerliStateSSZ []byte
-
// Return genesis state
func GetGenesisState(network clparams.NetworkType) (*state.CachingBeaconState, error) {
_, _, config := clparams.GetConfigsByNetwork(network)
@@ -28,16 +24,23 @@ func GetGenesisState(network clparams.NetworkType) (*state.CachingBeaconState, e
if err := returnState.DecodeSSZ(mainnetStateSSZ, int(clparams.Phase0Version)); err != nil {
return nil, err
}
- case clparams.GoerliNetwork:
- if err := returnState.DecodeSSZ(goerliStateSSZ, int(clparams.Phase0Version)); err != nil {
- return nil, err
- }
case clparams.SepoliaNetwork:
if err := returnState.DecodeSSZ(sepoliaStateSSZ, int(clparams.Phase0Version)); err != nil {
return nil, err
}
- default:
- return nil, fmt.Errorf("unsupported network for genesis fetching")
+ case clparams.GoerliNetwork:
+ return nil, nil
}
return returnState, nil
}
+
+func IsGenesisStateSupported(network clparams.NetworkType) bool {
+ switch network {
+ case clparams.MainnetNetwork:
+ return true
+ case clparams.SepoliaNetwork:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/cl/clparams/initial_state/initial_state_test.go b/cl/clparams/initial_state/initial_state_test.go
index e6ccef9c792..46714b2a305 100644
--- a/cl/clparams/initial_state/initial_state_test.go
+++ b/cl/clparams/initial_state/initial_state_test.go
@@ -17,14 +17,6 @@ func TestMainnet(t *testing.T) {
assert.Equal(t, libcommon.Hash(root), libcommon.HexToHash("7e76880eb67bbdc86250aa578958e9d0675e64e714337855204fb5abaaf82c2b"))
}
-func TestGoerli(t *testing.T) {
- state, err := initial_state.GetGenesisState(clparams.GoerliNetwork)
- assert.NoError(t, err)
- root, err := state.HashSSZ()
- assert.NoError(t, err)
- assert.Equal(t, libcommon.Hash(root), libcommon.HexToHash("895390e92edc03df7096e9f51e51896e8dbe6e7e838180dadbfd869fdd77a659"))
-}
-
func TestSepolia(t *testing.T) {
state, err := initial_state.GetGenesisState(clparams.SepoliaNetwork)
assert.NoError(t, err)
diff --git a/cl/clparams/version.go b/cl/clparams/version.go
index 6a1e33b74c8..72884406fdd 100644
--- a/cl/clparams/version.go
+++ b/cl/clparams/version.go
@@ -27,3 +27,20 @@ func StringToClVersion(s string) StateVersion {
panic("unsupported fork version: " + s)
}
}
+
+func ClVersionToString(s StateVersion) string {
+ switch s {
+ case Phase0Version:
+ return "phase0"
+ case AltairVersion:
+ return "altair"
+ case BellatrixVersion:
+ return "bellatrix"
+ case CapellaVersion:
+ return "capella"
+ case DenebVersion:
+ return "deneb"
+ default:
+ panic("unsupported fork version")
+ }
+}
diff --git a/cl/clstages/clstages.go b/cl/clstages/clstages.go
new file mode 100644
index 00000000000..543de4b038d
--- /dev/null
+++ b/cl/clstages/clstages.go
@@ -0,0 +1,64 @@
+package clstages
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+type StageGraph[CONFIG any, ARGUMENTS any] struct {
+ ArgsFunc func(ctx context.Context, cfg CONFIG) (args ARGUMENTS)
+ Stages map[string]Stage[CONFIG, ARGUMENTS]
+}
+
+type Stage[CONFIG any, ARGUMENTS any] struct {
+ Description string
+ ActionFunc func(ctx context.Context, logger log.Logger, cfg CONFIG, args ARGUMENTS) error
+ TransitionFunc func(cfg CONFIG, args ARGUMENTS, err error) string
+}
+
+func (s *StageGraph[CONFIG, ARGUMENTS]) StartWithStage(ctx context.Context, startStage string, logger log.Logger, cfg CONFIG) error {
+ stageName := startStage
+ args := s.ArgsFunc(ctx, cfg)
+ for {
+ currentStage, ok := s.Stages[stageName]
+ if !ok {
+ return fmt.Errorf("attempted to transition to unknown stage: %s", stageName)
+ }
+ lg := logger.New("stage", stageName)
+ errch := make(chan error)
+ start := time.Now()
+ go func() {
+ sctx, cn := context.WithCancel(ctx)
+ defer cn()
+ // we run this is a goroutine so that the process can exit in the middle of a stage
+ // since caplin is designed to always be able to recover regardless of db state, this should be safe
+ select {
+ case errch <- currentStage.ActionFunc(sctx, lg, cfg, args):
+ case <-sctx.Done():
+ errch <- sctx.Err()
+ }
+ }()
+ err := <-errch
+ dur := time.Since(start)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ lg.Debug("error executing clstage", "err", err)
+ } else {
+ lg.Warn("error executing clstage", "err", err)
+ }
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ args = s.ArgsFunc(ctx, cfg)
+ nextStage := currentStage.TransitionFunc(cfg, args, err)
+ logger.Debug("clstage finish", "stage", stageName, "in", dur, "next", nextStage)
+ stageName = nextStage
+ }
+ }
+}
diff --git a/cl/cltypes/aggregate.go b/cl/cltypes/aggregate.go
index 6f8d724a0cd..a21ade55f6a 100644
--- a/cl/cltypes/aggregate.go
+++ b/cl/cltypes/aggregate.go
@@ -1,6 +1,7 @@
package cltypes
import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
@@ -13,13 +14,17 @@ import (
type AggregateAndProof struct {
AggregatorIndex uint64
Aggregate *solid.Attestation
- SelectionProof [96]byte
+ SelectionProof libcommon.Bytes96
}
func (a *AggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
return ssz2.MarshalSSZ(dst, a.AggregatorIndex, a.Aggregate, a.SelectionProof[:])
}
+func (a *AggregateAndProof) Static() bool {
+ return false
+}
+
func (a *AggregateAndProof) DecodeSSZ(buf []byte, version int) error {
a.Aggregate = new(solid.Attestation)
return ssz2.UnmarshalSSZ(buf, version, &a.AggregatorIndex, a.Aggregate, a.SelectionProof[:])
@@ -35,7 +40,7 @@ func (a *AggregateAndProof) HashSSZ() ([32]byte, error) {
type SignedAggregateAndProof struct {
Message *AggregateAndProof
- Signature [96]byte
+ Signature libcommon.Bytes96
}
func (a *SignedAggregateAndProof) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -51,13 +56,17 @@ func (a *SignedAggregateAndProof) EncodingSizeSSZ() int {
return 100 + a.Message.EncodingSizeSSZ()
}
+func (a *SignedAggregateAndProof) HashSSZ() ([32]byte, error) {
+ return merkle_tree.HashTreeRoot(a.Message, a.Signature[:])
+}
+
/*
* SyncAggregate, Determines successfull committee, bits shows active participants,
* and signature is the aggregate BLS signature of the committee.
*/
type SyncAggregate struct {
- SyncCommiteeBits [64]byte
- SyncCommiteeSignature [96]byte
+ SyncCommiteeBits libcommon.Bytes64 `json:"sync_commitee_bits"`
+ SyncCommiteeSignature libcommon.Bytes96 `json:"signature"`
}
// return sum of the committee bits
diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go
index 344cfec5c40..8125342241c 100644
--- a/cl/cltypes/beacon_block.go
+++ b/cl/cltypes/beacon_block.go
@@ -23,50 +23,82 @@ const (
)
type SignedBeaconBlock struct {
- Signature [96]byte
- Block *BeaconBlock
+ Signature libcommon.Bytes96 `json:"signature"`
+ Block *BeaconBlock `json:"message"`
}
type BeaconBlock struct {
- Slot uint64
- ProposerIndex uint64
- ParentRoot libcommon.Hash
- StateRoot libcommon.Hash
- Body *BeaconBody
+ Slot uint64 `json:"slot"`
+ ProposerIndex uint64 `json:"proposer_index"`
+ ParentRoot libcommon.Hash `json:"parent_root"`
+ StateRoot libcommon.Hash `json:"state_root"`
+ Body *BeaconBody `json:"body"`
}
type BeaconBody struct {
// A byte array used for randomness in the beacon chain
- RandaoReveal [96]byte
+ RandaoReveal libcommon.Bytes96 `json:"randao_reveal"`
// Data related to the Ethereum 1.0 chain
- Eth1Data *Eth1Data
+ Eth1Data *Eth1Data `json:"eth1_data"`
// A byte array used to customize validators' behavior
- Graffiti [32]byte
+ Graffiti libcommon.Hash `json:"graffit"`
// A list of slashing events for validators who included invalid blocks in the chain
- ProposerSlashings *solid.ListSSZ[*ProposerSlashing]
+ ProposerSlashings *solid.ListSSZ[*ProposerSlashing] `json:"proposer_slashings"`
// A list of slashing events for validators who included invalid attestations in the chain
- AttesterSlashings *solid.ListSSZ[*AttesterSlashing]
+ AttesterSlashings *solid.ListSSZ[*AttesterSlashing] `json:"attester_slashings"`
// A list of attestations included in the block
- Attestations *solid.ListSSZ[*solid.Attestation]
+ Attestations *solid.ListSSZ[*solid.Attestation] `json:"attestations"`
// A list of deposits made to the Ethereum 1.0 chain
- Deposits *solid.ListSSZ[*Deposit]
+ Deposits *solid.ListSSZ[*Deposit] `json:"deposits"`
// A list of validators who have voluntarily exited the beacon chain
- VoluntaryExits *solid.ListSSZ[*SignedVoluntaryExit]
+ VoluntaryExits *solid.ListSSZ[*SignedVoluntaryExit] `json:"voluntary_exits"`
// A summary of the current state of the beacon chain
- SyncAggregate *SyncAggregate
+ SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
// Data related to crosslink records and executing operations on the Ethereum 2.0 chain
- ExecutionPayload *Eth1Block
+ ExecutionPayload *Eth1Block `json:"execution_payload,omitempty"`
// Withdrawals Diffs for Execution Layer
- ExecutionChanges *solid.ListSSZ[*SignedBLSToExecutionChange]
+ ExecutionChanges *solid.ListSSZ[*SignedBLSToExecutionChange] `json:"execution_changes,omitempty"`
// The commitments for beacon chain blobs
// With a max of 4 per block
- BlobKzgCommitments *solid.ListSSZ[*KZGCommitment]
+ BlobKzgCommitments *solid.ListSSZ[*KZGCommitment] `json:"blob_kzg_commitments,omitempty"`
// The version of the beacon chain
- Version clparams.StateVersion
+ Version clparams.StateVersion `json:"-"`
+ beaconCfg *clparams.BeaconChainConfig
}
// Getters
+func NewSignedBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *SignedBeaconBlock {
+ return &SignedBeaconBlock{Block: NewBeaconBlock(beaconCfg)}
+}
+
+func (s *SignedBeaconBlock) SignedBeaconBlockHeader() *SignedBeaconBlockHeader {
+ bodyRoot, err := s.Block.Body.HashSSZ()
+ if err != nil {
+ panic(err)
+ }
+ return &SignedBeaconBlockHeader{
+ Signature: s.Signature,
+ Header: &BeaconBlockHeader{
+ Slot: s.Block.Slot,
+ ProposerIndex: s.Block.ProposerIndex,
+ ParentRoot: s.Block.ParentRoot,
+ Root: s.Block.StateRoot,
+ BodyRoot: bodyRoot,
+ },
+ }
+}
+
+func NewBeaconBlock(beaconCfg *clparams.BeaconChainConfig) *BeaconBlock {
+ return &BeaconBlock{Body: NewBeaconBody(beaconCfg)}
+}
+
+func NewBeaconBody(beaconCfg *clparams.BeaconChainConfig) *BeaconBody {
+ return &BeaconBody{
+ beaconCfg: beaconCfg,
+ }
+}
+
// Version returns beacon block version.
func (b *SignedBeaconBlock) Version() clparams.StateVersion {
return b.Block.Body.Version
@@ -78,7 +110,7 @@ func (b *BeaconBlock) Version() clparams.StateVersion {
}
func (b *BeaconBody) EncodeSSZ(dst []byte) ([]byte, error) {
- return ssz2.MarshalSSZ(dst, b.getSchema()...)
+ return ssz2.MarshalSSZ(dst, b.getSchema(false)...)
}
func (b *BeaconBody) EncodingSizeSSZ() (size int) {
@@ -90,7 +122,7 @@ func (b *BeaconBody) EncodingSizeSSZ() (size int) {
b.SyncAggregate = &SyncAggregate{}
}
if b.ExecutionPayload == nil {
- b.ExecutionPayload = &Eth1Block{}
+ b.ExecutionPayload = NewEth1Block(b.Version, b.beaconCfg)
}
if b.ProposerSlashings == nil {
b.ProposerSlashings = solid.NewStaticListSSZ[*ProposerSlashing](MaxProposerSlashings, 416)
@@ -108,7 +140,7 @@ func (b *BeaconBody) EncodingSizeSSZ() (size int) {
b.VoluntaryExits = solid.NewStaticListSSZ[*SignedVoluntaryExit](MaxVoluntaryExits, 112)
}
if b.ExecutionPayload == nil {
- b.ExecutionPayload = new(Eth1Block)
+ b.ExecutionPayload = NewEth1Block(b.Version, b.beaconCfg)
}
if b.ExecutionChanges == nil {
b.ExecutionChanges = solid.NewStaticListSSZ[*SignedBLSToExecutionChange](MaxExecutionChanges, 172)
@@ -142,20 +174,22 @@ func (b *BeaconBody) DecodeSSZ(buf []byte, version int) error {
return fmt.Errorf("[BeaconBody] err: %s", ssz.ErrLowBufferSize)
}
- err := ssz2.UnmarshalSSZ(buf, version, b.getSchema()...)
+ b.ExecutionPayload = NewEth1Block(b.Version, b.beaconCfg)
+
+ err := ssz2.UnmarshalSSZ(buf, version, b.getSchema(false)...)
return err
}
func (b *BeaconBody) HashSSZ() ([32]byte, error) {
- return merkle_tree.HashTreeRoot(b.getSchema()...)
+ return merkle_tree.HashTreeRoot(b.getSchema(false)...)
}
-func (b *BeaconBody) getSchema() []interface{} {
+func (b *BeaconBody) getSchema(storage bool) []interface{} {
s := []interface{}{b.RandaoReveal[:], b.Eth1Data, b.Graffiti[:], b.ProposerSlashings, b.AttesterSlashings, b.Attestations, b.Deposits, b.VoluntaryExits}
if b.Version >= clparams.AltairVersion {
s = append(s, b.SyncAggregate)
}
- if b.Version >= clparams.BellatrixVersion {
+ if b.Version >= clparams.BellatrixVersion && !storage {
s = append(s, b.ExecutionPayload)
}
if b.Version >= clparams.CapellaVersion {
@@ -173,13 +207,12 @@ func (b *BeaconBlock) EncodeSSZ(buf []byte) (dst []byte, err error) {
func (b *BeaconBlock) EncodingSizeSSZ() int {
if b.Body == nil {
- b.Body = new(BeaconBody)
+ return 80
}
return 80 + b.Body.EncodingSizeSSZ()
}
func (b *BeaconBlock) DecodeSSZ(buf []byte, version int) error {
- b.Body = new(BeaconBody)
return ssz2.UnmarshalSSZ(buf, version, &b.Slot, &b.ProposerIndex, b.ParentRoot[:], b.StateRoot[:], b.Body)
}
@@ -193,13 +226,12 @@ func (b *SignedBeaconBlock) EncodeSSZ(buf []byte) ([]byte, error) {
func (b *SignedBeaconBlock) EncodingSizeSSZ() int {
if b.Block == nil {
- b.Block = new(BeaconBlock)
+ return 100
}
return 100 + b.Block.EncodingSizeSSZ()
}
func (b *SignedBeaconBlock) DecodeSSZ(buf []byte, s int) error {
- b.Block = new(BeaconBlock)
return ssz2.UnmarshalSSZ(buf, s, b.Block, b.Signature[:])
}
diff --git a/cl/cltypes/beacon_block_test.go b/cl/cltypes/beacon_block_test.go
index a07cd560e31..e032454800e 100644
--- a/cl/cltypes/beacon_block_test.go
+++ b/cl/cltypes/beacon_block_test.go
@@ -43,10 +43,11 @@ func TestBeaconBody(t *testing.T) {
Deposits: deposits,
VoluntaryExits: voluntaryExits,
SyncAggregate: syncAggregate,
- ExecutionPayload: NewEth1BlockFromHeaderAndBody(block.Header(), block.RawBody()),
+ ExecutionPayload: NewEth1BlockFromHeaderAndBody(block.Header(), block.RawBody(), &clparams.MainnetBeaconConfig),
ExecutionChanges: executionChanges,
BlobKzgCommitments: blobKzgCommitments,
Version: version,
+ beaconCfg: &clparams.MainnetBeaconConfig,
}
// Test EncodeSSZ and DecodeSSZ
diff --git a/cl/cltypes/beacon_header.go b/cl/cltypes/beacon_header.go
index bef37a913b7..94064339d23 100644
--- a/cl/cltypes/beacon_header.go
+++ b/cl/cltypes/beacon_header.go
@@ -13,11 +13,11 @@ import (
* It contains the hash of the block body, and state root data.
*/
type BeaconBlockHeader struct {
- Slot uint64
- ProposerIndex uint64
- ParentRoot libcommon.Hash
- Root libcommon.Hash
- BodyRoot libcommon.Hash
+ Slot uint64 `json:"slot"`
+ ProposerIndex uint64 `json:"proposer_index"`
+ ParentRoot libcommon.Hash `json:"parent_root"`
+ Root libcommon.Hash `json:"state_root"`
+ BodyRoot libcommon.Hash `json:"body_root"`
}
func (b *BeaconBlockHeader) Copy() *BeaconBlockHeader {
@@ -49,8 +49,8 @@ func (*BeaconBlockHeader) Static() bool {
* SignedBeaconBlockHeader is a beacon block header + validator signature.
*/
type SignedBeaconBlockHeader struct {
- Header *BeaconBlockHeader
- Signature [96]byte
+ Header *BeaconBlockHeader `json:"message"`
+ Signature libcommon.Bytes96 `json:"signature"`
}
func (b *SignedBeaconBlockHeader) Static() bool {
diff --git a/cl/cltypes/beacon_kzgcommitment.go b/cl/cltypes/beacon_kzgcommitment.go
index 7b42d0b3b17..f6a5d5f38f1 100644
--- a/cl/cltypes/beacon_kzgcommitment.go
+++ b/cl/cltypes/beacon_kzgcommitment.go
@@ -1,7 +1,10 @@
package cltypes
import (
+ "encoding/json"
+
gokzg4844 "github.com/crate-crypto/go-kzg-4844"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
)
@@ -16,6 +19,14 @@ const BYTES_PER_BLOB = uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)
type KZGCommitment gokzg4844.KZGCommitment
+func (b KZGCommitment) MarshalJSON() ([]byte, error) {
+ return json.Marshal(libcommon.Bytes48(b))
+}
+
+func (b *KZGCommitment) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, (*libcommon.Bytes48)(b))
+}
+
func (b *KZGCommitment) Copy() *KZGCommitment {
copy := *b
return ©
diff --git a/cl/cltypes/bls_to_execution_change.go b/cl/cltypes/bls_to_execution_change.go
index 7ecb2ce4230..676154f15fc 100644
--- a/cl/cltypes/bls_to_execution_change.go
+++ b/cl/cltypes/bls_to_execution_change.go
@@ -11,9 +11,9 @@ import (
// Change to EL engine
type BLSToExecutionChange struct {
- ValidatorIndex uint64
- From [48]byte
- To libcommon.Address
+ ValidatorIndex uint64 `json:"validator_index"`
+ From libcommon.Bytes48 `json:"from"`
+ To libcommon.Address `json:"to"`
}
func (b *BLSToExecutionChange) EncodeSSZ(buf []byte) ([]byte, error) {
@@ -43,8 +43,8 @@ func (*BLSToExecutionChange) Static() bool {
}
type SignedBLSToExecutionChange struct {
- Message *BLSToExecutionChange
- Signature [96]byte
+ Message *BLSToExecutionChange `json:"message"`
+ Signature libcommon.Bytes96 `json:"signature"`
}
func (s *SignedBLSToExecutionChange) EncodeSSZ(buf []byte) ([]byte, error) {
diff --git a/cl/cltypes/bls_to_execution_test.go b/cl/cltypes/bls_to_execution_test.go
index c5065e85e8f..2aca0a33956 100644
--- a/cl/cltypes/bls_to_execution_test.go
+++ b/cl/cltypes/bls_to_execution_test.go
@@ -1,11 +1,11 @@
package cltypes_test
import (
+ "github.com/ledgerwatch/erigon-lib/common"
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
diff --git a/cl/cltypes/clone.go b/cl/cltypes/clone.go
index 9fb73cc4b95..4b6da3b71b9 100644
--- a/cl/cltypes/clone.go
+++ b/cl/cltypes/clone.go
@@ -4,20 +4,20 @@ import (
"github.com/ledgerwatch/erigon-lib/types/clonable"
)
-func (*SignedBeaconBlock) Clone() clonable.Clonable {
- return &SignedBeaconBlock{}
+func (s *SignedBeaconBlock) Clone() clonable.Clonable {
+ return NewSignedBeaconBlock(s.Block.Body.beaconCfg)
}
func (*IndexedAttestation) Clone() clonable.Clonable {
return &IndexedAttestation{}
}
-func (*BeaconBody) Clone() clonable.Clonable {
- return &BeaconBody{}
+func (b *BeaconBody) Clone() clonable.Clonable {
+ return NewBeaconBody(b.beaconCfg)
}
-func (*Eth1Block) Clone() clonable.Clonable {
- return &Eth1Block{}
+func (e *Eth1Block) Clone() clonable.Clonable {
+ return NewEth1Block(e.version, e.beaconCfg)
}
func (*Eth1Data) Clone() clonable.Clonable {
@@ -72,8 +72,8 @@ func (*Deposit) Clone() clonable.Clonable {
return &Deposit{}
}
-func (*BeaconBlock) Clone() clonable.Clonable {
- return &BeaconBlock{}
+func (b *BeaconBlock) Clone() clonable.Clonable {
+ return NewBeaconBlock(b.Body.beaconCfg)
}
func (*AggregateAndProof) Clone() clonable.Clonable {
@@ -103,3 +103,7 @@ func (*KZGCommitment) Clone() clonable.Clonable {
func (*Eth1Header) Clone() clonable.Clonable {
return &Eth1Header{}
}
+
+func (*Withdrawal) Clone() clonable.Clonable {
+ return &Withdrawal{}
+}
diff --git a/cl/cltypes/eth1_block.go b/cl/cltypes/eth1_block.go
index 253ec50949a..3d92938f7ee 100644
--- a/cl/cltypes/eth1_block.go
+++ b/cl/cltypes/eth1_block.go
@@ -16,35 +16,36 @@ import (
// ETH1Block represents a block structure CL-side.
type Eth1Block struct {
- ParentHash libcommon.Hash
- FeeRecipient libcommon.Address
- StateRoot libcommon.Hash
- ReceiptsRoot libcommon.Hash
- LogsBloom types.Bloom
- PrevRandao libcommon.Hash
- BlockNumber uint64
- GasLimit uint64
- GasUsed uint64
- Time uint64
- Extra *solid.ExtraData
- BaseFeePerGas [32]byte
+ ParentHash libcommon.Hash `json:"parent_hash"`
+ FeeRecipient libcommon.Address `json:"fee_recipient"`
+ StateRoot libcommon.Hash `json:"state_root"`
+ ReceiptsRoot libcommon.Hash `json:"receipts_root"`
+ LogsBloom types.Bloom `json:"logs_bloom"`
+ PrevRandao libcommon.Hash `json:"prev_randao"`
+ BlockNumber uint64 `json:"block_number"`
+ GasLimit uint64 `json:"gas_limit"`
+ GasUsed uint64 `json:"gas_used"`
+ Time uint64 `json:"timestamp"`
+ Extra *solid.ExtraData `json:"extra_data"`
+ BaseFeePerGas libcommon.Hash `json:"base_fee_per_gas"`
// Extra fields
- BlockHash libcommon.Hash
- Transactions *solid.TransactionsSSZ
- Withdrawals *solid.ListSSZ[*types.Withdrawal]
- BlobGasUsed uint64
- ExcessBlobGas uint64
+ BlockHash libcommon.Hash `json:"block_hash"`
+ Transactions *solid.TransactionsSSZ `json:"transactions"`
+ Withdrawals *solid.ListSSZ[*Withdrawal] `json:"withdrawals,omitempty"`
+ BlobGasUsed uint64 `json:"blob_gas_used,omitempty"`
+ ExcessBlobGas uint64 `json:"excess_blob_gas,omitempty"`
// internals
- version clparams.StateVersion
+ version clparams.StateVersion
+ beaconCfg *clparams.BeaconChainConfig
}
// NewEth1Block creates a new Eth1Block.
-func NewEth1Block(version clparams.StateVersion) *Eth1Block {
- return &Eth1Block{version: version}
+func NewEth1Block(version clparams.StateVersion, beaconCfg *clparams.BeaconChainConfig) *Eth1Block {
+ return &Eth1Block{version: version, beaconCfg: beaconCfg}
}
// NewEth1BlockFromHeaderAndBody with given header/body.
-func NewEth1BlockFromHeaderAndBody(header *types.Header, body *types.RawBody) *Eth1Block {
+func NewEth1BlockFromHeaderAndBody(header *types.Header, body *types.RawBody, beaconCfg *clparams.BeaconChainConfig) *Eth1Block {
baseFeeBytes := header.BaseFee.Bytes()
for i, j := 0, len(baseFeeBytes)-1; i < j; i, j = i+1, j-1 {
baseFeeBytes[i], baseFeeBytes[j] = baseFeeBytes[j], baseFeeBytes[i]
@@ -69,7 +70,8 @@ func NewEth1BlockFromHeaderAndBody(header *types.Header, body *types.RawBody) *E
BaseFeePerGas: baseFee32,
BlockHash: header.Hash(),
Transactions: solid.NewTransactionsSSZFromTransactions(body.Transactions),
- Withdrawals: solid.NewStaticListSSZFromList(body.Withdrawals, 16, 44),
+ Withdrawals: solid.NewStaticListSSZFromList(convertExecutionWithdrawalsToConsensusWithdrawals(body.Withdrawals), int(beaconCfg.MaxWithdrawalsPerPayload), 44),
+ beaconCfg: beaconCfg,
}
if header.BlobGasUsed != nil && header.ExcessBlobGas != nil {
@@ -92,8 +94,11 @@ func (*Eth1Block) Static() bool {
func (b *Eth1Block) PayloadHeader() (*Eth1Header, error) {
var err error
var transactionsRoot, withdrawalsRoot libcommon.Hash
- if transactionsRoot, err = b.Transactions.HashSSZ(); err != nil {
- return nil, err
+ // Corner case: before TTD this is 0, since all fields are 0, a 0 hash check will suffice.
+ if b.BlockHash != (libcommon.Hash{}) {
+ if transactionsRoot, err = b.Transactions.HashSSZ(); err != nil {
+ return nil, err
+ }
}
if b.version >= clparams.CapellaVersion {
withdrawalsRoot, err = b.Withdrawals.HashSSZ()
@@ -143,7 +148,7 @@ func (b *Eth1Block) EncodingSizeSSZ() (size int) {
if b.version >= clparams.CapellaVersion {
if b.Withdrawals == nil {
- b.Withdrawals = solid.NewStaticListSSZ[*types.Withdrawal](16, 44)
+ b.Withdrawals = solid.NewStaticListSSZ[*Withdrawal](int(b.beaconCfg.MaxWithdrawalsPerPayload), 44)
}
size += b.Withdrawals.EncodingSizeSSZ() + 4
}
@@ -159,7 +164,7 @@ func (b *Eth1Block) EncodingSizeSSZ() (size int) {
func (b *Eth1Block) DecodeSSZ(buf []byte, version int) error {
b.Extra = solid.NewExtraData()
b.Transactions = &solid.TransactionsSSZ{}
- b.Withdrawals = solid.NewStaticListSSZ[*types.Withdrawal](16, 44)
+ b.Withdrawals = solid.NewStaticListSSZ[*Withdrawal](int(b.beaconCfg.MaxWithdrawalsPerPayload), 44)
b.version = clparams.StateVersion(version)
return ssz2.UnmarshalSSZ(buf, version, b.getSchema()...)
}
@@ -200,8 +205,8 @@ func (b *Eth1Block) RlpHeader() (*types.Header, error) {
withdrawalsHash = new(libcommon.Hash)
// extract all withdrawals from itearable list
withdrawals := make([]*types.Withdrawal, b.Withdrawals.Len())
- b.Withdrawals.Range(func(idx int, w *types.Withdrawal, _ int) bool {
- withdrawals[idx] = w
+ b.Withdrawals.Range(func(idx int, w *Withdrawal, _ int) bool {
+ withdrawals[idx] = convertConsensusWithdrawalToExecutionWithdrawal(w)
return true
})
*withdrawalsHash = types.DeriveSha(types.Withdrawals(withdrawals))
@@ -236,7 +241,7 @@ func (b *Eth1Block) RlpHeader() (*types.Header, error) {
// If the header hash does not match the block hash, return an error.
if header.Hash() != b.BlockHash {
- return nil, fmt.Errorf("cannot derive rlp header: mismatching hash")
+ return nil, fmt.Errorf("cannot derive rlp header: mismatching hash: %s != %s", header.Hash(), b.BlockHash)
}
return header, nil
@@ -249,8 +254,8 @@ func (b *Eth1Block) Version() clparams.StateVersion {
// Body returns the equivalent raw body (only eth1 body section).
func (b *Eth1Block) Body() *types.RawBody {
withdrawals := make([]*types.Withdrawal, b.Withdrawals.Len())
- b.Withdrawals.Range(func(idx int, w *types.Withdrawal, _ int) bool {
- withdrawals[idx] = w
+ b.Withdrawals.Range(func(idx int, w *Withdrawal, _ int) bool {
+ withdrawals[idx] = convertConsensusWithdrawalToExecutionWithdrawal(w)
return true
})
return &types.RawBody{
diff --git a/cl/cltypes/eth1_data.go b/cl/cltypes/eth1_data.go
index 165560f8bcd..716105dee89 100644
--- a/cl/cltypes/eth1_data.go
+++ b/cl/cltypes/eth1_data.go
@@ -6,13 +6,12 @@ import (
"github.com/ledgerwatch/erigon/cl/merkle_tree"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
- "github.com/ledgerwatch/erigon/common"
)
type Eth1Data struct {
- Root libcommon.Hash
- DepositCount uint64
- BlockHash libcommon.Hash
+ Root libcommon.Hash `json:"deposit_root"`
+ DepositCount uint64 `json:"deposit_count"`
+ BlockHash libcommon.Hash `json:"block_hash"`
}
func (e *Eth1Data) Copy() *Eth1Data {
@@ -36,7 +35,7 @@ func (e *Eth1Data) DecodeSSZ(buf []byte, _ int) error {
// EncodingSizeSSZ returns the ssz encoded size in bytes for the Eth1Data object
func (e *Eth1Data) EncodingSizeSSZ() int {
- return common.BlockNumberLength + length.Hash*2
+ return 8 + length.Hash*2
}
// HashSSZ ssz hashes the Eth1Data object
diff --git a/cl/cltypes/eth1_data_test.go b/cl/cltypes/eth1_data_test.go
index 9d363e9fb0e..9a15b92f3e6 100644
--- a/cl/cltypes/eth1_data_test.go
+++ b/cl/cltypes/eth1_data_test.go
@@ -8,7 +8,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/common"
)
var testEth1Data = &cltypes.Eth1Data{
@@ -17,8 +16,8 @@ var testEth1Data = &cltypes.Eth1Data{
DepositCount: 69,
}
-var expectedTestEth1DataMarshalled = common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000245000000000000000000000000000000000000000000000000000000000000000000000000000003")
-var expectedTestEth1DataRoot = common.Hex2Bytes("adbafa10f1d6046b59cb720371c5e70ce2c6c3067b0e87985f5cd0899a515886")
+var expectedTestEth1DataMarshalled = libcommon.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000245000000000000000000000000000000000000000000000000000000000000000000000000000003")
+var expectedTestEth1DataRoot = libcommon.Hex2Bytes("adbafa10f1d6046b59cb720371c5e70ce2c6c3067b0e87985f5cd0899a515886")
func TestEth1DataMarshalUnmarmashal(t *testing.T) {
marshalled, _ := testEth1Data.EncodeSSZ(nil)
diff --git a/cl/cltypes/eth1_header.go b/cl/cltypes/eth1_header.go
index 574cb95d019..32794c2ebdd 100644
--- a/cl/cltypes/eth1_header.go
+++ b/cl/cltypes/eth1_header.go
@@ -25,7 +25,7 @@ type Eth1Header struct {
GasUsed uint64
Time uint64
Extra *solid.ExtraData
- BaseFeePerGas [32]byte
+ BaseFeePerGas libcommon.Hash
// Extra fields
BlockHash libcommon.Hash
TransactionsRoot libcommon.Hash
diff --git a/cl/cltypes/fork.go b/cl/cltypes/fork.go
index 2699baefa36..9059a927112 100644
--- a/cl/cltypes/fork.go
+++ b/cl/cltypes/fork.go
@@ -1,15 +1,16 @@
package cltypes
import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
)
// Fork data, contains if we were on bellatrix/alteir/phase0 and transition epoch.
type Fork struct {
- PreviousVersion [4]byte
- CurrentVersion [4]byte
- Epoch uint64
+ PreviousVersion libcommon.Bytes4 `json:"previous_version"`
+ CurrentVersion libcommon.Bytes4 `json:"current_version"`
+ Epoch uint64 `json:"epoch"`
}
func (*Fork) Static() bool {
diff --git a/cl/cltypes/historical_summary.go b/cl/cltypes/historical_summary.go
index d67e90a98b3..f4abf293f8e 100644
--- a/cl/cltypes/historical_summary.go
+++ b/cl/cltypes/historical_summary.go
@@ -9,8 +9,8 @@ import (
)
type HistoricalSummary struct {
- BlockSummaryRoot libcommon.Hash
- StateSummaryRoot libcommon.Hash
+ BlockSummaryRoot libcommon.Hash `json:"block_summary_root"`
+ StateSummaryRoot libcommon.Hash `json:"state_summary_root"`
}
func (h *HistoricalSummary) EncodeSSZ(buf []byte) ([]byte, error) {
diff --git a/cl/cltypes/historical_summary_test.go b/cl/cltypes/historical_summary_test.go
index f99b5bd9132..89a716c11a2 100644
--- a/cl/cltypes/historical_summary_test.go
+++ b/cl/cltypes/historical_summary_test.go
@@ -1,11 +1,11 @@
package cltypes_test
import (
+ "github.com/ledgerwatch/erigon-lib/common"
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
diff --git a/cl/cltypes/indexed_attestation.go b/cl/cltypes/indexed_attestation.go
index e0905168577..4173429d8a0 100644
--- a/cl/cltypes/indexed_attestation.go
+++ b/cl/cltypes/indexed_attestation.go
@@ -1,6 +1,7 @@
package cltypes
import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
@@ -10,9 +11,9 @@ import (
* IndexedAttestation are attestantions sets to prove that someone misbehaved.
*/
type IndexedAttestation struct {
- AttestingIndices solid.Uint64ListSSZ
- Data solid.AttestationData
- Signature [96]byte
+ AttestingIndices *solid.RawUint64List `json:"attesting_indicies"`
+ Data solid.AttestationData `json:"data"`
+ Signature libcommon.Bytes96 `json:"signature"`
}
func (i *IndexedAttestation) Static() bool {
@@ -26,7 +27,7 @@ func (i *IndexedAttestation) EncodeSSZ(buf []byte) (dst []byte, err error) {
// DecodeSSZ ssz unmarshals the IndexedAttestation object
func (i *IndexedAttestation) DecodeSSZ(buf []byte, version int) error {
i.Data = solid.NewAttestationData()
- i.AttestingIndices = solid.NewUint64ListSSZ(2048)
+ i.AttestingIndices = solid.NewRawUint64List(2048, nil)
return ssz2.UnmarshalSSZ(buf, version, i.AttestingIndices, i.Data, i.Signature[:])
}
diff --git a/cl/cltypes/justification_bits.go b/cl/cltypes/justification_bits.go
index 060844a6693..61b337054ae 100644
--- a/cl/cltypes/justification_bits.go
+++ b/cl/cltypes/justification_bits.go
@@ -1,6 +1,9 @@
package cltypes
import (
+ "encoding/json"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon/cl/utils"
)
@@ -9,6 +12,9 @@ const JustificationBitsLength = 4
type JustificationBits [JustificationBitsLength]bool // Bit vector of size 4
+func (j JustificationBits) Clone() clonable.Clonable {
+ return JustificationBits{}
+}
func (j JustificationBits) Byte() (out byte) {
for i, bit := range j {
if !bit {
@@ -27,19 +33,15 @@ func (j *JustificationBits) DecodeSSZ(b []byte, _ int) error {
return nil
}
-func (j *JustificationBits) EncodeSSZ(buf []byte) ([]byte, error) {
+func (j JustificationBits) EncodeSSZ(buf []byte) ([]byte, error) {
return append(buf, j.Byte()), nil
}
-func (j *JustificationBits) Clone() clonable.Clonable {
- return &JustificationBits{}
-}
-
-func (*JustificationBits) EncodingSizeSSZ() int {
+func (JustificationBits) EncodingSizeSSZ() int {
return 1
}
-func (*JustificationBits) Static() bool {
+func (JustificationBits) Static() bool {
return true
}
@@ -62,3 +64,19 @@ func (j JustificationBits) CheckRange(start int, end int) bool {
func (j JustificationBits) Copy() JustificationBits {
return JustificationBits{j[0], j[1], j[2], j[3]}
}
+
+func (j JustificationBits) MarshalJSON() ([]byte, error) {
+ enc, err := j.EncodeSSZ(nil)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(hexutility.Bytes(enc))
+}
+
+func (j *JustificationBits) UnmarshalJSON(input []byte) error {
+ var hex hexutility.Bytes
+ if err := json.Unmarshal(input, &hex); err != nil {
+ return err
+ }
+ return j.DecodeSSZ(hex, 0)
+}
diff --git a/cl/cltypes/network.go b/cl/cltypes/network.go
index 45f51acfaee..af8d1a3ee26 100644
--- a/cl/cltypes/network.go
+++ b/cl/cltypes/network.go
@@ -5,7 +5,6 @@ import (
"github.com/ledgerwatch/erigon-lib/types/ssz"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
- "github.com/ledgerwatch/erigon/common"
)
type Metadata struct {
@@ -22,7 +21,7 @@ func (m *Metadata) EncodeSSZ(buf []byte) ([]byte, error) {
}
func (m *Metadata) EncodingSizeSSZ() (ret int) {
- ret = common.BlockNumberLength * 2
+ ret = 8 * 2
if m.Syncnets != nil {
ret += 8
}
@@ -50,7 +49,7 @@ func (p *Ping) EncodeSSZ(buf []byte) ([]byte, error) {
}
func (p *Ping) EncodingSizeSSZ() int {
- return common.BlockNumberLength
+ return 8
}
func (p *Ping) DecodeSSZ(buf []byte, _ int) error {
@@ -76,7 +75,7 @@ func (b *BeaconBlocksByRangeRequest) DecodeSSZ(buf []byte, v int) error {
}
func (b *BeaconBlocksByRangeRequest) EncodingSizeSSZ() int {
- return 3 * common.BlockNumberLength
+ return 3 * 8
}
func (*BeaconBlocksByRangeRequest) Clone() clonable.Clonable {
diff --git a/cl/cltypes/slashings.go b/cl/cltypes/slashings.go
index 8a74b071a09..f849b7b50ee 100644
--- a/cl/cltypes/slashings.go
+++ b/cl/cltypes/slashings.go
@@ -6,8 +6,8 @@ import (
)
type ProposerSlashing struct {
- Header1 *SignedBeaconBlockHeader
- Header2 *SignedBeaconBlockHeader
+ Header1 *SignedBeaconBlockHeader `json:"signed_header_1"`
+ Header2 *SignedBeaconBlockHeader `json:"signed_header_2"`
}
func (p *ProposerSlashing) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -29,8 +29,8 @@ func (p *ProposerSlashing) HashSSZ() ([32]byte, error) {
}
type AttesterSlashing struct {
- Attestation_1 *IndexedAttestation
- Attestation_2 *IndexedAttestation
+ Attestation_1 *IndexedAttestation `json:"attestation_1"`
+ Attestation_2 *IndexedAttestation `json:"attestation_2"`
}
func (a *AttesterSlashing) EncodeSSZ(dst []byte) ([]byte, error) {
diff --git a/cl/cltypes/slashings_test.go b/cl/cltypes/slashings_test.go
index 51e355844b8..3f13f34b8a2 100644
--- a/cl/cltypes/slashings_test.go
+++ b/cl/cltypes/slashings_test.go
@@ -48,12 +48,12 @@ func TestProposerSlashing(t *testing.T) {
func TestAttesterSlashing(t *testing.T) {
// Create sample data
attestation1 := &IndexedAttestation{
- AttestingIndices: solid.NewUint64ListSSZ(9192),
+ AttestingIndices: solid.NewRawUint64List(9192, nil),
Data: solid.NewAttestationData(),
}
// Create an IndexedAttestation object
attestation2 := &IndexedAttestation{
- AttestingIndices: solid.NewUint64ListSSZ(9192),
+ AttestingIndices: solid.NewRawUint64List(9192, nil),
Data: solid.NewAttestationData(),
}
// Create AttesterSlashing
diff --git a/cl/cltypes/solid/attestation.go b/cl/cltypes/solid/attestation.go
index baf2354b3bd..68e9e131e39 100644
--- a/cl/cltypes/solid/attestation.go
+++ b/cl/cltypes/solid/attestation.go
@@ -1,18 +1,21 @@
package solid
import (
+ "encoding/json"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
- "github.com/ledgerwatch/erigon/common"
)
const (
// agg bits offset: 4 bytes
// attestationData: 128
// Signature: 96 bytes
- attestationStaticBufferSize = 4 + attestationDataBufferSize + 96
+ attestationStaticBufferSize = 4 + AttestationDataBufferSize + 96
// offset is usually always the same
aggregationBitsOffset = 228
@@ -44,6 +47,33 @@ func NewAttestionFromParameters(
return a
}
+func (a Attestation) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ AggregationBits hexutility.Bytes `json:"aggregation_bits"`
+ Signature libcommon.Bytes96 `json:"signature"`
+ Data AttestationData `json:"data"`
+ }{
+ AggregationBits: a.aggregationBitsBuffer,
+ Signature: a.Signature(),
+ Data: a.AttestantionData(),
+ })
+}
+
+func (a *Attestation) UnmarshalJSON(buf []byte) error {
+ var tmp struct {
+ AggregationBits hexutility.Bytes `json:"aggregation_bits"`
+ Signature libcommon.Bytes96 `json:"signature"`
+ Data AttestationData `json:"data"`
+ }
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ a.SetAggregationBits(tmp.AggregationBits)
+ a.SetSignature(tmp.Signature)
+ a.SetAttestationData(tmp.Data)
+ return nil
+}
+
// AggregationBits returns the aggregation bits buffer of the Attestation instance.
func (a *Attestation) AggregationBits() []byte {
return a.aggregationBitsBuffer
@@ -90,7 +120,7 @@ func (a *Attestation) DecodeSSZ(buf []byte, _ int) error {
return ssz.ErrLowBufferSize
}
copy(a.staticBuffer[:], buf)
- a.aggregationBitsBuffer = common.CopyBytes(buf[aggregationBitsOffset:])
+ a.aggregationBitsBuffer = libcommon.CopyBytes(buf[aggregationBitsOffset:])
return nil
}
diff --git a/cl/cltypes/solid/attestation_data.go b/cl/cltypes/solid/attestation_data.go
index 65cad27b79e..6c505f58105 100644
--- a/cl/cltypes/solid/attestation_data.go
+++ b/cl/cltypes/solid/attestation_data.go
@@ -3,6 +3,7 @@ package solid
import (
"bytes"
"encoding/binary"
+ "encoding/json"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -16,7 +17,7 @@ import (
// beaconBlockHash: 32 bytes
// source: 40 bytes
// target: 40 bytes
-const attestationDataBufferSize = 8 + 8 + 32 + 40*2
+const AttestationDataBufferSize = 8 + 8 + 32 + 40*2
// AttestantionData contains information about attestantion, including finalized/attested checkpoints.
type AttestationData []byte
@@ -37,8 +38,43 @@ func NewAttestionDataFromParameters(
return a
}
+func (a AttestationData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Slot uint64 `json:"slot"`
+ Index uint64 `json:"index"`
+ BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"`
+ Source Checkpoint `json:"source"`
+ Target Checkpoint `json:"target"`
+ }{
+ Slot: a.Slot(),
+ BeaconBlockRoot: a.BeaconBlockRoot(),
+ Index: a.ValidatorIndex(),
+ Source: a.Source(),
+ Target: a.Target(),
+ })
+}
+
+func (a AttestationData) UnmarshalJSON(buf []byte) error {
+ var tmp struct {
+ Slot uint64 `json:"slot"`
+ Index uint64 `json:"index"`
+ BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"`
+ Source Checkpoint `json:"source"`
+ Target Checkpoint `json:"target"`
+ }
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ a.SetSlot(tmp.Slot)
+ a.SetValidatorIndex(tmp.Index)
+ a.SetBeaconBlockRoot(tmp.BeaconBlockRoot)
+ a.SetSource(tmp.Source)
+ a.SetTarget(tmp.Target)
+ return nil
+}
+
func NewAttestationData() AttestationData {
- return make([]byte, attestationDataBufferSize)
+ return make([]byte, AttestationDataBufferSize)
}
func (a AttestationData) Static() bool {
@@ -78,6 +114,31 @@ func (a AttestationData) SetBeaconBlockRoot(beaconBlockRoot libcommon.Hash) {
copy(a[16:], beaconBlockRoot[:])
}
+func (a AttestationData) SetSlotWithRawBytes(b []byte) {
+ copy(a[:8], b)
+}
+
+func (a AttestationData) SetValidatorIndexWithRawBytes(b []byte) {
+ copy(a[8:16], b)
+
+}
+
+func (a AttestationData) RawSlot() []byte {
+ return a[:8]
+}
+
+func (a AttestationData) RawValidatorIndex() []byte {
+ return a[8:16]
+}
+
+func (a AttestationData) RawBeaconBlockRoot() []byte {
+ return a[16:48]
+}
+
+func (a AttestationData) SetBeaconBlockRootWithRawBytes(b []byte) {
+ copy(a[16:48], b)
+}
+
func (a AttestationData) SetSource(c Checkpoint) {
copy(a[48:88], c)
}
diff --git a/cl/cltypes/solid/bitlist.go b/cl/cltypes/solid/bitlist.go
index f93a47ef786..cf14cf0644c 100644
--- a/cl/cltypes/solid/bitlist.go
+++ b/cl/cltypes/solid/bitlist.go
@@ -1,12 +1,13 @@
package solid
import (
+ "encoding/json"
"math/bits"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common"
)
// BitList is like a dynamic binary string. It's like a flipbook of 1s and 0s!
@@ -41,6 +42,10 @@ func BitlistFromBytes(xs []byte, c int) *BitList {
}
}
+func (u *BitList) Bytes() []byte {
+ return u.u[:u.l]
+}
+
// Clear wipes the BitList clean, just like the memory wipe spell from a particularly forgetful wizard.
func (u *BitList) Clear() {
u.u = u.u[:0]
@@ -115,7 +120,7 @@ func (u *BitList) HashSSZ() ([32]byte, error) {
}
}
lengthRoot := merkle_tree.Uint64Root(uint64(u.l))
- return utils.Keccak256(baseRoot[:], lengthRoot[:]), nil
+ return utils.Sha256(baseRoot[:], lengthRoot[:]), nil
}
func (arr *BitList) getBaseHash(xs []byte, depth uint8) error {
@@ -151,7 +156,8 @@ func (u *BitList) EncodeSSZ(dst []byte) ([]byte, error) {
// DecodeSSZ replaces the underlying byte slice of the BitList with a copy of the input byte slice.
// It then updates the length of the BitList to match the length of the new byte slice.
func (u *BitList) DecodeSSZ(dst []byte, _ int) error {
- u.u = common.CopyBytes(dst)
+ u.u = make([]byte, len(dst))
+ copy(u.u, dst)
u.l = len(dst)
return nil
}
@@ -187,3 +193,19 @@ func (u *BitList) Bits() int {
// bit. Subtract this value by 1 to determine the length of the bitlist.
return 8*(u.l-1) + msb - 1
}
+
+func (u *BitList) MarshalJSON() ([]byte, error) {
+ enc, err := u.EncodeSSZ(nil)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(hexutility.Bytes(enc))
+}
+
+func (u *BitList) UnmarshalJSON(input []byte) error {
+ var hex hexutility.Bytes
+ if err := json.Unmarshal(input, &hex); err != nil {
+ return err
+ }
+ return u.DecodeSSZ(hex, 0)
+}
diff --git a/cl/cltypes/solid/checkpoint.go b/cl/cltypes/solid/checkpoint.go
index aa17c60197a..87ce50436aa 100644
--- a/cl/cltypes/solid/checkpoint.go
+++ b/cl/cltypes/solid/checkpoint.go
@@ -3,6 +3,7 @@ package solid
import (
"bytes"
"encoding/binary"
+ "encoding/json"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -12,7 +13,7 @@ import (
)
// Constants to represent the size and layout of a Checkpoint
-const checkpointSize = 32 + 8 // BlockRoot(32 bytes) + Epoch(8 bytes)
+const CheckpointSize = 32 + 8 // BlockRoot(32 bytes) + Epoch(8 bytes)
type Checkpoint []byte // Define Checkpoint as a byte slice
@@ -21,7 +22,7 @@ func NewCheckpointFromParameters(
blockRoot libcommon.Hash, // A hash representing the block root
epoch uint64, // An unsigned 64-bit integer representing the epoch
) Checkpoint {
- var c Checkpoint = make([]byte, checkpointSize)
+ var c Checkpoint = make([]byte, CheckpointSize)
c.SetBlockRoot(blockRoot)
c.SetEpoch(epoch)
return c
@@ -29,7 +30,43 @@ func NewCheckpointFromParameters(
// NewCheckpoint returns a new Checkpoint with the underlying byte slice initialized to zeros
func NewCheckpoint() Checkpoint {
- return make([]byte, checkpointSize)
+ return make([]byte, CheckpointSize)
+}
+
+func (c Checkpoint) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Epoch uint64 `json:"epoch"`
+ Root libcommon.Hash `json:"root"`
+ }{Epoch: c.Epoch(), Root: c.BlockRoot()})
+}
+
+func (c Checkpoint) UnmarshalJSON(buf []byte) error {
+ var tmp struct {
+ Epoch uint64 `json:"epoch"`
+ Root libcommon.Hash `json:"root"`
+ }
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ c.SetEpoch(tmp.Epoch)
+ c.SetBlockRoot(tmp.Root)
+ return nil
+}
+
+func (c Checkpoint) SetRawEpoch(b []byte) {
+ copy(c[:8], b[:8])
+}
+
+func (c Checkpoint) SetRawBlockRoot(b []byte) {
+ copy(c[8:40], b[:32])
+}
+
+func (c Checkpoint) RawEpoch() []byte {
+ return c[:8]
+}
+
+func (c Checkpoint) RawBlockRoot() []byte {
+ return c[8:40]
}
// SetBlockRoot copies the given blockRoot into the correct location within the Checkpoint
@@ -55,7 +92,7 @@ func (c Checkpoint) BlockRoot() (o libcommon.Hash) {
// EncodingSizeSSZ returns the size of the Checkpoint object when encoded as SSZ.
func (Checkpoint) EncodingSizeSSZ() int {
- return checkpointSize
+ return CheckpointSize
}
// DecodeSSZ decodes the Checkpoint object from SSZ-encoded data.
diff --git a/cl/cltypes/solid/checkpoint_test.go b/cl/cltypes/solid/checkpoint_test.go
index de41bd3e9f9..e6598028e4a 100644
--- a/cl/cltypes/solid/checkpoint_test.go
+++ b/cl/cltypes/solid/checkpoint_test.go
@@ -8,13 +8,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/common"
)
var testCheckpoint = solid.NewCheckpointFromParameters(libcommon.HexToHash("0x3"), 69)
-var expectedTestCheckpointMarshalled = common.Hex2Bytes("45000000000000000000000000000000000000000000000000000000000000000000000000000003")
-var expectedTestCheckpointRoot = common.Hex2Bytes("be8567f9fdae831b10720823dbcf0e3680e61d6a2a27d85ca00f6c15a7bbb1ea")
+var expectedTestCheckpointMarshalled = libcommon.Hex2Bytes("45000000000000000000000000000000000000000000000000000000000000000000000000000003")
+var expectedTestCheckpointRoot = libcommon.Hex2Bytes("be8567f9fdae831b10720823dbcf0e3680e61d6a2a27d85ca00f6c15a7bbb1ea")
func TestCheckpointMarshalUnmarmashal(t *testing.T) {
marshalled, err := testCheckpoint.EncodeSSZ(nil)
diff --git a/cl/cltypes/solid/extra_data.go b/cl/cltypes/solid/extra_data.go
index abaa040753c..4ef2c78ec31 100644
--- a/cl/cltypes/solid/extra_data.go
+++ b/cl/cltypes/solid/extra_data.go
@@ -2,8 +2,10 @@ package solid
import (
"encoding/binary"
+ "encoding/json"
"github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
@@ -22,6 +24,18 @@ func NewExtraData() *ExtraData {
}
}
+func (e *ExtraData) UnmarshalJSON(buf []byte) error {
+ if err := json.Unmarshal(buf, (*hexutility.Bytes)(&e.data)); err != nil {
+ return err
+ }
+ e.l = len(e.data)
+ return nil
+}
+
+func (e ExtraData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(hexutility.Bytes(e.data[:e.l]))
+}
+
// Clone creates a new instance of ExtraData.
func (*ExtraData) Clone() clonable.Clonable {
return NewExtraData()
diff --git a/cl/cltypes/solid/hash_list.go b/cl/cltypes/solid/hash_list.go
index bc67267847a..9c4ba66dd9c 100644
--- a/cl/cltypes/solid/hash_list.go
+++ b/cl/cltypes/solid/hash_list.go
@@ -1,6 +1,8 @@
package solid
import (
+ "encoding/json"
+
"github.com/ledgerwatch/erigon-lib/common"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -23,6 +25,32 @@ func NewHashList(c int) HashListSSZ {
}
}
+func (arr *hashList) Bytes() []byte {
+ return arr.u[:arr.l*length.Hash]
+}
+
+func (arr *hashList) MarshalJSON() ([]byte, error) {
+ list := make([]libcommon.Hash, arr.l)
+ for i := 0; i < arr.l; i++ {
+ list[0] = arr.Get(i)
+ }
+ return json.Marshal(list)
+}
+
+func (arr *hashList) UnmarshalJSON(buf []byte) error {
+ var list []libcommon.Hash
+
+ if err := json.Unmarshal(buf, &list); err != nil {
+ return err
+ }
+ arr.Clear()
+ arr.l = len(list)
+ for _, elem := range list {
+ arr.Append(elem)
+ }
+ return nil
+}
+
func (h *hashList) Append(val libcommon.Hash) {
offset := h.l * length.Hash
if offset == len(h.u) {
@@ -129,7 +157,7 @@ func (h *hashList) HashSSZ() ([32]byte, error) {
}
}
lengthRoot := merkle_tree.Uint64Root(uint64(h.l))
- return utils.Keccak256(baseRoot[:], lengthRoot[:]), nil
+ return utils.Sha256(baseRoot[:], lengthRoot[:]), nil
}
func (h *hashList) Range(fn func(int, libcommon.Hash, int) bool) {
diff --git a/cl/cltypes/solid/hash_vector.go b/cl/cltypes/solid/hash_vector.go
index 95f5fc3eb06..01a971cda9f 100644
--- a/cl/cltypes/solid/hash_vector.go
+++ b/cl/cltypes/solid/hash_vector.go
@@ -1,6 +1,8 @@
package solid
import (
+ "encoding/json"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
@@ -22,10 +24,22 @@ func NewHashVector(s int) HashVectorSSZ {
}
}
+func (arr *hashVector) Bytes() []byte {
+ return arr.u.u[:arr.u.l*length.Hash]
+}
+
func (h *hashVector) Append(val libcommon.Hash) {
panic("not implmented")
}
+func (h hashVector) MarshalJSON() ([]byte, error) {
+ return json.Marshal(h.u)
+}
+
+func (h *hashVector) UnmarshalJSON(buf []byte) error {
+ return json.Unmarshal(buf, h.u)
+}
+
func (h *hashVector) Cap() int {
return h.u.l
}
diff --git a/cl/cltypes/solid/interfaces.go b/cl/cltypes/solid/interfaces.go
index b5373e20a2b..31e73723148 100644
--- a/cl/cltypes/solid/interfaces.go
+++ b/cl/cltypes/solid/interfaces.go
@@ -1,6 +1,8 @@
package solid
import (
+ "encoding/json"
+
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/types/ssz"
ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
@@ -14,7 +16,7 @@ type IterableSSZ[T any] interface {
Set(index int, v T)
Length() int
Cap() int
-
+ Bytes() []byte
Pop() T
Append(v T)
@@ -23,7 +25,26 @@ type IterableSSZ[T any] interface {
ssz.HashableSSZ
}
-type Uint64VectorSSZ IterableSSZ[uint64]
-type Uint64ListSSZ IterableSSZ[uint64]
-type HashListSSZ IterableSSZ[common.Hash]
-type HashVectorSSZ IterableSSZ[common.Hash]
+type Uint64ListSSZ interface {
+ IterableSSZ[uint64]
+ json.Marshaler
+ json.Unmarshaler
+}
+
+type Uint64VectorSSZ interface {
+ IterableSSZ[uint64]
+ json.Marshaler
+ json.Unmarshaler
+}
+
+type HashListSSZ interface {
+ IterableSSZ[common.Hash]
+ json.Marshaler
+ json.Unmarshaler
+}
+
+type HashVectorSSZ interface {
+ IterableSSZ[common.Hash]
+ json.Marshaler
+ json.Unmarshaler
+}
diff --git a/cl/cltypes/solid/list_ssz.go b/cl/cltypes/solid/list_ssz.go
index 11450d8f997..cf2de25ff1b 100644
--- a/cl/cltypes/solid/list_ssz.go
+++ b/cl/cltypes/solid/list_ssz.go
@@ -1,6 +1,8 @@
package solid
import (
+ "encoding/json"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
@@ -43,6 +45,14 @@ func NewStaticListSSZ[T encodableHashableSSZ](limit int, bytesPerElement int) *L
}
}
+func (l ListSSZ[T]) MarshalJSON() ([]byte, error) {
+ return json.Marshal(l.list)
+}
+
+func (l *ListSSZ[T]) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, &l.list)
+}
+
func NewDynamicListSSZFromList[T encodableHashableSSZ](list []T, limit int) *ListSSZ[T] {
return &ListSSZ[T]{
list: list,
@@ -149,3 +159,8 @@ func (l *ListSSZ[T]) Clear() {
l.list = nil
l.root = libcommon.Hash{}
}
+
+func (l *ListSSZ[T]) Truncate(length int) {
+ l.list = l.list[:length]
+ l.root = libcommon.Hash{}
+}
diff --git a/cl/cltypes/solid/pending_attestation.go b/cl/cltypes/solid/pending_attestation.go
index 3ea621f6979..e17b48b07ad 100644
--- a/cl/cltypes/solid/pending_attestation.go
+++ b/cl/cltypes/solid/pending_attestation.go
@@ -2,11 +2,14 @@ package solid
import (
"encoding/binary"
+ "encoding/json"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
- "github.com/ledgerwatch/erigon/common"
)
const (
@@ -14,7 +17,7 @@ const (
// attestationData: 128
// InclusionDelay: 8 bytes
// ProposerIndex: 8 bytes
- pendingAttestationStaticBufferSize = 4 + attestationDataBufferSize + 8 + 8
+ pendingAttestationStaticBufferSize = 4 + AttestationDataBufferSize + 8 + 8
// offset is usually always the same
pendingAggregationBitsOffset = 148
@@ -107,3 +110,35 @@ func (a *PendingAttestation) HashSSZ() (o [32]byte, err error) {
func (*PendingAttestation) Clone() clonable.Clonable {
return &PendingAttestation{}
}
+
+func (a *PendingAttestation) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ AggregationBits hexutility.Bytes `json:"aggregation_bits"`
+ AttestationData AttestationData `json:"attestation_data"`
+ InclusionDelay uint64 `json:"inclusion_delay"`
+ ProposerIndex uint64 `json:"proposer_index"`
+ }{
+ AggregationBits: a.AggregationBits(),
+ AttestationData: a.AttestantionData(),
+ InclusionDelay: a.InclusionDelay(),
+ ProposerIndex: a.ProposerIndex(),
+ })
+}
+
+func (a *PendingAttestation) UnmarshalJSON(input []byte) error {
+ var err error
+ var tmp struct {
+ AggregationBits hexutility.Bytes `json:"aggregation_bits"`
+ AttestationData AttestationData `json:"attestation_data"`
+ InclusionDelay uint64 `json:"inclusion_delay"`
+ ProposerIndex uint64 `json:"proposer_index"`
+ }
+ if err = json.Unmarshal(input, &tmp); err != nil {
+ return err
+ }
+ a.SetAggregationBits(tmp.AggregationBits)
+ a.SetAttestationData(tmp.AttestationData)
+ a.SetInclusionDelay(tmp.InclusionDelay)
+ a.SetProposerIndex(tmp.ProposerIndex)
+ return nil
+}
diff --git a/cl/cltypes/solid/sync_committee.go b/cl/cltypes/solid/sync_committee.go
index 95a7b4a97ca..58d07e6f780 100644
--- a/cl/cltypes/solid/sync_committee.go
+++ b/cl/cltypes/solid/sync_committee.go
@@ -1,6 +1,9 @@
package solid
import (
+ "encoding/json"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
@@ -12,8 +15,8 @@ const syncCommitteeSize = 48 * 513
type SyncCommittee [syncCommitteeSize]byte
func NewSyncCommitteeFromParameters(
- committee [][48]byte,
- aggregatePublicKey [48]byte,
+ committee []libcommon.Bytes48,
+ aggregatePublicKey libcommon.Bytes48,
) *SyncCommittee {
s := &SyncCommittee{}
s.SetAggregatePublicKey(aggregatePublicKey)
@@ -21,26 +24,26 @@ func NewSyncCommitteeFromParameters(
return s
}
-func (s *SyncCommittee) GetCommittee() [][48]byte {
- committee := make([][48]byte, 512)
+func (s *SyncCommittee) GetCommittee() []libcommon.Bytes48 {
+ committee := make([]libcommon.Bytes48, 512)
for i := range committee {
copy(committee[i][:], s[i*48:])
}
return committee
}
-func (s *SyncCommittee) AggregatePublicKey() (out [48]byte) {
+func (s *SyncCommittee) AggregatePublicKey() (out libcommon.Bytes48) {
copy(out[:], s[syncCommitteeSize-48:])
return
}
-func (s *SyncCommittee) SetCommittee(committee [][48]byte) {
+func (s *SyncCommittee) SetCommittee(committee []libcommon.Bytes48) {
for i := range committee {
copy(s[i*48:], committee[i][:])
}
}
-func (s *SyncCommittee) SetAggregatePublicKey(k [48]byte) {
+func (s *SyncCommittee) SetAggregatePublicKey(k libcommon.Bytes48) {
copy(s[syncCommitteeSize-48:], k[:])
}
@@ -89,3 +92,27 @@ func (s *SyncCommittee) HashSSZ() ([32]byte, error) {
func (s *SyncCommittee) Static() bool {
return true
}
+
+func (s *SyncCommittee) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Committee []libcommon.Bytes48 `json:"committee"`
+ AggregatePublicKey libcommon.Bytes48 `json:"aggregate_public_key"`
+ }{
+ Committee: s.GetCommittee(),
+ AggregatePublicKey: s.AggregatePublicKey(),
+ })
+}
+
+func (s *SyncCommittee) UnmarshalJSON(input []byte) error {
+ var err error
+ var tmp struct {
+ Committee []libcommon.Bytes48 `json:"committee"`
+ AggregatePublicKey libcommon.Bytes48 `json:"aggregate_public_key"`
+ }
+ if err = json.Unmarshal(input, &tmp); err != nil {
+ return err
+ }
+ s.SetAggregatePublicKey(tmp.AggregatePublicKey)
+ s.SetCommittee(tmp.Committee)
+ return nil
+}
diff --git a/cl/cltypes/solid/sync_committee_test.go b/cl/cltypes/solid/sync_committee_test.go
index 4c004f3941a..1a12bd4a0ca 100644
--- a/cl/cltypes/solid/sync_committee_test.go
+++ b/cl/cltypes/solid/sync_committee_test.go
@@ -10,7 +10,7 @@ import (
func TestSyncCommittee(t *testing.T) {
// Test NewSyncCommitteeFromParameters
- committee := make([][48]byte, 512)
+ committee := make([]libcommon.Bytes48, 512)
aggregatePublicKey := [48]byte{1, 2, 3} // Example aggregate public key
syncCommittee := NewSyncCommitteeFromParameters(committee, aggregatePublicKey)
assert.NotNil(t, syncCommittee)
@@ -20,7 +20,7 @@ func TestSyncCommittee(t *testing.T) {
assert.Equal(t, committee, gotCommittee)
// Test SetCommittee
- newCommittee := make([][48]byte, 512)
+ newCommittee := make([]libcommon.Bytes48, 512)
for i := 0; i < 512; i++ {
copy(newCommittee[i][:], []byte{byte(i)})
}
@@ -30,13 +30,13 @@ func TestSyncCommittee(t *testing.T) {
// Test AggregatePublicKey
gotAggregatePublicKey := syncCommittee.AggregatePublicKey()
- assert.Equal(t, aggregatePublicKey, gotAggregatePublicKey)
+ assert.Equal(t, libcommon.Bytes48(aggregatePublicKey), gotAggregatePublicKey)
// Test SetAggregatePublicKey
newAggregatePublicKey := [48]byte{4, 5, 6} // Example new aggregate public key
syncCommittee.SetAggregatePublicKey(newAggregatePublicKey)
updatedAggregatePublicKey := syncCommittee.AggregatePublicKey()
- assert.Equal(t, newAggregatePublicKey, updatedAggregatePublicKey)
+ assert.Equal(t, libcommon.Bytes48(newAggregatePublicKey), updatedAggregatePublicKey)
// Test EncodingSizeSSZ
expectedEncodingSize := syncCommitteeSize
@@ -73,3 +73,19 @@ func TestSyncCommittee(t *testing.T) {
// Test Static
assert.True(t, syncCommittee.Static())
}
+
+func TestSyncCommitteeJson(t *testing.T) {
+ // Test MarshalJSON and UnmarshalJSON
+ committee := make([]libcommon.Bytes48, 512)
+ for i := 0; i < 512; i++ {
+ copy(committee[i][:], []byte{byte(i)})
+ }
+ aggregatePublicKey := [48]byte{1, 2, 3} // Example aggregate public key
+ syncCommittee := NewSyncCommitteeFromParameters(committee, aggregatePublicKey)
+ encodedData, err := syncCommittee.MarshalJSON()
+ assert.NoError(t, err)
+ decodedSyncCommittee := &SyncCommittee{}
+ err = decodedSyncCommittee.UnmarshalJSON(encodedData)
+ assert.NoError(t, err)
+ assert.Equal(t, syncCommittee, decodedSyncCommittee)
+}
diff --git a/cl/cltypes/solid/transactions.go b/cl/cltypes/solid/transactions.go
index 73ba6c81a99..40651ac85fd 100644
--- a/cl/cltypes/solid/transactions.go
+++ b/cl/cltypes/solid/transactions.go
@@ -1,7 +1,10 @@
package solid
import (
+ "encoding/json"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
@@ -12,6 +15,27 @@ type TransactionsSSZ struct {
root libcommon.Hash // root
}
+func (t *TransactionsSSZ) UnmarshalJSON(buf []byte) error {
+ tmp := []hexutility.Bytes{}
+ t.root = libcommon.Hash{}
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ t.underlying = nil
+ for _, tx := range tmp {
+ t.underlying = append(t.underlying, tx)
+ }
+ return nil
+}
+
+func (t TransactionsSSZ) MarshalJSON() ([]byte, error) {
+ tmp := []hexutility.Bytes{}
+ for _, tx := range t.underlying {
+ tmp = append(tmp, tx)
+ }
+ return json.Marshal(tmp)
+}
+
func (*TransactionsSSZ) Clone() clonable.Clonable {
return &TransactionsSSZ{}
}
diff --git a/cl/cltypes/solid/uint64_list.go b/cl/cltypes/solid/uint64_list.go
index b57b5ae0dde..2678cf0e212 100644
--- a/cl/cltypes/solid/uint64_list.go
+++ b/cl/cltypes/solid/uint64_list.go
@@ -1,6 +1,8 @@
package solid
import (
+ "encoding/json"
+
"github.com/ledgerwatch/erigon-lib/types/clonable"
)
@@ -14,6 +16,14 @@ func NewUint64ListSSZ(limit int) Uint64ListSSZ {
}
}
+func (h uint64ListSSZ) MarshalJSON() ([]byte, error) {
+ return json.Marshal(h.u)
+}
+
+func (h uint64ListSSZ) UnmarshalJSON(buf []byte) error {
+ return json.Unmarshal(buf, h.u)
+}
+
func (h *uint64ListSSZ) Static() bool {
return false
}
@@ -32,6 +42,10 @@ func (arr *uint64ListSSZ) Clear() {
arr.u.Clear()
}
+func (arr *uint64ListSSZ) Bytes() []byte {
+ return arr.u.u[:arr.u.l*8]
+}
+
func (arr *uint64ListSSZ) CopyTo(target IterableSSZ[uint64]) {
if c, ok := target.(*uint64ListSSZ); ok {
arr.u.CopyTo(c.u)
diff --git a/cl/cltypes/solid/uint64_raw_list.go b/cl/cltypes/solid/uint64_raw_list.go
new file mode 100644
index 00000000000..b520700bfd8
--- /dev/null
+++ b/cl/cltypes/solid/uint64_raw_list.go
@@ -0,0 +1,167 @@
+package solid
+
+import (
+ "encoding/binary"
+ "encoding/json"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/types/clonable"
+ "github.com/ledgerwatch/erigon/cl/merkle_tree"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+type RawUint64List struct {
+ u []uint64
+ c int
+
+ hahsBuffer []byte
+ cachedHash libcommon.Hash
+}
+
+func NewRawUint64List(limit int, u []uint64) *RawUint64List {
+ return &RawUint64List{
+ c: limit,
+ u: u,
+ }
+}
+
+func (arr *RawUint64List) Clear() {
+ arr.cachedHash = libcommon.Hash{}
+ arr.u = arr.u[:0]
+}
+
+func (arr *RawUint64List) Append(value uint64) {
+ arr.cachedHash = libcommon.Hash{}
+ arr.u = append(arr.u, value)
+}
+
+func (arr *RawUint64List) Get(index int) uint64 {
+ return arr.u[index]
+}
+
+func (arr *RawUint64List) Set(index int, v uint64) {
+ arr.u[index] = v
+}
+
+func (arr *RawUint64List) CopyTo(target IterableSSZ[uint64]) {
+ if c, ok := target.(*RawUint64List); ok {
+ c.u = append(c.u[:0], arr.u...)
+ c.cachedHash = arr.cachedHash
+ return
+ }
+ panic("incompatible type")
+}
+
+func (arr *RawUint64List) Range(fn func(index int, value uint64, length int) bool) {
+ for i, v := range arr.u {
+ cont := fn(i, v, len(arr.u))
+ if !cont {
+ break
+ }
+ }
+}
+
+func (arr *RawUint64List) Static() bool {
+ return false
+}
+
+func (arr *RawUint64List) Bytes() []byte {
+ out, _ := arr.EncodeSSZ(nil)
+ return out
+}
+
+func (arr *RawUint64List) EncodeSSZ(buf []byte) (dst []byte, err error) {
+ dst = buf
+ for _, v := range arr.u {
+ dst = binary.LittleEndian.AppendUint64(dst, v)
+ }
+ return dst, nil
+}
+
+func (arr *RawUint64List) DecodeSSZ(buf []byte, _ int) error {
+ arr.cachedHash = libcommon.Hash{}
+ arr.u = make([]uint64, len(buf)/8)
+ for i := range arr.u {
+ arr.u[i] = binary.LittleEndian.Uint64(buf[i*8:])
+ }
+ return nil
+}
+
+func (arr *RawUint64List) Length() int {
+ return len(arr.u)
+}
+
+func (arr *RawUint64List) Cap() int {
+ return arr.c
+}
+
+func (arr *RawUint64List) Clone() clonable.Clonable {
+ return &RawUint64List{}
+}
+
+func (arr *RawUint64List) EncodingSizeSSZ() int {
+ return len(arr.u) * 8
+}
+
+func (arr *RawUint64List) SetReusableHashBuffer(buf []byte) {
+ arr.hahsBuffer = buf
+}
+
+func (arr *RawUint64List) hashBufLength() int {
+ return (((len(arr.u) * 4) + 3) / 4) * length.Hash
+}
+
+func (arr *RawUint64List) HashSSZ() ([32]byte, error) {
+ if arr.cachedHash != (libcommon.Hash{}) {
+ return arr.cachedHash, nil
+ }
+ if cap(arr.hahsBuffer) < arr.hashBufLength() {
+ arr.hahsBuffer = make([]byte, 0, arr.hashBufLength())
+ }
+ depth := GetDepth((uint64(arr.c)*8 + 31) / 32)
+
+ lnRoot := merkle_tree.Uint64Root(uint64(len(arr.u)))
+ if len(arr.u) == 0 {
+ arr.cachedHash = utils.Sha256(merkle_tree.ZeroHashes[depth][:], lnRoot[:])
+ return arr.cachedHash, nil
+ }
+
+ arr.hahsBuffer = arr.hahsBuffer[:arr.hashBufLength()]
+ for i, v := range arr.u {
+ binary.LittleEndian.PutUint64(arr.hahsBuffer[i*8:], v)
+ }
+
+ elements := arr.hahsBuffer
+ for i := 0; i < int(depth); i++ {
+ layerLen := len(elements)
+ if layerLen%64 == 32 {
+ elements = append(elements, merkle_tree.ZeroHashes[i][:]...)
+ }
+ outputLen := len(elements) / 2
+ if err := merkle_tree.HashByteSlice(elements, elements); err != nil {
+ return [32]byte{}, err
+ }
+ elements = elements[:outputLen]
+ }
+
+ arr.cachedHash = utils.Sha256(elements[:32], lnRoot[:])
+ return arr.cachedHash, nil
+}
+
+func (arr *RawUint64List) Pop() uint64 {
+ panic("k")
+}
+
+func (arr *RawUint64List) MarshalJSON() ([]byte, error) {
+ return json.Marshal(arr.u)
+}
+
+func (arr *RawUint64List) UnmarshalJSON(data []byte) error {
+ arr.cachedHash = libcommon.Hash{}
+ if err := json.Unmarshal(data, &arr.u); err != nil {
+ return err
+ }
+ arr.c = len(arr.u)
+ return nil
+}
diff --git a/cl/cltypes/solid/uint64_vector.go b/cl/cltypes/solid/uint64_vector.go
index 446d6cdbfe9..f5ba39817f6 100644
--- a/cl/cltypes/solid/uint64_vector.go
+++ b/cl/cltypes/solid/uint64_vector.go
@@ -1,6 +1,9 @@
package solid
import (
+ "encoding/json"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
)
@@ -10,15 +13,24 @@ type uint64VectorSSZ struct {
func NewUint64VectorSSZ(size int) Uint64VectorSSZ {
o := &byteBasedUint64Slice{
- c: size,
- l: size,
- u: make([]byte, size*8),
+ c: size,
+ l: size,
+ u: make([]byte, size*8),
+ treeCacheBuffer: make([]byte, getTreeCacheSize((size+3)/4, treeCacheDepthUint64Slice)*length.Hash),
}
return &uint64VectorSSZ{
u: o,
}
}
+func (h uint64VectorSSZ) MarshalJSON() ([]byte, error) {
+ return json.Marshal(h.u)
+}
+
+func (h *uint64VectorSSZ) UnmarshalJSON(buf []byte) error {
+ return json.Unmarshal(buf, h.u)
+}
+
func (arr *uint64VectorSSZ) Clear() {
arr.u.Clear()
}
@@ -32,6 +44,10 @@ func (arr *uint64VectorSSZ) CopyTo(target IterableSSZ[uint64]) {
arr.u.CopyTo(c.u)
}
+func (arr *uint64VectorSSZ) Bytes() []byte {
+ return arr.u.u[:arr.u.l*8]
+}
+
func (arr *uint64VectorSSZ) Range(fn func(index int, value uint64, length int) bool) {
arr.u.Range(fn)
}
diff --git a/cl/cltypes/solid/uint64slice_byte.go b/cl/cltypes/solid/uint64slice_byte.go
index 5b5e85d5d90..a642c6278c1 100644
--- a/cl/cltypes/solid/uint64slice_byte.go
+++ b/cl/cltypes/solid/uint64slice_byte.go
@@ -1,7 +1,9 @@
package solid
import (
+ "bytes"
"encoding/binary"
+ "encoding/json"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -10,12 +12,25 @@ import (
"github.com/ledgerwatch/erigon/cl/utils"
)
+const treeCacheDepthUint64Slice = 0
+
+func convertDepthToChunkSize(d int) int {
+ return (1 << d) // just power of 2
+}
+
+func getTreeCacheSize(listLen int, cacheDepth int) int {
+ treeChunks := convertDepthToChunkSize(cacheDepth)
+ return (listLen + treeChunks - 1) / treeChunks
+
+}
+
// byteBasedUint64Slice represents a dynamic Uint64Slice data type that is byte-backed.
// The underlying storage for the slice is a byte array. This approach allows for efficient
// memory usage, especially when dealing with large slices.
type byteBasedUint64Slice struct {
// The bytes that back the slice
- u []byte
+ u []byte
+ treeCacheBuffer []byte
// Length of the slice
l int
@@ -31,7 +46,6 @@ func NewUint64Slice(limit int) *byteBasedUint64Slice {
o := &byteBasedUint64Slice{
c: limit,
}
- o.u = make([]byte, 0)
return o
}
@@ -41,6 +55,9 @@ func (arr *byteBasedUint64Slice) Clear() {
for i := range arr.u {
arr.u[i] = 0
}
+ for i := range arr.treeCacheBuffer {
+ arr.treeCacheBuffer[i] = 0
+ }
}
// CopyTo copies the slice to a target slice.
@@ -52,13 +69,35 @@ func (arr *byteBasedUint64Slice) CopyTo(target *byteBasedUint64Slice) {
if len(target.u) < len(arr.u) {
target.u = make([]byte, len(arr.u))
}
+ if len(target.treeCacheBuffer) < len(arr.treeCacheBuffer) {
+ target.treeCacheBuffer = make([]byte, len(arr.treeCacheBuffer))
+ }
+ target.treeCacheBuffer = target.treeCacheBuffer[:len(arr.treeCacheBuffer)]
target.u = target.u[:len(arr.u)]
copy(target.u, arr.u)
+ copy(target.treeCacheBuffer, arr.treeCacheBuffer)
+}
+
+func (arr *byteBasedUint64Slice) MarshalJSON() ([]byte, error) {
+ list := make([]uint64, arr.l)
+ for i := 0; i < arr.l; i++ {
+ list[i] = arr.Get(i)
+ }
+ return json.Marshal(list)
}
-// depth returns the depth of the Merkle tree representing the slice.
-func (arr *byteBasedUint64Slice) depth() int {
- return int(GetDepth(uint64(arr.c) / 4))
+func (arr *byteBasedUint64Slice) UnmarshalJSON(buf []byte) error {
+ var list []uint64
+
+ if err := json.Unmarshal(buf, &list); err != nil {
+ return err
+ }
+ arr.Clear()
+ arr.l = len(list)
+ for _, elem := range list {
+ arr.Append(elem)
+ }
+ return nil
}
// Range iterates over the slice and applies a provided function to each element.
@@ -77,6 +116,7 @@ func (arr *byteBasedUint64Slice) Pop() uint64 {
val := binary.LittleEndian.Uint64(arr.u[offset : offset+8])
binary.LittleEndian.PutUint64(arr.u[offset:offset+8], 0)
arr.l = arr.l - 1
+ arr.treeCacheBuffer = arr.treeCacheBuffer[:getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice)*length.Hash]
return val
}
@@ -85,9 +125,18 @@ func (arr *byteBasedUint64Slice) Append(v uint64) {
if len(arr.u) <= arr.l*8 {
arr.u = append(arr.u, make([]byte, 32)...)
}
+
offset := arr.l * 8
binary.LittleEndian.PutUint64(arr.u[offset:offset+8], v)
arr.l = arr.l + 1
+ treeBufferExpectCache := getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice) * length.Hash
+ if len(arr.treeCacheBuffer) < treeBufferExpectCache {
+ arr.treeCacheBuffer = append(arr.treeCacheBuffer, make([]byte, treeBufferExpectCache-len(arr.treeCacheBuffer))...)
+ }
+ ihIdx := (((arr.l - 1) / 4) / convertDepthToChunkSize(treeCacheDepthUint64Slice)) * length.Hash
+ for i := ihIdx; i < ihIdx+length.Hash; i++ {
+ arr.treeCacheBuffer[i] = 0
+ }
}
// Get returns the element at the given index.
@@ -101,10 +150,11 @@ func (arr *byteBasedUint64Slice) Get(index int) uint64 {
// Set replaces the element at the given index with a new value.
func (arr *byteBasedUint64Slice) Set(index int, v uint64) {
- if index >= arr.l {
- panic("index out of range")
- }
offset := index * 8
+ ihIdx := ((index / 4) / convertDepthToChunkSize(treeCacheDepthUint64Slice)) * length.Hash
+ for i := ihIdx; i < ihIdx+length.Hash; i++ {
+ arr.treeCacheBuffer[i] = 0
+ }
binary.LittleEndian.PutUint64(arr.u[offset:offset+8], v)
}
@@ -132,15 +182,41 @@ func (arr *byteBasedUint64Slice) HashListSSZ() ([32]byte, error) {
}
}
lengthRoot := merkle_tree.Uint64Root(uint64(arr.l))
- return utils.Keccak256(baseRoot[:], lengthRoot[:]), nil
+ return utils.Sha256(baseRoot[:], lengthRoot[:]), nil
}
// HashVectorSSZ computes the SSZ hash of the slice as a vector. It returns the hash and any error encountered.
func (arr *byteBasedUint64Slice) HashVectorSSZ() ([32]byte, error) {
- depth := GetDepth((uint64(arr.c)*8 + 31) / 32)
- offset := 32*((arr.l-1)/4) + 32
- elements := arr.u[:offset]
- for i := uint8(0); i < depth; i++ {
+ chunkSize := convertDepthToChunkSize(treeCacheDepthUint64Slice) * length.Hash
+ depth := GetDepth((uint64(arr.c)*8 + length.Hash - 1) / length.Hash)
+ emptyHashBytes := make([]byte, length.Hash)
+
+ layerBuffer := make([]byte, chunkSize)
+ maxTo := length.Hash*((arr.l-1)/4) + length.Hash
+
+ offset := 0
+ for i := 0; i < maxTo; i += chunkSize {
+ offset = (i / chunkSize) * length.Hash
+ from := i
+ to := int(utils.Min64(uint64(from+chunkSize), uint64(maxTo)))
+
+ if !bytes.Equal(arr.treeCacheBuffer[offset:offset+length.Hash], emptyHashBytes) {
+ continue
+ }
+ layerBuffer = layerBuffer[:to-from]
+ copy(layerBuffer, arr.u[from:to])
+ if err := computeFlatRootsToBuffer(uint8(utils.Min64(treeCacheDepthUint64Slice, uint64(depth))), layerBuffer, arr.treeCacheBuffer[offset:]); err != nil {
+ return [32]byte{}, err
+ }
+ }
+ if treeCacheDepthUint64Slice >= depth {
+ return common.BytesToHash(arr.treeCacheBuffer[:32]), nil
+ }
+
+ arr.makeBuf(offset + length.Hash)
+ copy(arr.buf, arr.treeCacheBuffer[:offset+length.Hash])
+ elements := arr.buf
+ for i := uint8(treeCacheDepthUint64Slice); i < depth; i++ {
layerLen := len(elements)
if layerLen%64 == 32 {
elements = append(elements, merkle_tree.ZeroHashes[i][:]...)
@@ -166,10 +242,11 @@ func (arr *byteBasedUint64Slice) DecodeSSZ(buf []byte, _ int) error {
if len(buf)%8 > 0 {
return ssz.ErrBadDynamicLength
}
- bufferLength := len(buf) + (length.Hash - (len(buf) % length.Hash))
+ arr.l = len(buf) / 8
+ bufferLength := length.Hash*((arr.l-1)/4) + length.Hash
arr.u = make([]byte, bufferLength)
copy(arr.u, buf)
- arr.l = len(buf) / 8
+ arr.treeCacheBuffer = make([]byte, getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice)*length.Hash)
return nil
}
diff --git a/cl/cltypes/solid/uint64slice_byte_test.go b/cl/cltypes/solid/uint64slice_byte_test.go
index f185712004a..973866df9fc 100644
--- a/cl/cltypes/solid/uint64slice_byte_test.go
+++ b/cl/cltypes/solid/uint64slice_byte_test.go
@@ -25,3 +25,21 @@ func TestUint64SliceBasic(t *testing.T) {
require.EqualValues(t, common.HexToHash("eb8cec5eaec74a32e8b9b56cc42f7627cef722f81081ead786c97a4df1c8be5d"), out)
}
+
+func TestUint64SliceCopyTo(t *testing.T) {
+ num := 1000
+ set := solid.NewUint64ListSSZ(100_000)
+ set2 := solid.NewUint64ListSSZ(100_000)
+ for i := 0; i < num; i++ {
+ set.Append(uint64(i))
+ set.HashSSZ()
+ }
+ firstHash, err := set.HashSSZ()
+ require.NoError(t, err)
+
+ set.CopyTo(set2)
+ secondHash, err := set2.HashSSZ()
+ require.NoError(t, err)
+
+ require.Equal(t, firstHash, secondHash)
+}
diff --git a/cl/cltypes/solid/validator.go b/cl/cltypes/solid/validator.go
index 025b74d6b1c..63353e1ee66 100644
--- a/cl/cltypes/solid/validator.go
+++ b/cl/cltypes/solid/validator.go
@@ -2,6 +2,7 @@ package solid
import (
"encoding/binary"
+ "encoding/json"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -153,6 +154,11 @@ func (v Validator) SetWithdrawalCredentials(o common.Hash) {
func (v Validator) SetEffectiveBalance(i uint64) {
binary.LittleEndian.PutUint64(v[80:88], i)
}
+
+func (v Validator) SetEffectiveBalanceFromBytes(b []byte) {
+ copy(v[80:88], b)
+}
+
func (v Validator) SetSlashed(b bool) {
if b {
v[88] = 1
@@ -177,10 +183,51 @@ func (v Validator) SetWithdrawableEpoch(i uint64) {
}
// Active returns if validator is active for given epoch
-func (v *Validator) Active(epoch uint64) bool {
+func (v Validator) Active(epoch uint64) bool {
return v.ActivationEpoch() <= epoch && epoch < v.ExitEpoch()
}
-func (v *Validator) IsSlashable(epoch uint64) bool {
+func (v Validator) IsSlashable(epoch uint64) bool {
return !v.Slashed() && (v.ActivationEpoch() <= epoch) && (epoch < v.WithdrawableEpoch())
}
+
+func (v Validator) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ PublicKey common.Bytes48 `json:"public_key"`
+ WithdrawalCredentials common.Hash `json:"withdrawal_credentials"`
+ EffectiveBalance uint64 `json:"effective_balance"`
+ Slashed bool `json:"slashed"`
+ ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"`
+ ActivationEpoch uint64 `json:"activation_epoch"`
+ ExitEpoch uint64 `json:"exit_epoch"`
+ WithdrawableEpoch uint64 `json:"withdrawable_epoch"`
+ }{
+ PublicKey: v.PublicKey(),
+ WithdrawalCredentials: v.WithdrawalCredentials(),
+ EffectiveBalance: v.EffectiveBalance(),
+ Slashed: v.Slashed(),
+ ActivationEligibilityEpoch: v.ActivationEligibilityEpoch(),
+ ActivationEpoch: v.ActivationEpoch(),
+ ExitEpoch: v.ExitEpoch(),
+ WithdrawableEpoch: v.WithdrawableEpoch(),
+ })
+}
+
+func (v *Validator) UnmarshalJSON(input []byte) error {
+ var err error
+ var tmp struct {
+ PublicKey common.Bytes48 `json:"public_key"`
+ WithdrawalCredentials common.Hash `json:"withdrawal_credentials"`
+ EffectiveBalance uint64 `json:"effective_balance"`
+ Slashed bool `json:"slashed"`
+ ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"`
+ ActivationEpoch uint64 `json:"activation_epoch"`
+ ExitEpoch uint64 `json:"exit_epoch"`
+ WithdrawableEpoch uint64 `json:"withdrawable_epoch"`
+ }
+ if err = json.Unmarshal(input, &tmp); err != nil {
+ return err
+ }
+ *v = NewValidatorFromParameters(tmp.PublicKey, tmp.WithdrawalCredentials, tmp.EffectiveBalance, tmp.Slashed, tmp.ActivationEligibilityEpoch, tmp.ActivationEpoch, tmp.ExitEpoch, tmp.WithdrawableEpoch)
+ return nil
+}
diff --git a/cl/cltypes/solid/validator_set.go b/cl/cltypes/solid/validator_set.go
index 31b51809cd0..eb5b6e170d1 100644
--- a/cl/cltypes/solid/validator_set.go
+++ b/cl/cltypes/solid/validator_set.go
@@ -1,7 +1,10 @@
package solid
import (
- "github.com/ledgerwatch/erigon-lib/common"
+ "bytes"
+ "encoding/json"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
"github.com/ledgerwatch/erigon-lib/types/ssz"
@@ -18,7 +21,10 @@ const (
IsPreviousMatchingHeadAttesterBit = 0x5
)
-const validatorSetCapacityMultiplier = 1.05 // allocate 5% to the validator set when re-allocation is needed.
+const (
+ validatorSetCapacityMultiplier = 1.01 // allocate 20% to the validator set when re-allocation is needed.
+ validatorTreeCacheGroupLayer = 3 // It will cache group validatorTreeCacheGroupLayer^2 accordingly
+)
// This is all stuff used by phase0 state transition. It makes many operations faster.
type Phase0Data struct {
@@ -28,8 +34,10 @@ type Phase0Data struct {
}
type ValidatorSet struct {
- buffer []byte
- l, c int
+ buffer []byte
+ treeCacheBuffer []byte
+
+ l, c int
// We have phase0 data below
phase0Data []Phase0Data
@@ -44,13 +52,36 @@ func NewValidatorSet(c int) *ValidatorSet {
}
}
-func (v *ValidatorSet) expandBuffer(size int) {
+func NewValidatorSetWithLength(c int, l int) *ValidatorSet {
+ return &ValidatorSet{
+ c: c,
+ l: l,
+ buffer: make([]byte, l*validatorSize),
+ treeCacheBuffer: make([]byte, getTreeCacheSize(l, validatorTreeCacheGroupLayer)*length.Hash),
+ phase0Data: make([]Phase0Data, l),
+ attesterBits: make([]byte, l),
+ }
+}
+
+func (v *ValidatorSet) Bytes() []byte {
+ return v.buffer[:v.l*validatorSize]
+}
+
+func (v *ValidatorSet) expandBuffer(newValidatorSetLength int) {
+ size := newValidatorSetLength * validatorSize
+ treeCacheSize := getTreeCacheSize(newValidatorSetLength, validatorTreeCacheGroupLayer) * length.Hash
+
if size <= cap(v.buffer) {
+ v.treeCacheBuffer = v.treeCacheBuffer[:treeCacheSize]
v.buffer = v.buffer[:size]
return
}
- buffer := make([]byte, size, int(float64(size)*validatorSetCapacityMultiplier))
+ increasedValidatorsCapacity := uint64(float64(newValidatorSetLength)*validatorSetCapacityMultiplier) + 1
+ buffer := make([]byte, size, increasedValidatorsCapacity*validatorSize)
+ cacheBuffer := make([]byte, treeCacheSize, increasedValidatorsCapacity*length.Hash)
copy(buffer, v.buffer)
+ copy(cacheBuffer, v.treeCacheBuffer)
+ v.treeCacheBuffer = cacheBuffer
v.buffer = buffer
}
@@ -58,9 +89,10 @@ func (v *ValidatorSet) Append(val Validator) {
offset := v.EncodingSizeSSZ()
// we are overflowing the buffer? append.
if offset >= len(v.buffer) {
- v.expandBuffer(offset + validatorSize)
+ v.expandBuffer(v.l + 1)
v.phase0Data = append(v.phase0Data, Phase0Data{})
}
+ v.zeroTreeHash(v.l)
copy(v.buffer[offset:], val)
v.phase0Data[v.l] = Phase0Data{} // initialize to empty.
v.attesterBits = append(v.attesterBits, 0x0)
@@ -88,19 +120,18 @@ func (v *ValidatorSet) Clone() clonable.Clonable {
return NewValidatorSet(v.c)
}
-func (v *ValidatorSet) CopyTo(set2 IterableSSZ[Validator]) {
- t := set2.(*ValidatorSet)
+func (v *ValidatorSet) CopyTo(t *ValidatorSet) {
t.l = v.l
t.c = v.c
offset := v.EncodingSizeSSZ()
if offset > len(t.buffer) {
- t.expandBuffer(offset)
- t.buffer = append(t.buffer, make([]byte, len(v.buffer)-len(t.buffer))...)
+ t.expandBuffer(v.l)
t.attesterBits = make([]byte, len(v.attesterBits))
}
// skip copying (unsupported for phase0)
t.phase0Data = make([]Phase0Data, t.l)
copy(t.buffer, v.buffer)
+ copy(t.treeCacheBuffer, v.treeCacheBuffer)
copy(t.attesterBits, v.attesterBits)
t.attesterBits = t.attesterBits[:v.l]
}
@@ -109,7 +140,7 @@ func (v *ValidatorSet) DecodeSSZ(buf []byte, _ int) error {
if len(buf)%validatorSize > 0 {
return ssz.ErrBufferNotRounded
}
- v.expandBuffer(len(buf))
+ v.expandBuffer(len(buf) / validatorSize)
copy(v.buffer, buf)
v.l = len(buf) / validatorSize
v.phase0Data = make([]Phase0Data, v.l)
@@ -136,45 +167,82 @@ func (v *ValidatorSet) Get(idx int) Validator {
if idx >= v.l {
panic("ValidatorSet -- Get: out of bounds")
}
- return v.buffer[idx*validatorSize : (idx*validatorSize)+validatorSize]
+
+ return Validator(v.buffer[idx*validatorSize : (idx*validatorSize)+validatorSize])
}
func (v *ValidatorSet) HashSSZ() ([32]byte, error) {
// generate root list
- v.makeBuf(v.l * length.Hash)
- validatorLeaves := v.buf
+ validatorsLeafChunkSize := convertDepthToChunkSize(validatorTreeCacheGroupLayer)
hashBuffer := make([]byte, 8*32)
depth := GetDepth(uint64(v.c))
lengthRoot := merkle_tree.Uint64Root(uint64(v.l))
if v.l == 0 {
- return utils.Keccak256(merkle_tree.ZeroHashes[depth][:], lengthRoot[:]), nil
+ return utils.Sha256(merkle_tree.ZeroHashes[depth][:], lengthRoot[:]), nil
}
- for i := 0; i < v.l; i++ {
- validator := v.Get(i)
- if err := validator.CopyHashBufferTo(hashBuffer); err != nil {
- return [32]byte{}, err
+
+ emptyHashBytes := make([]byte, length.Hash)
+
+ layerBuffer := make([]byte, validatorsLeafChunkSize*length.Hash)
+ for i := 0; i < v.l; i += validatorsLeafChunkSize {
+ from := uint64(i)
+ to := utils.Min64(from+uint64(validatorsLeafChunkSize), uint64(v.l))
+ offset := (i / validatorsLeafChunkSize) * length.Hash
+
+ if !bytes.Equal(v.treeCacheBuffer[offset:offset+length.Hash], emptyHashBytes) {
+ continue
}
- hashBuffer = hashBuffer[:(8 * 32)]
- if err := merkle_tree.MerkleRootFromFlatLeaves(hashBuffer, validatorLeaves[i*length.Hash:]); err != nil {
+ for i := from; i < to; i++ {
+ validator := v.Get(int(i))
+ if err := validator.CopyHashBufferTo(hashBuffer); err != nil {
+ return [32]byte{}, err
+ }
+ hashBuffer = hashBuffer[:(8 * 32)]
+ if err := merkle_tree.MerkleRootFromFlatLeaves(hashBuffer, layerBuffer[(i-from)*length.Hash:]); err != nil {
+ return [32]byte{}, err
+ }
+ }
+ endOffset := (to - from) * length.Hash
+ if err := computeFlatRootsToBuffer(validatorTreeCacheGroupLayer, layerBuffer[:endOffset], v.treeCacheBuffer[offset:]); err != nil {
return [32]byte{}, err
}
+
}
- offset := length.Hash * v.l
- elements := common.Copy(validatorLeaves[:offset])
- for i := uint8(0); i < depth; i++ {
+
+ offset := length.Hash * ((v.l + validatorsLeafChunkSize - 1) / validatorsLeafChunkSize)
+ v.makeBuf(offset)
+ copy(v.buf, v.treeCacheBuffer[:offset])
+ elements := v.buf
+ for i := uint8(validatorTreeCacheGroupLayer); i < depth; i++ {
// Sequential
if len(elements)%64 != 0 {
elements = append(elements, merkle_tree.ZeroHashes[i][:]...)
}
outputLen := len(elements) / 2
- v.makeBuf(outputLen)
- if err := merkle_tree.HashByteSlice(v.buf, elements); err != nil {
+ if err := merkle_tree.HashByteSlice(elements, elements); err != nil {
return [32]byte{}, err
}
- elements = v.buf
+ elements = elements[:outputLen]
}
- return utils.Keccak256(elements[:length.Hash], lengthRoot[:]), nil
+
+ return utils.Sha256(elements[:length.Hash], lengthRoot[:]), nil
+}
+
+func computeFlatRootsToBuffer(depth uint8, layerBuffer, output []byte) error {
+ for i := uint8(0); i < depth; i++ {
+ // Sequential
+ if len(layerBuffer)%64 != 0 {
+ layerBuffer = append(layerBuffer, merkle_tree.ZeroHashes[i][:]...)
+ }
+ if err := merkle_tree.HashByteSlice(layerBuffer, layerBuffer); err != nil {
+ return err
+ }
+ layerBuffer = layerBuffer[:len(layerBuffer)/2]
+ }
+
+ copy(output, layerBuffer[:length.Hash])
+ return nil
}
func (v *ValidatorSet) Set(idx int, val Validator) {
@@ -217,6 +285,13 @@ func (v *ValidatorSet) Range(fn func(int, Validator, int) bool) {
}
}
+func (v *ValidatorSet) zeroTreeHash(idx int) {
+ iNodeIdx := (idx / (1 << validatorTreeCacheGroupLayer)) * length.Hash
+ for i := iNodeIdx; i < iNodeIdx+length.Hash; i++ {
+ v.treeCacheBuffer[i] = 0
+ }
+}
+
func (v *ValidatorSet) IsCurrentMatchingSourceAttester(idx int) bool {
return v.getAttesterBit(idx, IsCurrentMatchingSourceAttesterBit)
}
@@ -280,3 +355,58 @@ func (v *ValidatorSet) SetMinCurrentInclusionDelayAttestation(idx int, val *Pend
func (v *ValidatorSet) SetMinPreviousInclusionDelayAttestation(idx int, val *PendingAttestation) {
v.getPhase0(idx).MinPreviousInclusionDelayAttestation = val
}
+
+func (v *ValidatorSet) SetWithdrawalCredentialForValidatorAtIndex(index int, creds libcommon.Hash) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetWithdrawalCredentials(creds)
+}
+
+func (v *ValidatorSet) SetExitEpochForValidatorAtIndex(index int, epoch uint64) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetExitEpoch(epoch)
+}
+
+func (v *ValidatorSet) SetWithdrawableEpochForValidatorAtIndex(index int, epoch uint64) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetWithdrawableEpoch(epoch)
+}
+
+func (v *ValidatorSet) SetEffectiveBalanceForValidatorAtIndex(index int, balance uint64) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetEffectiveBalance(balance)
+}
+
+func (v *ValidatorSet) SetActivationEpochForValidatorAtIndex(index int, epoch uint64) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetActivationEpoch(epoch)
+}
+
+func (v *ValidatorSet) SetActivationEligibilityEpochForValidatorAtIndex(index int, epoch uint64) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetActivationEligibilityEpoch(epoch)
+}
+
+func (v *ValidatorSet) SetValidatorSlashed(index int, slashed bool) {
+ v.zeroTreeHash(index)
+ v.Get(index).SetSlashed(slashed)
+}
+
+func (v *ValidatorSet) MarshalJSON() ([]byte, error) {
+ validators := make([]Validator, v.l)
+ for i := 0; i < v.l; i++ {
+ validators[i] = v.Get(i)
+ }
+ return json.Marshal(validators)
+}
+
+func (v *ValidatorSet) UnmarshalJSON(data []byte) error {
+ var validators []Validator
+ if err := json.Unmarshal(data, &validators); err != nil {
+ return err
+ }
+ v.Clear()
+ for _, val := range validators {
+ v.Append(val)
+ }
+ return nil
+}
diff --git a/cl/cltypes/solid/validator_test.go b/cl/cltypes/solid/validator_test.go
index b270ccdbbba..9e3debd19dc 100644
--- a/cl/cltypes/solid/validator_test.go
+++ b/cl/cltypes/solid/validator_test.go
@@ -1,10 +1,12 @@
package solid
import (
+ "encoding/binary"
"testing"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestValidator(t *testing.T) {
@@ -49,3 +51,39 @@ func TestValidator(t *testing.T) {
validator.CopyTo(copiedValidator)
assert.Equal(t, validator, copiedValidator)
}
+
+func TestValidatorSetTest(t *testing.T) {
+ num := 1000
+ vset := NewValidatorSet(1000000)
+ vset2 := NewValidatorSet(1000000)
+ for i := 0; i < num; i++ {
+ var pk [48]byte
+ var wk [32]byte
+ binary.BigEndian.PutUint32(pk[:], uint32(i))
+ binary.BigEndian.PutUint32(wk[:], uint32(i))
+ vset.Append(NewValidatorFromParameters(
+ pk, wk, uint64(1), true, uint64(1), uint64(1), uint64(1), uint64(1),
+ ))
+ vset.HashSSZ()
+ }
+ firstHash, err := vset.HashSSZ()
+ require.NoError(t, err)
+
+ vset.CopyTo(vset2)
+ secondHash, err := vset2.HashSSZ()
+ require.NoError(t, err)
+
+ require.Equal(t, firstHash, secondHash)
+}
+
+func TestMarshalUnmarshalJson(t *testing.T) {
+ validator := NewValidatorFromParameters(
+ [48]byte{1, 2, 3}, [32]byte{4, 5, 6}, 7, true, 8, 9, 10, 11,
+ )
+ encoded, err := validator.MarshalJSON()
+ require.NoError(t, err)
+ decoded := NewValidator()
+ err = decoded.UnmarshalJSON(encoded)
+ require.NoError(t, err)
+ assert.Equal(t, validator, decoded)
+}
diff --git a/cl/cltypes/validator.go b/cl/cltypes/validator.go
index 3f4f3f8a288..7d328bf21cf 100644
--- a/cl/cltypes/validator.go
+++ b/cl/cltypes/validator.go
@@ -16,11 +16,10 @@ const (
)
type DepositData struct {
- PubKey [48]byte
- WithdrawalCredentials [32]byte // 32 byte
- Amount uint64
- Signature [96]byte
- Root libcommon.Hash // Ignored if not for hashing
+ PubKey libcommon.Bytes48 `json:"pubkey"`
+ WithdrawalCredentials libcommon.Hash `json:"withdrawal_credentials"`
+ Amount uint64 `json:"amount"`
+ Signature libcommon.Bytes96 `json:"signature"`
}
func (d *DepositData) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -49,8 +48,8 @@ func (*DepositData) Static() bool {
type Deposit struct {
// Merkle proof is used for deposits
- Proof solid.HashVectorSSZ // 33 X 32 size.
- Data *DepositData
+ Proof solid.HashVectorSSZ `json:"proof"` // 33 X 32 size.
+ Data *DepositData `json:"data"`
}
func (d *Deposit) EncodeSSZ(dst []byte) ([]byte, error) {
@@ -73,8 +72,8 @@ func (d *Deposit) HashSSZ() ([32]byte, error) {
}
type VoluntaryExit struct {
- Epoch uint64
- ValidatorIndex uint64
+ Epoch uint64 `json:"epoch"`
+ ValidatorIndex uint64 `json:"validator_index"`
}
func (e *VoluntaryExit) EncodeSSZ(buf []byte) ([]byte, error) {
@@ -102,23 +101,23 @@ func (*VoluntaryExit) EncodingSizeSSZ() int {
}
type SignedVoluntaryExit struct {
- VolunaryExit *VoluntaryExit
- Signature [96]byte
+ VoluntaryExit *VoluntaryExit `json:"message"`
+ Signature libcommon.Bytes96 `json:"signature"`
}
func (e *SignedVoluntaryExit) EncodeSSZ(dst []byte) ([]byte, error) {
- return ssz2.MarshalSSZ(dst, e.VolunaryExit, e.Signature[:])
+ return ssz2.MarshalSSZ(dst, e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) DecodeSSZ(buf []byte, version int) error {
- e.VolunaryExit = new(VoluntaryExit)
- return ssz2.UnmarshalSSZ(buf, version, e.VolunaryExit, e.Signature[:])
+ e.VoluntaryExit = new(VoluntaryExit)
+ return ssz2.UnmarshalSSZ(buf, version, e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) HashSSZ() ([32]byte, error) {
- return merkle_tree.HashTreeRoot(e.VolunaryExit, e.Signature[:])
+ return merkle_tree.HashTreeRoot(e.VoluntaryExit, e.Signature[:])
}
func (e *SignedVoluntaryExit) EncodingSizeSSZ() int {
- return 96 + e.VolunaryExit.EncodingSizeSSZ()
+ return 96 + e.VoluntaryExit.EncodingSizeSSZ()
}
diff --git a/cl/cltypes/validator_test.go b/cl/cltypes/validator_test.go
index c798a19c824..db96632c84a 100644
--- a/cl/cltypes/validator_test.go
+++ b/cl/cltypes/validator_test.go
@@ -21,8 +21,8 @@ func TestSignedVoluntaryExit(t *testing.T) {
signature := [96]byte{1, 2, 3}
signedExit := &cltypes.SignedVoluntaryExit{
- VolunaryExit: voluntaryExit,
- Signature: signature,
+ VoluntaryExit: voluntaryExit,
+ Signature: signature,
}
// Encode SignedVoluntaryExit to SSZ
@@ -35,8 +35,8 @@ func TestSignedVoluntaryExit(t *testing.T) {
assert.NoError(t, err, "Failed to decode SSZ to SignedVoluntaryExit")
// Compare the original and decoded SignedVoluntaryExit
- assert.Equal(t, signedExit.VolunaryExit.Epoch, decodedExit.VolunaryExit.Epoch, "Decoded SignedVoluntaryExit has incorrect epoch")
- assert.Equal(t, signedExit.VolunaryExit.ValidatorIndex, decodedExit.VolunaryExit.ValidatorIndex, "Decoded SignedVoluntaryExit has incorrect validator index")
+ assert.Equal(t, signedExit.VoluntaryExit.Epoch, decodedExit.VoluntaryExit.Epoch, "Decoded SignedVoluntaryExit has incorrect epoch")
+ assert.Equal(t, signedExit.VoluntaryExit.ValidatorIndex, decodedExit.VoluntaryExit.ValidatorIndex, "Decoded SignedVoluntaryExit has incorrect validator index")
assert.Equal(t, signedExit.Signature, decodedExit.Signature, "Decoded SignedVoluntaryExit has incorrect signature")
}
@@ -47,7 +47,6 @@ func TestDepositData(t *testing.T) {
WithdrawalCredentials: [32]byte{4, 5, 6},
Amount: 100,
Signature: [96]byte{7, 8, 9},
- Root: [32]byte{10, 11, 12},
}
// Encode DepositData to SSZ
diff --git a/cl/cltypes/withdrawal.go b/cl/cltypes/withdrawal.go
new file mode 100644
index 00000000000..8923820b243
--- /dev/null
+++ b/cl/cltypes/withdrawal.go
@@ -0,0 +1,72 @@
+package cltypes
+
+import (
+ "fmt"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/types/ssz"
+ "github.com/ledgerwatch/erigon/cl/merkle_tree"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+type Withdrawal struct {
+ Index uint64 `json:"index"` // monotonically increasing identifier issued by consensus layer
+ Validator uint64 `json:"validatorIndex"` // index of validator associated with withdrawal
+ Address libcommon.Address `json:"address"` // target address for withdrawn ether
+ Amount uint64 `json:"amount"` // value of withdrawal in GWei
+}
+
+func (obj *Withdrawal) EncodeSSZ(buf []byte) ([]byte, error) {
+ buf = append(buf, ssz.Uint64SSZ(obj.Index)...)
+ buf = append(buf, ssz.Uint64SSZ(obj.Validator)...)
+ buf = append(buf, obj.Address[:]...)
+ buf = append(buf, ssz.Uint64SSZ(obj.Amount)...)
+ return buf, nil
+}
+
+func (obj *Withdrawal) DecodeSSZ(buf []byte, _ int) error {
+ if len(buf) < obj.EncodingSizeSSZ() {
+ return fmt.Errorf("[Withdrawal] err: %s", ssz.ErrLowBufferSize)
+ }
+ obj.Index = ssz.UnmarshalUint64SSZ(buf)
+ obj.Validator = ssz.UnmarshalUint64SSZ(buf[8:])
+ copy(obj.Address[:], buf[16:])
+ obj.Amount = ssz.UnmarshalUint64SSZ(buf[36:])
+ return nil
+}
+
+func (obj *Withdrawal) EncodingSizeSSZ() int {
+ // Validator Index (8 bytes) + Index (8 bytes) + Amount (8 bytes) + address length
+ return 24 + length.Addr
+}
+
+func (obj *Withdrawal) HashSSZ() ([32]byte, error) { // the [32]byte is temporary
+ return merkle_tree.HashTreeRoot(obj.Index, obj.Validator, obj.Address[:], obj.Amount)
+}
+
+func convertExecutionWithdrawalToConsensusWithdrawal(executionWithdrawal *types.Withdrawal) *Withdrawal {
+ return &Withdrawal{
+ Index: executionWithdrawal.Index,
+ Validator: executionWithdrawal.Validator,
+ Address: executionWithdrawal.Address,
+ Amount: executionWithdrawal.Amount,
+ }
+}
+
+func convertConsensusWithdrawalToExecutionWithdrawal(consensusWithdrawal *Withdrawal) *types.Withdrawal {
+ return &types.Withdrawal{
+ Index: consensusWithdrawal.Index,
+ Validator: consensusWithdrawal.Validator,
+ Address: consensusWithdrawal.Address,
+ Amount: consensusWithdrawal.Amount,
+ }
+}
+
+func convertExecutionWithdrawalsToConsensusWithdrawals(executionWithdrawal []*types.Withdrawal) []*Withdrawal {
+ ret := make([]*Withdrawal, len(executionWithdrawal))
+ for i, w := range executionWithdrawal {
+ ret[i] = convertExecutionWithdrawalToConsensusWithdrawal(w)
+ }
+ return ret
+}
diff --git a/cl/domino/cases/binaryblob/case.go b/cl/domino/cases/binaryblob/case.go
index 35df5bafb3f..c088b94e94f 100644
--- a/cl/domino/cases/binaryblob/case.go
+++ b/cl/domino/cases/binaryblob/case.go
@@ -75,7 +75,7 @@ func (c *Case) Domino(ctx context.Context, slot uint64) (*cltypes.SignedBeaconBl
return nil, err
}
_, version := c.EpochAndVersion()
- blk := &cltypes.SignedBeaconBlock{}
+ blk := cltypes.NewSignedBeaconBlock(c.BeaconConfig)
err = blk.DecodeSSZ(bts, version)
if err != nil {
return nil, err
diff --git a/cl/fork/fork.go b/cl/fork/fork.go
index 7b2d4effd56..9121b768853 100644
--- a/cl/fork/fork.go
+++ b/cl/fork/fork.go
@@ -130,7 +130,6 @@ func ComputeNextForkDigest(
// Retrieve next fork version.
nextForkIndex := 0
forkList := forkList(beaconConfig.ForkVersionSchedule)
- fmt.Println(forkList, beaconConfig.ForkVersionSchedule)
for _, fork := range forkList {
if currentEpoch >= fork.epoch {
nextForkIndex++
@@ -143,7 +142,6 @@ func ComputeNextForkDigest(
return [4]byte{}, nil
}
nextForkIndex++
- fmt.Println(forkList[nextForkIndex].version)
return ComputeForkDigestForVersion(forkList[nextForkIndex].version, genesisConfig.GenesisValidatorRoot)
}
@@ -152,7 +150,7 @@ type fork struct {
version [4]byte
}
-func forkList(schedule map[[4]byte]uint64) (f []fork) {
+func forkList(schedule map[libcommon.Bytes4]uint64) (f []fork) {
for version, epoch := range schedule {
f = append(f, fork{epoch: epoch, version: version})
}
@@ -165,7 +163,7 @@ func forkList(schedule map[[4]byte]uint64) (f []fork) {
func ComputeForkDigestForVersion(currentVersion [4]byte, genesisValidatorsRoot [32]byte) (digest [4]byte, err error) {
var currentVersion32 libcommon.Hash
copy(currentVersion32[:], currentVersion[:])
- dataRoot := utils.Keccak256(currentVersion32[:], genesisValidatorsRoot[:])
+ dataRoot := utils.Sha256(currentVersion32[:], genesisValidatorsRoot[:])
// copy first four bytes to output
copy(digest[:], dataRoot[:4])
return
@@ -229,7 +227,7 @@ func ComputeDomain(
) ([]byte, error) {
var currentVersion32 libcommon.Hash
copy(currentVersion32[:], currentVersion[:])
- forkDataRoot := utils.Keccak256(currentVersion32[:], genesisValidatorsRoot[:])
+ forkDataRoot := utils.Sha256(currentVersion32[:], genesisValidatorsRoot[:])
return append(domainType, forkDataRoot[:28]...), nil
}
@@ -241,7 +239,7 @@ func ComputeSigningRoot(
if err != nil {
return [32]byte{}, err
}
- return utils.Keccak256(objRoot[:], domain), nil
+ return utils.Sha256(objRoot[:], domain), nil
}
func Domain(fork *cltypes.Fork, epoch uint64, domainType [4]byte, genesisRoot libcommon.Hash) ([]byte, error) {
diff --git a/cl/fork/fork_test.go b/cl/fork/fork_test.go
index 65bf3b30b0a..8e272facf2f 100644
--- a/cl/fork/fork_test.go
+++ b/cl/fork/fork_test.go
@@ -124,7 +124,7 @@ func TestMainnetForkDigestDenebVersion(t *testing.T) {
func TestMainnetComputeForkNextDigest(t *testing.T) {
beaconCfg := clparams.BeaconConfigs[clparams.MainnetNetwork]
genesisCfg := clparams.GenesisConfigs[clparams.MainnetNetwork]
- beaconCfg.ForkVersionSchedule = make(map[[4]byte]uint64)
+ beaconCfg.ForkVersionSchedule = make(map[common.Bytes4]uint64)
beaconCfg.ForkVersionSchedule[utils.Uint32ToBytes4(uint32(clparams.Phase0Version))] = 0
beaconCfg.ForkVersionSchedule[utils.Uint32ToBytes4(uint32(clparams.BellatrixVersion))] = 210010230210301201
digest, err := ComputeNextForkDigest(&beaconCfg, &genesisCfg)
diff --git a/cl/freezer/utils.go b/cl/freezer/utils.go
index 4d2cb25ca8b..d7b2052186e 100644
--- a/cl/freezer/utils.go
+++ b/cl/freezer/utils.go
@@ -3,9 +3,9 @@ package freezer
import (
"bytes"
"fmt"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
"github.com/ledgerwatch/erigon-lib/types/ssz"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication/ssz_snappy"
)
type marshalerHashable interface {
diff --git a/cl/merkle_tree/hasher.go b/cl/merkle_tree/hasher.go
index dafb4fbab2f..6129e09d311 100644
--- a/cl/merkle_tree/hasher.go
+++ b/cl/merkle_tree/hasher.go
@@ -86,7 +86,7 @@ func (m *merkleHasher) transactionsListRoot(transactions [][]byte) ([32]byte, er
}
lengthRoot := Uint64Root(transactionLength)
- leaves[i] = utils.Keccak256(transactionsBaseRoot[:], lengthRoot[:])
+ leaves[i] = utils.Sha256(transactionsBaseRoot[:], lengthRoot[:])
}
transactionsBaseRoot, err := MerkleizeVector(leaves, 1048576)
if err != nil {
@@ -95,5 +95,5 @@ func (m *merkleHasher) transactionsListRoot(transactions [][]byte) ([32]byte, er
countRoot := Uint64Root(txCount)
- return utils.Keccak256(transactionsBaseRoot[:], countRoot[:]), nil
+ return utils.Sha256(transactionsBaseRoot[:], countRoot[:]), nil
}
diff --git a/cl/merkle_tree/list.go b/cl/merkle_tree/list.go
index 791b6a2ab59..c039d1a5691 100644
--- a/cl/merkle_tree/list.go
+++ b/cl/merkle_tree/list.go
@@ -71,7 +71,7 @@ func BitlistRootWithLimit(bits []byte, limit uint64) ([32]byte, error) {
}
lengthRoot := Uint64Root(size)
- return utils.Keccak256(base[:], lengthRoot[:]), nil
+ return utils.Sha256(base[:], lengthRoot[:]), nil
}
func packBits(bytes []byte) [][32]byte {
@@ -123,5 +123,5 @@ func ListObjectSSZRoot[T ssz.HashableSSZ](list []T, limit uint64) ([32]byte, err
return [32]byte{}, err
}
lenLeaf := Uint64Root(uint64(len(list)))
- return utils.Keccak256(vectorLeaf[:], lenLeaf[:]), nil
+ return utils.Sha256(vectorLeaf[:], lenLeaf[:]), nil
}
diff --git a/cl/merkle_tree/merkle_root.go b/cl/merkle_tree/merkle_root.go
index a67a29b9683..ba231d4853f 100644
--- a/cl/merkle_tree/merkle_root.go
+++ b/cl/merkle_tree/merkle_root.go
@@ -52,7 +52,7 @@ func HashTreeRoot(schema ...interface{}) ([32]byte, error) {
copy(leaves[pos:], root[:])
default:
// If the element does not match any supported types, panic with an error message
- panic(fmt.Sprintf("get it out of my face, u put a bad component in the schema. index %d", i))
+ panic(fmt.Sprintf("Can't create TreeRoot: unsported type %T at index %d", i, obj))
}
// Move the position pointer to the next leaf
diff --git a/cl/persistence/base_encoding/primitives.go b/cl/persistence/base_encoding/primitives.go
new file mode 100644
index 00000000000..8fdf2f3b5ac
--- /dev/null
+++ b/cl/persistence/base_encoding/primitives.go
@@ -0,0 +1,55 @@
+package base_encoding
+
+import (
+ "encoding/binary"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+)
+
+func Encode64ToBytes4(x uint64) (out []byte) {
+ // little endian
+ out = make([]byte, 4)
+ binary.BigEndian.PutUint32(out, uint32(x))
+ return
+}
+
+func Decode64FromBytes4(buf []byte) (x uint64) {
+ // little endian
+ return uint64(binary.BigEndian.Uint32(buf))
+}
+
+// IndexAndPeriodKey encodes index and period (can be epoch/slot/epoch period) into 8 bytes
+func IndexAndPeriodKey(index, timeframe uint64) (out []byte) {
+ out = make([]byte, 8)
+ binary.BigEndian.PutUint32(out[:4], uint32(index))
+ binary.BigEndian.PutUint32(out[4:], uint32(timeframe))
+ return
+}
+
+// Encode a number with least amount of bytes
+func EncodeCompactUint64(x uint64) (out []byte) {
+ for x >= 0x80 {
+ out = append(out, byte(x)|0x80)
+ x >>= 7
+ }
+ out = append(out, byte(x))
+ return
+}
+
+// DecodeCompactUint64 decodes a number encoded with EncodeCompactUint64
+func DecodeCompactUint64(buf []byte) (x uint64) {
+ for i := 0; i < len(buf); i++ {
+ x |= uint64(buf[i]&0x7f) << (7 * uint(i))
+ if buf[i]&0x80 == 0 {
+ return
+ }
+ }
+ return
+}
+
+func EncodePeriodAndRoot(period uint32, root libcommon.Hash) []byte {
+ out := make([]byte, 36)
+ binary.BigEndian.PutUint32(out[:4], period)
+ copy(out[4:], root[:])
+ return out
+}
diff --git a/cl/persistence/base_encoding/primitives_test.go b/cl/persistence/base_encoding/primitives_test.go
new file mode 100644
index 00000000000..11c80684ecf
--- /dev/null
+++ b/cl/persistence/base_encoding/primitives_test.go
@@ -0,0 +1,76 @@
+package base_encoding
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test64(t *testing.T) {
+ number := uint64(9992)
+
+ out := Encode64ToBytes4(number)
+ require.Equal(t, Decode64FromBytes4(out), number)
+
+ out = EncodeCompactUint64(number)
+ require.Equal(t, DecodeCompactUint64(out), number)
+}
+
+func TestDiff64(t *testing.T) {
+ old := make([]byte, 800000)
+ new := make([]byte, 800008)
+ inc := 1
+ for i := 0; i < 80; i++ {
+ if i%9 == 0 {
+ inc++
+ }
+ old[i] = byte(i)
+ new[i] = byte(i + inc)
+ }
+
+ var b bytes.Buffer
+
+ err := ComputeCompressedSerializedUint64ListDiff(&b, old, new)
+ require.NoError(t, err)
+
+ out := b.Bytes()
+ new2, err := ApplyCompressedSerializedUint64ListDiff(old, nil, out)
+ require.NoError(t, err)
+
+ require.Equal(t, new, new2)
+}
+
+func TestDiff64Effective(t *testing.T) {
+ sizeOld := 800
+ sizeNew := 816
+ old := make([]byte, sizeOld*121)
+ new := make([]byte, sizeNew*121)
+ previous := make([]byte, sizeOld*8)
+ expected := make([]byte, sizeNew*8)
+ for i := 0; i < sizeNew; i++ {
+ validatorOffset := i * 121
+ newNum := i + 32
+ oldNum := i + 12
+ binary.BigEndian.PutUint64(expected[i*8:], uint64(newNum))
+ binary.BigEndian.PutUint64(new[validatorOffset+80:], uint64(newNum))
+ if i < len(old)/121 {
+ binary.BigEndian.PutUint64(previous[i*8:], uint64(oldNum))
+ binary.BigEndian.PutUint64(old[validatorOffset+80:], uint64(oldNum))
+ }
+ }
+
+ var b bytes.Buffer
+
+ err := ComputeCompressedSerializedEffectiveBalancesDiff(&b, old, new)
+ require.NoError(t, err)
+
+ out := b.Bytes()
+ new2, err := ApplyCompressedSerializedUint64ListDiff(previous, nil, out)
+ require.NoError(t, err)
+ fmt.Println(previous)
+
+ require.Equal(t, new2, expected)
+}
diff --git a/cl/persistence/base_encoding/uint64_diff.go b/cl/persistence/base_encoding/uint64_diff.go
new file mode 100644
index 00000000000..1e3496082e9
--- /dev/null
+++ b/cl/persistence/base_encoding/uint64_diff.go
@@ -0,0 +1,245 @@
+package base_encoding
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+// make a sync.pool of compressors (zstd)
+var compressorPool = sync.Pool{
+ New: func() interface{} {
+ compressor, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedFastest))
+ if err != nil {
+ panic(err)
+ }
+ return compressor
+ },
+}
+
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+var plainUint64BufferPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]uint64, 1028)
+ return &b
+ },
+}
+
+var plainBytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 1028)
+ return &b
+ },
+}
+
+var repeatedPatternBufferPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]repeatedPatternEntry, 1028)
+ return &b
+ },
+}
+
+type repeatedPatternEntry struct {
+ val uint64
+ count uint32
+}
+
+func ComputeCompressedSerializedUint64ListDiff(w io.Writer, old, new []byte) error {
+ if len(old) > len(new) {
+ return fmt.Errorf("old list is longer than new list")
+ }
+
+ compressor := compressorPool.Get().(*zstd.Encoder)
+ defer compressorPool.Put(compressor)
+ compressor.Reset(w)
+
+ // Get one plain buffer from the pool
+ plainBufferPtr := plainUint64BufferPool.Get().(*[]uint64)
+ defer plainUint64BufferPool.Put(plainBufferPtr)
+ plainBuffer := *plainBufferPtr
+ plainBuffer = plainBuffer[:0]
+
+ // Get one repeated pattern buffer from the pool
+ repeatedPatternPtr := repeatedPatternBufferPool.Get().(*[]repeatedPatternEntry)
+ defer repeatedPatternBufferPool.Put(repeatedPatternPtr)
+ repeatedPattern := *repeatedPatternPtr
+ repeatedPattern = repeatedPattern[:0]
+
+ for i := 0; i < len(new); i += 8 {
+ if i+8 > len(old) {
+ // Append the remaining new bytes that were not in the old slice
+ plainBuffer = append(plainBuffer, binary.LittleEndian.Uint64(new[i:]))
+ continue
+ }
+ plainBuffer = append(plainBuffer, binary.LittleEndian.Uint64(new[i:i+8])-binary.LittleEndian.Uint64(old[i:i+8]))
+ }
+ // Find the repeated pattern
+ prevVal := plainBuffer[0]
+ count := uint32(1)
+ for i := 1; i < len(plainBuffer); i++ {
+ if plainBuffer[i] == prevVal {
+ count++
+ continue
+ }
+ repeatedPattern = append(repeatedPattern, repeatedPatternEntry{prevVal, count})
+ prevVal = plainBuffer[i]
+ count = 1
+ }
+ repeatedPattern = append(repeatedPattern, repeatedPatternEntry{prevVal, count})
+ if err := binary.Write(w, binary.BigEndian, uint32(len(repeatedPattern))); err != nil {
+ return err
+ }
+ temp := make([]byte, 8)
+
+ // Write the repeated pattern
+ for _, entry := range repeatedPattern {
+ binary.BigEndian.PutUint32(temp[:4], entry.count)
+ if _, err := compressor.Write(temp[:4]); err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint64(temp, entry.val)
+ if _, err := compressor.Write(temp); err != nil {
+ return err
+ }
+ }
+ *repeatedPatternPtr = repeatedPattern[:0]
+ *plainBufferPtr = plainBuffer[:0]
+
+ return compressor.Close()
+}
+
+func ComputeCompressedSerializedEffectiveBalancesDiff(w io.Writer, old, new []byte) error {
+ if len(old) > len(new) {
+ return fmt.Errorf("old list is longer than new list")
+ }
+
+ compressor := compressorPool.Get().(*zstd.Encoder)
+ defer compressorPool.Put(compressor)
+ compressor.Reset(w)
+
+ // Get one plain buffer from the pool
+ plainBufferPtr := plainUint64BufferPool.Get().(*[]uint64)
+ defer plainUint64BufferPool.Put(plainBufferPtr)
+ plainBuffer := *plainBufferPtr
+ plainBuffer = plainBuffer[:0]
+
+ // Get one repeated pattern buffer from the pool
+ repeatedPatternPtr := repeatedPatternBufferPool.Get().(*[]repeatedPatternEntry)
+ defer repeatedPatternBufferPool.Put(repeatedPatternPtr)
+ repeatedPattern := *repeatedPatternPtr
+ repeatedPattern = repeatedPattern[:0]
+
+ validatorSize := 121
+ for i := 0; i < len(new); i += validatorSize {
+ // 80:88
+ if i+88 > len(old) {
+ // Append the remaining new bytes that were not in the old slice
+ plainBuffer = append(plainBuffer, binary.LittleEndian.Uint64(new[i+80:i+88]))
+ continue
+ }
+ plainBuffer = append(plainBuffer, binary.LittleEndian.Uint64(new[i+80:i+88])-binary.LittleEndian.Uint64(old[i+80:i+88]))
+ }
+ // Find the repeated pattern
+ prevVal := plainBuffer[0]
+ count := uint32(1)
+ for i := 1; i < len(plainBuffer); i++ {
+ if plainBuffer[i] == prevVal {
+ count++
+ continue
+ }
+ repeatedPattern = append(repeatedPattern, repeatedPatternEntry{prevVal, count})
+ prevVal = plainBuffer[i]
+ count = 1
+ }
+ repeatedPattern = append(repeatedPattern, repeatedPatternEntry{prevVal, count})
+ if err := binary.Write(w, binary.BigEndian, uint32(len(repeatedPattern))); err != nil {
+ return err
+ }
+ temp := make([]byte, 8)
+
+ // Write the repeated pattern
+ for _, entry := range repeatedPattern {
+ binary.BigEndian.PutUint32(temp[:4], entry.count)
+ if _, err := compressor.Write(temp[:4]); err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint64(temp, entry.val)
+ if _, err := compressor.Write(temp); err != nil {
+ return err
+ }
+ }
+ *repeatedPatternPtr = repeatedPattern[:0]
+ *plainBufferPtr = plainBuffer[:0]
+
+ return compressor.Close()
+
+}
+
+func ApplyCompressedSerializedUint64ListDiff(old, out []byte, diff []byte) ([]byte, error) {
+ out = out[:0]
+
+ buffer := bufferPool.Get().(*bytes.Buffer)
+ defer bufferPool.Put(buffer)
+ buffer.Reset()
+
+ if _, err := buffer.Write(diff); err != nil {
+ return nil, err
+ }
+
+ var length uint32
+ if err := binary.Read(buffer, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ var entry repeatedPatternEntry
+
+ decompressor, err := zstd.NewReader(buffer)
+ if err != nil {
+ return nil, err
+ }
+ defer decompressor.Close()
+
+ temp := make([]byte, 8)
+ currIndex := 0
+ for i := 0; i < int(length); i++ {
+ n, err := utils.ReadZSTD(decompressor, temp[:4])
+ if err != nil && !errors.Is(err, io.EOF) {
+ return nil, err
+ }
+ if n != 4 {
+ return nil, io.EOF
+ }
+ entry.count = binary.BigEndian.Uint32(temp[:4])
+
+ n, err = utils.ReadZSTD(decompressor, temp)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return nil, err
+ }
+ if n != 8 {
+ return nil, io.EOF
+ }
+ entry.val = binary.BigEndian.Uint64(temp)
+ for j := 0; j < int(entry.count); j++ {
+ if currIndex+8 > len(old) {
+ // Append the remaining new bytes that were not in the old slice
+ out = binary.LittleEndian.AppendUint64(out, entry.val)
+ currIndex += 8
+ continue
+ }
+ out = binary.LittleEndian.AppendUint64(out, binary.LittleEndian.Uint64(old[currIndex:currIndex+8])+entry.val)
+ currIndex += 8
+ }
+ }
+
+ return out, nil
+}
diff --git a/cl/persistence/beacon_indicies/indicies.go b/cl/persistence/beacon_indicies/indicies.go
new file mode 100644
index 00000000000..a58b0de8c96
--- /dev/null
+++ b/cl/persistence/beacon_indicies/indicies.go
@@ -0,0 +1,275 @@
+package beacon_indicies
+
+import (
+ "context"
+ "fmt"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+ _ "modernc.org/sqlite"
+)
+
+func WriteHighestFinalized(tx kv.RwTx, slot uint64) error {
+ return tx.Put(kv.HighestFinalized, kv.HighestFinalizedKey, base_encoding.Encode64ToBytes4(slot))
+}
+
+func ReadHighestFinalized(tx kv.Tx) (uint64, error) {
+ val, err := tx.GetOne(kv.HighestFinalized, kv.HighestFinalizedKey)
+ if err != nil {
+ return 0, err
+ }
+ if len(val) == 0 {
+ return 0, nil
+ }
+ return base_encoding.Decode64FromBytes4(val), nil
+}
+
+// WriteBlockRootSlot writes the slot associated with a block root.
+func WriteHeaderSlot(tx kv.RwTx, blockRoot libcommon.Hash, slot uint64) error {
+ return tx.Put(kv.BlockRootToSlot, blockRoot[:], base_encoding.Encode64ToBytes4(slot))
+}
+
+func ReadBlockSlotByBlockRoot(tx kv.Tx, blockRoot libcommon.Hash) (*uint64, error) {
+ slotBytes, err := tx.GetOne(kv.BlockRootToSlot, blockRoot[:])
+ if err != nil {
+ return nil, err
+ }
+ if len(slotBytes) == 0 {
+ return nil, nil
+ }
+ slot := new(uint64)
+ *slot = base_encoding.Decode64FromBytes4(slotBytes)
+ return slot, nil
+}
+
+// WriteBlockRootSlot writes the slot associated with a block root.
+func WriteStateRoot(tx kv.RwTx, blockRoot libcommon.Hash, stateRoot libcommon.Hash) error {
+ if err := tx.Put(kv.BlockRootToStateRoot, blockRoot[:], stateRoot[:]); err != nil {
+ return err
+ }
+ return tx.Put(kv.StateRootToBlockRoot, stateRoot[:], blockRoot[:])
+}
+
+func ReadStateRootByBlockRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (libcommon.Hash, error) {
+ var stateRoot libcommon.Hash
+
+ sRoot, err := tx.GetOne(kv.BlockRootToStateRoot, blockRoot[:])
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+
+ copy(stateRoot[:], sRoot)
+
+ return stateRoot, nil
+}
+
+func ReadBlockRootByStateRoot(tx kv.Tx, stateRoot libcommon.Hash) (libcommon.Hash, error) {
+ var blockRoot libcommon.Hash
+
+ bRoot, err := tx.GetOne(kv.StateRootToBlockRoot, stateRoot[:])
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+
+ copy(blockRoot[:], bRoot)
+
+ return blockRoot, nil
+}
+
+func ReadCanonicalBlockRoot(tx kv.Tx, slot uint64) (libcommon.Hash, error) {
+ var blockRoot libcommon.Hash
+
+ bRoot, err := tx.GetOne(kv.CanonicalBlockRoots, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+
+ copy(blockRoot[:], bRoot)
+ return blockRoot, nil
+}
+
+func WriteLastBeaconSnapshot(tx kv.RwTx, slot uint64) error {
+ return tx.Put(kv.LastBeaconSnapshot, []byte(kv.LastBeaconSnapshotKey), base_encoding.Encode64ToBytes4(slot))
+}
+
+func ReadLastBeaconSnapshot(tx kv.Tx) (uint64, error) {
+ val, err := tx.GetOne(kv.LastBeaconSnapshot, []byte(kv.LastBeaconSnapshotKey))
+ if err != nil {
+ return 0, err
+ }
+ if len(val) == 0 {
+ return 0, nil
+ }
+ return base_encoding.Decode64FromBytes4(val), nil
+}
+
+func MarkRootCanonical(ctx context.Context, tx kv.RwTx, slot uint64, blockRoot libcommon.Hash) error {
+ return tx.Put(kv.CanonicalBlockRoots, base_encoding.Encode64ToBytes4(slot), blockRoot[:])
+}
+
+func WriteExecutionBlockNumber(tx kv.RwTx, blockRoot libcommon.Hash, blockNumber uint64) error {
+ return tx.Put(kv.BlockRootToBlockNumber, blockRoot[:], base_encoding.Encode64ToBytes4(blockNumber))
+}
+
+func WriteExecutionBlockHash(tx kv.RwTx, blockRoot, blockHash libcommon.Hash) error {
+ return tx.Put(kv.BlockRootToBlockHash, blockRoot[:], blockHash[:])
+}
+
+func ReadExecutionBlockNumber(tx kv.Tx, blockRoot libcommon.Hash) (*uint64, error) {
+ val, err := tx.GetOne(kv.BlockRootToBlockNumber, blockRoot[:])
+ if err != nil {
+ return nil, err
+ }
+ if len(val) == 0 {
+ return nil, nil
+ }
+ ret := new(uint64)
+ *ret = base_encoding.Decode64FromBytes4(val)
+ return ret, nil
+}
+
+func ReadExecutionBlockHash(tx kv.Tx, blockRoot libcommon.Hash) (libcommon.Hash, error) {
+ val, err := tx.GetOne(kv.BlockRootToBlockHash, blockRoot[:])
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+ if len(val) == 0 {
+ return libcommon.Hash{}, nil
+ }
+ return libcommon.BytesToHash(val), nil
+}
+
+func WriteBeaconBlockHeader(ctx context.Context, tx kv.RwTx, signedHeader *cltypes.SignedBeaconBlockHeader) error {
+ headersBytes, err := signedHeader.EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ blockRoot, err := signedHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ return tx.Put(kv.BeaconBlockHeaders, blockRoot[:], headersBytes)
+}
+
+func WriteBeaconBlockHeaderAndIndicies(ctx context.Context, tx kv.RwTx, signedHeader *cltypes.SignedBeaconBlockHeader, forceCanonical bool) error {
+ blockRoot, err := signedHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ if err := WriteBeaconBlockHeader(ctx, tx, signedHeader); err != nil {
+ return err
+ }
+ if err := WriteHeaderSlot(tx, blockRoot, signedHeader.Header.Slot); err != nil {
+ return err
+ }
+ if err := WriteStateRoot(tx, blockRoot, signedHeader.Header.Root); err != nil {
+ return err
+ }
+ if err := WriteParentBlockRoot(ctx, tx, blockRoot, signedHeader.Header.ParentRoot); err != nil {
+ return err
+ }
+ if forceCanonical {
+ if err := MarkRootCanonical(ctx, tx, signedHeader.Header.Slot, blockRoot); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func ReadParentBlockRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (libcommon.Hash, error) {
+ var parentRoot libcommon.Hash
+
+ pRoot, err := tx.GetOne(kv.BlockRootToParentRoot, blockRoot[:])
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+
+ copy(parentRoot[:], pRoot)
+
+ return parentRoot, nil
+}
+
+func WriteParentBlockRoot(ctx context.Context, tx kv.RwTx, blockRoot, parentRoot libcommon.Hash) error {
+ return tx.Put(kv.BlockRootToParentRoot, blockRoot[:], parentRoot[:])
+}
+
+func TruncateCanonicalChain(ctx context.Context, tx kv.RwTx, slot uint64) error {
+ return tx.ForEach(kv.CanonicalBlockRoots, base_encoding.Encode64ToBytes4(slot), func(k, _ []byte) error {
+ return tx.Delete(kv.CanonicalBlockRoots, k)
+ })
+}
+
+func PruneSignedHeaders(tx kv.RwTx, from uint64) error {
+ cursor, err := tx.RwCursor(kv.BeaconBlockHeaders)
+ if err != nil {
+ return err
+ }
+ for k, _, err := cursor.Seek(base_encoding.Encode64ToBytes4(from)); err == nil && k != nil; k, _, err = cursor.Prev() {
+ if err != nil {
+ return err
+ }
+ if err := cursor.DeleteCurrent(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func RangeBlockRoots(ctx context.Context, tx kv.Tx, fromSlot, toSlot uint64, fn func(slot uint64, beaconBlockRoot libcommon.Hash) bool) error {
+ cursor, err := tx.Cursor(kv.CanonicalBlockRoots)
+ if err != nil {
+ return err
+ }
+ for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(fromSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= toSlot; k, v, err = cursor.Next() {
+ if !fn(base_encoding.Decode64FromBytes4(k), libcommon.BytesToHash(v)) {
+ break
+ }
+ }
+ return err
+}
+
+func PruneBlockRoots(ctx context.Context, tx kv.RwTx, fromSlot, toSlot uint64) error {
+ cursor, err := tx.RwCursor(kv.CanonicalBlockRoots)
+ if err != nil {
+ return err
+ }
+ for k, _, err := cursor.Seek(base_encoding.Encode64ToBytes4(fromSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= toSlot; k, _, err = cursor.Next() {
+ if err := cursor.DeleteCurrent(); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func ReadBeaconBlockRootsInSlotRange(ctx context.Context, tx kv.Tx, fromSlot, count uint64) ([]libcommon.Hash, []uint64, error) {
+ blockRoots := make([]libcommon.Hash, 0, count)
+ slots := make([]uint64, 0, count)
+ err := RangeBlockRoots(ctx, tx, fromSlot, fromSlot+count, func(slot uint64, beaconBlockRoot libcommon.Hash) bool {
+ blockRoots = append(blockRoots, beaconBlockRoot)
+ slots = append(slots, slot)
+ return true
+ })
+ return blockRoots, slots, err
+}
+
+func ReadSignedHeaderByBlockRoot(ctx context.Context, tx kv.Tx, blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlockHeader, bool, error) {
+ h := &cltypes.SignedBeaconBlockHeader{Header: &cltypes.BeaconBlockHeader{}}
+ headerBytes, err := tx.GetOne(kv.BeaconBlockHeaders, blockRoot[:])
+ if err != nil {
+ return nil, false, err
+ }
+ if len(headerBytes) == 0 {
+ return nil, false, nil
+ }
+ if err := h.DecodeSSZ(headerBytes, 0); err != nil {
+ return nil, false, fmt.Errorf("failed to decode BeaconHeader: %v", err)
+ }
+ canonical, err := ReadCanonicalBlockRoot(tx, h.Header.Slot)
+ if err != nil {
+ return nil, false, err
+ }
+ return h, canonical == blockRoot, nil
+}
diff --git a/cl/persistence/beacon_indicies/indicies_test.go b/cl/persistence/beacon_indicies/indicies_test.go
new file mode 100644
index 00000000000..3db10d48eca
--- /dev/null
+++ b/cl/persistence/beacon_indicies/indicies_test.go
@@ -0,0 +1,174 @@
+package beacon_indicies
+
+import (
+ "context"
+ "testing"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/stretchr/testify/require"
+)
+
+func setupTestDB(t *testing.T) kv.RwDB {
+ // Create an in-memory SQLite DB for testing purposes
+ db := memdb.NewTestDB(t)
+ return db
+}
+
+func TestWriteBlockRoot(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ // Mock a block
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block.Block.Slot = 56
+ block.EncodingSizeSSZ()
+
+ require.NoError(t, WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, block.SignedBeaconBlockHeader(), false))
+
+ // Try to retrieve the block's slot by its blockRoot and verify
+ blockRoot, err := block.Block.HashSSZ()
+ require.NoError(t, err)
+
+ retrievedSlot, err := ReadBlockSlotByBlockRoot(tx, blockRoot)
+ require.NoError(t, err)
+ require.Equal(t, block.Block.Slot, *retrievedSlot)
+
+ canonicalRoot, err := ReadCanonicalBlockRoot(tx, *retrievedSlot)
+ require.NoError(t, err)
+ require.Equal(t, libcommon.Hash{}, canonicalRoot)
+
+ err = MarkRootCanonical(context.Background(), tx, *retrievedSlot, blockRoot)
+ require.NoError(t, err)
+
+ canonicalRoot, err = ReadCanonicalBlockRoot(tx, *retrievedSlot)
+ require.NoError(t, err)
+ require.Equal(t, libcommon.Hash(blockRoot), canonicalRoot)
+}
+
+func TestReadParentBlockRoot(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ mockParentRoot := libcommon.Hash{1}
+ // Mock a block
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block.Block.Slot = 56
+ block.Block.ParentRoot = mockParentRoot
+ block.EncodingSizeSSZ()
+
+ require.NoError(t, WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, block.SignedBeaconBlockHeader(), false))
+
+ // Try to retrieve the block's slot by its blockRoot and verify
+ blockRoot, err := block.Block.HashSSZ()
+ require.NoError(t, err)
+
+ retrieveParentRoot, err := ReadParentBlockRoot(context.Background(), tx, blockRoot)
+ require.NoError(t, err)
+ require.Equal(t, mockParentRoot, retrieveParentRoot)
+}
+
+func TestTruncateCanonicalChain(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ mockParentRoot := libcommon.Hash{1}
+ // Mock a block
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block.Block.Slot = 56
+ block.Block.ParentRoot = mockParentRoot
+ block.EncodingSizeSSZ()
+
+ require.NoError(t, WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, block.SignedBeaconBlockHeader(), true))
+
+ // Try to retrieve the block's slot by its blockRoot and verify
+ blockRoot, err := block.Block.HashSSZ()
+ require.NoError(t, err)
+
+ canonicalRoot, err := ReadCanonicalBlockRoot(tx, block.Block.Slot)
+ require.NoError(t, err)
+ require.Equal(t, libcommon.Hash(blockRoot), canonicalRoot)
+
+ require.NoError(t, TruncateCanonicalChain(context.Background(), tx, 0))
+
+ canonicalRoot, err = ReadCanonicalBlockRoot(tx, block.Block.Slot)
+ require.NoError(t, err)
+ require.Equal(t, canonicalRoot, libcommon.Hash{})
+}
+
+func TestReadBeaconBlockHeader(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ mockParentRoot := libcommon.Hash{1}
+ mockSignature := [96]byte{23}
+
+ // Mock a block
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ block.Block.Slot = 56
+ block.Block.ParentRoot = mockParentRoot
+ block.Signature = mockSignature
+
+ canonical := true
+ block.EncodingSizeSSZ()
+
+ require.NoError(t, WriteBeaconBlockHeaderAndIndicies(context.Background(), tx, block.SignedBeaconBlockHeader(), canonical))
+
+ // Try to retrieve the block's slot by its blockRoot and verify
+ blockRoot, err := block.Block.HashSSZ()
+ require.NoError(t, err)
+
+ header, isCanonical, err := ReadSignedHeaderByBlockRoot(context.Background(), tx, blockRoot)
+ require.NoError(t, err)
+ require.Equal(t, isCanonical, canonical)
+ require.NotNil(t, header)
+
+ headerRoot, err := header.Header.HashSSZ()
+ require.NoError(t, err)
+
+ require.Equal(t, headerRoot, blockRoot)
+
+}
+
+func TestWriteExecutionBlockNumber(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ tHash := libcommon.HexToHash("0x2")
+ require.NoError(t, WriteExecutionBlockNumber(tx, tHash, 1))
+ require.NoError(t, WriteExecutionBlockNumber(tx, tHash, 2))
+ require.NoError(t, WriteExecutionBlockNumber(tx, tHash, 3))
+
+ // Try to retrieve the block's slot by its blockRoot and verify
+ blockNumber, err := ReadExecutionBlockNumber(tx, tHash)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), *blockNumber)
+}
+
+func TestWriteExecutionBlockHash(t *testing.T) {
+ db := setupTestDB(t)
+ defer db.Close()
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ tHash := libcommon.HexToHash("0x2")
+ tHash2 := libcommon.HexToHash("0x3")
+ require.NoError(t, WriteExecutionBlockHash(tx, tHash, tHash2))
+ // Try to retrieve the block's slot by its blockRoot and verify
+ tHash3, err := ReadExecutionBlockHash(tx, tHash)
+ require.NoError(t, err)
+ require.Equal(t, tHash2, tHash3)
+}
diff --git a/cl/persistence/block_saver.go b/cl/persistence/block_saver.go
new file mode 100644
index 00000000000..d2497e335f7
--- /dev/null
+++ b/cl/persistence/block_saver.go
@@ -0,0 +1,185 @@
+package persistence
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
+ "go.uber.org/zap/buffer"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+ "github.com/spf13/afero"
+)
+
+const subDivisionFolderSize = 10_000
+
+type beaconChainDatabaseFilesystem struct {
+ rawDB RawBeaconBlockChain
+ cfg *clparams.BeaconChainConfig
+
+ executionEngine execution_client.ExecutionEngine
+}
+
+func NewBeaconChainDatabaseFilesystem(rawDB RawBeaconBlockChain, executionEngine execution_client.ExecutionEngine, cfg *clparams.BeaconChainConfig) BeaconChainDatabase {
+ return beaconChainDatabaseFilesystem{
+ rawDB: rawDB,
+ cfg: cfg,
+ executionEngine: executionEngine,
+ }
+}
+
+func (b beaconChainDatabaseFilesystem) GetBlock(ctx context.Context, tx kv.Tx, slot uint64) (*peers.PeeredObject[*cltypes.SignedBeaconBlock], error) {
+ blockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, slot)
+ if err != nil {
+ return nil, err
+ }
+ if blockRoot == (libcommon.Hash{}) {
+ return nil, nil
+ }
+
+ r, err := b.rawDB.BlockReader(ctx, slot, blockRoot)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+
+ block := cltypes.NewSignedBeaconBlock(b.cfg)
+ version := b.cfg.GetCurrentStateVersion(slot / b.cfg.SlotsPerEpoch)
+ if err := ssz_snappy.DecodeAndReadNoForkDigest(r, block, version); err != nil {
+ return nil, err
+ }
+
+ return &peers.PeeredObject[*cltypes.SignedBeaconBlock]{Data: block}, nil
+}
+
+func (b beaconChainDatabaseFilesystem) GetRange(ctx context.Context, tx kv.Tx, from uint64, count uint64) (*peers.PeeredObject[[]*cltypes.SignedBeaconBlock], error) {
+ // Retrieve block roots for each ranged slot
+ beaconBlockRooots, slots, err := beacon_indicies.ReadBeaconBlockRootsInSlotRange(ctx, tx, from, count)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(beaconBlockRooots) == 0 {
+ return &peers.PeeredObject[[]*cltypes.SignedBeaconBlock]{}, nil
+ }
+
+ blocks := []*cltypes.SignedBeaconBlock{}
+ for idx, blockRoot := range beaconBlockRooots {
+ slot := slots[idx]
+
+ r, err := b.rawDB.BlockReader(ctx, slot, blockRoot)
+ if errors.Is(err, afero.ErrFileNotFound) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+
+ block := cltypes.NewSignedBeaconBlock(b.cfg)
+ version := b.cfg.GetCurrentStateVersion(slot / b.cfg.SlotsPerEpoch)
+ if err := ssz_snappy.DecodeAndReadNoForkDigest(r, block, version); err != nil {
+ return nil, err
+ }
+
+ blocks = append(blocks, block)
+ }
+ return &peers.PeeredObject[[]*cltypes.SignedBeaconBlock]{Data: blocks}, nil
+
+}
+
+func (b beaconChainDatabaseFilesystem) PurgeRange(ctx context.Context, tx kv.Tx, from uint64, count uint64) error {
+ if err := beacon_indicies.RangeBlockRoots(ctx, tx, from, from+count, func(slot uint64, beaconBlockRoot libcommon.Hash) bool {
+ b.rawDB.DeleteBlock(ctx, slot, beaconBlockRoot)
+ return true
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b beaconChainDatabaseFilesystem) WriteBlock(ctx context.Context, tx kv.RwTx, block *cltypes.SignedBeaconBlock, canonical bool) error {
+ blockRoot, err := block.Block.HashSSZ()
+ if err != nil {
+ return err
+ }
+
+ w, err := b.rawDB.BlockWriter(ctx, block.Block.Slot, blockRoot)
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+
+ if fp, ok := w.(afero.File); ok {
+ err = fp.Truncate(0)
+ if err != nil {
+ return err
+ }
+ _, err = fp.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = ssz_snappy.EncodeAndWrite(w, block)
+ if err != nil {
+ return err
+ }
+
+ if fp, ok := w.(afero.File); ok {
+ err = fp.Sync()
+ if err != nil {
+ return err
+ }
+ }
+ bodyRoot, err := block.Block.Body.HashSSZ()
+ if err != nil {
+ return err
+ }
+ if block.Version() >= clparams.BellatrixVersion {
+ if err := beacon_indicies.WriteExecutionBlockNumber(tx, blockRoot, block.Block.Body.ExecutionPayload.BlockNumber); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteExecutionBlockHash(tx, blockRoot, block.Block.Body.ExecutionPayload.BlockHash); err != nil {
+ return err
+ }
+ }
+
+ if err := beacon_indicies.WriteBeaconBlockHeaderAndIndicies(ctx, tx, &cltypes.SignedBeaconBlockHeader{
+ Signature: block.Signature,
+ Header: &cltypes.BeaconBlockHeader{
+ Slot: block.Block.Slot,
+ ParentRoot: block.Block.ParentRoot,
+ ProposerIndex: block.Block.ProposerIndex,
+ Root: block.Block.StateRoot,
+ BodyRoot: bodyRoot,
+ },
+ }, canonical); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SlotToPaths define the file structure to store a block
+//
+// "/signedBeaconBlock/{slot/10_000}/{root}.ssz_snappy"
+func rootToPaths(slot uint64, root libcommon.Hash, config *clparams.BeaconChainConfig) (folderPath string, filePath string) {
+ // bufio
+ buffer := bPool.Get().(*buffer.Buffer)
+ defer bPool.Put(buffer)
+ buffer.Reset()
+
+ fmt.Fprintf(buffer, "%d/%x.sz", slot/subDivisionFolderSize, root)
+ split := strings.Split(buffer.String(), "/")
+ return split[0], buffer.String()
+}
diff --git a/cl/persistence/block_saver_test.go b/cl/persistence/block_saver_test.go
new file mode 100644
index 00000000000..c49e593feba
--- /dev/null
+++ b/cl/persistence/block_saver_test.go
@@ -0,0 +1,138 @@
+package persistence
+
+import (
+ "context"
+ "testing"
+
+ _ "embed"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/spf13/afero"
+ "github.com/stretchr/testify/require"
+)
+
+type mockEngine struct {
+ blocks map[uint64]*types.Block
+}
+
+func newMockEngine() execution_client.ExecutionEngine {
+ return &mockEngine{
+ blocks: make(map[uint64]*types.Block),
+ }
+}
+
+func (m *mockEngine) ForkChoiceUpdate(finalized libcommon.Hash, head libcommon.Hash) error {
+ panic("unimplemented")
+}
+
+func (m *mockEngine) FrozenBlocks() uint64 {
+ panic("unimplemented")
+}
+
+func (m *mockEngine) NewPayload(payload *cltypes.Eth1Block, beaconParentRoot *libcommon.Hash) (bool, error) {
+ panic("unimplemented")
+}
+
+func (m *mockEngine) SupportInsertion() bool {
+ return true
+}
+
+func (m *mockEngine) InsertBlocks([]*types.Block) error {
+ panic("unimplemented")
+}
+
+func (m *mockEngine) IsCanonicalHash(libcommon.Hash) (bool, error) {
+ panic("unimplemented")
+}
+
+func (m *mockEngine) Ready() (bool, error) {
+ return true, nil
+}
+
+func (m *mockEngine) InsertBlock(b *types.Block) error {
+ m.blocks[b.NumberU64()] = b
+ return nil
+}
+
+func (m *mockEngine) GetBodiesByRange(start, count uint64) ([]*types.RawBody, error) {
+ bds := []*types.RawBody{}
+ for i := start; i < start+count; i++ {
+
+ blk, ok := m.blocks[i]
+ if !ok {
+ break
+ }
+ bds = append(bds, blk.RawBody())
+ }
+ return bds, nil
+}
+
+func (m *mockEngine) GetBodiesByHashes(hashes []libcommon.Hash) ([]*types.RawBody, error) {
+ panic("unimplemented")
+}
+
+//go:embed test_data/test_block.ssz_snappy
+var testBlock []byte
+
+func getTestBlock() *cltypes.SignedBeaconBlock {
+ enc, err := utils.DecompressSnappy(testBlock)
+ if err != nil {
+ panic(err)
+ }
+ bcBlock := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ if err := bcBlock.DecodeSSZ(enc, int(clparams.CapellaVersion)); err != nil {
+ panic(err)
+ }
+ bcBlock.Block.Slot = (clparams.MainnetBeaconConfig.CapellaForkEpoch + 1) * 32
+ bcBlock.Block.Body.ExecutionPayload.Transactions = solid.NewTransactionsSSZFromTransactions(nil)
+ bcBlock.Block.Body.ExecutionPayload.BlockNumber = 100
+ bcBlock.Block.Body.ExecutionPayload.BlockHash = libcommon.HexToHash("0x78e6ce0d5a80c7416138af475d20c0a0a22124ae67b6dc5a0d0d0fe6f95e365d")
+ return bcBlock
+}
+
+func setupStore(t *testing.T, full bool) (BeaconChainDatabase, kv.RwDB, execution_client.ExecutionEngine) {
+ // Open an in-memory SQLite database for testing
+ db := memdb.NewTestDB(t)
+ // Create an in-memory filesystem
+ fs := afero.NewMemMapFs()
+ engine := newMockEngine()
+ return NewBeaconChainDatabaseFilesystem(NewAferoRawBlockSaver(fs, &clparams.MainnetBeaconConfig), engine, &clparams.MainnetBeaconConfig), db, engine
+}
+
+func TestBlockSaverStoreLoadPurgeFull(t *testing.T) {
+ store, db, _ := setupStore(t, true)
+ defer db.Close()
+
+ tx, _ := db.BeginRw(context.Background())
+ defer tx.Rollback()
+
+ ctx := context.Background()
+ block := getTestBlock()
+ require.NoError(t, store.WriteBlock(ctx, tx, block, true))
+
+ blks, err := store.GetRange(context.Background(), tx, block.Block.Slot, 1)
+ require.NoError(t, err)
+ require.Equal(t, len(blks.Data), 1)
+
+ expectedRoot, err := block.HashSSZ()
+ require.NoError(t, err)
+
+ haveRoot, err := blks.Data[0].HashSSZ()
+ require.NoError(t, err)
+
+ require.Equal(t, expectedRoot, haveRoot)
+
+ require.NoError(t, store.PurgeRange(ctx, tx, 0, 99999999999)) // THE PUURGE
+
+ newBlks, err := store.GetRange(context.Background(), tx, block.Block.Slot, 1)
+ require.NoError(t, err)
+ require.Equal(t, len(newBlks.Data), 0)
+}
diff --git a/cl/persistence/block_store.go b/cl/persistence/block_store.go
new file mode 100644
index 00000000000..c3ad0492497
--- /dev/null
+++ b/cl/persistence/block_store.go
@@ -0,0 +1,161 @@
+package persistence
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/phase1/network"
+ "github.com/ledgerwatch/erigon/cl/rpc"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+ "github.com/tidwall/btree"
+)
+
+var _ BlockSource = (*BeaconRpcSource)(nil)
+
+type BeaconRpcSource struct {
+ rpc *rpc.BeaconRpcP2P
+}
+
+func (b *BeaconRpcSource) SaveBlocks(ctx context.Context, blocks *peers.PeeredObject[*cltypes.SignedBeaconBlock]) error {
+ // it is a no-op because there is no need to do this
+ return nil
+}
+
+func NewBeaconRpcSource(rpc *rpc.BeaconRpcP2P) *BeaconRpcSource {
+ return &BeaconRpcSource{
+ rpc: rpc,
+ }
+}
+
+func (*BeaconRpcSource) GetBlock(ctx context.Context, tx kv.Tx, slot uint64) (*peers.PeeredObject[*cltypes.SignedBeaconBlock], error) {
+ panic("unimplemented")
+}
+
+func (b *BeaconRpcSource) GetRange(ctx context.Context, _ kv.Tx, from uint64, count uint64) (*peers.PeeredObject[[]*cltypes.SignedBeaconBlock], error) {
+ if count == 0 {
+ return nil, nil
+ }
+ var responses *peers.PeeredObject[[]*cltypes.SignedBeaconBlock]
+ reqInterval := time.NewTicker(200 * time.Millisecond)
+ doneRespCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock], 1)
+ defer reqInterval.Stop()
+
+ for {
+ select {
+ case <-reqInterval.C:
+ go func() {
+ responses, pid, err := b.rpc.SendBeaconBlocksByRangeReq(ctx, from, count)
+ if err != nil {
+ return
+ }
+ select {
+ case doneRespCh <- &peers.PeeredObject[[]*cltypes.SignedBeaconBlock]{Data: responses, Peer: pid}:
+ default:
+ }
+ }()
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case responses = <-doneRespCh:
+ return responses, nil
+ }
+ }
+}
+
+// a noop for rpc source since we always return new data
+func (b *BeaconRpcSource) PurgeRange(ctx context.Context, _ kv.Tx, from uint64, count uint64) error {
+ return nil
+}
+
+var _ BlockSource = (*GossipSource)(nil)
+
+type GossipSource struct {
+ gossip *network.GossipManager
+ gossipBlocks <-chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]
+
+ mu sync.Mutex
+ blocks *btree.Map[uint64, chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]]
+}
+
+func (*GossipSource) GetBlock(ctx context.Context, tx kv.Tx, slot uint64) (*peers.PeeredObject[*cltypes.SignedBeaconBlock], error) {
+ panic("unimplemented")
+}
+
+func NewGossipSource(ctx context.Context, gossip *network.GossipManager) *GossipSource {
+ g := &GossipSource{
+ gossip: gossip,
+ gossipBlocks: gossip.SubscribeSignedBeaconBlocks(ctx),
+ blocks: btree.NewMap[uint64, chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]](32),
+ }
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case recv := <-g.gossipBlocks:
+ ch := g.grabOrCreate(ctx, recv.Data.Block.Slot)
+ select {
+ case ch <- recv:
+ default:
+ }
+ }
+ }
+ }()
+ return g
+}
+
+func (b *GossipSource) grabOrCreate(ctx context.Context, id uint64) chan *peers.PeeredObject[*cltypes.SignedBeaconBlock] {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ ch, ok := b.blocks.Get(id)
+ if !ok {
+ ch = make(chan *peers.PeeredObject[*cltypes.SignedBeaconBlock], 3)
+ b.blocks.Set(id, ch)
+ }
+ // if there are ever more than 512 blocks, clear the last 256 blocks
+ if b.blocks.Len() > 512 {
+ b.purgeRange(ctx, nil, 0, id-256)
+ }
+ return ch
+}
+func (b *GossipSource) GetRange(ctx context.Context, _ kv.Tx, from uint64, count uint64) (*peers.PeeredObject[[]*cltypes.SignedBeaconBlock], error) {
+ out := &peers.PeeredObject[[]*cltypes.SignedBeaconBlock]{}
+ for i := from; i < from+count; i++ {
+ ch := b.grabOrCreate(ctx, i)
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case item := <-ch:
+ out.Data = append(out.Data, item.Data)
+ out.Peer = item.Peer
+ }
+ }
+ return out, nil
+}
+
+func (b *GossipSource) PurgeRange(ctx context.Context, tx kv.Tx, from uint64, count uint64) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.purgeRange(ctx, tx, from, count)
+}
+
+func (b *GossipSource) purgeRange(ctx context.Context, _ kv.Tx, from uint64, count uint64) error {
+ initSize := count
+ if initSize > 256 {
+ initSize = 256
+ }
+ xs := make([]uint64, 0, initSize)
+ b.blocks.Ascend(from, func(key uint64, value chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]) bool {
+ if key >= from+count {
+ return false
+ }
+ xs = append(xs, key)
+ return true
+ })
+ for _, v := range xs {
+ b.blocks.Delete(v)
+ }
+ return nil
+}
diff --git a/cl/persistence/db_config/db_config.go b/cl/persistence/db_config/db_config.go
new file mode 100644
index 00000000000..8845fa4a20a
--- /dev/null
+++ b/cl/persistence/db_config/db_config.go
@@ -0,0 +1,36 @@
+package db_config
+
+import (
+ "bytes"
+ "context"
+ "math"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/ethdb/cbor"
+)
+
+type DatabaseConfiguration struct{ PruneDepth uint64 }
+
+var DefaultDatabaseConfiguration = DatabaseConfiguration{PruneDepth: math.MaxUint64}
+
+func WriteConfigurationIfNotExist(ctx context.Context, tx kv.RwTx, cfg DatabaseConfiguration) error {
+ var b bytes.Buffer
+ if err := cbor.Encoder(&b).Encode(cfg); err != nil {
+ return err
+ }
+
+ return tx.Put(kv.DatabaseInfo, []byte("config"), b.Bytes())
+}
+
+func ReadConfiguration(ctx context.Context, tx kv.Tx) (DatabaseConfiguration, error) {
+ var cfg DatabaseConfiguration
+
+ cfgEncoded, err := tx.GetOne(kv.DatabaseInfo, []byte("config"))
+ if err != nil {
+ return cfg, err
+ }
+ if err := cbor.Decoder(bytes.NewReader(cfgEncoded)).Decode(&cfg); err != nil {
+ return cfg, err
+ }
+ return cfg, err
+}
diff --git a/cl/persistence/db_config/db_config_test.go b/cl/persistence/db_config/db_config_test.go
new file mode 100644
index 00000000000..0133f4862a5
--- /dev/null
+++ b/cl/persistence/db_config/db_config_test.go
@@ -0,0 +1,24 @@
+package db_config
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
+)
+
+func TestDBConfig(t *testing.T) {
+ db := memdb.NewTestDB(t)
+ defer db.Close()
+ tx, err := db.BeginRw(context.Background())
+ defer tx.Rollback()
+ require.NoError(t, err)
+
+ c := DatabaseConfiguration{PruneDepth: 69}
+ require.NoError(t, WriteConfigurationIfNotExist(context.Background(), tx, c))
+ cfg, err := ReadConfiguration(context.Background(), tx)
+ require.NoError(t, err)
+ require.Equal(t, cfg, c)
+}
diff --git a/cl/persistence/format/chunk_encoding/chunks.go b/cl/persistence/format/chunk_encoding/chunks.go
new file mode 100644
index 00000000000..28afb2008d9
--- /dev/null
+++ b/cl/persistence/format/chunk_encoding/chunks.go
@@ -0,0 +1,68 @@
+package chunk_encoding
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+type DataType int
+
+const (
+ ChunkDataType DataType = 0
+ PointerDataType DataType = 1
+)
+
+// writeChunk writes a chunk to the writer.
+func WriteChunk(w io.Writer, buf []byte, t DataType) error {
+
+ // prefix is type of chunk + length of chunk
+ prefix := make([]byte, 8)
+ binary.BigEndian.PutUint64(prefix, uint64(len(buf)))
+ prefix[0] = byte(t)
+ if _, err := w.Write(prefix); err != nil {
+ return err
+ }
+ if _, err := w.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+func ReadChunk(r io.Reader, out io.Writer) (t DataType, err error) {
+ prefix := make([]byte, 8)
+ if _, err := r.Read(prefix); err != nil {
+ return DataType(0), err
+ }
+ t = DataType(prefix[0])
+ prefix[0] = 0
+
+ bufLen := binary.BigEndian.Uint64(prefix)
+ if bufLen == 0 {
+ return
+ }
+
+ if _, err = io.CopyN(out, r, int64(bufLen)); err != nil {
+ return
+ }
+ return
+}
+
+func ReadChunkToBytes(r io.Reader) (b []byte, t DataType, err error) {
+ prefix := make([]byte, 8)
+ if _, err := r.Read(prefix); err != nil {
+ return nil, DataType(0), err
+ }
+ t = DataType(prefix[0])
+ prefix[0] = 0
+
+ bufLen := binary.BigEndian.Uint64(prefix)
+ if bufLen == 0 {
+ return
+ }
+ b = make([]byte, bufLen)
+
+ if _, err = r.Read(b); err != nil {
+ return
+ }
+ return
+}
diff --git a/cl/persistence/format/snapshot_format/blocks.go b/cl/persistence/format/snapshot_format/blocks.go
new file mode 100644
index 00000000000..f392bee27c5
--- /dev/null
+++ b/cl/persistence/format/snapshot_format/blocks.go
@@ -0,0 +1,229 @@
+package snapshot_format
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/chunk_encoding"
+)
+
+type ExecutionBlockReaderByNumber interface {
+ TransactionsSSZ(w io.Writer, number uint64, hash libcommon.Hash) error
+ WithdrawalsSZZ(w io.Writer, number uint64, hash libcommon.Hash) error
+}
+
+var buffersPool = sync.Pool{
+ New: func() interface{} { return &bytes.Buffer{} },
+}
+
+const (
+ blockBaseOffset = 100 /* Signature + Block Offset */ +
+ 84 /* Slot + ProposerIndex + ParentRoot + StateRoot + Body Offset */ +
+ 96 /*Signature*/ + 72 /*Eth1Data*/ + 32 /*Graffiti*/ + 4 /*ProposerSlashings Offset*/ + 4 /*AttesterSlashings Offset*/ + 4 /*Attestations*/ +
+ 4 /*Deposits Offset*/ + 4 /*VoluntaryExits Offset*/
+
+ altairBlockAdditionalBaseOffset = 160 /*SyncAggregate*/
+ bellatrixBlockAdditionalBaseOffset = 4 /*ExecutionPayload Offset*/
+ capellaBlockAdditionalBaseOffset = 4 /*ExecutionChanges Offset*/
+ denebBlockAdditionalBaseOffset = 4 /*BlobKzgCommitments Offset*/
+)
+
+func writeExecutionBlockPtr(w io.Writer, p *cltypes.Eth1Block) error {
+ temp := make([]byte, 40)
+ binary.BigEndian.PutUint64(temp, p.BlockNumber)
+ copy(temp[8:], p.BlockHash[:])
+
+ return chunk_encoding.WriteChunk(w, temp, chunk_encoding.PointerDataType)
+}
+
+func readExecutionBlockPtr(r io.Reader) (uint64, libcommon.Hash, error) {
+ b, dT, err := chunk_encoding.ReadChunkToBytes(r)
+ if err != nil {
+ return 0, libcommon.Hash{}, err
+ }
+ if dT != chunk_encoding.PointerDataType {
+ return 0, libcommon.Hash{}, fmt.Errorf("malformed beacon block, invalid block pointer type %d, expected: %d", dT, chunk_encoding.ChunkDataType)
+ }
+ return binary.BigEndian.Uint64(b[:8]), libcommon.BytesToHash(b[8:]), nil
+}
+
+func computeInitialOffset(version clparams.StateVersion) uint64 {
+ ret := uint64(blockBaseOffset)
+ if version >= clparams.AltairVersion {
+ ret += altairBlockAdditionalBaseOffset
+ }
+ if version >= clparams.BellatrixVersion {
+ ret += bellatrixBlockAdditionalBaseOffset
+ }
+ if version >= clparams.CapellaVersion {
+ ret += capellaBlockAdditionalBaseOffset
+ }
+ if version >= clparams.DenebVersion {
+ ret += denebBlockAdditionalBaseOffset
+ }
+ return ret
+}
+
+// WriteBlockForSnapshot writes a block to the given writer in the format expected by the snapshot.
+// buf is just a reusable buffer. if it had to grow it will be returned back as grown.
+func WriteBlockForSnapshot(w io.Writer, block *cltypes.SignedBeaconBlock, reusable []byte) ([]byte, error) {
+ bodyRoot, err := block.Block.Body.HashSSZ()
+ if err != nil {
+ return reusable, err
+ }
+ reusable = reusable[:0]
+ // Maybe reuse the buffer?
+ encoded, err := block.EncodeSSZ(reusable)
+ if err != nil {
+ return reusable, err
+ }
+ reusable = encoded
+ version := block.Version()
+ if _, err := w.Write([]byte{byte(version)}); err != nil {
+ return reusable, err
+ }
+ if _, err := w.Write(bodyRoot[:]); err != nil {
+ return reusable, err
+ }
+ currentChunkLength := computeInitialOffset(version)
+
+ body := block.Block.Body
+ // count in body for phase0 fields
+ currentChunkLength += uint64(body.ProposerSlashings.EncodingSizeSSZ())
+ currentChunkLength += uint64(body.AttesterSlashings.EncodingSizeSSZ())
+ currentChunkLength += uint64(body.Attestations.EncodingSizeSSZ())
+ currentChunkLength += uint64(body.Deposits.EncodingSizeSSZ())
+ currentChunkLength += uint64(body.VoluntaryExits.EncodingSizeSSZ())
+ // Write the chunk and chunk attestations
+ if err := chunk_encoding.WriteChunk(w, encoded[:currentChunkLength], chunk_encoding.ChunkDataType); err != nil {
+ return reusable, err
+ }
+ // we are done if we are before altair
+ if version <= clparams.AltairVersion {
+ return reusable, nil
+ }
+ encoded = encoded[currentChunkLength:]
+ if err := writeEth1BlockForSnapshot(w, encoded[:body.ExecutionPayload.EncodingSizeSSZ()], body.ExecutionPayload); err != nil {
+ return reusable, err
+ }
+ encoded = encoded[body.ExecutionPayload.EncodingSizeSSZ():]
+ if version <= clparams.BellatrixVersion {
+ return reusable, nil
+ }
+ return reusable, chunk_encoding.WriteChunk(w, encoded, chunk_encoding.ChunkDataType)
+}
+
+func readMetadataForBlock(r io.Reader, b []byte) (clparams.StateVersion, libcommon.Hash, error) {
+ if _, err := r.Read(b); err != nil {
+ return 0, libcommon.Hash{}, err
+ }
+ return clparams.StateVersion(b[0]), libcommon.BytesToHash(b[1:]), nil
+}
+
+func ReadBlockFromSnapshot(r io.Reader, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (*cltypes.SignedBeaconBlock, error) {
+ block := cltypes.NewSignedBeaconBlock(cfg)
+ buffer := buffersPool.Get().(*bytes.Buffer)
+ defer buffersPool.Put(buffer)
+ buffer.Reset()
+
+ v, err := ReadRawBlockFromSnapshot(r, buffer, executionReader, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return block, block.DecodeSSZ(buffer.Bytes(), int(v))
+}
+
+// ReadBlockHeaderFromSnapshotWithExecutionData reads the beacon block header and the EL block number and block hash.
+func ReadBlockHeaderFromSnapshotWithExecutionData(r io.Reader) (*cltypes.SignedBeaconBlockHeader, uint64, libcommon.Hash, error) {
+ buffer := buffersPool.Get().(*bytes.Buffer)
+ defer buffersPool.Put(buffer)
+ buffer.Reset()
+
+ metadataSlab := make([]byte, 33)
+ v, bodyRoot, err := readMetadataForBlock(r, metadataSlab)
+ if err != nil {
+ return nil, 0, libcommon.Hash{}, err
+ }
+ chunk1, dT1, err := chunk_encoding.ReadChunkToBytes(r)
+ if err != nil {
+ return nil, 0, libcommon.Hash{}, err
+ }
+ if dT1 != chunk_encoding.ChunkDataType {
+ return nil, 0, libcommon.Hash{}, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType)
+ }
+
+ var signature libcommon.Bytes96
+ copy(signature[:], chunk1[4:100])
+ header := &cltypes.SignedBeaconBlockHeader{
+ Signature: signature,
+ Header: &cltypes.BeaconBlockHeader{
+ Slot: binary.LittleEndian.Uint64(chunk1[100:108]),
+ ProposerIndex: binary.LittleEndian.Uint64(chunk1[108:116]),
+ ParentRoot: libcommon.BytesToHash(chunk1[116:148]),
+ Root: libcommon.BytesToHash(chunk1[148:180]),
+ BodyRoot: bodyRoot,
+ }}
+ if v <= clparams.AltairVersion {
+ return header, 0, libcommon.Hash{}, nil
+ }
+ if _, err := r.Read(make([]byte, 1)); err != nil {
+ return header, 0, libcommon.Hash{}, nil
+ }
+ // Read the first eth 1 block chunk
+ _, err = chunk_encoding.ReadChunk(r, io.Discard)
+ if err != nil {
+ return nil, 0, libcommon.Hash{}, err
+ }
+ // lastly read the executionBlock ptr
+ blockNumber, blockHash, err := readExecutionBlockPtr(r)
+ if err != nil {
+ return nil, 0, libcommon.Hash{}, err
+ }
+ return header, blockNumber, blockHash, nil
+}
+
+func ReadRawBlockFromSnapshot(r io.Reader, out io.Writer, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (clparams.StateVersion, error) {
+ metadataSlab := make([]byte, 33)
+ // Metadata section is just the current hardfork of the block.
+ v, _, err := readMetadataForBlock(r, metadataSlab)
+ if err != nil {
+ return v, err
+ }
+
+ // Read the first chunk
+ dT1, err := chunk_encoding.ReadChunk(r, out)
+ if err != nil {
+ return v, err
+ }
+ if dT1 != chunk_encoding.ChunkDataType {
+ return v, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType)
+ }
+
+ if v <= clparams.AltairVersion {
+ return v, nil
+ }
+ // Read the block pointer and retrieve chunk4 from the execution reader
+ if _, err := readEth1BlockFromSnapshot(r, out, executionReader, cfg); err != nil {
+ return v, err
+ }
+ if v <= clparams.BellatrixVersion {
+ return v, nil
+ }
+
+ // Read the 5h chunk
+ dT2, err := chunk_encoding.ReadChunk(r, out)
+ if err != nil {
+ return v, err
+ }
+ if dT2 != chunk_encoding.ChunkDataType {
+ return v, fmt.Errorf("malformed beacon block, invalid chunk 5 type %d, expected: %d", dT2, chunk_encoding.ChunkDataType)
+ }
+ return v, nil
+}
diff --git a/cl/persistence/format/snapshot_format/blocks_test.go b/cl/persistence/format/snapshot_format/blocks_test.go
new file mode 100644
index 00000000000..b5d0815fc98
--- /dev/null
+++ b/cl/persistence/format/snapshot_format/blocks_test.go
@@ -0,0 +1,89 @@
+package snapshot_format_test
+
+import (
+ "bytes"
+ _ "embed"
+ "testing"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/stretchr/testify/require"
+)
+
+//go:embed test_data/phase0.ssz_snappy
+var phase0BlockSSZSnappy []byte
+
+//go:embed test_data/altair.ssz_snappy
+var altairBlockSSZSnappy []byte
+
+//go:embed test_data/bellatrix.ssz_snappy
+var bellatrixBlockSSZSnappy []byte
+
+//go:embed test_data/capella.ssz_snappy
+var capellaBlockSSZSnappy []byte
+
+//go:embed test_data/deneb.ssz_snappy
+var denebBlockSSZSnappy []byte
+
+var emptyBlock = cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+
+// obtain the test blocks
+func getTestBlocks(t *testing.T) []*cltypes.SignedBeaconBlock {
+ var emptyBlockCapella = cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ emptyBlockCapella.Block.Slot = clparams.MainnetBeaconConfig.CapellaForkEpoch * 32
+
+ emptyBlock.EncodingSizeSSZ()
+ emptyBlockCapella.EncodingSizeSSZ()
+ denebBlock := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ capellaBlock := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ bellatrixBlock := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ altairBlock := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+ phase0Block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
+
+ require.NoError(t, utils.DecodeSSZSnappy(denebBlock, denebBlockSSZSnappy, int(clparams.DenebVersion)))
+ require.NoError(t, utils.DecodeSSZSnappy(capellaBlock, capellaBlockSSZSnappy, int(clparams.CapellaVersion)))
+ require.NoError(t, utils.DecodeSSZSnappy(bellatrixBlock, bellatrixBlockSSZSnappy, int(clparams.BellatrixVersion)))
+ require.NoError(t, utils.DecodeSSZSnappy(altairBlock, altairBlockSSZSnappy, int(clparams.AltairVersion)))
+ require.NoError(t, utils.DecodeSSZSnappy(phase0Block, phase0BlockSSZSnappy, int(clparams.Phase0Version)))
+ return []*cltypes.SignedBeaconBlock{phase0Block, altairBlock, bellatrixBlock, capellaBlock, denebBlock, emptyBlock, emptyBlockCapella}
+}
+
+func TestBlockSnapshotEncoding(t *testing.T) {
+ for _, blk := range getTestBlocks(t) {
+ var br snapshot_format.MockBlockReader
+ if blk.Version() >= clparams.BellatrixVersion {
+ br = snapshot_format.MockBlockReader{Block: blk.Block.Body.ExecutionPayload}
+ }
+ var b bytes.Buffer
+ _, err := snapshot_format.WriteBlockForSnapshot(&b, blk, nil)
+ require.NoError(t, err)
+ blk2, err := snapshot_format.ReadBlockFromSnapshot(&b, &br, &clparams.MainnetBeaconConfig)
+ require.NoError(t, err)
+
+ hash1, err := blk.HashSSZ()
+ require.NoError(t, err)
+ hash2, err := blk2.HashSSZ()
+ require.NoError(t, err)
+ // Rewrite for header test
+ b.Reset()
+ _, err = snapshot_format.WriteBlockForSnapshot(&b, blk, nil)
+ require.NoError(t, err)
+ header, bn, bHash, err := snapshot_format.ReadBlockHeaderFromSnapshotWithExecutionData(&b)
+ require.NoError(t, err)
+ hash3, err := header.HashSSZ()
+ require.NoError(t, err)
+
+ require.Equal(t, hash1, hash2)
+
+ require.Equal(t, header.Signature, blk.Signature)
+ require.Equal(t, header.Header.Slot, blk.Block.Slot)
+
+ if blk.Version() >= clparams.BellatrixVersion {
+ require.Equal(t, bn, blk.Block.Body.ExecutionPayload.BlockNumber)
+ require.Equal(t, bHash, blk.Block.Body.ExecutionPayload.BlockHash)
+ }
+ require.Equal(t, hash3, hash2)
+ }
+}
diff --git a/cl/persistence/format/snapshot_format/eth1_blocks.go b/cl/persistence/format/snapshot_format/eth1_blocks.go
new file mode 100644
index 00000000000..053c075aa22
--- /dev/null
+++ b/cl/persistence/format/snapshot_format/eth1_blocks.go
@@ -0,0 +1,92 @@
+package snapshot_format
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/chunk_encoding"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+// WriteEth1BlockForSnapshot writes an execution block to the given writer in the format expected by the snapshot.
+func writeEth1BlockForSnapshot(w io.Writer, encoded []byte, block *cltypes.Eth1Block) error {
+ pos := (length.Hash /*ParentHash*/ + length.Addr /*Miner*/ + length.Hash /*StateRoot*/ + length.Hash /*ReceiptsRoot*/ + types.BloomByteLength /*Bloom*/ +
+ length.Hash /*PrevRandao*/ + 32 /*BlockNumber + Timestamp + GasLimit + GasUsed */ + 4 /*ExtraDataOffset*/ + length.Hash /*BaseFee*/ +
+ length.Hash /*BlockHash*/ + 4 /*TransactionOffset*/)
+
+ if block.Version() >= clparams.CapellaVersion {
+ pos += 4 /*WithdrawalsOffset*/
+ }
+ if block.Version() >= clparams.DenebVersion {
+ pos += 16 /*BlobGasUsed + ExcessBlobGas*/
+ }
+ // Add metadata first for Eth1Block, aka. version
+ if _, err := w.Write([]byte{byte(block.Version())}); err != nil {
+ return err
+ }
+
+ // Maybe reuse the buffer?
+ pos += block.Extra.EncodingSizeSSZ()
+ if err := chunk_encoding.WriteChunk(w, encoded[:pos], chunk_encoding.ChunkDataType); err != nil {
+ return err
+ }
+ pos += block.Withdrawals.EncodingSizeSSZ()
+ pos += block.Transactions.EncodingSizeSSZ()
+ encoded = encoded[pos:]
+ //pos = 0
+ // write the block pointer
+ if err := writeExecutionBlockPtr(w, block); err != nil {
+ return err
+ }
+ // From now on here, just finish up
+ return chunk_encoding.WriteChunk(w, encoded, chunk_encoding.ChunkDataType)
+}
+
+func readEth1BlockFromSnapshot(r io.Reader, out io.Writer, executionReader ExecutionBlockReaderByNumber, cfg *clparams.BeaconChainConfig) (clparams.StateVersion, error) {
+ // Metadata section is just the current hardfork of the block.
+ vArr := make([]byte, 1)
+ if _, err := r.Read(vArr); err != nil {
+ return 0, err
+ }
+ v := clparams.StateVersion(vArr[0])
+
+ // Read the first chunk
+ dT1, err := chunk_encoding.ReadChunk(r, out)
+ if err != nil {
+ return v, err
+ }
+ if dT1 != chunk_encoding.ChunkDataType {
+ return v, fmt.Errorf("malformed beacon block, invalid chunk 1 type %d, expected: %d", dT1, chunk_encoding.ChunkDataType)
+ }
+ // Read the block pointer and retrieve chunk4 from the execution reader
+ blockNumber, blockHash, err := readExecutionBlockPtr(r)
+ if err != nil {
+ return v, err
+ }
+ err = executionReader.TransactionsSSZ(out, blockNumber, blockHash)
+ if err != nil {
+ return v, err
+ }
+
+ if v < clparams.CapellaVersion {
+ return v, nil
+ }
+ err = executionReader.WithdrawalsSZZ(out, blockNumber, blockHash)
+ if err != nil {
+ return v, err
+ }
+
+ // Read the 5h chunk
+ dT2, err := chunk_encoding.ReadChunk(r, out)
+ if err != nil {
+ return v, err
+ }
+ if dT2 != chunk_encoding.ChunkDataType {
+ return v, fmt.Errorf("malformed beacon block, invalid chunk 5 type %d, expected: %d", dT2, chunk_encoding.ChunkDataType)
+ }
+
+ return v, nil
+}
diff --git a/cl/persistence/format/snapshot_format/getters/execution_snapshot.go b/cl/persistence/format/snapshot_format/getters/execution_snapshot.go
new file mode 100644
index 00000000000..d201a63ea86
--- /dev/null
+++ b/cl/persistence/format/snapshot_format/getters/execution_snapshot.go
@@ -0,0 +1,166 @@
+package getters
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/types/ssz"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/turbo/services"
+)
+
+type cacheEntry struct {
+ number uint64
+ hash libcommon.Hash
+}
+type ExecutionSnapshotReader struct {
+ ctx context.Context
+
+ blockReader services.FullBlockReader
+
+ db kv.RoDB
+ txsCache *lru.Cache[cacheEntry, []byte]
+ withdrawalsCache *lru.Cache[cacheEntry, []byte]
+}
+
+func NewExecutionSnapshotReader(ctx context.Context, blockReader services.FullBlockReader, db kv.RoDB) *ExecutionSnapshotReader {
+ txsCache, err := lru.New[cacheEntry, []byte]("txsCache", 96)
+ if err != nil {
+ panic(err)
+ }
+ withdrawalsCache, err := lru.New[cacheEntry, []byte]("wsCache", 96)
+ if err != nil {
+ panic(err)
+ }
+ return &ExecutionSnapshotReader{ctx: ctx, blockReader: blockReader, withdrawalsCache: withdrawalsCache, txsCache: txsCache, db: db}
+}
+
+func (r *ExecutionSnapshotReader) TransactionsSSZ(w io.Writer, number uint64, hash libcommon.Hash) error {
+ ok, err := r.lookupTransactionsInCache(w, number, hash)
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+
+ tx, err := r.db.BeginRo(r.ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // Get the body and fill both caches
+ body, err := r.blockReader.BodyWithTransactions(r.ctx, tx, hash, number)
+ if err != nil {
+ return err
+ }
+ if body == nil {
+ return fmt.Errorf("transactions not found for block %d", number)
+ }
+ // compute txs flats
+ txs, err := types.MarshalTransactionsBinary(body.Transactions)
+ if err != nil {
+ return err
+ }
+ flattenedTxs := convertTxsToBytesSSZ(txs)
+ r.txsCache.Add(cacheEntry{number: number, hash: hash}, flattenedTxs)
+ // compute withdrawals flat
+ ws := body.Withdrawals
+ flattenedWs := convertWithdrawalsToBytesSSZ(ws)
+
+ r.withdrawalsCache.Add(cacheEntry{number: number, hash: hash}, flattenedWs)
+ _, err = w.Write(flattenedTxs)
+ return err
+}
+
+func convertTxsToBytesSSZ(txs [][]byte) []byte {
+ sumLenTxs := 0
+ for _, tx := range txs {
+ sumLenTxs += len(tx)
+ }
+ flat := make([]byte, 0, 4*len(txs)+sumLenTxs)
+ offset := len(txs) * 4
+ for _, tx := range txs {
+ flat = append(flat, ssz.OffsetSSZ(uint32(offset))...)
+ offset += len(tx)
+ }
+ for _, tx := range txs {
+ flat = append(flat, tx...)
+ }
+ return flat
+}
+
+func convertWithdrawalsToBytesSSZ(ws []*types.Withdrawal) []byte {
+ ret := make([]byte, 44*len(ws))
+ for i, w := range ws {
+ currentPos := i * 44
+ binary.LittleEndian.PutUint64(ret[currentPos:currentPos+8], w.Index)
+ binary.LittleEndian.PutUint64(ret[currentPos+8:currentPos+16], w.Validator)
+ copy(ret[currentPos+16:currentPos+36], w.Address[:])
+ binary.LittleEndian.PutUint64(ret[currentPos+36:currentPos+44], w.Amount)
+ }
+ return ret
+}
+
+func (r *ExecutionSnapshotReader) WithdrawalsSZZ(w io.Writer, number uint64, hash libcommon.Hash) error {
+ ok, err := r.lookupWithdrawalsInCache(w, number, hash)
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ tx, err := r.db.BeginRo(r.ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // Get the body and fill both caches
+ body, err := r.blockReader.BodyWithTransactions(r.ctx, tx, hash, number)
+ if err != nil {
+ return err
+ }
+ if body == nil {
+ return fmt.Errorf("transactions not found for block %d", number)
+ }
+ // compute txs flats
+ txs, err := types.MarshalTransactionsBinary(body.Transactions)
+ if err != nil {
+ return err
+ }
+ flattenedTxs := convertTxsToBytesSSZ(txs)
+ r.txsCache.Add(cacheEntry{number: number, hash: hash}, flattenedTxs)
+ // compute withdrawals flat
+ ws := body.Withdrawals
+ flattenedWs := convertWithdrawalsToBytesSSZ(ws)
+
+ r.withdrawalsCache.Add(cacheEntry{number: number, hash: hash}, flattenedWs)
+ _, err = w.Write(flattenedWs)
+
+ return err
+}
+
+func (r *ExecutionSnapshotReader) lookupWithdrawalsInCache(w io.Writer, number uint64, hash libcommon.Hash) (bool, error) {
+ var wsBytes []byte
+ var ok bool
+ if wsBytes, ok = r.withdrawalsCache.Get(cacheEntry{number: number, hash: hash}); !ok {
+ return false, nil
+ }
+ _, err := w.Write(wsBytes)
+ return true, err
+}
+
+func (r *ExecutionSnapshotReader) lookupTransactionsInCache(w io.Writer, number uint64, hash libcommon.Hash) (bool, error) {
+ var wsBytes []byte
+ var ok bool
+ if wsBytes, ok = r.txsCache.Get(cacheEntry{number: number, hash: hash}); !ok {
+ return false, nil
+ }
+ _, err := w.Write(wsBytes)
+ return true, err
+}
diff --git a/cl/persistence/format/snapshot_format/test_data/altair.ssz_snappy b/cl/persistence/format/snapshot_format/test_data/altair.ssz_snappy
new file mode 100644
index 00000000000..6a68fb0428b
Binary files /dev/null and b/cl/persistence/format/snapshot_format/test_data/altair.ssz_snappy differ
diff --git a/cl/persistence/format/snapshot_format/test_data/bellatrix.ssz_snappy b/cl/persistence/format/snapshot_format/test_data/bellatrix.ssz_snappy
new file mode 100644
index 00000000000..4ea51c0359f
Binary files /dev/null and b/cl/persistence/format/snapshot_format/test_data/bellatrix.ssz_snappy differ
diff --git a/cl/persistence/format/snapshot_format/test_data/capella.ssz_snappy b/cl/persistence/format/snapshot_format/test_data/capella.ssz_snappy
new file mode 100644
index 00000000000..f3301a23195
Binary files /dev/null and b/cl/persistence/format/snapshot_format/test_data/capella.ssz_snappy differ
diff --git a/cl/persistence/format/snapshot_format/test_data/deneb.ssz_snappy b/cl/persistence/format/snapshot_format/test_data/deneb.ssz_snappy
new file mode 100644
index 00000000000..abdebc30b65
Binary files /dev/null and b/cl/persistence/format/snapshot_format/test_data/deneb.ssz_snappy differ
diff --git a/cl/persistence/format/snapshot_format/test_data/phase0.ssz_snappy b/cl/persistence/format/snapshot_format/test_data/phase0.ssz_snappy
new file mode 100644
index 00000000000..dfa9c18707a
Binary files /dev/null and b/cl/persistence/format/snapshot_format/test_data/phase0.ssz_snappy differ
diff --git a/cl/persistence/format/snapshot_format/test_util.go b/cl/persistence/format/snapshot_format/test_util.go
new file mode 100644
index 00000000000..1bf45999522
--- /dev/null
+++ b/cl/persistence/format/snapshot_format/test_util.go
@@ -0,0 +1,30 @@
+package snapshot_format
+
+import (
+ "io"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+)
+
+type MockBlockReader struct {
+ Block *cltypes.Eth1Block
+}
+
+func (t *MockBlockReader) WithdrawalsSZZ(out io.Writer, number uint64, hash libcommon.Hash) error {
+ l, err := t.Block.Withdrawals.EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(l)
+ return err
+}
+
+func (t *MockBlockReader) TransactionsSSZ(out io.Writer, number uint64, hash libcommon.Hash) error {
+ l, err := t.Block.Transactions.EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(l)
+ return err
+}
diff --git a/cl/persistence/interface.go b/cl/persistence/interface.go
new file mode 100644
index 00000000000..985b50a728a
--- /dev/null
+++ b/cl/persistence/interface.go
@@ -0,0 +1,32 @@
+package persistence
+
+import (
+ "context"
+ "io"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+)
+
+type BlockSource interface {
+ GetRange(ctx context.Context, tx kv.Tx, from uint64, count uint64) (*peers.PeeredObject[[]*cltypes.SignedBeaconBlock], error)
+ PurgeRange(ctx context.Context, tx kv.Tx, from uint64, count uint64) error
+ GetBlock(ctx context.Context, tx kv.Tx, slot uint64) (*peers.PeeredObject[*cltypes.SignedBeaconBlock], error)
+}
+
+type BeaconChainWriter interface {
+ WriteBlock(ctx context.Context, tx kv.RwTx, block *cltypes.SignedBeaconBlock, canonical bool) error
+}
+
+type RawBeaconBlockChain interface {
+ BlockWriter(ctx context.Context, slot uint64, blockRoot libcommon.Hash) (io.WriteCloser, error)
+ BlockReader(ctx context.Context, slot uint64, blockRoot libcommon.Hash) (io.ReadCloser, error)
+ DeleteBlock(ctx context.Context, slot uint64, blockRoot libcommon.Hash) error
+}
+
+type BeaconChainDatabase interface {
+ BlockSource
+ BeaconChainWriter
+}
diff --git a/cl/persistence/raw_block_saver.go b/cl/persistence/raw_block_saver.go
new file mode 100644
index 00000000000..6d41f4d8cd0
--- /dev/null
+++ b/cl/persistence/raw_block_saver.go
@@ -0,0 +1,52 @@
+package persistence
+
+import (
+ "context"
+ "io"
+ "os"
+ "sync"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/spf13/afero"
+ "go.uber.org/zap/buffer"
+)
+
+var bPool = sync.Pool{
+ New: func() interface{} {
+ return &buffer.Buffer{}
+ },
+}
+
+type aferoRawBeaconBlockChain struct {
+ fs afero.Fs
+ cfg *clparams.BeaconChainConfig
+}
+
+func NewAferoRawBlockSaver(fs afero.Fs, cfg *clparams.BeaconChainConfig) RawBeaconBlockChain {
+ return aferoRawBeaconBlockChain{
+ fs: fs,
+ cfg: cfg,
+ }
+}
+
+func AferoRawBeaconBlockChainFromOsPath(cfg *clparams.BeaconChainConfig, path string) (RawBeaconBlockChain, afero.Fs) {
+ dataDirFs := afero.NewBasePathFs(afero.NewOsFs(), path)
+ return NewAferoRawBlockSaver(dataDirFs, cfg), dataDirFs
+}
+
+func (a aferoRawBeaconBlockChain) BlockWriter(ctx context.Context, slot uint64, blockRoot libcommon.Hash) (io.WriteCloser, error) {
+ folderPath, path := rootToPaths(slot, blockRoot, a.cfg)
+ _ = a.fs.MkdirAll(folderPath, 0o755)
+ return a.fs.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o755)
+}
+
+func (a aferoRawBeaconBlockChain) BlockReader(ctx context.Context, slot uint64, blockRoot libcommon.Hash) (io.ReadCloser, error) {
+ _, path := rootToPaths(slot, blockRoot, a.cfg)
+ return a.fs.OpenFile(path, os.O_RDONLY, 0o755)
+}
+
+func (a aferoRawBeaconBlockChain) DeleteBlock(ctx context.Context, slot uint64, blockRoot libcommon.Hash) error {
+ _, path := rootToPaths(slot, blockRoot, a.cfg)
+ return a.fs.Remove(path)
+}
diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go
new file mode 100644
index 00000000000..e3d5f717b59
--- /dev/null
+++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go
@@ -0,0 +1,77 @@
+package historical_states_reader
+
+import (
+ "fmt"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+func (r *HistoricalStatesReader) attestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool, randaoMixes solid.HashVectorSSZ, idxs []uint64) ([]uint64, error) {
+ slot := attestation.Slot()
+ committeesPerSlot := committeeCount(r.cfg, slot/r.cfg.SlotsPerEpoch, idxs)
+ committeeIndex := attestation.ValidatorIndex()
+ index := (slot%r.cfg.SlotsPerEpoch)*committeesPerSlot + committeeIndex
+ count := committeesPerSlot * r.cfg.SlotsPerEpoch
+
+ committee, err := r.computeCommittee(randaoMixes, idxs, attestation.Slot(), count, index)
+ if err != nil {
+ return nil, err
+ }
+ aggregationBitsLen := utils.GetBitlistLength(aggregationBits)
+ if checkBitsLength && utils.GetBitlistLength(aggregationBits) != len(committee) {
+ return nil, fmt.Errorf("GetAttestingIndicies: invalid aggregation bits. agg bits size: %d, expect: %d", aggregationBitsLen, len(committee))
+ }
+
+ attestingIndices := []uint64{}
+ for i, member := range committee {
+ bitIndex := i % 8
+ sliceIndex := i / 8
+ if sliceIndex >= len(aggregationBits) {
+ return nil, fmt.Errorf("GetAttestingIndicies: committee is too big")
+ }
+ if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 {
+ attestingIndices = append(attestingIndices, member)
+ }
+ }
+ return attestingIndices, nil
+}
+
+// computeCommittee uses cache to compute compittee
+func (r *HistoricalStatesReader) computeCommittee(randaoMixes solid.HashVectorSSZ, indicies []uint64, slot uint64, count, index uint64) ([]uint64, error) {
+ cfg := r.cfg
+ lenIndicies := uint64(len(indicies))
+
+ start := (lenIndicies * index) / count
+ end := (lenIndicies * (index + 1)) / count
+ var shuffledIndicies []uint64
+ epoch := slot / cfg.SlotsPerEpoch
+
+ mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) %
+ cfg.EpochsPerHistoricalVector
+ // Input for the seed hash.
+ mix := randaoMixes.Get(int(mixPosition))
+
+ if shuffledIndicesInterface, ok := r.shuffledSetsCache.Get(epoch); ok {
+ shuffledIndicies = shuffledIndicesInterface
+ } else {
+ shuffledIndicies = make([]uint64, lenIndicies)
+ shuffledIndicies = shuffling.ComputeShuffledIndicies(cfg, mix, shuffledIndicies, indicies, slot)
+ r.shuffledSetsCache.Add(epoch, shuffledIndicies)
+ }
+
+ return shuffledIndicies[start:end], nil
+}
+
+func committeeCount(cfg *clparams.BeaconChainConfig, epoch uint64, idxs []uint64) uint64 {
+ committeCount := uint64(len(idxs)) / cfg.SlotsPerEpoch / cfg.TargetCommitteeSize
+ if cfg.MaxCommitteesPerSlot < committeCount {
+ committeCount = cfg.MaxCommitteesPerSlot
+ }
+ if committeCount < 1 {
+ committeCount = 1
+ }
+ return committeCount
+}
diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go
new file mode 100644
index 00000000000..fe804024ee7
--- /dev/null
+++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go
@@ -0,0 +1,695 @@
+package historical_states_reader
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+ "github.com/spf13/afero"
+ "golang.org/x/exp/slices"
+)
+
+type HistoricalStatesReader struct {
+ cfg *clparams.BeaconChainConfig
+ fs afero.Fs // some data is on filesystem to avoid database fragmentation
+ validatorTable *state_accessors.StaticValidatorTable // We can save 80% of the I/O by caching the validator table
+ blockReader freezeblocks.BeaconSnapshotReader
+ genesisState *state.CachingBeaconState
+
+ // cache for shuffled sets
+ shuffledSetsCache *lru.Cache[uint64, []uint64]
+}
+
+func NewHistoricalStatesReader(cfg *clparams.BeaconChainConfig, blockReader freezeblocks.BeaconSnapshotReader, validatorTable *state_accessors.StaticValidatorTable, fs afero.Fs, genesisState *state.CachingBeaconState) *HistoricalStatesReader {
+
+ cache, err := lru.New[uint64, []uint64]("shuffledSetsCache_reader", 125)
+ if err != nil {
+ panic(err)
+ }
+
+ return &HistoricalStatesReader{
+ cfg: cfg,
+ fs: fs,
+ blockReader: blockReader,
+ genesisState: genesisState,
+ validatorTable: validatorTable,
+ shuffledSetsCache: cache,
+ }
+}
+
+func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv.Tx, slot uint64) (*state.CachingBeaconState, error) {
+ ret := state.New(r.cfg)
+ latestProcessedState, err := state_accessors.GetStateProcessingProgress(tx)
+ if err != nil {
+ return nil, err
+ }
+
+ // If this happens, we need to update our static tables
+ if slot > latestProcessedState || slot > r.validatorTable.Slot() {
+ return nil, fmt.Errorf("slot %d is greater than latest processed state %d", slot, latestProcessedState)
+ }
+
+ if slot == 0 {
+ return r.genesisState.Copy()
+ }
+ // Read the current block (we need the block header) + other stuff
+ block, err := r.blockReader.ReadBlockBySlot(ctx, tx, slot)
+ if err != nil {
+ return nil, err
+ }
+ if block == nil {
+ return nil, fmt.Errorf("block at slot %d not found", slot)
+ }
+ blockHeader := block.SignedBeaconBlockHeader().Header
+ blockHeader.Root = common.Hash{}
+ // Read the minimal beacon state which have the small fields.
+ minimalBeaconState, err := state_accessors.ReadMinimalBeaconState(tx, slot)
+ if err != nil {
+ return nil, err
+ }
+ // State not found
+ if minimalBeaconState == nil {
+ return nil, nil
+ }
+
+ // Versioning
+ ret.SetVersion(minimalBeaconState.Version)
+ ret.SetGenesisTime(r.genesisState.GenesisTime())
+ ret.SetGenesisValidatorsRoot(r.genesisState.GenesisValidatorsRoot())
+ ret.SetSlot(slot)
+ ret.SetFork(minimalBeaconState.Fork)
+ // History
+ stateRoots, blockRoots := solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)), solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot))
+ ret.SetLatestBlockHeader(blockHeader)
+
+ if err := r.readHistoryHashVector(tx, r.genesisState.BlockRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.BlockRoot, blockRoots); err != nil {
+ return nil, fmt.Errorf("failed to read block roots: %w", err)
+ }
+ ret.SetBlockRoots(blockRoots)
+
+ if err := r.readHistoryHashVector(tx, r.genesisState.StateRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.StateRoot, stateRoots); err != nil {
+ return nil, fmt.Errorf("failed to read state roots: %w", err)
+ }
+ ret.SetStateRoots(stateRoots)
+
+ historicalRoots := solid.NewHashList(int(r.cfg.HistoricalRootsLimit))
+ if err := state_accessors.ReadHistoricalRoots(tx, minimalBeaconState.HistoricalRootsLength, func(idx int, root common.Hash) error {
+ historicalRoots.Append(root)
+ return nil
+ }); err != nil {
+ return nil, fmt.Errorf("failed to read historical roots: %w", err)
+ }
+ ret.SetHistoricalRoots(historicalRoots)
+
+ // Eth1
+ eth1DataVotes := solid.NewStaticListSSZ[*cltypes.Eth1Data](int(r.cfg.Eth1DataVotesLength()), 72)
+ if err := r.readEth1DataVotes(tx, minimalBeaconState.Eth1DataLength, slot, eth1DataVotes); err != nil {
+ return nil, err
+ }
+ ret.SetEth1DataVotes(eth1DataVotes)
+ ret.SetEth1Data(minimalBeaconState.Eth1Data)
+ ret.SetEth1DepositIndex(minimalBeaconState.Eth1DepositIndex)
+ // Registry (Validators + Balances)
+ balancesBytes, err := r.reconstructBalances(tx, slot, kv.ValidatorBalance)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read validator balances: %w", err)
+ }
+ balances := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit))
+ if err := balances.DecodeSSZ(balancesBytes, 0); err != nil {
+ return nil, fmt.Errorf("failed to decode validator balances: %w", err)
+ }
+ ret.SetBalances(balances)
+
+ validatorSet, currActiveIdxs, prevActiveIdxs, err := r.readValidatorsForHistoricalState(tx, slot, minimalBeaconState.ValidatorLength)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read validators: %w", err)
+ }
+ ret.SetValidators(validatorSet)
+ // Randomness
+ randaoMixes := solid.NewHashVector(int(r.cfg.EpochsPerHistoricalVector))
+ if err := r.readRandaoMixes(tx, slot, randaoMixes); err != nil {
+ return nil, fmt.Errorf("failed to read randao mixes: %w", err)
+ }
+ ret.SetRandaoMixes(randaoMixes)
+ slashingsVector := solid.NewUint64VectorSSZ(int(r.cfg.EpochsPerSlashingsVector))
+ // Slashings
+ err = r.reconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read slashings: %w", err)
+ }
+ ret.SetSlashings(slashingsVector)
+
+ // Finality
+ currentCheckpoint, previousCheckpoint, finalizedCheckpoint, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(slot))
+ if err != nil {
+ return nil, err
+ }
+ if currentCheckpoint == nil {
+ currentCheckpoint = r.genesisState.CurrentJustifiedCheckpoint()
+ }
+ if previousCheckpoint == nil {
+ previousCheckpoint = r.genesisState.PreviousJustifiedCheckpoint()
+ }
+ if finalizedCheckpoint == nil {
+ finalizedCheckpoint = r.genesisState.FinalizedCheckpoint()
+ }
+ ret.SetJustificationBits(*minimalBeaconState.JustificationBits)
+ ret.SetPreviousJustifiedCheckpoint(previousCheckpoint)
+ ret.SetCurrentJustifiedCheckpoint(currentCheckpoint)
+ ret.SetFinalizedCheckpoint(finalizedCheckpoint)
+ // Participation
+ if ret.Version() == clparams.Phase0Version {
+ currentAtts, previousAtts, err := r.readPendingEpochs(tx, slot, minimalBeaconState.CurrentEpochAttestationsLength, minimalBeaconState.PreviousEpochAttestationsLength)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read pending attestations: %w", err)
+ }
+ ret.SetCurrentEpochAttestations(currentAtts)
+ ret.SetPreviousEpochAttestations(previousAtts)
+ } else {
+ currentIdxs, previousIdxs, err := r.readPartecipations(tx, slot, minimalBeaconState.ValidatorLength, currActiveIdxs, prevActiveIdxs, ret, currentCheckpoint, previousCheckpoint)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read participations: %w", err)
+ }
+ ret.SetCurrentEpochParticipation(currentIdxs)
+ ret.SetPreviousEpochParticipation(previousIdxs)
+ }
+
+ if ret.Version() < clparams.AltairVersion {
+ return ret, ret.InitBeaconState()
+ }
+ inactivityScores := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit))
+ // Inactivity
+ err = r.reconstructUint64ListDump(tx, slot, kv.InactivityScores, int(minimalBeaconState.ValidatorLength), inactivityScores)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read inactivity scores: %w", err)
+ }
+
+ ret.SetInactivityScoresRaw(inactivityScores)
+ // Sync
+ syncCommitteeSlot := r.cfg.RoundSlotToSyncCommitteePeriod(slot)
+ currentSyncCommittee, err := state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read current sync committee: %w", err)
+ }
+ if currentSyncCommittee == nil {
+ currentSyncCommittee = r.genesisState.CurrentSyncCommittee()
+ }
+
+ nextSyncCommittee, err := state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read next sync committee: %w", err)
+ }
+ if nextSyncCommittee == nil {
+ nextSyncCommittee = r.genesisState.NextSyncCommittee()
+ }
+ ret.SetCurrentSyncCommittee(currentSyncCommittee)
+ ret.SetNextSyncCommittee(nextSyncCommittee)
+ // Execution
+ if ret.Version() < clparams.BellatrixVersion {
+ return ret, ret.InitBeaconState()
+ }
+ payloadHeader, err := block.Block.Body.ExecutionPayload.PayloadHeader()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read payload header: %w", err)
+ }
+ ret.SetLatestExecutionPayloadHeader(payloadHeader)
+ if ret.Version() < clparams.CapellaVersion {
+ return ret, ret.InitBeaconState()
+ }
+
+ // Withdrawals
+ ret.SetNextWithdrawalIndex(minimalBeaconState.NextWithdrawalIndex)
+ ret.SetNextWithdrawalValidatorIndex(minimalBeaconState.NextWithdrawalValidatorIndex)
+ // Deep history valid from Capella onwards
+ historicalSummaries := solid.NewStaticListSSZ[*cltypes.HistoricalSummary](int(r.cfg.HistoricalRootsLimit), 64)
+ if err := state_accessors.ReadHistoricalSummaries(tx, minimalBeaconState.HistoricalSummariesLength, func(idx int, historicalSummary *cltypes.HistoricalSummary) error {
+ historicalSummaries.Append(historicalSummary)
+ return nil
+ }); err != nil {
+ return nil, fmt.Errorf("failed to read historical summaries: %w", err)
+ }
+ ret.SetHistoricalSummaries(historicalSummaries)
+ return ret, ret.InitBeaconState()
+}
+
+func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) {
+ var needFromGenesis, inserted uint64
+ if size > slot || slot-size <= r.genesisState.Slot() {
+ needFromGenesis = size - (slot - r.genesisState.Slot())
+ }
+
+ needFromDB := size - needFromGenesis
+ cursor, err := tx.Cursor(table)
+ if err != nil {
+ return err
+ }
+ defer cursor.Close()
+ var currKeySlot uint64
+ for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(slot - needFromDB)); err == nil && k != nil; k, v, err = cursor.Next() {
+ if len(v) != 32 {
+ return fmt.Errorf("invalid key %x", k)
+ }
+ currKeySlot = base_encoding.Decode64FromBytes4(k)
+ out.Set(int(currKeySlot%size), common.BytesToHash(v))
+ inserted++
+ if inserted == needFromDB {
+ break
+ }
+ }
+ for i := 0; i < int(needFromGenesis); i++ {
+ currKeySlot++
+ out.Set(int(currKeySlot%size), genesisVector.Get(int(currKeySlot%size)))
+ }
+ return nil
+}
+
+func (r *HistoricalStatesReader) readEth1DataVotes(tx kv.Tx, eth1DataVotesLength, slot uint64, out *solid.ListSSZ[*cltypes.Eth1Data]) error {
+ initialSlot := r.cfg.RoundSlotToVotePeriod(slot)
+ initialKey := base_encoding.Encode64ToBytes4(initialSlot)
+ cursor, err := tx.Cursor(kv.Eth1DataVotes)
+ if err != nil {
+ return err
+ }
+ defer cursor.Close()
+ k, v, err := cursor.Seek(initialKey)
+ if err != nil {
+ return err
+ }
+ if initialSlot <= r.genesisState.Slot() {
+ // We need to prepend the genesis votes
+ for i := 0; i < r.genesisState.Eth1DataVotes().Len(); i++ {
+ out.Append(r.genesisState.Eth1DataVotes().Get(i))
+ }
+ }
+
+ endSlot := r.cfg.RoundSlotToVotePeriod(slot + r.cfg.SlotsPerEpoch*r.cfg.EpochsPerEth1VotingPeriod)
+
+ for k != nil && base_encoding.Decode64FromBytes4(k) < endSlot {
+ if out.Len() >= int(eth1DataVotesLength) {
+ break
+ }
+ eth1Data := &cltypes.Eth1Data{}
+ if err := eth1Data.DecodeSSZ(v, 0); err != nil {
+ return err
+ }
+ out.Append(eth1Data)
+ k, v, err = cursor.Next()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out solid.HashVectorSSZ) error {
+ size := r.cfg.EpochsPerHistoricalVector
+ genesisVector := r.genesisState.RandaoMixes()
+ var needFromGenesis, inserted uint64
+ roundedSlot := r.cfg.RoundSlotToEpoch(slot)
+ epoch := slot / r.cfg.SlotsPerEpoch
+ genesisEpoch := r.genesisState.Slot() / r.cfg.SlotsPerEpoch
+ if size > epoch || epoch-size <= genesisEpoch {
+ needFromGenesis = size - (epoch - genesisEpoch)
+ }
+
+ needFromDB := size - needFromGenesis
+ cursor, err := tx.Cursor(kv.RandaoMixes)
+ if err != nil {
+ return err
+ }
+ defer cursor.Close()
+ var currKeyEpoch uint64
+ for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(roundedSlot - (needFromDB)*r.cfg.SlotsPerEpoch)); err == nil && k != nil; k, v, err = cursor.Next() {
+ if len(v) != 32 {
+ return fmt.Errorf("invalid key %x", k)
+ }
+ currKeyEpoch = base_encoding.Decode64FromBytes4(k) / r.cfg.SlotsPerEpoch
+ out.Set(int(currKeyEpoch%size), common.BytesToHash(v))
+ inserted++
+ if inserted == needFromDB {
+ break
+ }
+ }
+ for i := 0; i < int(needFromGenesis); i++ {
+ currKeyEpoch++
+ out.Set(int(currKeyEpoch%size), genesisVector.Get(int(currKeyEpoch%size)))
+ }
+ // Now we need to read the intra epoch randao mix.
+ intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return err
+ }
+ if len(intraRandaoMix) != 32 {
+ return fmt.Errorf("invalid intra randao mix length %d", len(intraRandaoMix))
+ }
+ out.Set(int(epoch%r.cfg.EpochsPerHistoricalVector), common.BytesToHash(intraRandaoMix))
+ return nil
+}
+
+func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, slot uint64, diffBucket string, fileSuffix string) ([]byte, error) {
+ // Read the file
+ freshDumpSlot := slot - slot%clparams.SlotsPerDump
+ _, filePath := clparams.EpochToPaths(freshDumpSlot, r.cfg, fileSuffix)
+ file, err := r.fs.Open(filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // Read the diff file
+ zstdReader, err := zstd.NewReader(file)
+ if err != nil {
+ return nil, err
+ }
+ defer zstdReader.Close()
+
+ lenRaw := uint64(0)
+ if err := binary.Read(file, binary.LittleEndian, &lenRaw); err != nil {
+ return nil, err
+ }
+ currentList := make([]byte, lenRaw)
+
+ if _, err = utils.ReadZSTD(zstdReader, currentList); err != nil {
+ return nil, err
+ }
+
+ // now start diffing
+ diffCursor, err := tx.Cursor(diffBucket)
+ if err != nil {
+ return nil, err
+ }
+ defer diffCursor.Close()
+
+ for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(freshDumpSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= slot; k, v, err = diffCursor.Next() {
+ if err != nil {
+ return nil, err
+ }
+ if len(k) != 4 {
+ return nil, fmt.Errorf("invalid key %x", k)
+ }
+ if base_encoding.Decode64FromBytes4(k) > slot {
+ return nil, fmt.Errorf("diff not found for slot %d", slot)
+ }
+ s := time.Now()
+ currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Println("diffing", time.Since(s))
+ }
+
+ return currentList, err
+}
+
+func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, slot uint64, diffBucket string) ([]byte, error) {
+ // Read the file
+ freshDumpSlot := slot - slot%clparams.SlotsPerDump
+ _, filePath := clparams.EpochToPaths(freshDumpSlot, r.cfg, "balances")
+ file, err := r.fs.Open(filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // Read the diff file
+ zstdReader, err := zstd.NewReader(file)
+ if err != nil {
+ return nil, err
+ }
+ defer zstdReader.Close()
+
+ lenRaw := uint64(0)
+ if err := binary.Read(file, binary.LittleEndian, &lenRaw); err != nil {
+ return nil, err
+ }
+ currentList := make([]byte, lenRaw)
+
+ if _, err = utils.ReadZSTD(zstdReader, currentList); err != nil {
+ return nil, err
+ }
+ roundedSlot := r.cfg.RoundSlotToEpoch(slot)
+ fmt.Println(roundedSlot, freshDumpSlot)
+ for i := freshDumpSlot; i < roundedSlot; i += r.cfg.SlotsPerEpoch {
+ diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i))
+ if err != nil {
+ return nil, err
+ }
+ if len(diff) == 0 {
+ continue
+ }
+ fmt.Println(i)
+ currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, diff)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // now start diffing
+ diffCursor, err := tx.Cursor(diffBucket)
+ if err != nil {
+ return nil, err
+ }
+ defer diffCursor.Close()
+
+ for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(roundedSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= slot; k, v, err = diffCursor.Next() {
+ if err != nil {
+ return nil, err
+ }
+ if len(k) != 4 {
+ return nil, fmt.Errorf("invalid key %x", k)
+ }
+ if base_encoding.Decode64FromBytes4(k) > slot {
+ return nil, fmt.Errorf("diff not found for slot %d", slot)
+ }
+ s := time.Now()
+ currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Println("diffing", time.Since(s))
+ }
+
+ return currentList, err
+}
+
+func (r *HistoricalStatesReader) reconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error {
+ diffCursor, err := tx.Cursor(bkt)
+ if err != nil {
+ return err
+ }
+ defer diffCursor.Close()
+
+ k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return err
+ }
+ if k == nil {
+ return fmt.Errorf("diff not found for slot %d", slot)
+ }
+ keySlot := base_encoding.Decode64FromBytes4(k)
+ if keySlot > slot {
+ _, v, err = diffCursor.Prev()
+ if err != nil {
+ return err
+ }
+ }
+ var b bytes.Buffer
+ if _, err := b.Write(v); err != nil {
+ return err
+ }
+ // Read the diff file
+ zstdReader, err := zstd.NewReader(&b)
+ if err != nil {
+ return err
+ }
+ defer zstdReader.Close()
+ currentList := make([]byte, size*8)
+
+ if _, err = utils.ReadZSTD(zstdReader, currentList); err != nil && !errors.Is(err, io.EOF) {
+ return fmt.Errorf("failed to read dump: %w, len: %d", err, len(v))
+ }
+
+ return out.DecodeSSZ(currentList, 0)
+}
+
+func (r *HistoricalStatesReader) readValidatorsForHistoricalState(tx kv.Tx, slot, validatorSetLength uint64) (*solid.ValidatorSet, []uint64, []uint64, error) {
+ out := solid.NewValidatorSetWithLength(int(r.cfg.ValidatorRegistryLimit), int(validatorSetLength))
+ // Read the static validator field which are hot in memory (this is > 70% of the whole beacon state)
+ activeIds := make([]uint64, 0, validatorSetLength)
+ epoch := slot / r.cfg.SlotsPerEpoch
+
+ prevActiveIds := make([]uint64, 0, validatorSetLength)
+ if epoch == 0 {
+ prevActiveIds = activeIds
+ }
+ r.validatorTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool {
+ if validatorIndex >= validatorSetLength {
+ return false
+ }
+ currValidator := out.Get(int(validatorIndex))
+ validator.ToValidator(currValidator, slot)
+ if currValidator.Active(epoch) {
+ activeIds = append(activeIds, validatorIndex)
+ }
+ if epoch == 0 {
+ return true
+ }
+ if currValidator.Active(epoch - 1) {
+ prevActiveIds = append(prevActiveIds, validatorIndex)
+ }
+ return true
+ })
+ // Read the balances
+
+ bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, slot, kv.ValidatorEffectiveBalance, "effective_balances")
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ for i := 0; i < int(validatorSetLength); i++ {
+ out.Get(i).
+ SetEffectiveBalanceFromBytes(bytesEffectiveBalances[(i * 8) : (i*8)+8])
+ }
+ return out, activeIds, prevActiveIds, nil
+}
+
+func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64, currentEpochAttestationsLength, previousEpochAttestationsLength uint64) (*solid.ListSSZ[*solid.PendingAttestation], *solid.ListSSZ[*solid.PendingAttestation], error) {
+ if slot < r.cfg.SlotsPerEpoch {
+ return r.genesisState.CurrentEpochAttestations(), r.genesisState.PreviousEpochAttestations(), nil
+ }
+ roundedSlot := r.cfg.RoundSlotToEpoch(slot)
+ // Read the current epoch attestations
+ currentEpochAttestations, err := state_accessors.ReadCurrentEpochAttestations(tx, roundedSlot, int(r.cfg.CurrentEpochAttestationsLength()))
+ if err != nil {
+ return nil, nil, err
+ }
+ previousEpochAttestations, err := state_accessors.ReadPreviousEpochAttestations(tx, roundedSlot, int(r.cfg.PreviousEpochAttestationsLength()))
+ if err != nil {
+ return nil, nil, err
+ }
+ previousEpochAttestations.Truncate(int(previousEpochAttestationsLength))
+ currentEpochAttestations.Truncate(int(currentEpochAttestationsLength))
+ return currentEpochAttestations, previousEpochAttestations, nil
+}
+
+// readParticipations shuffles active indicies and returns the participation flags for the given epoch.
+func (r *HistoricalStatesReader) readPartecipations(tx kv.Tx, slot uint64, validatorLength uint64,
+ currentActiveIndicies, previousActiveIndicies []uint64, ret *state.CachingBeaconState,
+ currentJustifiedCheckpoint, previousJustifiedCheckpoint solid.Checkpoint) (*solid.BitList, *solid.BitList, error) {
+ randaoMixes := ret.RandaoMixes()
+ var beginSlot uint64
+ epoch, prevEpoch := r.computeRelevantEpochs(slot)
+ beginSlot = prevEpoch * r.cfg.SlotsPerEpoch
+
+ currentIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit))
+ previousIdxs := solid.NewBitList(int(validatorLength), int(r.cfg.ValidatorRegistryLimit))
+ // trigger the cache for shuffled sets in parallel
+ s := time.Now()
+ r.tryCachingEpochsInParallell(randaoMixes, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch})
+ fmt.Println("parallel", time.Since(s))
+ // Read the previous idxs
+ for i := beginSlot; i <= slot; i++ {
+ // Read the block
+ block, err := r.blockReader.ReadBlockBySlot(context.Background(), tx, i)
+ if err != nil {
+ return nil, nil, err
+ }
+ if block == nil {
+ continue
+ }
+ ret.SetSlot(i)
+ currentEpoch := i / r.cfg.SlotsPerEpoch
+
+ // Read the participation flags
+ block.Block.Body.Attestations.Range(func(index int, attestation *solid.Attestation, length int) bool {
+ data := attestation.AttestantionData()
+ isCurrentEpoch := data.Target().Epoch() == currentEpoch
+ var activeIndicies []uint64
+ // This looks horrible
+ if isCurrentEpoch {
+ if currentEpoch == prevEpoch {
+ activeIndicies = previousActiveIndicies
+ } else {
+ activeIndicies = currentActiveIndicies
+ }
+ } else {
+ if currentEpoch == prevEpoch {
+ return true
+ }
+ activeIndicies = previousActiveIndicies
+ }
+
+ var attestingIndicies []uint64
+ attestingIndicies, err = r.attestingIndicies(attestation.AttestantionData(), attestation.AggregationBits(), true, randaoMixes, activeIndicies)
+ if err != nil {
+ return false
+ }
+ var participationFlagsIndicies []uint8
+ participationFlagsIndicies, err = ret.GetAttestationParticipationFlagIndicies(data, ret.Slot()-data.Slot(), true)
+ if err != nil {
+ return false
+ }
+ // apply the flags
+ for _, idx := range attestingIndicies {
+ for flagIndex := range r.cfg.ParticipationWeights() {
+ var flagParticipation cltypes.ParticipationFlags
+ if isCurrentEpoch && currentEpoch != prevEpoch {
+ flagParticipation = cltypes.ParticipationFlags(currentIdxs.Get(int(idx)))
+ } else {
+ flagParticipation = cltypes.ParticipationFlags(previousIdxs.Get(int(idx)))
+ }
+ if !slices.Contains(participationFlagsIndicies, uint8(flagIndex)) || flagParticipation.HasFlag(flagIndex) {
+ continue
+ }
+ if isCurrentEpoch && currentEpoch != prevEpoch {
+ currentIdxs.Set(int(idx), byte(flagParticipation.Add(flagIndex)))
+ } else {
+ previousIdxs.Set(int(idx), byte(flagParticipation.Add(flagIndex)))
+ }
+ }
+ }
+ return true
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return currentIdxs, previousIdxs, nil
+}
+
+func (r *HistoricalStatesReader) computeRelevantEpochs(slot uint64) (uint64, uint64) {
+ epoch := slot / r.cfg.SlotsPerEpoch
+ if epoch <= r.cfg.AltairForkEpoch && r.genesisState.Version() < clparams.AltairVersion {
+ return epoch, epoch
+ }
+ return epoch, epoch - 1
+}
+
+func (r *HistoricalStatesReader) tryCachingEpochsInParallell(randaoMixes solid.HashVectorSSZ, activeIdxs [][]uint64, epochs []uint64) {
+ var wg sync.WaitGroup
+ wg.Add(len(epochs))
+ for i, epoch := range epochs {
+ go func(epoch uint64, idxs []uint64) {
+ defer wg.Done()
+ _, _ = r.computeCommittee(randaoMixes, idxs, epoch*r.cfg.SlotsPerEpoch, r.cfg.TargetCommitteeSize, 0)
+ }(epoch, activeIdxs[i])
+ }
+ wg.Wait()
+
+}
diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go
new file mode 100644
index 00000000000..fb641c730b6
--- /dev/null
+++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go
@@ -0,0 +1,65 @@
+package historical_states_reader_test
+
+import (
+ "context"
+ "testing"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/cl/antiquary"
+ "github.com/ledgerwatch/erigon/cl/antiquary/tests"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/spf13/afero"
+ "github.com/stretchr/testify/require"
+)
+
+func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postState *state.CachingBeaconState) {
+ db := memdb.NewTestDB(t)
+ reader := tests.LoadChain(blocks, db)
+
+ ctx := context.Background()
+ vt := state_accessors.NewStaticValidatorTable()
+ f := afero.NewMemMapFs()
+ a := antiquary.NewAntiquary(ctx, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, nil, log.New(), true, f)
+ require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33))
+ // Now lets test it against the reader
+ tx, err := db.BeginRw(ctx)
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ vt = state_accessors.NewStaticValidatorTable()
+ require.NoError(t, state_accessors.ReadValidatorsTable(tx, vt))
+ hr := historical_states_reader.NewHistoricalStatesReader(&clparams.MainnetBeaconConfig, reader, vt, f, preState)
+ s, err := hr.ReadHistoricalState(ctx, tx, blocks[len(blocks)-1].Block.Slot)
+ require.NoError(t, err)
+
+ postHash, err := s.HashSSZ()
+ require.NoError(t, err)
+ postHash2, err := postState.HashSSZ()
+ require.NoError(t, err)
+ require.Equal(t, libcommon.Hash(postHash2), libcommon.Hash(postHash))
+}
+
+func TestStateAntiquaryCapella(t *testing.T) {
+ //t.Skip()
+ blocks, preState, postState := tests.GetCapellaRandom()
+ runTest(t, blocks, preState, postState)
+}
+
+func TestStateAntiquaryPhase0(t *testing.T) {
+ // t.Skip()
+ blocks, preState, postState := tests.GetPhase0Random()
+ runTest(t, blocks, preState, postState)
+}
+
+func TestStateAntiquaryBellatrix(t *testing.T) {
+ // t.Skip()
+ blocks, preState, postState := tests.GetBellatrixRandom()
+ runTest(t, blocks, preState, postState)
+}
diff --git a/cl/persistence/state/minimal_state.go b/cl/persistence/state/minimal_state.go
new file mode 100644
index 00000000000..b22923767b2
--- /dev/null
+++ b/cl/persistence/state/minimal_state.go
@@ -0,0 +1,114 @@
+package state_accessors
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/raw"
+ ssz2 "github.com/ledgerwatch/erigon/cl/ssz"
+)
+
+type MinimalBeaconState struct {
+ // Block Header and Execution Headers can be retrieved from block snapshots
+ Version clparams.StateVersion
+ // Lengths
+ ValidatorLength uint64
+ Eth1DataLength uint64
+ PreviousEpochAttestationsLength uint64
+ CurrentEpochAttestationsLength uint64
+ HistoricalSummariesLength uint64
+ HistoricalRootsLength uint64
+ // Phase0
+ Eth1Data *cltypes.Eth1Data
+ Eth1DepositIndex uint64
+ JustificationBits *cltypes.JustificationBits
+ Fork *cltypes.Fork
+ // Capella
+ NextWithdrawalIndex uint64
+ NextWithdrawalValidatorIndex uint64
+}
+
+func MinimalBeaconStateFromBeaconState(s *raw.BeaconState) *MinimalBeaconState {
+ justificationCopy := &cltypes.JustificationBits{}
+ jj := s.JustificationBits()
+ copy(justificationCopy[:], jj[:])
+ return &MinimalBeaconState{
+ Fork: s.Fork(),
+ ValidatorLength: uint64(s.ValidatorLength()),
+ Eth1DataLength: uint64(s.Eth1DataVotes().Len()),
+ PreviousEpochAttestationsLength: uint64(s.PreviousEpochAttestations().Len()),
+ CurrentEpochAttestationsLength: uint64(s.CurrentEpochAttestations().Len()),
+ HistoricalSummariesLength: s.HistoricalSummariesLength(),
+ HistoricalRootsLength: s.HistoricalRootsLength(),
+ Version: s.Version(),
+ Eth1Data: s.Eth1Data(),
+ Eth1DepositIndex: s.Eth1DepositIndex(),
+ JustificationBits: justificationCopy,
+ NextWithdrawalIndex: s.NextWithdrawalIndex(),
+ NextWithdrawalValidatorIndex: s.NextWithdrawalValidatorIndex(),
+ }
+
+}
+
+// Serialize serializes the state into a byte slice with zstd compression.
+func (m *MinimalBeaconState) WriteTo(w io.Writer) error {
+ buf, err := ssz2.MarshalSSZ(nil, m.getSchema()...)
+ if err != nil {
+ return err
+ }
+ lenB := make([]byte, 8)
+ binary.BigEndian.PutUint64(lenB, uint64(len(buf)))
+ if _, err = w.Write([]byte{byte(m.Version)}); err != nil {
+ return err
+ }
+ if _, err = w.Write(lenB); err != nil {
+ return err
+ }
+ _, err = w.Write(buf)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Deserialize deserializes the state from a byte slice with zstd compression.
+func (m *MinimalBeaconState) ReadFrom(r io.Reader) error {
+ m.Eth1Data = &cltypes.Eth1Data{}
+ m.JustificationBits = &cltypes.JustificationBits{}
+ m.Fork = &cltypes.Fork{}
+ var err error
+
+ versionByte := make([]byte, 1)
+ if _, err = r.Read(versionByte); err != nil {
+ return err
+ }
+ m.Version = clparams.StateVersion(versionByte[0])
+
+ lenB := make([]byte, 8)
+ if _, err = r.Read(lenB); err != nil {
+ return err
+ }
+
+ buf := make([]byte, binary.BigEndian.Uint64(lenB))
+ var n int
+
+ n, err = r.Read(buf)
+ if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return ssz2.UnmarshalSSZ(buf, int(m.Version), m.getSchema()...)
+}
+
+func (m *MinimalBeaconState) getSchema() []interface{} {
+ schema := []interface{}{m.Eth1Data, m.Fork, &m.Eth1DepositIndex, m.JustificationBits, &m.ValidatorLength, &m.Eth1DataLength, &m.PreviousEpochAttestationsLength, &m.CurrentEpochAttestationsLength, &m.HistoricalSummariesLength, &m.HistoricalRootsLength}
+ if m.Version >= clparams.CapellaVersion {
+ schema = append(schema, &m.NextWithdrawalIndex, &m.NextWithdrawalValidatorIndex)
+ }
+ return schema
+}
diff --git a/cl/persistence/state/minimal_state_test.go b/cl/persistence/state/minimal_state_test.go
new file mode 100644
index 00000000000..acfb7c22f07
--- /dev/null
+++ b/cl/persistence/state/minimal_state_test.go
@@ -0,0 +1,32 @@
+package state_accessors
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMinimalState(t *testing.T) {
+ m := &MinimalBeaconState{
+ Version: clparams.CapellaVersion,
+ Eth1Data: &cltypes.Eth1Data{},
+ Fork: &cltypes.Fork{},
+ Eth1DepositIndex: 0,
+ JustificationBits: &cltypes.JustificationBits{},
+ NextWithdrawalIndex: 0,
+ NextWithdrawalValidatorIndex: 0,
+ }
+ var b bytes.Buffer
+ if err := m.WriteTo(&b); err != nil {
+ t.Fatal(err)
+ }
+ m2 := &MinimalBeaconState{}
+ if err := m2.ReadFrom(&b); err != nil {
+ t.Fatal(err)
+ }
+
+ require.Equal(t, m, m2)
+}
diff --git a/cl/persistence/state/state_accessors.go b/cl/persistence/state/state_accessors.go
new file mode 100644
index 00000000000..6155bde3371
--- /dev/null
+++ b/cl/persistence/state/state_accessors.go
@@ -0,0 +1,298 @@
+package state_accessors
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/persistence/base_encoding"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+)
+
+// InitializeValidatorTable initializes the validator table in the database.
+func InitializeStaticTables(tx kv.RwTx, state *state.CachingBeaconState) error {
+ var err error
+ if err = tx.ClearBucket(kv.ValidatorPublicKeys); err != nil {
+ return err
+ }
+ if err = tx.ClearBucket(kv.HistoricalRoots); err != nil {
+ return err
+ }
+ if err = tx.ClearBucket(kv.HistoricalSummaries); err != nil {
+ return err
+ }
+ state.ForEachValidator(func(v solid.Validator, idx, total int) bool {
+ key := base_encoding.Encode64ToBytes4(uint64(idx))
+ if err = tx.Append(kv.ValidatorPublicKeys, key, v.PublicKeyBytes()); err != nil {
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+ for i := 0; i < int(state.HistoricalRootsLength()); i++ {
+ key := base_encoding.Encode64ToBytes4(uint64(i))
+ root := state.HistoricalRoot(i)
+ if err = tx.Append(kv.HistoricalRoots, key, root[:]); err != nil {
+ return err
+ }
+ }
+ var temp []byte
+ for i := 0; i < int(state.HistoricalSummariesLength()); i++ {
+ temp = temp[:0]
+ key := base_encoding.Encode64ToBytes4(uint64(i))
+ summary := state.HistoricalSummary(i)
+ temp, err = summary.EncodeSSZ(temp)
+ if err != nil {
+ return err
+ }
+ if err = tx.Append(kv.HistoricalSummaries, key, temp); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// IncrementValidatorTable increments the validator table in the database, by ignoring all the preverified indices.
+func IncrementPublicKeyTable(tx kv.RwTx, state *state.CachingBeaconState, preverifiedIndicies uint64) error {
+ valLength := state.ValidatorLength()
+ for i := preverifiedIndicies; i < uint64(valLength); i++ {
+ key := base_encoding.Encode64ToBytes4(i)
+ pubKey, err := state.ValidatorPublicKey(int(i))
+ if err != nil {
+ return err
+ }
+ // We put as there could be reorgs and thus some of overwriting
+ if err := tx.Put(kv.ValidatorPublicKeys, key, pubKey[:]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func IncrementHistoricalRootsTable(tx kv.RwTx, state *state.CachingBeaconState, preverifiedIndicies uint64) error {
+ for i := preverifiedIndicies; i < state.HistoricalRootsLength(); i++ {
+ key := base_encoding.Encode64ToBytes4(i)
+ root := state.HistoricalRoot(int(i))
+ if err := tx.Put(kv.HistoricalRoots, key, root[:]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func IncrementHistoricalSummariesTable(tx kv.RwTx, state *state.CachingBeaconState, preverifiedIndicies uint64) error {
+ var temp []byte
+ var err error
+ for i := preverifiedIndicies; i < state.HistoricalSummariesLength(); i++ {
+ temp = temp[:0]
+ key := base_encoding.Encode64ToBytes4(i)
+ summary := state.HistoricalSummary(int(i))
+ temp, err = summary.EncodeSSZ(temp)
+ if err != nil {
+ return err
+ }
+ if err = tx.Put(kv.HistoricalSummaries, key, temp); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ReadPublicKeyByIndex(tx kv.Tx, index uint64) (libcommon.Bytes48, error) {
+ var pks []byte
+ var err error
+ key := base_encoding.Encode64ToBytes4(index)
+ if pks, err = tx.GetOne(kv.ValidatorPublicKeys, key); err != nil {
+ return libcommon.Bytes48{}, err
+ }
+ var ret libcommon.Bytes48
+ copy(ret[:], pks)
+ return ret, err
+}
+
+func GetStateProcessingProgress(tx kv.Tx) (uint64, error) {
+ progressByytes, err := tx.GetOne(kv.StatesProcessingProgress, kv.StatesProcessingKey)
+ if err != nil {
+ return 0, err
+ }
+ if len(progressByytes) == 0 {
+ return 0, nil
+ }
+ return base_encoding.Decode64FromBytes4(progressByytes), nil
+}
+
+func SetStateProcessingProgress(tx kv.RwTx, progress uint64) error {
+ return tx.Put(kv.StatesProcessingProgress, kv.StatesProcessingKey, base_encoding.Encode64ToBytes4(progress))
+}
+
+func ReadMinimalBeaconState(tx kv.Tx, slot uint64) (*MinimalBeaconState, error) {
+ minimalState := &MinimalBeaconState{}
+ v, err := tx.GetOne(kv.MinimalBeaconState, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil
+ }
+ buf := bytes.NewBuffer(v)
+
+ return minimalState, minimalState.ReadFrom(buf)
+}
+
+// ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized
+func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, err error) {
+ v, err := tx.GetOne(kv.Checkpoints, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil, nil, nil
+ }
+ // Current, Pre
+ return solid.Checkpoint(v[0:40]), solid.Checkpoint(v[40:80]), solid.Checkpoint(v[80:120]), nil
+}
+
+// ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized
+func ReadNextSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) {
+ v, err := tx.GetOne(kv.NextSyncCommittee, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil
+ }
+ committee = &solid.SyncCommittee{}
+ copy(committee[:], v)
+ return
+}
+
+// ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized
+func ReadCurrentSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) {
+ v, err := tx.GetOne(kv.CurrentSyncCommittee, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil
+ }
+ committee = &solid.SyncCommittee{}
+ copy(committee[:], v)
+ return
+}
+
+func ReadHistoricalRoots(tx kv.Tx, l uint64, fn func(idx int, root libcommon.Hash) error) error {
+ for i := 0; i < int(l); i++ {
+ key := base_encoding.Encode64ToBytes4(uint64(i))
+ v, err := tx.GetOne(kv.HistoricalRoots, key)
+ if err != nil {
+ return err
+ }
+ if err := fn(i, libcommon.BytesToHash(v)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ReadHistoricalSummaries(tx kv.Tx, l uint64, fn func(idx int, historicalSummary *cltypes.HistoricalSummary) error) error {
+ for i := 0; i < int(l); i++ {
+ key := base_encoding.Encode64ToBytes4(uint64(i))
+ v, err := tx.GetOne(kv.HistoricalSummaries, key)
+ if err != nil {
+ return err
+ }
+ historicalSummary := &cltypes.HistoricalSummary{}
+ if err := historicalSummary.DecodeSSZ(v, 0); err != nil {
+ return err
+ }
+ if err := fn(i, historicalSummary); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ReadCurrentEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.ListSSZ[*solid.PendingAttestation], error) {
+ v, err := tx.GetOne(kv.CurrentEpochAttestations, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil
+ }
+ attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit)
+ reader, err := zstd.NewReader(bytes.NewReader(v))
+ if err != nil {
+ return nil, err
+ }
+
+ fullSZZ, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+ if err := attestations.DecodeSSZ(fullSZZ, 0); err != nil {
+ return nil, err
+ }
+ return attestations, nil
+}
+
+func ReadPreviousEpochAttestations(tx kv.Tx, slot uint64, limit int) (*solid.ListSSZ[*solid.PendingAttestation], error) {
+ v, err := tx.GetOne(kv.PreviousEpochAttestations, base_encoding.Encode64ToBytes4(slot))
+ if err != nil {
+ return nil, err
+ }
+ if len(v) == 0 {
+ return nil, nil
+ }
+ attestations := solid.NewDynamicListSSZ[*solid.PendingAttestation](limit)
+ reader, err := zstd.NewReader(bytes.NewReader(v))
+ if err != nil {
+ return nil, err
+ }
+ fullSZZ, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+ if err := attestations.DecodeSSZ(fullSZZ, 0); err != nil {
+ return nil, err
+ }
+ return attestations, nil
+}
+
+func ReadValidatorsTable(tx kv.Tx, out *StaticValidatorTable) error {
+ cursor, err := tx.Cursor(kv.StaticValidators)
+ if err != nil {
+ return err
+ }
+ defer cursor.Close()
+
+ var buf bytes.Buffer
+ for k, v, err := cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() {
+ staticValidator := &StaticValidator{}
+ buf.Reset()
+ if _, err := buf.Write(v); err != nil {
+ return err
+ }
+ if err := staticValidator.ReadFrom(&buf); err != nil {
+ return err
+ }
+ out.validatorTable = append(out.validatorTable, staticValidator)
+ }
+ if err != nil {
+ return err
+ }
+ slot, err := GetStateProcessingProgress(tx)
+ if err != nil {
+ return err
+ }
+ out.slot = slot
+ return err
+
+}
diff --git a/cl/persistence/state/static_validator_table.go b/cl/persistence/state/static_validator_table.go
new file mode 100644
index 00000000000..5e32b890e2a
--- /dev/null
+++ b/cl/persistence/state/static_validator_table.go
@@ -0,0 +1,389 @@
+package state_accessors
+
+import (
+ "fmt"
+ "io"
+ "sync"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/ethdb/cbor"
+)
+
+// class Validator(Container):
+//
+// pubkey: BLSPubkey
+// withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals
+// effective_balance: Gwei # Balance at stake
+// slashed: boolean
+// # Status epochs
+// activation_eligibility_epoch: Epoch # When criteria for activation were met
+// activation_epoch: Epoch
+// exit_epoch: Epoch
+// withdrawable_epoch: Epoch # When validator can withdraw funds
+//
+// StaticValidator is designed to track changes in a validator's attributes over time.
+// It keeps track of attributes such as withdrawal credentials, slashed status, and various epochs
+// that typically change at most twice during the validator's lifespan.
+type StaticValidator struct {
+ publicKeys []staticValidatorField[libcommon.Bytes48] // Tracks changes in public keys.
+ withdrawalCredentials []staticValidatorField[libcommon.Hash] // Tracks changes in withdrawal credentials.
+ slashed []staticValidatorField[bool] // Tracks changes in slashed status.
+ activationEligibility []staticValidatorField[uint64] // Tracks changes in activation eligibility epoch.
+ activationEpoch []staticValidatorField[uint64] // Tracks changes in activation epoch.
+ exitEpoch []staticValidatorField[uint64] // Tracks changes in exit epoch.
+ withdrawableEpoch []staticValidatorField[uint64] // Tracks changes in withdrawable epoch.
+}
+
+// NewStaticValidatorFromValidator creates a new StaticValidator from a given Validator and Slot,
+// initializing the fields with the current state of the Validator at the given Slot.
+func NewStaticValidatorFromValidator(v solid.Validator, slot uint64) *StaticValidator {
+ return &StaticValidator{
+ // Initializes each field with the current state of the validator.
+ publicKeys: []staticValidatorField[libcommon.Bytes48]{{slot, v.PublicKey()}},
+ withdrawalCredentials: []staticValidatorField[libcommon.Hash]{{slot, v.WithdrawalCredentials()}},
+ slashed: []staticValidatorField[bool]{{slot, v.Slashed()}},
+ activationEligibility: []staticValidatorField[uint64]{{slot, v.ActivationEligibilityEpoch()}},
+ activationEpoch: []staticValidatorField[uint64]{{slot, v.ActivationEpoch()}},
+ exitEpoch: []staticValidatorField[uint64]{{slot, v.ExitEpoch()}},
+ withdrawableEpoch: []staticValidatorField[uint64]{{slot, v.WithdrawableEpoch()}},
+ }
+}
+
+// AddWithdrawalCredentials adds a new withdrawal credential entry to the validator.
+// This method is used to track changes in withdrawal credentials over time.
+func (s *StaticValidator) AddWithdrawalCredentials(slot uint64, withdrawalCredentials libcommon.Hash) {
+ s.withdrawalCredentials = append(s.withdrawalCredentials, staticValidatorField[libcommon.Hash]{slot, withdrawalCredentials})
+}
+
+// cborStaticValidator is a struct used for CBOR serialization of StaticValidator data.
+type cborStaticValidator struct {
+ PublicKeys []staticValidatorField[libcommon.Bytes48]
+ WithdrawalCredentials []staticValidatorField[libcommon.Hash]
+ Slashed []staticValidatorField[bool]
+ ActivationEligibility []staticValidatorField[uint64]
+ ActivationEpoch []staticValidatorField[uint64]
+ ExitEpoch []staticValidatorField[uint64]
+ WithdrawableEpoch []staticValidatorField[uint64]
+}
+
+// Serialize encodes the StaticValidator data into CBOR format and writes it to the given writer.
+func (s *StaticValidator) WriteTo(w io.Writer) error {
+ return cbor.Marshal(w, cborStaticValidator{
+ PublicKeys: s.publicKeys,
+ WithdrawalCredentials: s.withdrawalCredentials,
+ Slashed: s.slashed,
+ ActivationEligibility: s.activationEligibility,
+ ActivationEpoch: s.activationEpoch,
+ ExitEpoch: s.exitEpoch,
+ WithdrawableEpoch: s.withdrawableEpoch,
+ })
+}
+
+// Deserialize decodes CBOR data from the given reader and updates the StaticValidator fields.
+func (s *StaticValidator) ReadFrom(r io.Reader) error {
+ tmp := &cborStaticValidator{}
+ if err := cbor.Unmarshal(&tmp, r); err != nil {
+ return err
+ }
+ s.withdrawalCredentials = tmp.WithdrawalCredentials
+ s.slashed = tmp.Slashed
+ s.activationEligibility = tmp.ActivationEligibility
+ s.activationEpoch = tmp.ActivationEpoch
+ s.exitEpoch = tmp.ExitEpoch
+ s.withdrawableEpoch = tmp.WithdrawableEpoch
+ s.publicKeys = tmp.PublicKeys
+ return nil
+}
+
+func (s *StaticValidator) AddSlashed(slot uint64, slashed bool) {
+ s.slashed = append(s.slashed, staticValidatorField[bool]{slot, slashed})
+}
+
+func (s *StaticValidator) AddActivationEligibility(slot uint64, activationEligibility uint64) {
+ s.activationEligibility = append(s.activationEligibility, staticValidatorField[uint64]{slot, activationEligibility})
+}
+
+func (s *StaticValidator) AddActivationEpoch(slot uint64, activationEpoch uint64) {
+ s.activationEpoch = append(s.activationEpoch, staticValidatorField[uint64]{slot, activationEpoch})
+}
+
+func (s *StaticValidator) AddExitEpoch(slot uint64, exitEpoch uint64) {
+ s.exitEpoch = append(s.exitEpoch, staticValidatorField[uint64]{slot, exitEpoch})
+}
+
+func (s *StaticValidator) AddWithdrawableEpoch(slot uint64, withdrawableEpoch uint64) {
+ s.withdrawableEpoch = append(s.withdrawableEpoch, staticValidatorField[uint64]{slot, withdrawableEpoch})
+}
+
+func (s *StaticValidator) WithdrawalCredentials(slot uint64) libcommon.Hash {
+ currIndex := 0
+ for i, v := range s.withdrawalCredentials {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.withdrawalCredentials[currIndex].Field
+}
+
+func (s *StaticValidator) Slashed(slot uint64) bool {
+ currIndex := 0
+ for i, v := range s.slashed {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.slashed[currIndex].Field
+}
+
+func (s *StaticValidator) ActivationEligibilityEpoch(slot uint64) uint64 {
+ currIndex := 0
+ for i, v := range s.activationEligibility {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.activationEligibility[currIndex].Field
+}
+
+func (s *StaticValidator) ActivationEpoch(slot uint64) uint64 {
+ currIndex := 0
+ for i, v := range s.activationEpoch {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.activationEpoch[currIndex].Field
+}
+
+func (s *StaticValidator) ExitEpoch(slot uint64) uint64 {
+ currIndex := 0
+ for i, v := range s.exitEpoch {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.exitEpoch[currIndex].Field
+}
+
+func (s *StaticValidator) WithdrawableEpoch(slot uint64) uint64 {
+ currIndex := 0
+ for i, v := range s.withdrawableEpoch {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.withdrawableEpoch[currIndex].Field
+}
+
+func (s *StaticValidator) PublicKey(slot uint64) libcommon.Bytes48 {
+ currIndex := 0
+ for i, v := range s.publicKeys {
+ if v.Slot > slot {
+ break
+ }
+ currIndex = i
+ }
+ return s.publicKeys[currIndex].Field
+}
+
+func (s *StaticValidator) ToValidator(v solid.Validator, slot uint64) {
+ v.SetPublicKey(s.PublicKey(slot))
+ v.SetWithdrawalCredentials(s.WithdrawalCredentials(slot))
+ v.SetSlashed(s.Slashed(slot))
+ v.SetActivationEligibilityEpoch(s.ActivationEligibilityEpoch(slot))
+ v.SetActivationEpoch(s.ActivationEpoch(slot))
+ v.SetExitEpoch(s.ExitEpoch(slot))
+ v.SetWithdrawableEpoch(s.WithdrawableEpoch(slot))
+}
+
+type staticValidatorField[V any] struct {
+ Slot uint64
+ Field V
+}
+
+// StaticValidatorTable is a structure to manage a collection of StaticValidators.
+// It is used for tracking multiple validators and their state changes.
+type StaticValidatorTable struct {
+ validatorTable []*StaticValidator
+ slot uint64
+ sync sync.RWMutex // Mutex for safe concurrent access.
+}
+
+// NewStaticValidatorTable creates a new instance of StaticValidatorTable.
+func NewStaticValidatorTable() *StaticValidatorTable {
+ return &StaticValidatorTable{
+ validatorTable: make([]*StaticValidator, 0, 2400), // Preallocating memory for efficiency.
+ }
+}
+
+func (s *StaticValidatorTable) AddValidator(v solid.Validator, validatorIndex, slot uint64) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ s.validatorTable = append(s.validatorTable, NewStaticValidatorFromValidator(v, slot))
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ return nil
+}
+
+func (s *StaticValidatorTable) AddWithdrawalCredentials(validatorIndex, slot uint64, withdrawalCredentials libcommon.Hash) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddWithdrawalCredentials(slot, withdrawalCredentials)
+ return nil
+}
+
+func (s *StaticValidatorTable) AddSlashed(validatorIndex, slot uint64, slashed bool) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddSlashed(slot, slashed)
+ return nil
+}
+
+func (s *StaticValidatorTable) AddActivationEligibility(validatorIndex, slot uint64, activationEligibility uint64) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddActivationEligibility(slot, activationEligibility)
+ return nil
+}
+
+func (s *StaticValidatorTable) AddActivationEpoch(validatorIndex, slot uint64, activationEpoch uint64) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddActivationEpoch(slot, activationEpoch)
+ return nil
+}
+
+func (s *StaticValidatorTable) AddExitEpoch(validatorIndex, slot uint64, exitEpoch uint64) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddExitEpoch(slot, exitEpoch)
+ return nil
+}
+
+func (s *StaticValidatorTable) AddWithdrawableEpoch(validatorIndex, slot uint64, withdrawableEpoch uint64) error {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return nil
+ }
+ if validatorIndex >= uint64(len(s.validatorTable)) {
+ return fmt.Errorf("validator index mismatch")
+ }
+ s.validatorTable[validatorIndex].AddWithdrawableEpoch(slot, withdrawableEpoch)
+ return nil
+}
+
+func (s *StaticValidatorTable) GetInPlace(validatorIndex uint64, slot uint64, v solid.Validator) {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ s.validatorTable[validatorIndex].ToValidator(v, slot)
+}
+
+func (s *StaticValidatorTable) ForEach(fn func(validatorIndex uint64, validator *StaticValidator) bool) {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ for i, v := range s.validatorTable {
+ if !fn(uint64(i), v) {
+ break
+ }
+ }
+}
+
+func (s *StaticValidatorTable) WithdrawalCredentials(validatorIndex uint64, slot uint64) libcommon.Hash {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].WithdrawalCredentials(slot)
+}
+
+func (s *StaticValidatorTable) Slashed(validatorIndex uint64, slot uint64) bool {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].Slashed(slot)
+}
+
+func (s *StaticValidatorTable) ActivationEligibilityEpoch(validatorIndex uint64, slot uint64) uint64 {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].ActivationEligibilityEpoch(slot)
+}
+
+func (s *StaticValidatorTable) ActivationEpoch(validatorIndex uint64, slot uint64) uint64 {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].ActivationEpoch(slot)
+}
+
+func (s *StaticValidatorTable) ExitEpoch(validatorIndex uint64, slot uint64) uint64 {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].ExitEpoch(slot)
+}
+
+func (s *StaticValidatorTable) WithdrawableEpoch(validatorIndex uint64, slot uint64) uint64 {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex].WithdrawableEpoch(slot)
+}
+
+func (s *StaticValidatorTable) GetStaticValidator(validatorIndex uint64) *StaticValidator {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.validatorTable[validatorIndex]
+}
+
+// This is for versioning
+func (s *StaticValidatorTable) SetSlot(slot uint64) {
+ s.sync.Lock()
+ defer s.sync.Unlock()
+ if slot <= s.slot && s.slot != 0 {
+ return
+ }
+ s.slot = slot
+}
+
+func (s *StaticValidatorTable) Slot() uint64 {
+ s.sync.RLock()
+ defer s.sync.RUnlock()
+ return s.slot
+}
diff --git a/cl/persistence/state/static_validator_table_test.go b/cl/persistence/state/static_validator_table_test.go
new file mode 100644
index 00000000000..37b7f38af39
--- /dev/null
+++ b/cl/persistence/state/static_validator_table_test.go
@@ -0,0 +1,67 @@
+package state_accessors
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStaticValidatorTable(t *testing.T) {
+ // Make 5 validators.
+ vals := []solid.Validator{solid.NewValidator(), solid.NewValidator(), solid.NewValidator(), solid.NewValidator(), solid.NewValidator()}
+
+ table := NewStaticValidatorTable()
+ for i := range vals {
+ require.NoError(t, table.AddValidator(vals[i], uint64(i), 0))
+ }
+ // Now let us play with this
+ require.NoError(t, table.AddExitEpoch(1, 32, 34))
+ require.NoError(t, table.AddExitEpoch(1, 38, 35))
+ require.NoError(t, table.AddExitEpoch(1, 2000, 500))
+
+ require.Equal(t, table.ExitEpoch(1, 31), uint64(0))
+ require.Equal(t, table.ExitEpoch(1, 32), uint64(34))
+ require.Equal(t, table.ExitEpoch(1, 37), uint64(34))
+ require.Equal(t, table.ExitEpoch(1, 38), uint64(35))
+ require.Equal(t, table.ExitEpoch(1, 450), uint64(35))
+ require.Equal(t, table.ExitEpoch(1, 1_000_000), uint64(500))
+ // do the same for withdrawable epoch
+ require.NoError(t, table.AddWithdrawableEpoch(1, 32, 34))
+ require.NoError(t, table.AddWithdrawableEpoch(1, 38, 35))
+ require.NoError(t, table.AddWithdrawableEpoch(1, 2000, 500))
+
+ require.Equal(t, table.WithdrawableEpoch(1, 31), uint64(0))
+ require.Equal(t, table.WithdrawableEpoch(1, 32), uint64(34))
+ require.Equal(t, table.WithdrawableEpoch(1, 37), uint64(34))
+ require.Equal(t, table.WithdrawableEpoch(1, 38), uint64(35))
+ require.Equal(t, table.WithdrawableEpoch(1, 450), uint64(35))
+ require.Equal(t, table.WithdrawableEpoch(1, 1_000_000), uint64(500))
+ // now for withdrawal credentials
+ require.NoError(t, table.AddWithdrawalCredentials(1, 32, common.HexToHash("0x2")))
+ require.NoError(t, table.AddWithdrawalCredentials(1, 38, common.HexToHash("0x3")))
+ require.NoError(t, table.AddWithdrawalCredentials(1, 2000, common.HexToHash("0x40")))
+ require.Equal(t, table.WithdrawalCredentials(1, 31), common.Hash{})
+ require.Equal(t, table.WithdrawalCredentials(1, 32), common.HexToHash("0x2"))
+ require.Equal(t, table.WithdrawalCredentials(1, 37), common.HexToHash("0x2"))
+ require.Equal(t, table.WithdrawalCredentials(1, 38), common.HexToHash("0x3"))
+ require.Equal(t, table.WithdrawalCredentials(1, 450), common.HexToHash("0x3"))
+ require.Equal(t, table.WithdrawalCredentials(1, 1_000_000), common.HexToHash("0x40"))
+ // Now lets try to get the validator at a specific epoch
+ new := solid.NewValidator()
+ table.GetInPlace(1, 38, new)
+ require.Equal(t, new.WithdrawableEpoch(), uint64(35))
+ require.Equal(t, new.ExitEpoch(), uint64(35))
+ require.Equal(t, new.WithdrawalCredentials(), common.HexToHash("0x3"))
+ // Lastly serialize and deserialization
+ table.ForEach(func(validatorIndex uint64, validator *StaticValidator) bool {
+ var b bytes.Buffer
+ require.NoError(t, validator.WriteTo(&b))
+ tmp := &StaticValidator{}
+ require.NoError(t, tmp.ReadFrom(&b))
+ require.Equal(t, validator, tmp)
+ return true
+ })
+}
diff --git a/cl/persistence/state/validator_events.go b/cl/persistence/state/validator_events.go
new file mode 100644
index 00000000000..e1117b52459
--- /dev/null
+++ b/cl/persistence/state/validator_events.go
@@ -0,0 +1,164 @@
+package state_accessors
+
+import (
+ "encoding/binary"
+ "errors"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+)
+
+// THIS IS EXPERMIENTAL, IT MAY CHANGE
+
+var ErrUnknownEvent = errors.New("unknown event")
+
+type stateEvent int64
+
+const (
+ addValidator stateEvent = iota
+ changeExitEpoch
+ changeWithdrawableEpoch
+ changeWithdrawalCredentials
+ changeActivationEpoch
+ changeActivationEligibilityEpoch
+ changeSlashed
+)
+
+type StateEvents struct {
+ buf []byte
+}
+
+func NewStateEvents() *StateEvents {
+ return &StateEvents{}
+}
+
+func (se *StateEvents) AddValidator(validatorIndex uint64, validator solid.Validator) {
+ se.buf = append(se.buf, byte(addValidator))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = append(se.buf, validator...)
+}
+
+func (se *StateEvents) ChangeExitEpoch(validatorIndex uint64, exitEpoch uint64) {
+ se.buf = append(se.buf, byte(changeExitEpoch))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = binary.BigEndian.AppendUint64(se.buf, exitEpoch)
+}
+
+func (se *StateEvents) ChangeWithdrawableEpoch(validatorIndex uint64, withdrawableEpoch uint64) {
+ se.buf = append(se.buf, byte(changeWithdrawableEpoch))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = binary.BigEndian.AppendUint64(se.buf, withdrawableEpoch)
+}
+
+func (se *StateEvents) ChangeWithdrawalCredentials(validatorIndex uint64, withdrawalCredentials libcommon.Hash) {
+ se.buf = append(se.buf, byte(changeWithdrawalCredentials))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = append(se.buf, withdrawalCredentials[:]...)
+}
+
+func (se *StateEvents) ChangeActivationEpoch(validatorIndex uint64, activationEpoch uint64) {
+ se.buf = append(se.buf, byte(changeActivationEpoch))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = binary.BigEndian.AppendUint64(se.buf, activationEpoch)
+}
+
+func (se *StateEvents) ChangeActivationEligibilityEpoch(validatorIndex uint64, activationEligibilityEpoch uint64) {
+ se.buf = append(se.buf, byte(changeActivationEligibilityEpoch))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = binary.BigEndian.AppendUint64(se.buf, activationEligibilityEpoch)
+}
+
+func (se *StateEvents) ChangeSlashed(validatorIndex uint64, slashed bool) {
+ se.buf = append(se.buf, byte(changeSlashed))
+ se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex)
+ se.buf = append(se.buf, byte(0))
+ if slashed {
+ se.buf[len(se.buf)-1] = byte(1)
+ }
+}
+
+func (se *StateEvents) CopyBytes() []byte {
+ return libcommon.Copy(se.buf)
+}
+
+func (se *StateEvents) Reset() {
+ se.buf = se.buf[:0]
+}
+
+// ReplayEvents replays the events in the buffer and will return the err on handler failure
+func ReplayEvents(onAddValidator func(validatorIndex uint64, validator solid.Validator) error,
+ onChangeExitEpoch func(validatorIndex uint64, exitEpoch uint64) error,
+ onChangeWithdrawableEpoch func(validatorIndex uint64, withdrawableEpoch uint64) error,
+ onChangeWithdrawalCredentials func(validatorIndex uint64, withdrawalCredentials libcommon.Hash) error,
+ onChangeActivationEpoch func(validatorIndex uint64, activationEpoch uint64) error,
+ onChangeActivationEligibilityEpoch func(validatorIndex uint64, activationEligibilityEpoch uint64) error,
+ onChangeSlashed func(validatorIndex uint64, slashed bool) error,
+ e *StateEvents) error {
+ buf := e.buf
+ for len(buf) > 0 {
+ event := stateEvent(buf[0])
+ buf = buf[1:]
+ switch event {
+ case addValidator:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ validator := solid.Validator(buf[:121])
+ buf = buf[121:]
+ if err := onAddValidator(validatorIndex, validator); err != nil {
+ return err
+ }
+ case changeExitEpoch:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ exitEpoch := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ if err := onChangeExitEpoch(validatorIndex, exitEpoch); err != nil {
+ return err
+ }
+ case changeWithdrawableEpoch:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ withdrawableEpoch := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ if err := onChangeWithdrawableEpoch(validatorIndex, withdrawableEpoch); err != nil {
+ return err
+ }
+ case changeWithdrawalCredentials:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ var withdrawalCredentials [32]byte
+ copy(withdrawalCredentials[:], buf)
+ buf = buf[32:]
+ if err := onChangeWithdrawalCredentials(validatorIndex, withdrawalCredentials); err != nil {
+ return err
+ }
+ case changeActivationEpoch:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ activationEpoch := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ if err := onChangeActivationEpoch(validatorIndex, activationEpoch); err != nil {
+ return err
+ }
+ case changeActivationEligibilityEpoch:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ activationEligibilityEpoch := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ if err := onChangeActivationEligibilityEpoch(validatorIndex, activationEligibilityEpoch); err != nil {
+ return err
+ }
+ case changeSlashed:
+ validatorIndex := binary.BigEndian.Uint64(buf)
+ buf = buf[8:]
+ slashed := buf[0] == 1
+ buf = buf[1:]
+ if err := onChangeSlashed(validatorIndex, slashed); err != nil {
+ return err
+ }
+ default:
+ return ErrUnknownEvent
+ }
+ }
+ return nil
+}
diff --git a/cl/persistence/state/validator_events_test.go b/cl/persistence/state/validator_events_test.go
new file mode 100644
index 00000000000..b36be719f5a
--- /dev/null
+++ b/cl/persistence/state/validator_events_test.go
@@ -0,0 +1,72 @@
+package state_accessors
+
+import (
+ "testing"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStateEvents(t *testing.T) {
+ events := NewStateEvents()
+ events.AddValidator(0, solid.NewValidator())
+ events.ChangeExitEpoch(1, 3)
+ events.ChangeWithdrawableEpoch(1, 4)
+ events.ChangeWithdrawalCredentials(1, [32]byte{2})
+ events.ChangeActivationEpoch(1, 5)
+ events.ChangeActivationEligibilityEpoch(1, 6)
+ events.ChangeSlashed(1, true)
+ // Make one for index 2
+ events.AddValidator(2, solid.NewValidator())
+ events.ChangeExitEpoch(2, 2)
+ events.ChangeWithdrawableEpoch(2, 3)
+ events.ChangeWithdrawalCredentials(2, [32]byte{1})
+ events.ChangeActivationEpoch(2, 4)
+ events.ChangeActivationEligibilityEpoch(2, 5)
+ events.ChangeSlashed(2, true)
+ // Ok now lets replay it.
+ ReplayEvents(func(validatorIndex uint64, validator solid.Validator) error {
+ require.Equal(t, validator, solid.NewValidator())
+ return nil
+ }, func(validatorIndex, exitEpoch uint64) error {
+ if validatorIndex == 1 {
+ require.Equal(t, exitEpoch, uint64(3))
+ } else {
+ require.Equal(t, exitEpoch, uint64(2))
+ }
+ return nil
+ }, func(validatorIndex, withdrawableEpoch uint64) error {
+ if validatorIndex == 1 {
+ require.Equal(t, withdrawableEpoch, uint64(4))
+ } else {
+ require.Equal(t, withdrawableEpoch, uint64(3))
+ }
+ return nil
+ }, func(validatorIndex uint64, withdrawalCredentials libcommon.Hash) error {
+ if validatorIndex == 1 {
+ require.Equal(t, withdrawalCredentials, libcommon.Hash([32]byte{2}))
+ } else {
+ require.Equal(t, withdrawalCredentials, libcommon.Hash([32]byte{1}))
+ }
+ return nil
+ }, func(validatorIndex, activationEpoch uint64) error {
+ if validatorIndex == 1 {
+ require.Equal(t, activationEpoch, uint64(5))
+ } else {
+ require.Equal(t, activationEpoch, uint64(4))
+ }
+ return nil
+ }, func(validatorIndex, activationEligibilityEpoch uint64) error {
+ if validatorIndex == 1 {
+ require.Equal(t, activationEligibilityEpoch, uint64(6))
+ } else {
+ require.Equal(t, activationEligibilityEpoch, uint64(5))
+ }
+ return nil
+ }, func(validatorIndex uint64, slashed bool) error {
+ require.Equal(t, slashed, true)
+ return nil
+ }, events)
+
+}
diff --git a/cl/persistence/test_data/test_block.ssz_snappy b/cl/persistence/test_data/test_block.ssz_snappy
new file mode 100644
index 00000000000..f3301a23195
Binary files /dev/null and b/cl/persistence/test_data/test_block.ssz_snappy differ
diff --git a/cl/phase1/cache/attestation_indicies_cache.go b/cl/phase1/cache/attestation_indicies_cache.go
index 36c3b83e366..00a0fa36c4c 100644
--- a/cl/phase1/cache/attestation_indicies_cache.go
+++ b/cl/phase1/cache/attestation_indicies_cache.go
@@ -9,22 +9,22 @@ import (
var attestationIndiciesCache *lru.Cache[common.Hash, []uint64]
-const attestationIndiciesCacheSize = 256
+const attestationIndiciesCacheSize = 1024
func LoadAttestatingIndicies(attestation *solid.AttestationData, aggregationBits []byte) ([]uint64, bool) {
- bitsHash := utils.Keccak256(aggregationBits)
+ bitsHash := utils.Sha256(aggregationBits)
hash, err := attestation.HashSSZ()
if err != nil {
return nil, false
}
- return attestationIndiciesCache.Get(utils.Keccak256(hash[:], bitsHash[:]))
+ return attestationIndiciesCache.Get(utils.Sha256(hash[:], bitsHash[:]))
}
func StoreAttestation(attestation *solid.AttestationData, aggregationBits []byte, indicies []uint64) {
- bitsHash := utils.Keccak256(aggregationBits)
+ bitsHash := utils.Sha256(aggregationBits)
hash, err := attestation.HashSSZ()
if err != nil {
return
}
- attestationIndiciesCache.Add(utils.Keccak256(hash[:], bitsHash[:]), indicies)
+ attestationIndiciesCache.Add(utils.Sha256(hash[:], bitsHash[:]), indicies)
}
diff --git a/cl/phase1/core/rawdb/accessors.go b/cl/phase1/core/rawdb/accessors.go
deleted file mode 100644
index 2aa57a8ba5f..00000000000
--- a/cl/phase1/core/rawdb/accessors.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package rawdb
-
-import (
- "encoding/binary"
- "fmt"
-
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
-
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/length"
- "github.com/ledgerwatch/erigon-lib/kv"
-
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/utils"
-)
-
-func EncodeNumber(n uint64) []byte {
- ret := make([]byte, 4)
- binary.BigEndian.PutUint32(ret, uint32(n))
- return ret
-}
-
-// WriteBeaconState writes beacon state for specific block to database.
-func WriteBeaconState(tx kv.Putter, state *state.CachingBeaconState) error {
- data, err := utils.EncodeSSZSnappy(state)
- if err != nil {
- return err
- }
-
- return tx.Put(kv.BeaconState, EncodeNumber(state.Slot()), data)
-}
-
-func WriteBeaconBlock(tx kv.RwTx, signedBlock *cltypes.SignedBeaconBlock) error {
- block := signedBlock.Block
-
- blockRoot, err := block.HashSSZ()
- if err != nil {
- return err
- }
- // database key is is [slot + block root]
- slotBytes := EncodeNumber(block.Slot)
- key := append(slotBytes, blockRoot[:]...)
- value, err := signedBlock.EncodeSSZ(nil)
- if err != nil {
- return err
- }
-
- // Write block hashes
- // We write the block indexing
- if err := tx.Put(kv.RootSlotIndex, blockRoot[:], slotBytes); err != nil {
- return err
- }
- if err := tx.Put(kv.RootSlotIndex, block.StateRoot[:], key); err != nil {
- return err
- }
- // Finally write the beacon block
- return tx.Put(kv.BeaconBlocks, key, utils.CompressSnappy(value))
-}
-
-func ReadBeaconBlock(tx kv.RwTx, blockRoot libcommon.Hash, slot uint64, version clparams.StateVersion) (*cltypes.SignedBeaconBlock, uint64, libcommon.Hash, error) {
- encodedBeaconBlock, err := tx.GetOne(kv.BeaconBlocks, append(EncodeNumber(slot), blockRoot[:]...))
- if err != nil {
- return nil, 0, libcommon.Hash{}, err
- }
- if len(encodedBeaconBlock) == 0 {
- return nil, 0, libcommon.Hash{}, nil
- }
- if encodedBeaconBlock, err = utils.DecompressSnappy(encodedBeaconBlock); err != nil {
- return nil, 0, libcommon.Hash{}, err
- }
- signedBlock := new(cltypes.SignedBeaconBlock)
- if err := signedBlock.DecodeSSZ(encodedBeaconBlock, int(version)); err != nil {
- return nil, 0, libcommon.Hash{}, err
- }
- var eth1Number uint64
- var eth1Hash libcommon.Hash
- if signedBlock.Block.Body.ExecutionPayload != nil {
- eth1Number = signedBlock.Block.Body.ExecutionPayload.BlockNumber
- eth1Hash = signedBlock.Block.Body.ExecutionPayload.BlockHash
- }
- return signedBlock, eth1Number, eth1Hash, err
-}
-
-func WriteFinalizedBlockRoot(tx kv.Putter, slot uint64, blockRoot libcommon.Hash) error {
- return tx.Put(kv.FinalizedBlockRoots, EncodeNumber(slot), blockRoot[:])
-}
-
-func ReadFinalizedBlockRoot(tx kv.Getter, slot uint64) (libcommon.Hash, error) {
- root, err := tx.GetOne(kv.FinalizedBlockRoots, EncodeNumber(slot))
- if err != nil {
- return libcommon.Hash{}, err
- }
- if len(root) == 0 {
- return libcommon.Hash{}, nil
- }
- if len(root) != length.Hash {
- return libcommon.Hash{}, fmt.Errorf("read block root with mismatching length")
- }
- return libcommon.BytesToHash(root), nil
-}
diff --git a/cl/phase1/core/rawdb/accessors_test.go b/cl/phase1/core/rawdb/accessors_test.go
deleted file mode 100644
index 3478e0f7bf5..00000000000
--- a/cl/phase1/core/rawdb/accessors_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package rawdb_test
-
-import (
- "testing"
-
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
-
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/kv/memdb"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/stretchr/testify/require"
-)
-
-func TestBeaconBlock(t *testing.T) {
- _, tx := memdb.NewTestTx(t)
- signedBeaconBlock := new(cltypes.SignedBeaconBlock)
- require.NoError(t, signedBeaconBlock.DecodeSSZ(rawdb.SSZTestBeaconBlock, int(clparams.BellatrixVersion)))
-
- root, err := signedBeaconBlock.Block.HashSSZ()
- require.NoError(t, err)
-
- require.NoError(t, rawdb.WriteBeaconBlock(tx, signedBeaconBlock))
- newBlock, _, _, err := rawdb.ReadBeaconBlock(tx, root, signedBeaconBlock.Block.Slot, clparams.BellatrixVersion)
- require.NoError(t, err)
- newRoot, err := newBlock.HashSSZ()
- require.NoError(t, err)
- root, err = signedBeaconBlock.HashSSZ()
- require.NoError(t, err)
-
- require.Equal(t, root, newRoot)
-}
-
-func TestFinalizedBlockRoot(t *testing.T) {
- _, tx := memdb.NewTestTx(t)
- signedBeaconBlock := new(cltypes.SignedBeaconBlock)
- require.NoError(t, signedBeaconBlock.DecodeSSZ(rawdb.SSZTestBeaconBlock, int(clparams.BellatrixVersion)))
-
- root, err := signedBeaconBlock.Block.HashSSZ()
- require.NoError(t, err)
-
- require.NoError(t, rawdb.WriteFinalizedBlockRoot(tx, signedBeaconBlock.Block.Slot, root))
- newRoot, err := rawdb.ReadFinalizedBlockRoot(tx, signedBeaconBlock.Block.Slot)
- require.NoError(t, err)
- require.Equal(t, libcommon.BytesToHash(root[:]), newRoot)
-}
diff --git a/cl/phase1/core/rawdb/config.go b/cl/phase1/core/rawdb/config.go
index 4346c81ce85..9c5389f22c5 100644
--- a/cl/phase1/core/rawdb/config.go
+++ b/cl/phase1/core/rawdb/config.go
@@ -2,9 +2,9 @@ package rawdb
import (
"encoding/json"
+ "math"
"github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/common/math"
)
type BeaconDataConfig struct {
diff --git a/cl/phase1/core/rawdb/config_test.go b/cl/phase1/core/rawdb/config_test.go
deleted file mode 100644
index 53b733d2e10..00000000000
--- a/cl/phase1/core/rawdb/config_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package rawdb_test
-
-import (
- "testing"
-
- "github.com/ledgerwatch/erigon-lib/kv/memdb"
- "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestBeaconDataConfig(t *testing.T) {
- _, tx := memdb.NewTestTx(t)
- cfg := new(rawdb.BeaconDataConfig)
- require.NoError(t, rawdb.WriteBeaconDataConfig(tx, cfg))
-
- newCfg, err := rawdb.ReadBeaconDataConfig(tx)
- require.NoError(t, err)
- require.Equal(t, cfg, newCfg)
-}
diff --git a/cl/phase1/core/rawdb/test_utils.go b/cl/phase1/core/rawdb/test_utils.go
deleted file mode 100644
index a07b7d2ad64..00000000000
--- a/cl/phase1/core/rawdb/test_utils.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package rawdb
-
-import "encoding/hex"
-
-var SSZTestBeaconBlock, _ = hex.DecodeString("640000008997ec6da35c366e19412d96edbab8db862634d849f95bd57ad25e3faa292a8aebfad0d21291dfbcee6178ec3b684748177e04993b43ff95876e4c35370993aec8a20d10c4de8e8ca070baa2772ea60ac1097100f2bdab45dc7fbf1102965f24a1a64f00000000006a300400000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a6abe003b99a9be225b53dc3bff875315b2688b7e0c28fadfe98569e2fd6cc2c54000000b598bdc18bf593e4a2ad7431abfea5af242b3f890d06bafeb059594ed517bdb4e26cc2e81b840593ed1f9a382fcc730d18875f91387224d28a7a6ce0759ac02de90466b8e4cc6f992685cb3c181c348d4847458d2f91d9c8804d01d7abc32aa360e416dc4227ae2f4635a924d331972550d7c2f5d0897ef4f6bee0b1a390e94bff7b070000000000a4aa98e1c809da071f818e57153481270d5aa048a19be4b6c9d4050bad9bdaee00000000000000000000000000000000000000000000000000000000000000008001000080010000800100008084000080840000fff7ffff7ffeffffefffff7ffffffffffffffffffdffffffffffffffffffffffdffffffff7ffffbdffffff7ffeffffffff7ffffffffffffdfffffffcffff7fffac27369b928b90b18264905a3e2c0468dafb620020273f1c2f6553cdbc3feced2c972f9d72bafe0dbcc396e1e87eb4e70f574cd95ff0dcbab8d2b5fc7645c1bed06df6b17a9fc8123450540905c16ed62b54769c906189a16d8275961b937b808084000000020000020300000404000006050000080600000a0700000c0800000e090000100a0000120b0000140c0000160d0000180e00001a0f00001c1000001e11000020120000221300002414000026150000281600002a1700002c1800002e190000301a0000321b0000341c0000361d0000381e00003a1f00003c2000003e21000040220000422300004424000046250000482600004a2700004c2800004e290000502a0000522b0000542c0000562d0000582e00005a2f00005c3000005e31000060320000623300006434000066350000683600006a3700006c3800006e390000703a0000723b0000743c0000763d0000783e00007a3f00007c4000007e41000080420000824300008444000086450000884600008a4700008c4800008e490000904a0000924b0000944c0000964d0000984e00009a4f00009c5000009e510000a0520000a2530000a4540000a6550000a8560000aa570000ac580000ae590000b05a0000b25b0000b45c0000b65d0000b85e0000ba5f0000bc600000be610000c0620000c2630000c4640000c6650000c8660000ca670000cc680000ce690000d06a0000d26b0000d46c0000d66d0000d86e0000da6f0000dc700000de710000e0720000e2730000e4740000e6750000e8760000ea770000ec780000ee790000f07a0000f27b0000f47c0000f67d0000f87e0000fa7f0000fc800000fe810000e4000000a0a64f000000000000000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95875df8719ed61f0200308a7311fce34ef538be482af723159dcca5890e5f9a5bb945440c73b0525ce7a729536b2f922314438d6df8dfd0f634c6a143957f16f0052f11fbe72c1d07afa46dcfa9dd03cb6d3b724ee4e0daad46cf30b18f8b9afc000000000800000000000000000000000000000000000000000000000002e4000000a0a64f00000000001a000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b573262f6cb537f37a314b640afd41d1083107b5a7b78fde6079855895756fd1351d6b2642290cd195339cfbc00f903d0467287786535adf2eb46124f5e413c5b052b140ff6fa1c7e260a2b9937df3c5be1e69166644f3821794ad0c890b62ad000000000000000000000000000000400000000000000000000000000004e4000000a0a64f000000000028000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958a4af5d4876a6b953405acdaa59032be3fd497554b639b84c568209caa31e167c450cfcc14afbbc6fe5a319fcabf6dc617bc4bf5eb1fa2b8a873d00322eb3985cfcad1a8be168ce289ca18f528228a2170dce8a367c9e199869b33661c453d09000000000200000004000000000000000000000008000020000000000004e4000000a0a64f000000000005000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b7eb4904203273879633c0bb35371be4d7eb0a281d45fb1cfbbd9aa81ebabda909abf8776a3eb36984de66b943d530fd0779b80313f2bc6c62c74b4317a4018a09f45037d614720c49525b702fa669049c0c88f4ff121a53f42e5f71a84b9876000000040000000000000000000000000000000000000000000400000004e4000000a0a64f00000000000200000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d020000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea46889c0eeae804d10a5c5c521175575559e9543b5afff3d1986c79a78b1cbf0af4638ce1a7ab8bbef2871664cb69a79b6bd189b528884c14fe125318376989f033655b7329cdd817a8ae67fdf8e4f58f8496b27df726acc60d5f06c975d4a0cf9a9000000000000000020000000000000000000000000000000000000000004e4000000a0a64f000000000026000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b23be2e6c71204f845b977c8125c8902812d4641eb2f7b57feff7c6104bf96d7957d3d230fb174bb8ef694784479b3450f97ee7c8c4178a3803ade1cbd03884ab969da4f5bab4c3040021a1c82cbdafe93e1e51aa5e3d457e5f27f22dbb694c5000000000000000000000000000000080000000000000000000000000002e4000000a0a64f000000000009000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a023f084ebbea1eb12727826526571e6d218c3a01224522c6810aa9ceba369313c2380fa8d4dad6cdba3b9d62b87518b0ef8790f6db880631441d77209e8a6273c29e0916c6522e7e94d0dcac906ca88a038bd196daae3378609b7cc2557755f000000000000000000000000000000000000000000000000000020000004e4000000a0a64f00000000003e000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95abe85979700c10a96d494ed3acb8172315b8acd178eaf89ed25ab5794df87214847b4d4e6aa0f56e51d570044e607d7f134064d2c207b56bf6caab51f56d33b471ec38caa9a16c3fc884c1edcb6beb22668b163e26296e7e1e6ca67ae6ea2dcf000000000000000400000000000000000000000000000000000000000004e4000000a0a64f00000000002a000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a3a1d96721d35071f312ab92b72c1adbe121cfcd428c994f2ce18c4282daf20bd503343084ace6c82e27e4767e58eb6d15dfecf57c2f362e15574d9dc924845bac401b5631793e41b6d904f125848a60a6893e1c6f1f28918ee4b3c3ca06df7dffffffffffffffffffffffffffffffffffffffffbfffffffbffffbffff03e4000000a0a64f00000000001c000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b959795c634dc6de7b742ee91abf7d62ecd03fc3d3baf2f0d7bc556c2aac3f034e4a0f5c17f51121cd7c1466526fdd3b642091f2549325ce20ace30b1ad07ec12ad2c460c9e47ffedb1d70e9329e84764204fd761d99f7526ae4f366f3612dc3276000000000000000000000000000000000000000000000000000000000404e4000000a0a64f000000000003000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a79a807e5f799bb4b03c108824a2b0a1af7aa79a3a58e42d0bd4201212892303225452f7c756c632ddd47c77f6503411185ba3d5c42483e2c0420e734efd61e8322c49e086beb113e7021f8551463eb26666e19f60b4e691d23d80d3d9cc8883ffffffffffffffffffffffffffffffffffffffffffffffffffdfffffff07e4000000a0a64f00000000000d000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250509650be875d135eaa553cffe50339d9acd5f2b6172d76e0f8432a31d9b0960fa531c91bf2753428fdb3269750a49cc6350ccacc3ff64ec263dc0c560bb42806ddbc70bd4b4e0e3de23c4ffc4cac333603900571d8e07bbc724c57a649f86a3073fffffffffffffffffffffffffffffffffffffffffffffffffff7ffffff07e4000000a0a64f00000000002e000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505088a03032a53c1f459820e2a083a5eb97eb9b488bfe4314cc93f38f5916f1b4bb8f46149dc2fadf35a60998534f859e7310380287bc9726b5a34e573732870d264fe8f4e68095118facfbdf8482700dc4ee5131b61ed6981977483119561eb67effffffffffffffffffffffff7fffffffffefffffffffffffffffffffff03e4000000a0a64f00000000000c000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95838222efebbc42680175d7dc36167a2eba30fa6787805c15d11e5bbad5e9c70bcc2267a64af891d41586dbfe245af7300eed0f945dc35b87a46400628f207edfa28122400d9cd86cf3c2b90fb61534f18a4a681a6c52d0a3d0c01edefbf27435000000000000000000000000080000000080000000000000000000000002e4000000a0a64f000000000007000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508ace36bcdfaa188584e4203280d40e1712459de543c1ac23753a001b2223764e3fe0ea9d92f077af959627750752df4a1052544b89063bbb77c4b03f9c6b24f7e145fcb5d491d2ce75f6ee890dc87187a8e4e87c5644e172d3c6d13c2f57a9a2fffffffffffffffffffffffffffffffffffffffffdffffffffffffffff07e4000000a0a64f000000000015000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95959f6691705effb2699d934a38fe6e1cd128ed424180af8cbf2a526b8837ae6bc40d283df56b6c3dcfed0d7c948801d61281187cf186b8c01398b1e45bd2ab7b1b73a0d852e5e5c9ff81413e08865c2676bdb5df952eb7f8c2a7c475646c88c1000000000000000000400000000000000200000000000000000000000002e4000000a0a64f000000000012000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508483d231734cb64ebbe0bd06598ac78176498ad400e39695f6fd82529d2f60c45a6628cce1453b10674802851ed009c418999ebf96d52bbb8dc646c462f587c7b5656defef9da4023a4485f08c7a9c812e95d86a6aa953d580b9a1207ee2e875ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000024000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958dfadf268ec42892891bc9c5ab4a7611841370361c93b53e56820dd921e8c673f53053c36163b292fc2a8f0d93870e420aab6631fe4b32a9e77a3738e43f1decc3d8e60134650912ef945e57cb4edb71e1a4e52e365445d1b1ba916d2344d513000040000000000000000000000100000000000000000000000000000004e4000000a0a64f000000000020000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958bd9dee96f63649e83426243d04753fbe8aa2200b8a4e1e9e3700a358372d7473c0bc0276f307304cd7a4988a2ddbafa05f7518bfa4cb9aaecb1f0a5af0ff0496d537d285b089b3a8daa486ad7feb0e155bb2b5abd2b3c83fff764e1ad2f7fa3000008000000000000000000000004000000000000000000000000000004e4000000a0a64f00000000002d000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a1ce830db27b65b020e10687b068159c07fe4eb5a6de0759e8c4c1f1e6356aa4d12a7b17c3c2d24f3d1ac7aec1561676104144512f898601fcfa2c7dc950396abdc451d961cd3ac525251c678c70b5a4e42bf1a10a9c84e0181679cc041d52baefffffffffffffffffffffefffffffffffffffffffffffffffffeffeff07e4000000a0a64f00000000003e000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050979b55315793213c50288893b4f68f75b5b5626b2796bc63aefeb76d498b97386415d6a34921cf44994dedd94ca666ee1367be1137fe5173a965126a25600114e45dccb763bdfc7eaf170143362e5f15fae2aeeae31e8a0c221ac03426f32e14fffffffffffffffbffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000026000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b532477a111d0650968d8bb8020774a8486c4a99c06a6c21d1d4659e448f49e7f9d8e34c2642949f14d9a91886838b80134d543d84afdcc75aac5dad20543adac6bd1af522c5edbd0753633f490405f4764edd2a14c82f306b7de714ebac592cfffffffffffff7fffffffffffffffff7ffffffffffffffffffffffffff03e4000000a0a64f000000000010000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95aadb78375a1d0d427e9a46ee455e2da7097767772dbf000fea5318464642bfa0d46ede7f12b0319140f2a85cdf0aa18a0c7e1f10fbff90627e067647da226005c0ae30199d1b3c06618d10e072dfacbb8221c6f8779dcd5560515332f5ef0e56000004000000000000100001000000000000000000000000000000000004e4000000a0a64f000000000031000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b17c936471f41bfbd9e0cdbe404a8cadb68238b5af5fe1b8cd779584f2246577bbcb4b9357f54f6298ed50c7901c010a0db0a08620bafaa17bfa4c4fed3f2f31ec76930382ac0652ace40ca5e639626d77ede515140f960ce89590da2efb7b01fffffffbfffffffff7ffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000037000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b58fc6b7c31287f87d5d7775cd5c11802ec4a06b9eac20ac20bdbf00acaa2202376f65fa43f463c846655f5d60e2c0a808bde1cfaf35b752798c375d727310b164cd0650c12ae76645c083042ca5f41622b187f722c88f1757ef368fc6ec8b38000000000000000000000000000000000000000000000000000000004002e4000000a0a64f000000000021000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958634cf61889c95553dce98e137f2ca77b164d74ba8dd762e635ada921387feebac5952688e45295f066cdc1ab8da825a080849eaf7eabc0e15a96db7421ff3831094d6e3dacbacb33a227f9c8c7b71250f110a81d0d9ca3152480ba8bbbe3765000000000000000000001000000000000100000000000000000000000004e4000000a0a64f00000000001f000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b4bfda9580bdb187e0d9ac26a2a2f1d8e6f4377f31677fd19412ac851f06c8fc73349a4128f95567d6d6f1684663b232101407bd418b5b406a05441acb482555714bf2781a7e13e083070a77e637ddd6b6afb7ae6d5d1ace7e7aafdd1d3e9e45ffffffffffffffffffffff7fffffffffffffffffffffffffffffefffff07e4000000a0a64f000000000016000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508dae08c7aeaa1dc30fe61b200784bc1ad48069639401ec41e0008e72932cf1087c1cb72d1bf94dc7a1bf75e251fbfdcb19e9750648bd8318381b41f12709871160e04ca1d68d5021bd001f70914e310cb819e84e37da3810b12e5bd008d44d46ffffffffffffffffffdfffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000032000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050adf0f48de53445bf20a2dc1076eedbae1de3d99163e21ea0cf42af68e854a1a1feee4ebc92c50869b219cef1ae740fec01319713fd85b3ab98d786da66ffef49178a4c3241bb8bfb3fb99818cd4032d48b7ba35ccdecd3d712d2f547e91b28f2fffdfffffffffffff7ffffffffffffffffffffffffffffffffffffffef07e4000000a0a64f000000000021000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505086a1ec2580636363b8379c1e8980593557a6d198e06c21686242ca4d1f4f505afd3827a043fe8e882eb09ef8aa9dd8560dfa4d8f79d64f4af5ce02bcb47e36e67a7e8118fe39411089b986bf1b60b848d053f17d888f56ee0a307693382c33d2ffffffffffffffffffffeffffffffffffefff7ffffffffffffffffffff07e4000000a0a64f00000000001f000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b5a98c9d787279dac92315d997ab18d604c15fa45fcaec2ccacc20251af7de99eb2bfece9e5cb9b1dbab76c853c7544e177f436887dc42641941ebd110e65899455b0ebdc908bcea147e7bc574bb4f88f9399d6b1ddc311dbb4facf0c884c765000000000000000000000080000000000000000000000000000010000004e4000000a0a64f00000000000a000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a12b8e8640dc40e219454db4ba9b0dd8902ae9cd452a6abe20eb641b6a4b2918c78e4f5790acceefe8f964bb190b3da6171ab9c9a885886d791eb926219aadf9b81b6776440673fb1e0f0f2b369c0896bbc72ebfc1cd4a1785716b96d9d1861d000000000000000000000000000000000000000000002000000080000004e4000000a0a64f00000000000e000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508171f1987fc4c2efb0fe8fbbab5fabfef44e51daea2cc486a844b0bdf3e027f0470d473d67baea25663c6f4447e3cf4100bb1c832e992e0bd9f79e008ce37062e1bbb74502494a3190610c4dd7048b23bc525031e5543a6b269a1bf9e1eae057ffffffffffffffffffffffffefffffffffffffffffffffffffffffffff07e4000000a0a64f000000000035000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958d893f9b6956345b2fa647157e39ddb72d6ddb74339c045bd6f611c7331d95da0382cd0e0bea2f15cfb7b5f947f41e9905166b917c41623d2e244b8a451c03ade812846f91fd126ccd24524cdc98385d4cf32fed34937dd416629c90f752d8c4000000000000000000000000000000000200000000000000004000000004e4000000a0a64f000000000034000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050825eedd37f5631b1e9bd9fa43115e139cd4a49d87ecded6799dba1bef7c37380dc4b1b4e51c58c4dbbf4dbf4c8071fa6055256945dfd8c7af4cc073336ff4f24bbd573894baabf3ee5c8e13ab84a4341f79aefccaeb6cac00026e7251d3cea91fffffffffffffffffffffbfffffffbffefffffffffffffffffffffffff07e4000000a0a64f00000000000e000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b50036908796974cf031b00c2333685ea6a83cb045174add9c27834778d8daec2bbfb7cd9b9ac132e2f02f51ecc0c67916385e2e494ec6958c7d2e5224dad771a9cb8fafef85e504357919f3cce3cef804b38bdc80085cc1869011c66b52ae0f000000000000000000000000100000000000000000000000000000000004e4000000a0a64f00000000001e000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a57c511cfa01a350fd0c3d5aa490eca2a398d1190f9d2b081e2cdd6ebedfa03f95aa35f503d4d0f4cbac3a4c1e3251a106c1ec4c57a35ee7b7eda40c6445a347b7f5f24c291b7fa8fd16724a614cc13f85cfa764bc49a02188d63b566c4a2f6e000000000000000000000000000020000000000000000000000000000004e4000000a0a64f000000000029000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9589933d9d9374e1d5ea9c0d146606491d110e85e2d48ca69089e312533d36dc9f7ef99beb21a62221edaf62714bddef210363477f25aabb24b41565e283f6ed5175a661382a950e4761b59ea5821391b5007eadfb64e9f4e03b2a4bc062f440ea000000000000200000000000000000000000000000000000000000000005e4000000a0a64f000000000017000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505089ec7867a679aa96c850f5d966343835689c894ee5e233769f48931b1cf1d1833a9c433bc49ce2bfa93137d8fa09029911c142b3067deb751947d3185b4d5021594e20c549373241d917c21faa9d467825d77a75484d8ba031d01a294ae5ae6cfffffffffffffbfffffefffffffffffffff7ffffffffffffffbfffffff07e4000000a0a64f00000000002b000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b103737a916be450cdba6c4663910cc717b779e5e0e76a09515768ff910176a2368a86a228609c9615961eb647b1c3690b3f64f5e3e61adcc4ab0b031d5f267f768df2e133a500eff0d2b3f33decfd58b9fb33f28d943cb6098d3044d1d37905100000004000000000000000000000000000000000000000080000000004e4000000a0a64f00000000001a000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a25f5f85c9e7903585a5dbc4eefaf42880ca8b64d64f1e13c5c50d9ab44481cc2ea21535f8c869555ec57771aec53e01162f9cc901e76335fa5cc79c0473a5c93f2bacea42963b11aa50c965af41deaf5e78cf471464969118174945addf347affffffffffffffffffffffffffffedbfffffffffffffffbdffffffffff07e4000000a0a64f000000000000000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505096c0109a2a6f5a46229df912a122e39844feb7e3d864083fb51b3192283c21fc87e2a1271ae01292eeaa0686ca0a2570074e301217c7622373149233097c4ef77c2d12752048750da3375f944af7eb4f6235181529e7640144bfa3d795fc998efffffffff7ffffffffffffffffbfffffffffffffffffffffffffffffff03e4000000a0a64f00000000001b000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b359bc97259aa32a3b5293ffc65fc9c97e38ad07edb1293e556059a5698562b18b9c494276e531f8b6aa5c38817397b90b8e43e6911bb078d6a6dec70b0f32f1e5be7bb3a339edc4a5b1f1de33428a406fbb914ad79a20238bf806f675958e27000000000000000000000000008000000000000000000000000000000804e4000000a0a64f000000000017000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95add4cf6397f8aceea0bffcc81e71b196f0509c0641c965eb0e036abb2837f66071ea443f24675ef0409143f616c5e24009e87c737d6a58b675dedb1507b1618cacc96f8e541c46cb239aa07592e65546f9a06ed6da52d4702c1d65d9a4de7863000000000000000000010000000000000000000000000000000000000004e4000000a0a64f00000000003f000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050afc03bbda16a4596e429369e3b4d341f89b361fb0a06f4da0d7737f4f766e826290c5119012384bb7a9404b1efa8a0310beecc203e85cc8f340e02b10be081522c3f89688503519a7cd28bf942731d5b4c90f0a28dbf19106a43e9f5498a4841fffffffffffffffbffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000036000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9590794a40c53169fa1e79c80d49f2842edf1befdf025812df90a2fc2d9fb9e97a4f53f30633cd9c6ae20d615f6132419314167949f258f90e95b302867716e1aceee4f80eec9d2762fc7e41251717635b5e5c3b40b71a639271bad80f6c89f2e8000000000000000000000000000000000000004000000000100000000004e4000000a0a64f000000000011000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a470ad345faf967200cb92ab305e0f91ebb52ad47edb1050cf8b4e5224eb739b1437e9e9a9f3fe3d08388c0bd838307f1345a4170cad4b4a408f2ea37fcf94f1a87bb0f4b473fe47c18a66a6b9aceeec14a7375702556223f860b192e4adb78800000c00000000a000000000000000000000000000000000100000000002e4000000a0a64f000000000015000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505094ca5596a9036df656ce91e2157706c484957a996f48bf988687142505bd1651e3372c5517cb38660defcd6b082c7d8d003688d9b73520411de0640369855766b6271dadd1b20d79949004f8edd0a7d3d36a4450952bcdc11d93e0eb4d3941a9ffffffffffffffffffbfffffffffffeffdffffffffffffffffffffffff03e4000000a0a64f000000000013000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505084a9d3f569756a0283c53f5c47b9a49b868e7bea97ccc22311578e25b626f90071389f73aa49733c1971de614358886b009dad0818c845393c75d64d280d0efad41f7d3a9c191b1047328cd4aa661b548415ed63c23aa5e128a11ec2a0864d12ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f00000000001d000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508d5c4c8ace02aebca2c8905f072edd1365d3e00b714d068de4ed355c391646e9badfba377e1edf40546ed4d5231520a800880a548fe9629de999f81032f854d273449c0d6306c17dc8b12073ebcd09c17e4e748f774a2e33b4a2e2209ec886dafffffffffffffffff7fffffffffbffbffffbffffdfffdfffffffdfffff03e4000000a0a64f00000000000c000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b1bf16391c81ced969b9b633744815204ce1d7d5694a76997faaa9abc9d64754f692f0929fa55965ebd4883c89cf5468078e46a58ae6680d5b6692c2c4736c9045279427e991cd6122d9bedcda9a41fc8c71ed6e70cbfec65bc5b91232636e63fffffffffffffffffffffffff7ffffffff7fffffffffffffffffffffff03e4000000a0a64f000000000004000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b99dd7152d7b3ecabd4d505c1287b4804fc31269f401abf1aa09c3af3800a954ee44a8f8a000a7c618d7b6a0fff6966f09648a102c29c7c0f7fc35e7a4dc7b9da945d22e51485dab1c618a59f14e91a644e0f2792570ec023f554450f2cc5b37000001000000000000000000000000000000000000000000020000000002e4000000a0a64f000000000025000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9580b06d8ca3902b4c80e5ea1bb7a1e9f4bf20415060afbbe3542fa7abf332e8d702846b1053ee00624860323f2ca60e0e09a50fb4e5479caaaec05072b5cdd89b47c50f741a117d3089211c3c38f255d3ac93234259149497b4a1f167d4e0ab06000000000000000001000000000000000000000000100000000000000004e4000000a0a64f000000000028000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508f6155e7e300ee555ca12ebc7b1b6d3b3ec9a6de6d4e49cf57367e8c596c495776a15b80086eed2c328e8e69a89c5257055d912b8e1243c1ac03d87ab91af643d8d594b5378dfe56fec6782f6546b2c3586987e8dbdd7de459cafb10ddbd3067fffffffffdfffffffbfdfffffffffffffffffffff7ffffdfffffffffff07e4000000a0a64f000000000035000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050ae18e8931a8e5661b49f5881f4b6da28f7fa268ebe0f8bb1cc09dc35de296b825d5c76b8cf350641c4c5b4bbd395297d199cda60410d8c6ecaf08c525d15c4e3beaccd3b9e496b0be5c9624bb771883ecbb34ef1ad819c25d3caff59b4b5baf6ffffffffffffffffffffffffffff7ffffdffffffffffffffffbfffffff07e4000000a0a64f00000000003b000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250509661ad630cd8f5a0d301fa54816c6861a587cd7c38ad3b1466f4fb75976824e805bee51a6eb2636a3a2660f74c5f6c1f1914d97b1fd5e253537224957377509447db017274ebc6487ec36106052e1693c01379608525ffa18322241b1a270297fffffffffffbffffffffffffffffffff7fffffffffffffffffffffffff03e4000000a0a64f000000000010000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b3e094893413b3bc4352a277deea82e275348114d1f29c928a1db7a437bb4cbeb040422376e4226b306e4f9d6c45aed509c05dcc29947bcf8b2c6f57be05b258aa41c333c1320ca1ab8cb33ade449bcd7ed19529c5ddc7149b2a0386a22b1337fffffbffffffffffffeffffeffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000030000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505097a7f82a9d282da2a73ac8812045acbc4136d076c3282dc65114866bd6e4f19b8bb888a99b60192b95ba97ddcd65015d028212c24466f09d8dc0f026186ad9dbe94a6970ddd7d91215e54c0827097349dfd8a502cf8cec678d6cb6ddf12581e9fffffffffffffffffffffffefffffffffffffffffff7ffffffffffffff07e4000000a0a64f00000000002c000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b88aa81dcf7e23d08b350425eece22d588dfedc40c9f90702a9d375960b80a7e20dc67d46d7b7d14cd24c80540f0309216893f33dbb28c95a4622da9baf7a9cb23abdf31e3cf5159a6e0e2053113f194fa842e4f0753f524e7a3b6b91fd6627fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000025000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508413946477ff7360dc70e56f20a937f80e9d673be133c25bae1550f3d633bc63568cb900ca949c7e99609c7c37ab3cd0188a64f0a90ad86c1880f3d2a94ef03825abb4dad47925ce03033fb6906f14c2cc5398e3e8faab0b47b1df9cccfc88b0fffffffffffffffffeffffffffffffffffffffffffeffffffffffbffff07e4000000a0a64f000000000019000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9593022ef6995ae413564bd7e21a570e4576f48b9ec7079bc621061f1e55aa5a1486bed3447b2f6a408ddfafb7229d8a930f28d0c0a872b699db6fff68cf9706ee91968d646929ccd2a93c2e8e4a7dbe72a14ab073f8b8eed64491cda007b4f202000000000001000000000000000000000000000000000000000080000002e4000000a0a64f000000000039000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508fa8252f9a3842efba8d54def51e598bcb6a2d87956e41402ec025f65e64df44fbed80afb1bd6eb99c35160cd3518a3a065bc17aecc5ac0b680cf128985deda309f0207bd3fd30b7c28526496db30ff41cc17a8c376bcb53ae5505bb2adf53c5fffffffffffffffeffffffffffffffbfffeffffffffffffffeffffffff07e4000000a0a64f00000000001c000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050ae1d7c505d438e890697647e9623061aee9762fca0d24423f24b172e2316ac44e7ec91d888616958277b87541a46fb2014cbc25ced4e0be8082d85bb1cbe67f6b2d7b899f2dd62225fe6d695626def79421114c1ff01bb3b586c003e580a08d1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffb07e4000000a0a64f000000000024000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050909f2c2e1b3f1643feb47e0de16e6a40f4ee63d279d899b38914aafab3969c0cf72e6cff96f2a1025824e0dd088fa662049f1294e30864ae8a3219da1996a4e5736ff3736aee218991afaed4bb0f16172c89ef1bd3a14dd0a743a2f569b8790cffbfbffffffffffffffffffffffeffffffffffffffffffffffffffffff07e4000000a0a64f000000000029000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b213c11c3ff69b84d620661f3702fe602ad715a372047778659d85d96df5d7a042fa030691745cae261dfd4bbdbaf19710c56c0bdc6efbdd56bf031d56984042a1fb7a2d11a67f8e5b1a7b4f062ab13568c0828bdeeb30846757d91cfd225b6dffffffffffffdfffffffffffffffffffffffffffffffffffffffffffff06e4000000a0a64f000000000033000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a6e5e9adabebbffbd747c4828c9a4d800ada5814f69cfaf675411c8f3ce0a6ce5b1369b06e27d373405f3987e4f407470fad87ce3cc43f5fda3d9a1ae37d47f060c389569f6cc795a67b63a4d8bc3bca5a698726877d073ad2cb2d6f82c9417effffffffffffffffffffffffffffffffffffffffffffffffffffffffff03e4000000a0a64f00000000002f000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b00f6371afa3fcff69e30225b778fae19424deceedb9ab488ced3e81be5ab3a95ae3afdab077bb19c34c8b5ffa48c0c115cd84b0265b8e924aeed58ef592fb411b02d4b53db3db377096ad8827029edd707c44360ad4c3717b53672238ab7813fffffffffffffffffffffffffffffffffffffffffffdffffffffffffff07e4000000a0a64f000000000039000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a8311f17fd26db895a97b5f6615a4000ebd9db01433f49e8b361752f9fb09880d6ba36276071fd8313768595fb1243870c941754ba1efd5509e7acd683815c7b0e8fbcc4e621f15762f3aa3f589a34357cbdb4707a55769e4ff6d3592f46c16d000000000000000100000000000000000010000000000000010000000004e4000000a0a64f00000000003d000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a684b915940cf205e29d243c5e91efd63698e0797727ab5c3a821645d60cab94df689331133fdad14a1ac63bb6e6c71e107e0bf963bf9ef097b185ac4e75d94e586835aaf3c0b5e0cabfdfb700f52300a15279bf26224baa00a119cb389021a8ffffffffffffffffffffffffffffffffffffffffffffffffdfffffffff07e4000000a0a64f00000000002b000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050939e76066b90a401176f65111822dd7721e902582c15d9921f19b0adfff884cad480f3f7408b06676a8e2fa53631448814ba91b46a5dc735121c044f68da4b8a3ccba5ab5eec028feb6fa1ff4a27a06eaf9729a2003d4c5934c3958a483ce431efffffffbfffffffffffffffffffeffffffffffffffffffff7fff7ffff07e4000000a0a64f00000000003a000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508f4adab0d7d62008b34750d4294f59e2cb68a798159d9e7843aa32eccf8118b359a60a398e4b64c39502f4f0a285284e0b9a3230a0a997f2fc5b891957fe9056a8929135a15828afec0e8f65d23e8fdb9c58d140090ca6260009433ed893a07effffffffffffffffffffffffffffffffffffffffffffffffffffffffbf07e4000000a0a64f000000000027000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050845df1086264fba3e2e5cebeb8da3fd238cc986bf215ef9142a42c1dd940f23e6c8dcbdcfd99e4b767bd722299f24fd80339fb53c767f857514e667ab10725b17399389fcc25c35d674ffd042d12a4d05d254a67320a3e929ade8bf297be0ba2fffffffffffffffffdffffffffffffffffffffffffffefffffffffffff07e4000000a0a64f00000000000f000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250509018410244b5eb0aee7df1c9aee88db7a0f88e754ff638793121e3067c1b2a3c0100774d9272c7bfd441b604c61f34470a3e07ea454681bcba6fd02e8f65e4731125a9fc9e1cfdca25face5b23c1b6093c5d5e4584c240fe7c4bf037b4631915ffffffffdfffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f00000000003f000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95ac482c3bff7dbbd03af8b0ab74b5c227f8ed475240bd4124f2f9b8df21c571c50d0b98828adef6d48cc3ada1e24c9c850c52b5c335945d18a49835b1411262103292f4bf1191f64da967b3f2d926a0f1d8c9f383fdd2d68f96ef93357987fa86000000000000000400000000000000000000000000000000000000000004e4000000a0a64f00000000001b000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a74484652505099368856c419207930720ef6e8d9fdc96455d7d524872cfd3ba467e0e6d9b293764e1195ff317a6831b51c8630691936013c0f32bd8492cf9888f9005851bdd68e0c49be73477857c1e135837e97bde62e94ec326ec1003435e576bf9daf02fbffffffffffffffffffffffffff7ffffffffffffffffffffffffffffff707e4000000a0a64f000000000002000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a6599532681aff03d8da0eedbe65ac91353958231f8b9456c7dffa3ff5ac3698465a1c92441c74be06c8e395a4072c0a168a61d6a8185db4bec1280fc4f46357cae7eeecad55c6830c9bf658bf41a8ed8b751bc8db3c058bb276a9fa180c3185000000000000000000000000000000000000000000000020000000000004e4000000a0a64f00000000003200000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d020000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468aaa360f430a8da44ffa88d30c8b782bd902ee423154ff9c924e6f16a24728568ade677a1f87ab3d512304ceb017718a10e6271a49ec3d31e7fbb51a6e50140a08740de9e9e734c1a8c2960718b9e8f5a7c551e4061f2e293752fd3d7e285220d000200000000000000000000000000000000000000000000000000000004e4000000a0a64f000000000008000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a68ae5d07525ddede46b508ad1eef1ed12dfdc385ecfd0ea1ca0fd0aad01f069c244d5f483e1ea1b02b24ba3ca3b54c910e6a3284116a81e3ab9e19cc5d7f4a39ac9aa373203de2338257d578074915af7c7509c169b7bb95d77539f630bcf68ff7fffffffffffffdffffffffffffffffffffffff7ffffffffffffffff03e4000000a0a64f00000000000b000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050ac2fe7471d289a3d38a59fa1b836b35eb87d9073a9e1e7fcdfb730db8284043ee969f9a9f003609b51701a2beb71fb950f5ec5c74be9efcf5c8b824ec3ebb66ebbad2719d75faa9abf59f11778889dda02841e5af57fe5cc56f227833dcecf84ffffffffffffffffffffdffffffffffffffffeff7fffffffffffffffff07e4000000a0a64f000000000030000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958a7e43fb0587d37fd57be5bab9cf0a50e0fa6236ca0a58b7b2b01cda3d47639168710697d966c8055d7bca2e798f891f0d2e7c0b55ea3ee5a2ce1d01ca3ecc63f8fd121131f402fa3c2db65f96ac8a1650c92f0baa0c8d9e9e03f7b744274c70000000000000000000000001000000000000000000080000000000000004e4000000a0a64f000000000001000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050979b7b5572a7ad1d9259144a5ec6c34172fc2bf6b9f6a182036bb8fe51180fb35add42a5a669376032d1461ef346a78709002d6baf692192a71756f4d3f14c9e1fb4a4a3bcb4af795755af21369d97ee4e76c8b79611374cb3c00482bf74362bffffbfffffffffffffffffffffffffffffffffffffffffffdfffffffff07e4000000a0a64f000000000038000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b959641a3f77e612482737c009ddc06b79265f72a64c74a84e265d3f09c9d8c44f2516e235d279858246e2332d91529c76f149689a28c4b43960a790fd8a9c0fa07cf984591c6ae053becd2ec92f5bb86cc4c025d14fd5415a1f090ed01a55b5e92000000000000000000000000000000000000000000000000000004000004e4000000a0a64f00000000000a000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050940d2a76059d3cc0adbc2abf69219880c5091e2d588550f954dc07ad8c2cea3f494ba2a4334e88be5778f730b2312dfa096c2756eca50d07e16001872b88f6bcd8de78fd870b060e9a159745807b37ca34382ebdf98c54d1aa780e085f9bb43fffffffffffffffdffffffffffffffffffffffffffff7dfffffff7fffff07e4000000a0a64f000000000018000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508572e438455a1155889c32956bb32896efe0e4fa9b2fb3774166288035e2bf1db574cc3d0a1429fc8c12a7c89d2a655b02819779c9ca45763389a8333bebfa0d8d42258df324844af7e1e193543f4473908cfdd4dc53ea7a9b2f602778aaacd7fffffffffeffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f00000000000b000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958877824f66a3e630d3995381f0de9b4a1025007c2f82d98295b7d22dbb452552cb8ed5e9252ec14e5c75a54f2aa744fc0eb92dcfb82eeabac5fda6f6b541bbff29082c7257aa1b8b8ac983a085f6b89657580d2ec56c1937283d22430ff323d2000000000000000000002000000000000000010000000000000000000004e4000000a0a64f00000000002f000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958ebac609c304c9847d1a986746548ad7af3333c9716f0bc09f085f42026e6cb6cb515b4fbfa4410effc6f2f92a3ac9a215d3dda42a03a9846eb00aaa34cd8868dc66f77e15da87df735b9f54e0f683eb51f872fa873c7c6a80cd6b18d3cca1b6000000000000000000000000000000000000000000020000000000000004e4000000a0a64f000000000027000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9593b00a6334211b5f1baa5c07b6086a90e701cab4fc9f4c21b12ab97292d91286e24a01c7bfdb061918ae5cee56de56bc09674500ab0c517611477dd5fcb815eb9c9ea9fc1f36160675bbe6e1d80618c6b1a812d0d6711bb4ad1a7336208291f8000000000000000000000000000000000000000000001000000000000004e4000000a0a64f000000000023000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050871bf1ae5be9c4ae727d01ff54d14afd4877d7c57ca33e2509675fba88ef79af56d55f8eb94b9bc9578d46262dba46a705f12a28fcce071a38f4cd2c0615707629a0603a560e7b9a48466bbe23f4964c5ad882e2982b639f8fcb09f846ce6752ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff07e4000000a0a64f000000000037000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050834b88302c23f7fc8f41f26c08283fbb698839cedfe7f4b4aba2e4dec16d1bd97e4df8c364b287b92f7d7efe39099458058b78286f4b24d0978b781b3a3bae833d94ac9185429dda9ba70c6e72e55d725aa168c2c1b7d2ca2e83d4963775ed5effffffffffffffffffffffffffffffffffffefffffffffffffffffffbf03e4000000a0a64f000000000032000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95847262c64b37e5013c580aaad847c14da4679d6eae31c3b6b1b4c1f082317706e9a3053aecebbe3619b8de3eeffc700707ed267f2c154f6b1a2ba4c17747f438e9f9b367f8e634361ce0439a443f0ba34ea078b00ee9398950c388c7161e0992000000000000000008000000000000000000000000000000000000000004e4000000a0a64f000000000004000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a2600043331a9308cb6c7ad72f51c25bfd662823d6f1e3abdc44a9cd4b547068ed822555daa575c55de53786279773770a74adec794187c4e6e94d6870a8f8b2085cedfb0a8ae6c8dc808be72d9bf6a843a5ceabe5802c0d88dae2f153d120cafffffeffff7fbffffffffffffffffffffffffffffffffffffdffffffff03e4000000a0a64f000000000006000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95a3b0e1dee28e8a92c02bf1b43a524d997e11a1b40a4ff2f090928308b581f9753c66d21f7b84bd6e24c9ded8645407790e5a11cd2adfc3fd32cf9f8299a370782814044a74398d19adac073431b8b16bc0068127d228e16e3d273b3977389691000000000000000000000000000008000000000000000000000000000004e4000000a0a64f00000000002d000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9586817c9c1445b97614fcf8df49f38471176dc903f4cc0bb6d77fd79f70eeb5824745172f723fe1191f8e1fed23613bb9137ebb9e53c026637929e137eba71e10af51769607c24a4a9491a98d8d621a822d18dd36260722858bf9ef1d734b6d4c000000000000000000000010000000000000000000000000000010000004e4000000a0a64f000000000006000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050adc5589942f05df34d32860774a5356eaddf18ebdd92c30e30576c944ea2235b3aae30fe24459fecf13433fcf0ff79030652e6a7f56b15b6ec88893ee5a4072329999843a08762e0b6fbed785c780e1b65f1d1d7fefd4f144d581b8b14235f4ffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffff07e4000000a0a64f00000000003b000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9590828c0deccab5ea4ffa658a41e9ec7bc92fb4d1904d75c14bce3cb8a03aa08cc56f6ba65ce8ae7f819e0ebceba58b3904d28b40d9d97486387bf1e5adf4812010cd72aff188d2274e16e2b7130894d72300825d781baed2205c9a90c73f9415000000000004000000000000000000008000000000000000000000000002e4000000a0a64f00000000003c000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b9283c8d1d5f826ef8994f2469e2de5be890cf2b6648916648ec92c1fb42dc2ae37203106781a3b8e0f4715ce8afb54612dcf98ba2f614874544b0395b79138f432fc53829649f090b1cbfb7a6392bbc00659bf4253b9e981d5d110d1f45f996000200000010000000000000000000208000000000000000000000000004e4000000a0a64f000000000002000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508888211d73d260706921a5dec1367d92ba50f153e32aed7f974847ee298308a7d02f2d708c774a758ebbd8787e12e8d618601f5f2402452b399662317515dc86e9718f974484de661e6d64061a2c72a5c0624a421be939898980393a57163e0fffffffffffffffffdfffffffffffffffffffffffffffffdfffffffffff07e4000000a0a64f000000000005000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508d8755ba11e06e502b4e3df4ef9c9bda1f6eb707c7c864fcbe559d2de51d983ccd770605dd89a45059efed838d79142d0f46b8c520ad5c19d841c0d0bbf96a00384e3d057e41526ab1ae10d80fb80fa0a34f1d48f8932b48fbebb5da9603a198fffffffbfffffffffffffffffffffffffffffffffffffffffffbffffff07e4000000a0a64f000000000008000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b9583c597823bcbfe3c7a60bbc9af421ba0e2183056284a88bc6e72e69f78a5711f129653cec5ef30b2c224d04bf3eccfa703ebacf97b99354c543f06f7189f07ae6b7b71bbc05514d53ad3e3438f86b87573adab5c40bbff8125419733fc5a13db000000000000000020000000000000000000000000000000000000000002e4000000a0a64f000000000001000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958c80e9e80f7be9ff227d0e0a1f616e834d110caba0515123873172d5ac5d8cfbdb6966f709aee2452e32018cc0305d890e2a48eacc52b97e1eca29352d144b26f689ed7f779eab93e0c989afb3eefebb44cc9ef458b0177330616e69d3988986000000000000000000000000000000000000000000000000200000000004e4000000a0a64f000000000003000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958918749d0a67f1824b3ae728bab026fff4cbaa44b14b1132e52cd9a10403e87ffc24e06c2d799be7435c1faee388157d1568fd438543250f580db75a27302c818ede1cf5d2229a54184a144e536d855e67c00274ab57d4eccf52045572e6b1e6000000000000000000000000000000000000000000000000002000000004e4000000a0a64f000000000034000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958a43e0a71b2f4d27ccb8ebadf4d72bd4859008100b74d6b5b873b0623437fd299c06d769e17dbdafcd102dbde497cecb18fd3c7cd5b82b2bf31a8a633391ff2cbf68f33611585b417bdbe11b2ea91903ec9ecf5b67f4b18c7d0fe5ef61f19c45000000000000000000000400000004000000000000000000000000000004e4000000a0a64f000000000020000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b7f04b645c48ba674c4ddcff138be61ccb36215e9d98f9726480fdfce56144438e64d2f509861a2ccb01e39f95db603e11b85923c1af01a9b9020cbc180ff1b0d0ff1185a9c1612bdc83f09efdb44a8dd703d92fe944a15d87e6470e14faea48fffff7fffffffffffffffffffffffbffffffffffffffffffffffffffff07e4000000a0a64f000000000011000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508b1b4d69682f6697df4890e01ef9bd4acf3413f9afdea9731b43dadcf807e791094b9068f16e68ec719f2d401a9ef46513028894f122f1ef7d894d3156a56b161b5dcb4dbc92d19a7590a4aac2f981a31d7f9d2481463c59b9aa69eafc52d5b3fffff3ffffffff5fffffffffffffffffffffefffffffffffefffffffff03e4000000a0a64f00000000002e000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b958e1d240cc40dc66d285974b23b0dea4ddc2a8770ef9cad60ae08ff4590776de635dbfd7e648b008335c69f6b1fc369100bd0a9c789c6198ba481f8b1ea117df28e5d5d4f8cbfb64ba385eba51e16324bbe1e4e503630490d764a3a32ebe210c1000000000000000000000000800000000010000000000000000000000002e4000000a0a64f00000000001e000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508f4baebdb2af01422e6941f241ae4b10a0a61f9d4b14231a5b39ea478908fb7ac2ba8b75a8678ab65a041581722b2f33069c5517da86df502a3d9881681188e52eadde50733191478a81c67938adfb3dfce5cd2d68f394a30dd5093764370633ffffffffffbfffffffffffffffffdfffffffffffffffffffffffffffff07e4000000a0a64f00000000003c000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a7448465250508eb8d8fe010b644ace5b8b7e5a644e72dce76b3936a37403c2cbdf8afeb017f65d3929fc8a17274467f164dd343a8fe71791906b688af4e2d1c73f99a8e048af35e03f79ffb447110953e632b114bec82311faa55ff3d6fd8662719e7eb4b8d0fffdffffffefffffffffffffffffffdf7fffffbfffffffffffffffffff07e4000000a0a64f000000000036000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b236af8892d13c5bacc321d3d1945989ebc1314421d23de5a322dad76501eb0b674024a2ca637116234a7e53829c3d8119a726378e9debc55d8884ad3294127f92c0d3ae1ce622cd925a670687539098356c127c5cfae50c837528a1ae6423f8ffffffffffffffffffffffffffffffffffffffbfffffffffefffffffff07e4000000a0a64f00000000001700000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d020000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468a5f55804539553c435ef88091184a25ad34c5bfe952b1a8c0b7b18614cc4ee681010864e73677f8cf2fae4541cadfa4c0d3f778e103ed5a480a1246367db5798c7e3b29dc1c525016bd7642f75ec4dc7f00abee679498610ccf3fa571aa4a928000000000000040000000000000000000000000000000000000000000004e4000000a0a64f000000000009000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050b62b508c665fa3d2ec44e53ac6771b5780a1ba2389ff57e9e6bdc29493fa9619ff9fa438ea2b3e4c2484c939787fde531106642c8024caa808310d3c7a8cc60ef55f04e92f05d4aa5a14ae21cd90a62f23037ca2ac878161fd9a551ca348ad4cffffffffffffffffffffffffffffffffffffffffffffffffffffdfffff07e4000000a0a64f00000000000d000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95aa2b59355af9d1e9c7119226f0aaec11651059d4e3a04c4a2102981dce58bc4097ef3ea383b2746737d256881b3840e50fe55e1410e6f2e48aac534e72e2df6892a035d86a88c2005177d5ee759a2da51fb7fa9a65a4e3ecf303742d011cfd70000000000000000000000000000000000000000000000000000800000004e4000000a0a64f000000000031000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95aa4300a3d7c4c06501d1006f9a69a55b8fc3f9bddd0b122a9b068e1631d9e8872f4cdbc200852c5a64f6f5e01c77c59012639bb319727bae566a8ef876321ef9f09c5d3bad4c5422b53b3a3450ca8a9ce4c0bf24f171144cc4cfed67874a833b000000040000000008000000000000000000000000000000000000000004e4000000a0a64f000000000022000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050962ef72d96cefabb14bf9b11e9212eb6eac9b5eb5e342b6470f8258acee337b03e810900c18f94f8fd66524ccf528bb610f650da4b850048c79a1db8e9553e709b01eab5d09b1a2a22bb036ebacc33b8f7a0fbef3a2eadcbf2e587c8f28dbd97ffffffffffffffffffffffffffffffffffffffffffffffffff7fffffff03e4000000a0a64f000000000014000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050862ec5425b1252c76a15929ed6e6337318170900831ec9697cc3d16e5130694a79480a0a6133ff6ab74c1c1a98a7a91203a6125780c65e2324955942ce0880b3c5af99d1438cbbcf6bc998634792d5000d8a7a906e59fa9bdc836883dd21221cffffffffffffffffffffffffffbffffffffffffffeffffffffffffffff07e4000000a0a64f000000000038000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050afeac07b398936e4b0b682bca6830ded2e1bd5b84cf009f8ff3ea489c90b1381aac623fe0124894eef4717abab3ce7120a7933c3bc1c9ba9abe502cb9b04db7ff4917c039cd5534d957078ccc2bbc5dd3e5a6cc49a137ac036387a7202298a31fff7fffffffffffffffffffffffffffffffffffffffffffffffffbffff07e4000000a0a64f00000000001d000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b4019b0c790a08cef9ae2690a5d76d1918ce5c306634f80912e91a99ad3d04faae42fa629c166036185047b500154c2910a0d33d9bda0183f2d065dd925444d242a5464fa663aba381fff358b11601faa6dbe8d9300add27790c17adf6a44e7f000000000000000000000000000000400004000020000000000000000002e4000000a0a64f000000000014000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95b7561860aeff0548786e7d68742744fc041e8ce49cfb9c2fef49d47dc7a91e386b8ed3280e7db654bb9147574af643f910934b0c74888fe37351517a06a71cbc274954bbf40c5c33a6d82e8969c5b3d63c965f8eda00b55069163962d257d13a000000000000000000000000004000000000000001000000000000000004e4000000a0a64f000000000019000000000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0357d0200000000005ae25d65dac2098a3b72649c16948f6f1e5b3cd100c14d02969a744846525050a61472d1df1491a94ce1abaee1caee39f752013f309a6d29a3229ee4be459e70e980f9edaf41a9e40d2535f0fccd37dc18da2673cc44473476918bd640da871048ddf800408e835c6b88e1d03a1f159df57d9a94e185ec80f2dd774093347956fffffffffffefeffffffffff7fffffffffffffffffffffffffff7fffff03e40000009fa64f00000000001c00000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0ad2d1d07e13ca622798a27b2731ca0da1b42b4089e62e42f163260e24b9818b4d1f6d3bc8a415012749dfd0388078f2902145a97c365b8641d0689736c5bba0eb443117eb14e9d437e798dc06ca5dcab5bf663792fe66289f6a6227493a635ee000000000000000000000000000200000000000000000000000000000004e40000009fa64f00000000002400000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0a78825fa9f9d9ec1c6b5d0d8143fd67374f35f4436698fa787ea6301419b744ffaa72fe5a3ae43e756e3d599d3c5547800b6d1838d0111274dd992f07ddf46cc371dc96cf3c2cbbfeb10b9faaf78d830b9d1d559c02f54e3fb1b1ff2b720559c000000000080000000000000000000000000200000000000000000000004e40000009fa64f00000000000200000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0b01444e342bb37a449ea7b8042bd31163f5a8cbdecab2c9be06cdee288b555c192b1274c16f18f6f236ac00d90d737d3151175b1f8b32c4768b254cddbc82998e88abaccf1706779f2d24dee3b2ad7a05dfdeaf03a9e4b794ba04ea4d8d51c7a000000000000000000000000000000000204000000000000000000000004e40000009fa64f00000000003700000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0ac00218c05636e3ac74f47fe83e37462a633dafe30a8acd222dc31b7370572b792cf6e0bd5d4178753ee38ef82ce1a890de18c1b290db6285b4827e381d43afbf35cd041b453d60c933366d54f8ccdeb2ad436892cbc88675c2cb0f607e84cd6000000000000020000000000000000008000000000000000000000000002e40000009fa64f00000000000000000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0b15df1b0c7db9d568e39dfaa5ca6cc2e373b5a9310fae735334b7f1257cb37372486e45ca23999fee5f1d6158f8a58b2176ecf0fbfc5d502bfb75ed8b91dd9212f01c09df99593c1872941c4306c1c0972ee7f93730e5b4fa21d4e2b9ddd2304000000000000000000000100000000000000000000000000000000000002e40000009fa64f000000000038000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0a2a92dfe7471d555b792260b90840c5f4d634821831681a8b9130e4a12d7633c9a719d51be13a029f35271b3838cc3c009018bcf00c10320e684deec39c2a100aa08ef13fa6849473918c686ff241d7cfe8cbab655b5e9f640a1cd5c7bd4e492000400000000000000000000000000000000000000000000000000000004e40000009fa64f000000000023000000000000009079269211ccc5ec2425ee0f13f6ef5348fd84c4d2edc41ea4b4f3a2ecb23d10337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0976385c1579867feb28929fd6271daa860638548a72b5cda8ab486c2fa40b9030413860306d91471b1bb9cf31f4d57d107466295d1904418bd0f53fb4ce18d43a7fd5fa87b9378ebaaf448d2a4d6857302abfe9e83df40a141180322ae711e9c000000000000000000000000000800000000000000000000000000000004e40000009fa64f00000000000d00000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0b65a077a725f76b5a763f889573455515b7c0dc4bb079a4a86952029882c704d8d29ab2246412c46547ce15e0b581f5e0f130504d711c40f9a74439baf019d2ce37d53f8f9263814d42ae30e8541b1c7afc15afcacf984eeb9a8dba5f60009c4402000000000000800000000000000000000100000000000000000000002e40000009fa64f000000000035000000000000006c5cf67f4818d595c06db93fd7d953dbcab09b01dc9fa6af8500f29c0b072b95337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd095368892968815215f0923f29b2d56f65783693b682145554a877fa51594603e1b832f7402843aa1fca921860aee73831993d970bf64402673ad1a15440ae953adb4002395d63095e8e39d413a64380ec7d191b149daa69b44394120200fc431000000200000000000000000000000000000000000000000000000000004e40000009fa64f00000000000500000000000000ba6dfce86e1ed5ac4afc1651c66497f4e31b5f471090232f00ba7d3a761ea468337d0200000000003b8a1ffd36f06dc7f79f2ee8530899210d04fccd513d987a17d8f29c7385d039347d0200000000004be95d8c41e94645879b69b38898d7125cb21d5ce0b1e428ac9dfd3b7fcffbd0b25d443a8c0074a028397d8e7117a130f229bdc16902d29e0b79147c51dc74e2762796bcb2010a803e5eb62d23cc6f720365735f23ff1e0c4b5959a9150991b0c1f5efe19d85ac41d14098a4417d5a0561ce13842b5566066941cbbc7b9c7250000000000000000000000000000000000000000200000000000000000002bf8b0da0e13574fe3f57498203f86a34b0ade70bcf13856c6975e86b416b7880dafea492d9c6733ae3d56b7ed1adb60692c98bc549d61fcaee7a2c6d9d0c3f3e58cb578c6ae97a08fe59a62a4b7c32688c753faa204cdcf91b8f6c0413e49094d9bda7351e8b20768fe5dc29a17bb60cdd966258ba384f4ffde04d7425e86eb8dd14b39ac9f6631005c4277b5399f3080f851317cdef5ce2cf73f0283395a71b537945156a2846a45b2d9716369b8c8d40742fa92a626cd9a21a746d48a524eb213852eff7c97e22d440caeda621adb507dffef7aacfb734f2b9c9fad5b58d3e92648faf2915a955b5acbc62a3b4c1f85feb9f2e7620ee65f7346259364db7592d98ba62909f39a6f1ab60d9d10de84e23c3701c76275cf3c9316656cb0449c6aa99c1bc240be3542d3d47fdf3e85039e311409d9349dcea46f38b2047ec7cff119ad2f33575db8a7d6c93193b9eb6eef26326eb29fdf90311c8f9652f2c4d7b6c96d774a888eccd1c0a1abf8b99e83535588cede719b68c59b9501ee8952f779ce282ba52406984e13d246115f4278484520e9108f7f4000000000080c3c90100000000cd82c90100000000e3ff816300000000fc010000220387ae02000000000000000000000000000000000000000000000000000000a9d8a06f036c9d02cf8d1ccd7a6d2e3672c74b400122980bde94b4f5874821e41b020000496c6c756d696e61746520446d6f63726174697a65204473747269627574650c0400004b06000089080000c70a0000e60c0000260f000066110000a4130000e415000022180000611a0000961c0000d41e00000921000049230000b9230000f7250000362800006b2a0000a02c0000d52e00000a3100003f33000074350000a9370000de3900001b3c00005a3e00009a400000d8420000164500005647000094490000d24b0000124e0000495000006952000087540000ee57000085590000815c0000c05e0000f56000003363000071650000b1670000e4680000036b0000216d0000576f00005571000075730000b5750000d377000045780000647a0000b07b0000217c0000207e0000578000006c82000098850000b8870000cf880000848a0000f68a0000a18b0000e08d00001f900000d5900000f3920000109500004e970000869a0000bc9c0000689d00009f9f0000dea100001ba4000058a6000096a8000005a9000043ab000081ad0000beaf0000fbb1000038b4000075b60000b4b80000f1ba000064bb0000a2bd00009fbf00001dc200001ac4000058c6000095c80000b2ca0000eecc00002dcf00004ad1000088d30000fcd3000071d40000b0d60000edd800002adb000067dd000064df0000a1e10000dfe300001ee600005be80000cfe800000ceb00004bed0000c8ef000006f2000045f4000082f60000c1f800003ffb00007efd0000fcff00003b0201005a04010097060100b4080100d20a0100100d01008e0f0100cb110100081401004716010084180100c11a0100bf1c0100fe1e01003b21010078230100952501009427010007280100452a0100842c0100c12e01003f3101007d330100ba35010037380100343a0100713c0100af3e0100ec4001000b430100484501008647010083490100f6490100334c0100a64c0100a34e0100e05001001e5301005b5501007a57010097590100965b0100d35d0100f05f01002d6201006c640100aa660100e7680100056b0100436d0100626f010080710100bf730100fc75010039780100b67a0100f37c0100327f01006f8101006c830100aa850100e7870100048a0100b88a0100f58c0100348f010071910100b0930100ef9501002e9801006b9a0100a89c0100e79e010004a1010077a10100f7a3010034a6010031a801006eaa01008bac0100caae010007b1010044b3010061b501009eb70100dcb9010019bc010056be010073c00100b0c20100eec401002bc70100c0c701003dca01007acc010099ce010016d1010054d3010091d501008fd701008cd9010001da010040dc01007dde0100bbe001002ee101006ce3010069e50100a8e7010045e9010083eb0100c1ed0100deef010051f001008ef2010001f3010040f501007ef70100bbf90100f9fb010037fe0100b4000200f2020200300502006d0702006a090200a90b0200e60d02004310020059110200ce11020081120200f612020002f9023b0181c78545d964b8008545d964b8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b7ada863bdd3e62fa72dd5ddf8ee40baa53bce4a43bd7505061dc979879ccce079c9a790faa3e44001937b64c4884560a28b4b46d1881b661a0fdc94c23bc00d8650e243a3c7b0648d96d7a658e388b6563e75d6dfddd7d50464a94ca8a4386a2e89ffe0713dea32d760fb4a5b5a7a168d815ab1f7bdb96228dc91cbf03501d08f64ad079f9b914d3b18d1ccbf9d36bc01b0aa22b1fc63b73832d5c38bba2acd65549830ec99dcc2b6df63619d785251c172ebed1665f42fef02ebf9f4a7b13d926f0f301aede479d9f73194da3e2012a164881fb2915f20b57a4dd7e29f06928367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a005b8fdeb0a45bac5ed0e97479bf31a2f65f32f00be64873f1ad81334935322efa0532d99fb665e41ee33174166ff4eb014a07b644b4ebbd019a13acfe885aee77802f9023a011e85430e2340008545d964b8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfbe7927bf19ef7ecc2ace97306ed1df2cdf384af21e6f40b698ebed41fbe510ee0ff1fd72a9762056337353d4bbd942552debc9c8521657a3376741400909106ec6f6441183ab8c4e16606cf71724f5d7f5c71ba2ea062970a049baff8062e4917dee97fd71544b579d0e99e75a8b69a821f0c85f31f1fe9f53b917a39204b4029ad17b746a0b4b16be231b3a092e26643753bab08a40a56f9604f670ca9b3bffbc12253fa56745c0a5f7e73beaf53bf238359d2c52dc51d299cec72f9d40a9526f0f301aede479d9f73194da3e2012a164881fb2915f20b57a4dd7e29f06928367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a02d1c91069624054ea7df697cbfc8d14913db3461b04e8e9c3e41de233e48a7bca06f4cdc5b2b7d99c2e92df679efe3742640ebab7b8dc49824678061861b7fc84c02f9023a010785363fe1da0085363fe1da008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b6ff1573e21ad3467bd1f27362f68e6422babd4a590b42701b18b11d620cce84bc562eb0ed149621683b98fbf43b7acddfaa6ccfb08561f0d87366ee9deff2db080a6ffb522c66617d386c26196f40502fa1bf8ce24610ac8782984830c7ebdabaa88ab3d24f5b359e6a658d76c9d657e35b81cf9449c5c8991555d169b106a23c53f3986960c7f7938ec0bf7de012fc0420cb69a2eb1f6c17481638874e58b1bfbc12253fa56745c0a5f7e73beaf53bf238359d2c52dc51d299cec72f9d40a9526f0f301aede479d9f73194da3e2012a164881fb2915f20b57a4dd7e29f06928367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0e06c4509494b26d3a160a8b7a06d42b78c72b883594ff3c58a8c6a778f265eb0a046f86be246cf8621815dd11e5f7efb828cace32f52b3cbadb6951517edac704002f9021b0181e68533b03b2c008533b03b2c008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a2aca32f0103edecc196249ac8af079c8d169c2fc57e414302e045bf7fb0e8cc665370e3a2f2ccd5f3a0f3997e69b11f83c8478e55cb1c8d3f95c45a3d637ffa19d0265eb06769abbd208d33de7824b7cdfa8bcb79f5177a48c89ade65675ecee4c4ba7919a4b80aa96144769154f17280574a628b6dab84092ca0dd4bef852cdc228998f83c3b0943263f8ae8c204a222eadf53e2e3e2cedbce275343bca06ada448110f2091d7638e548db2fb79d2a7a3856581d873bbacda0c0517f9390bb1edbf73ba52d75dafc0dba73303b8cc428ca1329021edc3675720c12464219f17ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a09ee5c64a0d41c1074cac3b9451858fa1624a14b4d45e4f94ff008430096a7493a060e9e22a701464be9c76bb6e5a9f9bc9cd103f1173bd36d4f85e600995216d0102f9023c0182047b8530e4f9b40085975704e4008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000baedbbb5e3e1ec3668e12f7298be3488aa603c69fd0b3aa9d4d4263983456327378d63743735425a0732c41a76734bd22d154be4f6bc1d917ea4ba5eff83d690c12f65eecf6cab5396bd75fa085e4d89a90fdfba24dff38753897d97bb79b795ac0c7dc6e84432959d8e88803edc5529acc63827aa479a7b01237ae1fc8084425f64ad079f9b914d3b18d1ccbf9d36bc01b0aa22b1fc63b73832d5c38bba2acd65549830ec99dcc2b6df63619d785251c172ebed1665f42fef02ebf9f4a7b13d926f0f301aede479d9f73194da3e2012a164881fb2915f20b57a4dd7e29f06928367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0a5a1d6a860d997bfde9ed4cd3df6d76b9846556c7d6e3dfe6be1b017bcd5c69aa065bab9e06536a004795f263e64a50c4d4045f9ce723a8e99729baf88148f977d02f9023c0182028c852ecc889a00853a70c40e008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b97bd938fab694f8b4e7171c8cb159d7188e6df42a3ead0434d486c8baedafb81bf2329259b92531d207a1e635195497274af3a3fe412492b6b6a875155ca87429b126a2500b457e371942565bac84f20ec3930bb5f39892c66eb7dbe6b72ca1170cf70f58796ea5b9a880640d898874fdad0dadb6402f8086ed3c671fcef825aa24241628467a8526d652b04a046db8e7a86bb28fcfe2b13054ecf29a2cf302f4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0e75fc55033bea663e4393052bfee398612fe9ae4c3d5a7ea462964b098616e71a0178610324407069afa0840b6133516423bb7fbeee9f663b7703e0e54b33e7de702f9023a0124852e90edd000852e90edd0008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5e444f6781da644a6170beb65b6cbe1e0cf95d25dbdb7b9b09d13933e383977604b5ece411789638b9b6f00604c2b89c3814d2e8cd5a79f36ac57f3bbc45bc2b1b89aa1737388b6f6f2be9a8a2ef9a59f919ffb39681ac95d9723fbf343e463e386a828a1c3c46306e25eb68b1e9af5159e9d9a98a1ecceaa9782b0247d4ed5ca9fc4c9d4861258f42a4bedb4d17e6a531f66b6a3818b42c08d1a89d1b58222002dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a05c59bf2ddefe2fe992df40782c9f10d884c2539be8225f48406b3549c083f384a03d4bd20db6f4f64d86eda7aaa386d40b970db4e06da331352582ce766a17380302f9023c01820202852bc5ac5800854d8858c2008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc45abc0b4e641ed0fb2798970e3ed8674642538adf5faee6f1f522761974cc278f3c6e8f6f1cd8cb0c13b67c961ba501b3ecc6fe6206ea1ddfa13581f7543650fb68cd63979a654ff6ac4eb4e711b5a56d6174888279c1c76ea66a60fd47e6ea56a4522b0b271ed881d57ecff1485715e33b013c664476eea266dacfc35aa5b976ae7090546b9b900712031e17f61e88b2e890601fca5cf23e4f2cca9a8bff62a4a8f898ead64765bb8edb3ec5b20f1b11b97dda9335cffd26f8bdcb63b46bb50a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0d62caa5ab1919e633b9db7885b52bda32c0359f81669f24895087fcae7ec2119a05a77188895c5194db8354fd06d0f759912cf991985920bd43a4456eafdb3a23102f9023a0122851ca35f0e00851ca35f0e0083061a8094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bb241325ef06772e471b8abfe5a46e7cd632b6a0a7c33971b181d33c7293a51b4db89038ed2e4be4ff0a7bd5b26a8d6b9442e2f0cea03340a59ab725fda6a0002887ff3b26b70081e49de2b25e88b46087ecc29920eca9d892993071629981a113c0305a9a15f0a9def183e235512e7809d095ba5b9ba9f206885ca77a34e9a139c145f1a961634e373c2ac40dd183321dd17dbcb7435a0f7ba5c37f58120731c368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0231c36930ea393e43efab0005a68e6d2120f73d9803aa18aa0dc33d058ea9362a04e6cfa85c0e82b61582871e01f7723c323dcff7e9b5a1f746636350ad7604cac02f9023b0181c0851a13b86000851a13b860008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0fd6e7377f8b3df6becf6ca02309488dbb329a570e5bbcf4f1e226588d2bb0f4cc9fe9f96910c267cc1ecdc02a2964570c66ff0eb25ae87f31a45dfc2760b04189a501e10f41998613944814f1240003f513ac41249daf812dc19c5726bc637d3725ffdc3de47ceefe163f13df54cfa7b398c9171b53598ebf0596009bfb0cfceed0bdf6114248d7bb901fb3d726a53ea3ed244bf39c5029b03d6828cc96f37ccba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a04dc20e678667e8ae41df71ad27e7528f2b9efe802b95c2719c1777d60f12b4d4a0738fd12f4f484a1deb7e4b3a44f58563ae7b2d38fcd04022993f7e02e302b45af902326e85178411b2008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc8150a14b21d624f4923bf8170388b57e122db7fa8cac145381cb3f7a5705a543f428acb1ea72ec37418e32016759e0a2c877a4df6369ed9cf096f55e064b0ccb8ea5a39b4b2625778970f411c384de0c2a9c2f7a9d7b829b2823bb140cc0d723ce6d75bdc1a4523d71af2fcf4f4fd5247cf3f5db3bee2b52e4852739aaf94c80c7c4e431d837e63c6d425b93e9d17e49ec9524d5772718c2440a49047d8b5a959534ee891dc7b31c689f26da2d42c9d5169363891bad63efa35390a39433d4a1e43257fca161d36bc6af6f7c08e0a55f2644a631163b9c16f0d890de7f223cb367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a0e703e54c50639e6873e66d6754370d943e0ea05c4c44ae9dee929b4ad2f48bdea033d3c22f2706d776c0266a0b6775cc70301bd3f37ab948516838bb58a6e625d202f9023a015385174876e80085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b70e7ce9f0e228f6dd69284870852c38805c822f3be3fbb741fc3c28b04b594e28e36ca6f30237257d8f944ff0b0afa7202334f57cf154c0bbb126176623a47042669c73cc0792fa6ad5e7778485c9b746c7a62c21a018425e02ffe9f75c017b2af135b77dee36d2d1080cc1fbb2750151cb9c68c226f7a142efbd04241071b55a443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0b91a9496db45457f1086dcde624ca1dc1feab1184df31c8e8514563abae80e2aa055737b76836f5f5ae81acc67d029373e87e9efc8b5f15054e9339dcd51fe320ff902327285174876e80083084e4894998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b7408df52cd9413b226445b90f49ccf05016c5571025d3d4cafc32dbe1bfdaf2b3e5847e739d3da6a6ffc7c373ed672b312d1222dff9221ff99a8fca51045ad04cde5d7cf8d0f97a545193629fff084cff06a29796df3366067e7f587a5d88920731f92acec4e3eaf71375d589208e38f700b752a8295ff4a6c47f6a8120d0f0ed57d1c816b23d9e33e8a635ab9df8a95f1c394a9e44b4dad0095db5096d1639b2ac86ebd96f96f7eab60daea44eba5a2a551436123ec6af1858d6c9c9e421d98b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a07e1ef14a9f56969ed818b8f80a5fa9cc18f9ac1a6a80d0682f7b2bf268baca18a04c9d85f30a2f59740a3640df0d9536190469c3f06fdeaff81be18d044162f21a02f9023c0182014485174876e80085174876e8008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bce03ecb0c99e7cf1997e620edc307ccd9a523c9d02ae27107d6e6c63a89b21bc08e85c5f4a1f99f05a6cedbcd0e52acf2e55cffe432c02200d2c26b6bccec1cee52180099a30a501cfe898da153068e42ea4c76d92c1cbee8dfa699c20f1a6db0db86768464f964263b4d04fdcb22dc7fa92617721a737a16f279f245402f172ebf7682281fdde94de5d590f2a34b1a7e7b5c33e28eb1990fdd48d066f3818d1276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a03f7a48121bf117170e1993a4c64d308cb099d7196b1d3783f5f7844b4cc052e9a055e9f9fbe0d434d1a9ed64064cf1e742dbd40f061f890c10a5b3fd5dbdbd987bf86e82048485174876e8008252089456d8c99fd3ef0405a02f95191573778e10420b158872d03b8962fa1025801ba082cfbc2893da534174e4e4716c1b495a7a1d327415deef7f6c53dca1c7a57cc0a03113b13ad036147f3ebc2fb499b60a2c35c3c2d6074f93170344925c3bcb84bb02f9023a011785174876e80085174876e8008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b442a8423291fb8880d8266aab50d0a5fbd31d2b1aa3be4e965dc92c1e7bd2126945fe5c7fd722f596b09dde8367bba29f9956716e0d6a0179f918051a09319b7fc84c1e044f04daf8758a708abda0cebffaa9cef1a4ffd990a9db30e8bfde31e0e3f9ff8fe75329deb87f104175beb3ed4b66bc012c401d52bb97b16679f9872b5519ca5ce8fdb54347e86127447cd1be7aed6e72f53e763ea703a78c9fa1df6ad846c4b85e9b0b88abfec7daf9b81d6d43d2fddd1fb323e1d8ab86d248a15a70a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a09f6406bce0ff816f3734d86c08fac482176a9830f931a913a2f5b88d50f4bc47a0680824be611207d186fa280dca9437b5e7fc45c14f3671db7e23b32ac4c0268602f9023b0181e585174876e80085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1be40e85049ce6eeaa87dc62cb979a76d483e08b2b800bdec008f244f7757b32bb9ba9978239582f12105fe667e8dc777e2ebb48f6bf93fe7dca76e8dd3a55e167e03b27d10aa470988c22cd968e1bd4a9fcad75afb7cc48d589be0949b2b3b7116a56e0a9b1719b7de2653bcde0fa2f2b9a29368b85d3c8a4cb76b1f067cc2ae638493b504c4955b832bc4cd7aa2f05208f6a8480eddb8dad4d67b6bced37a8ad846c4b85e9b0b88abfec7daf9b81d6d43d2fddd1fb323e1d8ab86d248a15a70a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a07311513ab68655f76bf4e47db59c6acadae2b891e0dff05ddaf38c5ff6181412a04284da41cce0b7a93deff5729f8cb63d6117067b21a65bae853213deb301a811f902323185174876e80083084ef894998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be6de229f32267f5040e54c266fc3eaa65b70e7bef1358147ec1c4d39b0a3150b4d801216662b5412357c2b6dcf8132de69ba5a1390db4432d91010d508980f1b3faace2870383cc411a32d81d273b70b1234741e5d34ccb587aa76a3f5ca0996be4d8eb489952ada4af205161cdc6be7994f2a8d5b598428e55f356485482cdbde7773b4b8f3d83ccef36bf1845e47781d828cd6d607e5b5b290c0e0947c4644eae5aca00e946d9dca1871117bf328bd20f5f7bc1f5a7e9e3e497e42d3ace8080f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a05024dcab343e5385d02994c4f869f27962b5516d068da17c1ceeac7216884cbda02594c6bef248ea3a5f63890c52c5bb52911bae6f31d4996fd9fba1983ff6509cf9023221851176592e008302957894998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be3e84dbdf82bd1725462380067ae4b4a5b1e2a4da937f5e0be85d39e6ce3ceaaa3b785082f1158d9ff4cd75b69aa8596052dc62f1ff25a11c69d90120f5499820f30eefe6fed58cf9ccfac0a88d7496af93edf3ba56aa775cafd5c5b961e58e18a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a0bb8d2dda4f0e3be952a1e0e5a0f618ff4e28c18b7bc51f78cbb28f859fdd7d81a03ed0601e3df1a63af7809a6dcee18ff48545fdc9ebad0e7a59632885b33e84eaf902321e851176592e008302957b94998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1e91b3db5a2d320e40e7c1ae9babf238a71f67c4045e0f054b0f7316b862524158f35eb988ee5dd493f271bba36795a7f949f378449e339e8041b73b2b09b9bc356173b6a8f7b32c0bb689ecbddbaf35f49219267426a92f825b963f578624cb8a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a08443fdec0a50c7d45503c183e70e5d9c03f570678ccaa484661f75b9af47a998a026dad5e34a3d0178dd343ed866e0851772c122d3453d88dc925854ff94e99ac5f9023225851176592e008302959394998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b816d79c386a9b7094d75866d7e30603afdc2e56d9fa01c9c51fb93e25ffac99f3edba76b7e8d78e371a8e11c61dd0e1f2d4a13ce175af5fb334ffa2e07dbfef00f30eefe6fed58cf9ccfac0a88d7496af93edf3ba56aa775cafd5c5b961e58e18a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a08dcf3cbd4002a90fa556942d45cbd34cb3db8d8c1a820e5cfb4427b64500aee4a033008d28e8ddea6b033423497901559447f15ace9194dde2dd01fa679726ae9ff9023223851176592e008302956f94998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bda7fb60b2dc781069eb2266115f758e8f3738858d63fbbb321c964ac10795b0c58f35eb988ee5dd493f271bba36795a7f949f378449e339e8041b73b2b09b9bc356173b6a8f7b32c0bb689ecbddbaf35f49219267426a92f825b963f578624cb8a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a01a3a5cc3852a047a4bafb64f8a668623a3dae3ef358747ce2acabf6c53a20bf7a0649bfc0fe6dc1863c050380b8950a9e3bd16f6d96cdbd6c509d9fcfe52626caff9023237851176592e008302956894998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b09c68584b63fe0c2b09973e9318570e835f4251485cd35275c950e3f6126cb382fb47cfa56a91a5902b356788509ef27402a02c808bf819fd5e330e7dbdf0dcb18a22af3358533beb27e666daa1e23ad2f463991e85100e2e5a9cf83c1f4092c001eec81d4e5eb47f135bba800c40b6097f54d508e5bee8ee40902f5214c2f9d4d0c10637c2bd2e82fa5c75b6df67103185e90efa5109adda52320fc643f5bbb340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a0441414faee5f59d61df4a8d41d79ad80eef81c2f8431a24c87b9bac17fc0b6e1a06efec9286389c2f08b81fa3bc96d13df4aad4a63472d9db6e312024c9461cc7cf9023227851176592e008302956c94998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfb04bc90f99e584494713e04d885770e7cacef7fd3000299f67d5eb7b31223d4a3b785082f1158d9ff4cd75b69aa8596052dc62f1ff25a11c69d90120f5499820f30eefe6fed58cf9ccfac0a88d7496af93edf3ba56aa775cafd5c5b961e58e18a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a03edf1ec5515d24cd6ae48fb7cf7bf71bb8471155f3d1104792c37b05e6edd8aca009013b9615e9fb3fce68811d58ff915d6cbecb8e1516b74742c4051c252c1847f9023228851176592e008302958794998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bb9a8676205e95ae90a5724b2caabc4efd961ddc62e3487920c14c9411d61b2283edba76b7e8d78e371a8e11c61dd0e1f2d4a13ce175af5fb334ffa2e07dbfef00f30eefe6fed58cf9ccfac0a88d7496af93edf3ba56aa775cafd5c5b961e58e18a5eada59b99aa155ddd1664fcb4bd275afe83d92a5d278298b81c9e7bf5979d361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a0522d309cae353a7be299d363c1266eaf01c0af8fccea6bc077df8c0290e35f26a02f9b656d300083aee207fa4fe5a106e6ec8a9aee4461384b457b394380d335f002f90239017f850df84758008512a05f20008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b731757965ed46fc36d68054a97c619a03f0a00720cc10a03dc3c52a38b23330ee48d2187ba0d7673db0b3ae258af1544bc57799f53c7e00d3d4307deb0f28b599012c5df556034a8971588cb55eed2a1ee7a2f7e37e6692714f6d1e69b393893666dce4fcf2dce3d119683aadeb1ea17923ba5b074391c1491f124cadfe4b30ca443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc0019fc9728b1c20ba7feebe866fe46d8cf1a57395dee82124f47857c4a9c11ef0f0a04d5f1dad7f137b2e53a75e79534ca65159687e9d656b697b9e11137a8982238802f9023b018196850df84758008545d964b8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b7bcb053dc9008f8eddc1e917251784f38889b1906ad94f22329bc4d8f823f76ae942adee59f8324d03ee7151ca9f97b30e8035c503f7b1c343e5c60cf121a7d3b74ffd93a9b87839b9f21bba96a3420c2c0b969d223e76c6945b874ed152f4f7001eec81d4e5eb47f135bba800c40b6097f54d508e5bee8ee40902f5214c2f9d4d0c10637c2bd2e82fa5c75b6df67103185e90efa5109adda52320fc643f5bbb340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a04b8bf40783f7a6b0fe88ad16ae1fb5d9cbce6bcb18d9c83757111601e2380e54a048a3a37188500b99394211f56f9462b6f8fa0f275fc3b1709a21ebe972cfbde602f9023c018201b6850ba43b740085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b264544abc566124db9e88c858ea8a166518e8ec26a482178cb4d485215c75a8f9bd0fb10f44d92b01a0f120d3d43c06cadf844c9b3cfc3f76054bd1363bb7b921a5188d028e80691e16958566ce420fa9e4acd11bab6b0c075dce32253f03ddbff0586c654c72f961025bfaa62fef509077af2d646ccb51f70f8d002a6f80bc74f248a910747a8e9eb820a3e41030a74a5c855ecf46f86dc3c7a59e806e65db57195d111bb22b6eae6d7433f44fd3ea2e2c4e9695bf742d66309e25cb78c1de5c71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a032239dd7ae7d672dbee9ef4591f5d2a083ae4cc1baa2aa980deb514884df7a81a06055b133c2a7e3b70e0c8ca8e4e2149137e3929e57f1f6a635dd6a82343cfbd602f9023a0116850ba43b74008545d964b8008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b80e84cbfc5c079943ee19b564c7cb1c0904212a33adced3bf09f5ecbfdbc0c5d28528082db6586b8e30032025ff7d517fe8b486b905a158faf32a3886b3ca7cd296055fc571146d9ddf3c739821c98b71536584de28b5c9fefff32e8937edb6cd2f8e9cb25a0c039f4213639428ec514dccfd49c1ff6caca559f9af4f948e89ee04a0a4e50daa4141cbfb0658f625b72ddfbc600881b4257e454b0cdbfcd45a402dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a01c795ee7dd575cf201c7ca3e32b9cff09248dc257aa168a1368900ed82e02ecfa02e50bb03de1a07da9abc191a6f0cf8c8f0174ee0a8a3773c173c39ef1b678d5a02f9023a0154850ba43b74008512a05f20008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b6b8be841ea377404c041938ad8fb2d801de46ed30c4247027093780e68b8701bcc9fe9f96910c267cc1ecdc02a2964570c66ff0eb25ae87f31a45dfc2760b04189a501e10f41998613944814f1240003f513ac41249daf812dc19c5726bc637d3725ffdc3de47ceefe163f13df54cfa7b398c9171b53598ebf0596009bfb0cfceed0bdf6114248d7bb901fb3d726a53ea3ed244bf39c5029b03d6828cc96f37ccba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a04c7d8db89c5422dc65adb097c8f5ab6c011f82d28987543d562543edfd945b42a00503fcb3cbf24d27f8794a7c8401c1eaedbb04205db278e76d0f120312bfa3d402f9023c0182026e850ba43b7400850df84758008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bd62826cdb86f5058750f8d583c145dbfd97729b6e142350b41589ba5ce9e7dd95d3e9b05910872a0b4153757a71464074df8cc0e2e758d8a1907dbec49477a13b2f49b7711b17e3585f9a0c620248969cc357d908a08801d1a1b217dc02c7d62d25255a0452b6ed48bec4cc84769d72a360366c827346026346457ab537629ecc082e8d3b33f23ead586b0c03120973438415a1548113ccdba2873096f347582a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0d78ec7fc21f54daf83cac0ee10369958e6e7e9b2c5e95616409bdc7d759ead0fa001585b308d574104aa68976981e34b0a1bbe43003b6c54a82f166dd1aab4122e02f9023a012f8509502f900085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b8176349314eea35c3bfbb03e9ef01ecc9713f46cd379a745dc747218d6a053f5926fe3503555945ef5d6e47e55b7b2a636ae7694d84611b4ab80c22fe3cc7b753e47e067aa2ac79bbc2890e81edb81b943b1e7594f3161a7576757af6b5454e25c21c7bf72845ec58b465183e9bc1d605c870975b3347b8d75184d1ef2ce99053bb3c4fe085120402b6902790224fd459e47fa185d821e298e45366306a5b3bc4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0bbc723489e542abd9206adcb0ff6b6b9efe3f10fc7b118600d07b3220781934ea0117ab64a01a74bd53e6bb86989bbb6f945b71b1edaf93159cd3a3c866a59e08702f9023a0132850ba43b7400850ba43b74008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1871e752854c0a22563c3a15ef68225d2b5290fd0bc4fd2947953a819bb357325e0fcc51c50d3377baa26bd217d5a64409351d16f2a4da5684ec1b0293466ebd64da476bb44d716ad470b26b162a0e8bd5712c18b4252332643b04aeaecf01b95fdf04e0e639d8cd931bced7b63973c735f3660262ab7b674b3670c146bafc32a9fc4c9d4861258f42a4bedb4d17e6a531f66b6a3818b42c08d1a89d1b58222002dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0d9eaa4b3e05116b039bc7987e3fc8446a7f95f9abc74cfe1a945dd16abba7ee4a040e73630cbeadc2da1198c67277588b0992b8bbf85e95f53bd469cf0fdec59c302f9023c018201448507aef40a008519d81d96008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b52814b7ca8bcbaa05eb480332276777b395e28aaac9d712fef9aea9df0667fd27e0d1034c1eaae3eecda57a5551daccbda1f253b5430e896289f29620cd2a047ac5945151e9f5337e7353af3325bba2817681b87786e085a549abcd405c8412770ac02216ffa06b83b127b6c3cc19ac4e4e79e6584305fd3b1a3c966de678a1db5519ca5ce8fdb54347e86127447cd1be7aed6e72f53e763ea703a78c9fa1df6ad846c4b85e9b0b88abfec7daf9b81d6d43d2fddd1fb323e1d8ab86d248a15a70a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a054a17f03bb5cbbb641f678f0057881014147c73a837768b7ede3a125532bdf1da0236c667979c3e131a6ac9cf99218bb668a212f0e1587e4f3f1bb2c422059faa9f90234820255850a3e9ab80083084e2094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b4a07b4959191a1d9b9f1b9d7b2d81f27ac5d0bbf295a0fa6e711ade9e888d169ab0c85f7941c383d79bc5fa6e0d23df85a39240727552803d176690519414b2bafe9d4cf2cc3ba077075f652873df1968adaedf5fe80d9786f0413fa516854f370cf70f58796ea5b9a880640d898874fdad0dadb6402f8086ed3c671fcef825aa24241628467a8526d652b04a046db8e7a86bb28fcfe2b13054ecf29a2cf302f4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a00b495ab6333f98f467599b047e4448fb952fdfa1d1044d7ac66b97a0d5332c2ea050054dfbe471cd95278b0d4cf9c7afdcb9be216ed79413df83907cae64b8e4f302f9021c0182061e8507558bdb008513580673048308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a42f26e2c7f40cfcd6d1d3e0cab9b9bbf98a5d4e47cb114110b770c3c854f59e4f687315e5f218f1b48986a3e80f5b10b86079ef914d0673c9f98f19fe4c206898951400eaf551dda3ba916dccba7742f8634a39335b7db03a5c8988508c33c6ca4b09b3f6f27ceb5ccd8b90b73ce2ef6199907132ee96c2e518ff9a3f14ff0e4e873e09ef631ec01f39cef9854e7433f9774f38b3edcf74463e36a28b36d6dc4cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a009dd9e55f0c99d34b5fe21297f7dc6b02a5629aebda2314c8bb9624942b09cf8a054c8a8af6878eb36605944cb50145c9c1009ee19ba27b01432768098845094c802f9021a017a8506fc23ac00850ba43b74008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000aa6939c52c86a280bd6554c4bdd9c01018a686b2338ea6c69650e86ecd39711e068295d691f18dd00307230424c0ce98759eee2b172613fb27b76a1061752c4ece8e63c8a5b994d2a2f50bace168e2305472e12c8d87fbac40a913f00a235b4825d88d7fd2b74ccea50449ad3d5dd2baa127ad9c2a856eb50e33387d689fc2d4978f72220d7b7561b7edac075fc4b95c418f9cbba0c5cbfb7332333436cd3a6866c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0f408fca6e722261ccb898055570cfe208079371d88973a6673f6f7912d2081afa052286c2908e634d2c2ccda2a3bc3e0e174a373a80f107a657c57ecfb20edb88902f90363018278b0808502ae870323830f42409400000000500e2fece27a7600435d0c48d64e0c00852331b855609b98a469d741ffe634154cd2f94dff4b8be907940340a4267f000000f902d7f85994c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a0245d32d2da911ebe35956892a50b3686a5bbeb9c3bc6e8bd766cbdeada720e9ea072f864fb980cc162d5448982ebea90aa29973299e5f61bebdc7eff18a3287e68f8dd94a469d741ffe634154cd2f94dff4b8be907940340f8c6a0000000000000000000000000000000000000000000000000000000000000000aa0000000000000000000000000000000000000000000000000000000000000000ca00000000000000000000000000000000000000000000000000000000000000008a00000000000000000000000000000000000000000000000000000000000000006a00000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000009f9018394ed40834a13129509a89be39a9be9c0e96a0ddd71f9016ba02ab79181f575ccb084db8099a1242a3810b6ef62d770127264e3fa8299a8b25aa022ff43878c220263a2daa80473e974df3291a72e1d33f4cf35c3c71c3ca7128ba022ff43878c220263a2daa80473e974df3291a72e1d33f4cf35c3c71c3ca712b3a03df697e29dcbc9356a8d5ef9669e344918fc2193fda97fb5e7316bd981ab393aa02ab79181f575ccb084db8099a1242a3810b6ef62d770127264e3fa8299a861dba02ab79181f575ccb084db8099a1242a3810b6ef62d770127264e3fa8299a8b259a00000000000000000000000000000000000000000000000000000000000000000a0124e578617cded055fba15fa5032dc1dbf226beaa65fec9860c9705f4b2b1ecca022ff43878c220263a2daa80473e974df3291a72e1d33f4cf35c3c71c3ca712b4a0000000000000000000000000000000000000000000000000000000000000000ba00000000000000000000000000000000000000000000000000000000000000006d69403e2b9a4a88c27c996bae7a2f1ff62faec3ce795c001a0f8be51833e401f9dabf20a4270e652c50a02c52d7a615ea86ef7ada47cb43225a025f2cdbfae4d0671f2f29fea5adf361f6778582980d4c4c95de603bca0e546c302f9019301258506c088e2008506c088e2008304b7ab9425553828f22bdd19a20e4f12f052903cb474a33580b9012438ed173900000000000000000000000000000000000000000000000000000000b2d05e0000000000000000000000000000000000000000000000000981cb2bea70c71c8e00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000fb088f3ce393b4e1cd98a1a0c9aef18bb0a5cf4100000000000000000000000000000000000000000000000000000000638206af0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000ed40834a13129509a89be39a9be9c0e96a0ddd71c001a0baf3a0cb192f158f8962e2acceeda750edeb4f51f8d5649fdcdfe7077e4c4f94a04859ba8de92e6216a2c5651bdf7099fbe7475ed9d39860d0eaf2083d10c30eea02f902f8018278b185141f29ef378516cdb0f25a83030c589400000000500e2fece27a7600435d0c48d64e0c008525c5500000af9da469d741ffe634154cd2f94dff4b8be907940340ed40834a13129509a89be39a9be9c0e96a0ddd71a4267e000000f90253d69403e2b9a4a88c27c996bae7a2f1ff62faec3ce795c0f89b94a469d741ffe634154cd2f94dff4b8be907940340f884a0000000000000000000000000000000000000000000000000000000000000000ca00000000000000000000000000000000000000000000000000000000000000008a00000000000000000000000000000000000000000000000000000000000000006a00000000000000000000000000000000000000000000000000000000000000007f85994c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a0245d32d2da911ebe35956892a50b3686a5bbeb9c3bc6e8bd766cbdeada720e9ea072f864fb980cc162d5448982ebea90aa29973299e5f61bebdc7eff18a3287e68f9014194ed40834a13129509a89be39a9be9c0e96a0ddd71f90129a0000000000000000000000000000000000000000000000000000000000000000ba00000000000000000000000000000000000000000000000000000000000000006a022ff43878c220263a2daa80473e974df3291a72e1d33f4cf35c3c71c3ca7128ba022ff43878c220263a2daa80473e974df3291a72e1d33f4cf35c3c71c3ca712b4a03df697e29dcbc9356a8d5ef9669e344918fc2193fda97fb5e7316bd981ab393aa02ab79181f575ccb084db8099a1242a3810b6ef62d770127264e3fa8299a8b25aa0124e578617cded055fba15fa5032dc1dbf226beaa65fec9860c9705f4b2b1ecca00000000000000000000000000000000000000000000000000000000000000000a02ab79181f575ccb084db8099a1242a3810b6ef62d770127264e3fa8299a861db01a0788ddd7a8f3630b9d657366222942980cbed3178b411ab43b42ba2874486c9a7a078de7befe356d9cec73e10e645e3f8c121ff74588d56789b732317d1eb006bca02f9023b01819a8505d21dba00850ba43b74008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bdb089c4a656f5e2cc441099febcbdaa0adbf04c1791cea41884f251c26adb64a4285cc45db4703ef6943852b9b73a249def3d0bf34824ea9f06b6340bf7aaa23d9a756c2f06b203e5ad8a1892b512b5b9bc840f76bf41177cad761a965b188f9eebc50061c39d54d33c09952f6cee90bc1270e81539ca4fd7d58f3dc006d547b72b23902eff9f9f3eff1f8031299f27d86f44118fa4a7341e7549c672dd27d5870f42c97662bb0ff7f0ecd7778cc5f0e1d56b8231ca6676e0d6e65f3c9525fbf66c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a07691df199e0dbfc09370bd5d82e8241703def1f94b2203bb6139b1fa568d7942a03a1bd708e1912d1840234db943ab293787d70472241f8d25cffe83ce1487aa79f902322e850861c4680083084fc094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b7b3ec7d8dbf6360e35253d0a2ab71b23d616fff30d1e13eee340eff21aa6b55c6acb89d205f74564d979747596f4f7bcc2a43ab54eb107cc5df90591644fa4f26f1b1c964c50f29a2310a3864059850ae92f1344be7b4f6d7c62e882a29d822e6d4d9af745f27cf211a19fa48b97871fb09b98ecdc5bb9c1f417a44549b5aa973888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a0a59c7fb968ce3b9daf3dc5e0603c1072f2119727996d133ed83781bd52cc0d30a042c644a19ca2d62d98d9255c8e631b5b2963343c4d8b34c8785c809ba46974e902f9023a01758504a817c800850826299e008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b7855d214a101f80ee54d7056597b37e2b014a03debda3886f4698d394f3552c3bc2ba24e3723731d7db0cf4ce9667cf68d10940be9af37c798d98b96cc861cac54927259b6daad949ba933e3899d4752e9e6566e49b1e59e606ec64ef07f5d115c65052bff0e03c51fb48f6717821b42be97e6c2633cb2c1ae2597f7c5386db89a2b74895c93e64a7dae5947f6115b4caeda7b2c7bc103692f628e1fd1986b3d6efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a08b68eeb9cc88d3397c5effa1da10b89de247e470b58d3aa5d47bd40ddbfd683ca02ada6bcced6fa0de8445a4cd8c79e1ebe094179656abeabdd6ccf27482752a9002f9023a01518504a817c8008509502f90008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000baef8297e0d30d40d87353484af0c205d1fc9f09cfe216985fbbeb22750cd5575a427e76751a3c54bb1abc4f31f63e5f3d98367d17e683e6ad61e879bf793209f8aba446591ae08855364c9d460ceafdd8d340fe95f8a6ff333be53b1201272cbbe4d8eb489952ada4af205161cdc6be7994f2a8d5b598428e55f356485482cdbde7773b4b8f3d83ccef36bf1845e47781d828cd6d607e5b5b290c0e0947c4644eae5aca00e946d9dca1871117bf328bd20f5f7bc1f5a7e9e3e497e42d3ace8080f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0e597f2507eb32d615709a8096729f8e93d76f3b2e282b024665ac3c8849aef6ea022a873597a912acb0dc4c3c1f4525210c2d7f9fcdc6c788e5e1a2d9e505e5e8002f9023c0182025b8504a817c8008545d964b8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b50bea470a810b9a44e8edd5e07855a9e7bb32f001eba392b9d97afc8908a145366e2de2f555044ab55217e36030826d5fec0bd4f57f4d2546db7d1c06aaa5aaa4e958362f8f11d2af8a2e7ab4e350bfb1cc304d930767af5e1f72a64d8d709887b8e382938307571430d6f237192da325989241fa10faa873d51224408ad85cf3888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a07d73dddd32ecf6466811400c5aa7030d62c0fcfe2357f5978d886eb186bdc002a040dadd94d33b5c9525dba7d771632733fe8268dd50ee0f7a53e81147a13afb97f901308208cd850700bf1b088303e418941111111254eeb25477b68fb85ed929f73a96058280b8c80502b1c5000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000000000000000000000000000021f92658262f80fd0000000000000000000000000000000000000000000000098b7d7ea849276a6c0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000100000000000000003b6d0340a469d741ffe634154cd2f94dff4b8be907940340e26b997725a0bf3a8076a4cd912dc3f191159d32edc47f0d3f9c36af302326079457e4b91067a030e86082aa7281f4856f03620f1407abcd6a712d6be7be0b5a5df7c01a92ec0302f9021b0181b085037e11d600852e90edd0008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a6bdffb558ab93d40337f1f86b6a78c339ee8e701a993e3423b6895f45cc2bedecfa6f8e3cc6e275bccb95a387c3bc7a65cbd9ba18489786dc692efab9a77cd7e71f8352e1629f66a21631954386c7f1ded22b3c71a6e48d806dc96a0517fffd01c6b0ee43294536b92a2737ed589fec94d21534859ac21eb9e279ec06504f50778f72220d7b7561b7edac075fc4b95c418f9cbba0c5cbfb7332333436cd3a6866c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0d070296f8b13ebc10af77e8699518d46201d01876a40d0ddbd4beeb92253df0ca03754c9d50429dfbe617cab7b78291c16929b3360d352ae878d4da9d00ff998c302f9021a012485037e11d6008522ecb25c008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a25f3433e6eb797b90010bd66d802c9db7193bdfbca289e7f932b686a28319b95eb346740d7597ed6f0aca39af7747a82ccd9658984165d043832a183d1fc56fe6a045915cb59702356344e05a76d75eba1bcf7889618de851c8fe5bcf4193012e51d4f4c182e17e6c50e69bf305527680e005a6abe3b367aad091acda0238374ba63c0fdcdf7e22d3e03dd751a495249339c2dababf333a1bc91f55cf6fe5c84f2b47b4054697932573645ed95fecb78e83ad418cbe402f3f956712ce9a9e4f7129106175d04ef2a2bc0df351e0cb830acfd4971f7d420775d642efed0aca938ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a06371597b3055660334ce263ab9891ab48ccd3c4c48b3f6d2d625b184592f198ea04fc4de8ab5d0da976d509082230bddbadf7f25d4d5e7bf9903c78cc4a71ddf89f9023381fc8505d21dba0083084f7094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ba7a541c0eca0980a2c25b4059c582fc738acd2f479b9494094fefce702d02c0bd1eaa743b35b207e29f0fbcf3e4aaa4203c32064b790ee1f0dfc99bde414980cc0f2dba35615ac2592dc1d6b8fb26e39b7246c16d96ad15a12809678a0e37ed76d4d9af745f27cf211a19fa48b97871fb09b98ecdc5bb9c1f417a44549b5aa973888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a06ddea7e5dda69089993cd62547d6ffa126fc6a397614a14e407165e81f2e7c64a03c6cc4943a2e4273664fe7d8e75eabd6ca2dbd6ef359b5fa7960aa7a838b70c002f901fa01778504a817c8008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009ee287a183ca622f382bf36dfdcc878ac46aa29c1238d3b9aa630313df095e93f599751c430d0af9c83aaa893ef9d7e869ca37017b78645ba4746547866d2a36b4d074d7c288e87c7c83ac94218e37c893dfb593f959781d38ab7da5c144a1b24379c68415c9beac8697de0b6e09ecdeb979ed4dd950db7cea20f0ec251cc3c7fed220f04e94ab022c296ae9584d24eb79a4ba3b814090b8de476fac2b8526ce49e0e6d9f8276753500114d8a5af3b36b875e5bc835dfa3671f71155bf7574694f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0f5b49d95fac7dd470c39506397158b594cad98bfbbe123d8a728f67f71373db3a0488f5f9086d999ec87c7a6449588d4dcf6b3b0438408037d34ba4172c14ef87c02f9021c0182048d8502540be40085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a9562b24de5bfeac371352355f1b24decb9b37b60e6bb3a046028f86ee3efbd1a75447b2be82ae84ce6b0094e2e4f6603a46fcd561d8ad08e2b2fe2e8be0f5f88a96e11e066144d909529bc0bea1c8695d0e320b4112ff498ab9c2b5d6b7bfb5b57ac235ea98aaf9d6bbb661759f4850633cddea79fdb818c183e27aff75856c4e68e16c59423e104367604622139af9cf090e61899f48cb546f5cb2d14d6d55a6036917a0c38e9bc8a8e5fae1bc68edda5ea467d98ad83bed995feed0ecfd3e1129106175d04ef2a2bc0df351e0cb830acfd4971f7d420775d642efed0aca938ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a08e24d843c05ea20580c18281480d2b36ba81cf9ab52ed54401413ca47d1a4a28a0498e98b5d2ed154ed8b31905e044887a09ac1085850aa3800b38302d4f3b597902f9023c0182016b8502540be40085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc2d133850b3825def8ede3bf0cda4590448bf2df5faf5118f9ceb9200594f9c448bb34023acc41b666d1dd5a39ab7e056199d0b66d6cf2e493b184451b2b68c5be1bbcf99123ded8e17d7134e061d3acff95ff2448f48fd5ace24e0fb165bf34f77ba808002882f5ad3e4a3d593212291dc1adb4a2c2883826fa2eab7487e13e9a2b74895c93e64a7dae5947f6115b4caeda7b2c7bc103692f628e1fd1986b3d6efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a052733887f1dcb848a42de5c674c4830b8eb8ca354e3d046f67d31515fda8a66ca0083e22a0ef2651e140cec5727b5ffad9531cf40798fc8bd4f9a859b5b84f09f902f9021a01028502363e7f008505584dc0fc8308ed2894998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000aa3c773be8bc1382183378836a223aab62bb0d6889d8b6dc25adc67868a433a11d37ffcc260fd63ceb43d101e45663ecd0e42b3de784f5938c73b9c11998d844b7963118588116a64dedbeec0dc33397f366921c0e35f19c0ae4412ee11e114c29467c640a6e37406a471a0963268dad95dd69341812624eff05606d06e436423785172ed95a0482469ac7a2e8f9ca7401e3e0c0a071191f2fc3014def9f2d0a84e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a09b8b42b8b5c85276785f49f5b32d494b6448e3f1d8c06eb1373550a51af9392da0631d70f1cb6cf4d29ce86571e24ddf6a2971773ecd371d4639319c6d6907281a02f86f01825971850218711a00850bc0adbf6483061a80946b9aa8ba525bcbfaf010aaf92e0f8da98c76959f8080c001a06a727b1c2484c9a4882be69b9ac801071c3fc610c7c68808fe63b67cfb7a0413a040a12120c89ae04faf7e20f3c7ddb232b74cb31c3a4337bd8e4287af34860e1702f9021b0181a78504a817c8008504a817c8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a256dda8920dcd4a5c04490050b9478e6c166c9b974f89eb56a10e8a7d473cc04f5c5f82bab15721606b38b333df1cabe5f1c0b8da4449a32a1388cef0d65ebcb7750ad06f7c3255aba180fc177d12975f243bb1211dd0ba2baf214e2a5509d4cd0fd9cf2238c6694f8c18daf5cb0b5a49d03c42f312bded35f8e663d340669fc7d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0d18a7711df9da7d112c0ac2bb544461f82ce9271583443985a1640d4b3e05d8ca01e799c8df62b788d90a1442b259ee6d706d7150cf3391383440c8298758af728f90149128504a817c80082d6d894c36cf0cfcb5d905b8b513860db0cfe63f6cf9f5c80b8e43db0f8ab0000000000000000000000005ab8ca7e7984fd8e9e9e5ca7b5b77b98214f611d000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000005bb000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000126a0b71d4d454b7ad08d99b7522c947ceccad2796c255c07e7a9e2b07eae2ca0ca90a023ce5af9a30e842fd15f04ababae0b7654d06ae32de14e9a0d11282be2b4b732f86f8304f2a78504a817c80083015f9094d181e690799612e9b22a413e78a5609359e9ca9b87d60298ba4d68008026a0a5b28026eb44ec9b32d35926ba20093ed8f032a598693d69fba30742cc08b11aa05a826604a03b9b3f07bfccb4cb8b96dd42d429db73f8a927d5895ca2c17691f002f901fb0181a78501dcd650008513ca6512008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab0560000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000093d196ef281f0478c9c0814d7b553dc7fd38181ffa297ec326ee030adf5d63b6f5f2f7f8cb5a7c02b54825b233f25f811802148be2635373df49081047ef63cb538f942b721541a3415a609d0abfef271e5fe02dc284cfb677a1bc4f039506ce1e73a79fbc577fae7de1955e0e18d0e94cc3e2be4fd63f9f856f72365c04724d47aff8cfbd92504b0719548d30b0853109a0defcf494e1d8995abfef7a4f90e269e0e6d9f8276753500114d8a5af3b36b875e5bc835dfa3671f71155bf7574694f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0f2e18e1ed2b27aafb928ef8e64bbdde2fe52fd042ae1d060ff8dc8b52278433ca0652534270329e1e53ac219de55a4fc1324545b6c920d9162e80892eefc4b0226f902348201108503e95ba8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1c5f3bc9ed581ffe9dfb22ae8a5a95332ed3188f295c1814b1aaed40e59e8381edf363f0ada6ffe81b08e543271d02e6137102a89ab110acc1a88d49b3cb7ebe4e958362f8f11d2af8a2e7ab4e350bfb1cc304d930767af5e1f72a64d8d709887b8e382938307571430d6f237192da325989241fa10faa873d51224408ad85cf3888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a04c0a20ac0a13fb6d90097e18a71c80736bb2b1415a66e6c350499e404e6ab756a06353aec70426b13b474509ec5605da6267cdc151b8e5bdf79e10b3cc70a260d4f902127e8503e95ba8008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a7fe8b2f119009b4f4e6ec368feb9699385f99787aa69e5cdf273a8f3f8780968b288fef321ccbbe17946428e2336ab9df34af4a903a6d6e898e75732572d7e51b2b75453e3fc733f9241681faace1633ff6252989ade7ab7b10e10cc367fc2a2e8e6a9ec934b7159150528e2eaf915105561956da23ade19970cebf849b88211f0c2dafa306040fa2e8a7543fb78dc27bef039464182c5d4ef0f4932f43d03826c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e6226a0732b217d86cdd192e18e0eadc0504119311e3fe10b1b2d5d454cf5de612cc3b4a05087d2e9d5e7c2beff0f03bc2aea8a3c2afd173a4ac517cddbb8b8564c063d4502f9032801828ee5808504ec65d5e0830493e09487d9da48db6e1f925cb67d3b7d2a292846c24cf780b901042b81bc8700000000000000000000000000000000000000000000000004b73fcfe157caae00000000000000000000000000000000000000000000039d5c1a27d6bb200000000000000000000000000000c09bf2b1bc8725903c509e8caeef9190857215a8000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063820025000000000000000000000000abd69d0fac4b0851dafe100979df808eb7fb81a90000000000000000000000000000000000000000000000000002367c6036204ef901b6f8dd94c09bf2b1bc8725903c509e8caeef9190857215a8f8c6a00000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000009a0000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000008a0000000000000000000000000000000000000000000000000000000000000000ca00000000000000000000000000000000000000000000000000000000000000006f85994c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f842a07bfd1e1b28458bb4f5f19c57111a67018995cb84791030d95e1961bcdc99e256a0113d4cfc2eb9a0e7c9f48ec60bfd3dc70eaed8580af1972f589aa41c044429d1f87a9473d7c860998ca3c01ce8c808f5577d94d545d1b4f863a00000000000000000000000000000000000000000000000000000000000000008a0113d4cfc2eb9a0e7c9f48ec60bfd3dc70eaed8580af1972f589aa41c044429d1a061619f4ff8176f9002dd5ab3523e6aa4aa7eea503fc92d6effaf2faab13e801680a012b6ff41736eec4147c38559a9dc5190b5af57265e01af5ff53b6cc83ee42dbea002e956bb005a76e38f2bff34cbd33c155a5465ee4c1bd06d2215b31c3c215f1502f9021c018201ca85012a05f20085174876e8008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a0e91f725d2c0dd31115587561ef336ca683c624974d1a713641a7268f2a2c3dc39548c4e34f0ae652449696fb706bc8e258a3f8d4e6e849e39e5584b0e9ad1bca33837566fb054d78a3870d660451d5a9c6bf3b841e52769087e1104a9587f0392c41e6f679c59d57c7581eef926e696483ec17096750a2c3afa8ab7710596637d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a043c8ef4602f79f5435c592fedd10e4bd0f15743d3b203bd16f9205804ca99f75a0068ed0cab060dc76e7f86b22a21263347094c33d85263c8572f604af6e88616f02f90113018206a584ee6b28008505d21dba00830231e794ca6d7604ae55ba1ba864c26692a91979f25cdb9680b8a4962cb60200000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000041fa8dbfc7cc1b59cad19b93fb8ae3030314c083724f98befa2c8c7bfe210faf4f4f3c3770797758755ae9f2ec2a512e2fe32e41f3a6f406e33e108dbe32e318dc1b00000000000000000000000000000000000000000000000000000000000000c001a093e7b7a2209132d8d00503609823e0a483546a56f7962cdd080b202fd973fde9a00ed98119678c03e644bc850600a22ffbce9856ecbcbf27c8581e87bd7fc7760df901b28085039bdf3b0082dea894ede26ccc85ae521d06b5ef2604952a421c9124b68817207a9b6760cc00b90144f1e03d67a555c465a2c6494d9520364b884667aa00000000000000000000000000000000000000000000000000000000464e54188ae628be286496d7bc0b21a6cd4aad0a00000000000000000000000028d1b3793be15f0edbd404292a9cc10c2e08722800000000000000000000000000000000000000000000000017207a9b6760cc0000000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000ad2000000000000000000000000000000000000000000000000000000006382a89e000000000000000000000000000000000000000000000000000000000000001ca5584fbb7c4603a0f43ffdd7befeed0d1561525d93ca20006ecc6220097317261497d89bb07d061ffa08da6247bf46b8bb57172a4d34faa369e0dd89cc9d406825a04a70c813f4858109d648374110334897e59d410d7d5b65de8c0de6344628d1baa0545157e6397ee7e0532f04235c42947c8ae84ad821be11dcbcd89d76e55b9f6bf8708373acaa85039ba41a1d8305573094456046280bb4dfd5afe340ad269abbea772370d8881bb3376cbbae00008026a0bd29d90317faf93dfd7b1f9e17198585de820f3d9eda8b99859537e04b4c494ba06002b8d82dd30015d9fed2ee3303ab1bb14f7080003fd46f5db1d49afcdfe543f8a90285037e11d5ff82ea609405237e2bd2dfab39a135d254cabae94b183c8bad80b844a9059cbb000000000000000000000000d947e806c01ae61dea449458ff8464c62714cff00000000000000000000000000000000000000000000000000000078369f8720025a08d692b3b4d483c8b20a3b1d92d607e0ee2e4aec43b4d38f9cf9a46a70ef0bd09a01072efc98bd4ab359402ad8640e2eb370537996950efe02d1c670162d4b8ca1602f9023b0182010584b2d05e008504e3b292008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bceb1191c091b11dc0bdd60703d510d82d6f6819420e96cca481d81090835f8a005ab6a01a4f0f0e3e5ded83648b79190602395df2ef89318f456ac3a2e63145f676e88cb89dc6a5c7cb54eef906f4e55947e60f19cb82945e83baae54f9f0bd1a65e3b79cdf73a12bf37ce442a6cc3a9c2cbab7ad1046373fac1a0041fb8b75f228b2bb2d8a4db141f93f7373bfeb3ada6c15c980958d2bf5fab8a12e721de77041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0efc39d27a223cb3082672e357286b2b05f27fb7157e703dee77d1fa4c9cfa851a060cd3104bcab99d366bf4d5251d93f2531723c6bfc8f56b2c165c9b20658db4302f9023b018203cb84b2d05e00850b2d05e0008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b34f98320a73532059fae6178e188f4eee471e157968fe99a90e1323fd0e5edab60276167e5b01e5f631b47561a7ec0886e58751b7081772c8409ab854b28c4a31edec07336bc39fdea2bcc2b55aca9df43ca5f8f5b2dbb1e5e64299e93e840ed5fdf04e0e639d8cd931bced7b63973c735f3660262ab7b674b3670c146bafc32a9fc4c9d4861258f42a4bedb4d17e6a531f66b6a3818b42c08d1a89d1b58222002dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0bcbc0aeedd8e8b6e5a9b766a19e3d2eb57001cfb937fb76378a039388fedfce3a06a24e6f824e411a2acea833374df0241b3c039f092b8b37d1750819924a7080002f8b30182073484b2d05e008506fc23ac008307a1209476c5449f4950f6338a393f53cda8b53b0cd3ca3a80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc001a0179e95faf9d4759795cd8c6645decd9758999b383e565df0224669d716b2aa68a03188edd85ae37513c0194d847be0783cad646df7a197d7a745a8663ecbd8041102f9021a0181dc84b2d05e0085104c533c008304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ae9d225d9e05cf7ce1dc63a73601a7c8d2e7942b4143b6b37693faf946649a1d4789cd0960ea44bc6f0305ad872961a21cb28dbac7626136aa45f502eccc01523b4bfbfa4674225ba36f33c355a3eb6b375c4be7ab56dee23c3ef91477f82925710ae82141d13083170fa2d6d3036716f418d71ea863aad029f828eac4bf857c932e3276095e5232ac252337a3d6a4fec51886748206434b7dcceeea368a3c00bf2b47b4054697932573645ed95fecb78e83ad418cbe402f3f956712ce9a9e4f7129106175d04ef2a2bc0df351e0cb830acfd4971f7d420775d642efed0aca938ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0e5e37a347b8517a312843127e6c3cc4c9cdd304434e9200e47f90a2bff93d07ea040bf99603dcec702aa7308a19eb45ca3520aab5442ee93d5ab34a40063287ada02f90219012484b2d05e0085181b2c03e48304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ac1677c2d8aebe3c74d31d59ee170413bd3582f9ad62828460d74566a6a18bb3da2a4231c4d3d627ad779f60cf612b5edb53c7804d11552df07248bd30625a4a69ddd7f66185fc195af8fef05ffc4b5fbfe060ef55c3af75df4e9f043bf8754e4decf5e4e1131c590645de42305afd041f1b15fbb4e5c53fca066d72efc15d863e873e09ef631ec01f39cef9854e7433f9774f38b3edcf74463e36a28b36d6dc4cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a07e462ce311a1e635888a945b6c045963cc4ccf36cb9eaf96a97b347d3987b9d6a0102dbea1eb1255d9d508b7ff34cd3134159e6bf2d175d97074ae1eccc0ce826202f9023a01818384b2d05e008509502f90008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be727e46ed97d93bd4cd30332f8fd13ca0e6f96ee39dca9197eed23b36662b802bc2ba24e3723731d7db0cf4ce9667cf68d10940be9af37c798d98b96cc861cac54927259b6daad949ba933e3899d4752e9e6566e49b1e59e606ec64ef07f5d115c65052bff0e03c51fb48f6717821b42be97e6c2633cb2c1ae2597f7c5386db89a2b74895c93e64a7dae5947f6115b4caeda7b2c7bc103692f628e1fd1986b3d6efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0567c52127a208a82d2b6f1b685ba8f09915af71069718d05338cf644ae6fd04aa0529e78f32790e920dbfb932c8b078ec180f1746e8c496bdc0a1d578b90a99ea502f9033401824ac284b2d05e008503cb5a933083181959943caca7b48d0573d793d3b0279b5f0029180e83b680b902c4b87b0b4c000000000000000000000000b3f5503f93d5ef84b06993a1975b9d21b962892f0000000000000000000000000000000000000000000000000000000000000060000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000224ad558ab90000000000000000000000001a5309f208f161a393e8b5a253de8ab894a6718800000000000000000000000027e606fdb5c922f8213dc588a434bf7583697866000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000018bdb3eee6151a000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040c7d393100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c080a0308813996e157e91eed63c6eafa2633194ed1bec848dae1dd0cb4a24b41fccdca07086d13de05e62a3d9ee6878bd8329f4885cddeec2a97adae810ef66a393205ff90233819e85035a4e90008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ba8b5a6374a8061a364bad98aad8a7dbb29dd5a328c682c2b344a1a8203446b320010df544c65e4545e421ba5c6f6462a012285fcf2761648d5f9ef424a82865ce52180099a30a501cfe898da153068e42ea4c76d92c1cbee8dfa699c20f1a6db0db86768464f964263b4d04fdcb22dc7fa92617721a737a16f279f245402f172ebf7682281fdde94de5d590f2a34b1a7e7b5c33e28eb1990fdd48d066f3818d1276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec26a0d445578f2b90dcf950b9743dd196057c291bb50eae79995655f59c284e0a9edba05e96cbde366c65bf207827aea9316d1eb1baa0b759197d69ff476c9aa6eb7102f8aa0185035a4e90008301027f94a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb0000000000000000000000009994690f3cbd70d977467db0c0b6141b76a96b4c00000000000000000000000000000000000000000000000000000002d049574326a079c738fe2ac2a31db664c41c104f3403a0cd74d0f99dc7023b034353c50d6de5a06e38151af6e93d32708f944ce06cc7c359793f754eca0514c98f10fe12a7374ef9023482015085035a4e90008308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b3870d0649e85c7431ee92925bcb82c42c353a6b3ae7087f812305a64e1f5cc35052c458154c09049dd11bed37a131553ab46d1ef3bb157980b111fb69d796783ed2cf250786a0c103b31b1af72c40e34cba68e463879df224a173c19bb493713116a56e0a9b1719b7de2653bcde0fa2f2b9a29368b85d3c8a4cb76b1f067cc2ae638493b504c4955b832bc4cd7aa2f05208f6a8480eddb8dad4d67b6bced37a8ad846c4b85e9b0b88abfec7daf9b81d6d43d2fddd1fb323e1d8ab86d248a15a70a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ec25a012d90904c1df9bc0560cdcf1081cba7d5571a1906593292f2be437286afc911aa047e4159ba406e113314094c2af0279290cc43c5f0711fe2ce3115a8e0ed3be4502f9023b0182018e84a3e9ab808505e12254488308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc6379e4707c378a1b48f59ae328130adedeeee2be01e6897877205763d7320a4b100c46ab99a80b5930df4ac91e21ceb7677f81c185ef23e53d94e44ca86798e4b99cc1dbfeb7777d48b5728fe8e73e597158c2d3542e22c2bef2027211c070e05af0660f0c3354b734e06a9427e13e840a916b5cbdd5eda4e40c2d60dd21a57a24241628467a8526d652b04a046db8e7a86bb28fcfe2b13054ecf29a2cf302f4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0f3a64da686d1e20581ef1d813d67c27058259a968824db27cca858b82f4a936da04b28765ac7cdee137aaf54134e06c913db895888a42979cbd794d54f66cf9ce502f90239012084a3e9ab808505e12254488308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be6674ebbfbc519d5958c8d943967b369ca7fd39e41fa5dcdc74b9e819dece2d9d1eaa743b35b207e29f0fbcf3e4aaa4203c32064b790ee1f0dfc99bde414980cc0f2dba35615ac2592dc1d6b8fb26e39b7246c16d96ad15a12809678a0e37ed76d4d9af745f27cf211a19fa48b97871fb09b98ecdc5bb9c1f417a44549b5aa973888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a02b75240daa730b4ad9257ab25234f9eff07da7d95dac9a47e8a9bb8d071b0711a03fe8064f1675ca9e6b31f1e4e8fe6d0a1be0d4b8bf64a8dc15aa93d033990eba02f90239010684a3e9ab808505e12254488308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b21635d6387d997b93425b3637984411332620af98f38631f937fe97a8ca68d5a92463a1b9f155e20d4cbd385ec7fb1718d47d213f9ffc0c309f4a7f7bfeeb608cf4648c423447fb310378a6cf3392d3796ff1bc240d37fadfbcadedf2440324a6667e37f619c7a26f38bfea08235d5cc8404ff299b8033493682b2d73fe24d7a9c145f1a961634e373c2ac40dd183321dd17dbcb7435a0f7ba5c37f58120731c368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a04f9baff53981a3816b217e3b7f37943f79d73ff9375e5b602f99d8a9bd49406fa04fca136a718378b52eeb3ffafc50fd46d8a33dd8277751bc57684c358c31a8be02f9023a01818184a3e9ab808505e12254488308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b99497bc6c8e20939efcf81d0cd34182cdc3435e2a7d94154b162236b840f2a3630f1857e06949437fc37d922f5a3d8f9eb505c85b17bfdb4dae50834af0f5af52c63114896df964394ce28735228db04db1fb7d9e53b4cbca4ccb2f0f1af530ebfae2cc4a04753549155cda73390e77a0898157e5da2ae4acd9089f1539067351c343328645fb4d3145cc0675f6c7e3777aec91f5b1c338fe6fa0795041809d780eccfd9aa7d59e9987455ceee93c47cf30839c9e2242c79288ac5e451a96d567710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a070de19bc55e9e30f14306471958642a137d7f5f79832005418cff36ccfa6bfe7a0214f222945ccde51a1477885cbaf79708e304309f64bc2ce9e04d8dc19434b51f86d82770b850346dd16ca82520894fbf083f0d2a9092fdca9aee7430a44bef152d369876379da05b60000801ca086fb17125c8be899ddd314d63cb36a72326c6e51c632ffb4bfbf73e22029044aa017dee11923750fb5ddde0bff31dc1a3e5ec85dce5dac19fb3bc6c480c8010b5702f9023a018192849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bb39e62ba9fa90a052eb76bc81c9422b13c8ca59eeecb796afc9f8fb4c78c3b2ae8c7585e43abf49d73247799cdb2af950d77bb044a7660ee30bfe3a227ffcd8dab138c9d5b45c6ece7a9a2ffc869ca8d60dd94ea9dec8b19c668a98875b772105bcda5c06911aeb660abba38421c0ff56a426281f61fb53eb000f147f02d9a151c343328645fb4d3145cc0675f6c7e3777aec91f5b1c338fe6fa0795041809d780eccfd9aa7d59e9987455ceee93c47cf30839c9e2242c79288ac5e451a96d567710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a00ebdc320e421e4d10200a88989abba6a2755790bb51621eaa6c7cd734b87f8aaa01d663cf6ec10c685dea6972d563af56d47b1f3e86b7dfb386c7688efd76c5f5c02f9023a018195849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b3c2351fb8c1decd56b42811597216a1731798475e8b823a51bddafaceb2cf3f80f947e2920086274197649c1092b26a22c06af8b85f5e79b93f5752e77e1eb7ced833eb1d944639dac5d6576acac9527fd5e50b3be3a5f0bdffac713d622b81ca8c9ce932c51b7569e70fad2b348efbfded890dfaf8a8b43f9ef9780c40e47316f8ee6aca360d4cc0b79651c7c4294544080e7c14c28b91bdf155de019bdec5663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a056bfc62c17131dbe50300e6ddc1a939c2e95b879a82908dab033452750ea2f20a0077f99b9fad50d032dda80ca029039fe6b8780fd4427622f5a4c9291bfd6708002f90239011d849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000beef5923d4fefe8473ee7de7ffe7fa26c8faf668dc19a2149658ed3e21fdf0ce2efa95f210be598d32974c5a17a399c1d07d1595e60614a5e9a2c988b90decf04aa503d2e2fc07673bac6f0c192da6899a662e13280a43ab0a76502689fac8f382e85b023c706bb0694762973ec8ba6a13e907b6894e00898fd3444968f555bfdeed0bdf6114248d7bb901fb3d726a53ea3ed244bf39c5029b03d6828cc96f37ccba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0ae53e6725660dffb80c9dc44bcc4f968a3eaa69ca2f9d83f0c15eb28859b4fd9a051ad3368ff606f106758a7162d11d34ee36e6da8579d2eb087524b48f496d59102f902390173849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b18493c5471a4c5a91733120f4fa4d0b5ee1943b317b44f0f942e04c44b29a5f8b0b2dc5bd3ffa19c420efa98da6c333272819beebadd2ae840de658a5a4b4a5016e447315ace9c41b1861774b36838281e6cf85d9dd53ae8ca8823f9d216f129dc1fca82427e68dd7b52eeb8ac01ee60a2a8fc84a7a68efbbd1a0e30ebabd02bca2fac698de5d0738c7811fd3124aebc22e52367beb65d084d958cc9cb9eba252ac86ebd96f96f7eab60daea44eba5a2a551436123ec6af1858d6c9c9e421d98b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0fdedba0fc90dc5e79861b3dc6cb8f3270778fee5443ec1a9d24a3f7489938b8fa01cc3ef3adc1f59d1810c94c3ca431f9255de7cea078cae8496f0a3a058e2583602f902390149849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b4bc07f8765b668852601d284d246dbd67c023bc1da388d731271faef33263656fcd1fe2a802b05783945d4c36fe9c1c71752f86d8c885234d61f4b52fae327704e7077818f1d7d89fa63fc0832b1ac7d0d983b7e8f9e7e2b8f1a7aa4c276bdee8c476bb69dcbe9a4af4cd65265b53a8a1c5df99123653e583ae510e81341fefcad79cc52adfec7aa2fea4409beed20fb4253bb683a7cf772aa821178dd4e4b64a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0ee26138b2c626e50eb95d85043c2eedab9a51e429fe03fa7f1ee2956fa05593ca07ccab12aee24d902b64735a09087aea4059f7dc438a34a6759976d527a76d1aa02f902390153849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bae968bdbb6945b789b93803da45a62ea16920f45fb69a3c0f91dbd9c35f10aa8fa0c956d059f3f9cbec5b04253f00f73060de3e9b18b8f5646fd8ad61dd19aa96c86c2577bd18956cfa3d48129259a335b791f89c04f972380e60dab571b82d28da992e0e4bddf6c591952171335ccbc862d64ea0db7ec6c4a3b5873c74107c1ebf7682281fdde94de5d590f2a34b1a7e7b5c33e28eb1990fdd48d066f3818d1276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a066369605683b3d99e90fb7be670a7ae76c0890e1170665b726739f5489e175f3a066c9dde909ff63f6c60364c3b42ce9e7f160da600ec71072f7a20a92a7598c8002f9023b0182010e849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b480273f77d83ecdfbfec5b57cf43c69c82ae5725a3c98940331ff61582e6e71a4cea9dc6c2e8975fc0679486c80cfbb363afd3bb163cb0f845bb065b802d3a5a67213571b553445b9a52f7cc984e7c586c0daab4dc9acffe9c3fa975c8dbfc443c5048ea9e29bc7acdd607d3ad8b52911abcbc85db3d3fd35bf96610f634fea8c082e8d3b33f23ead586b0c03120973438415a1548113ccdba2873096f347582a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a064e9877e59512565130384dc762eba575f0247d2414c8aabee3ffc805720802fa06a5e262396ec79b96fd3075994bb3ae433f682ce86b9216a595cd75f075852db02f90239015d849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b268af0d6d315ea9c0d5c6382fc2678e91bd74cbb19f75c972c34c6987c7eabcc4cea9dc6c2e8975fc0679486c80cfbb363afd3bb163cb0f845bb065b802d3a5a67213571b553445b9a52f7cc984e7c586c0daab4dc9acffe9c3fa975c8dbfc443c5048ea9e29bc7acdd607d3ad8b52911abcbc85db3d3fd35bf96610f634fea8c082e8d3b33f23ead586b0c03120973438415a1548113ccdba2873096f347582a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a00db24ab17fced436faa46067a737c1d9e1bf627e6b300431788736984e5133daa05ce898f66c9f0758f45562dc5b30f3233b8da1edc5eeaaf5e59f0707d1b86b3002f8700136849502f9008504dc3b2a048307741094f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a0e4434142219f93de767bc20bb10004a17f411a28620daf16d8d02dba9bd25364a07a85ac2afdd34f36dd581789b6b39a2051f3baf0dcec3a23d13e8b2a5635eb1e02f9023a018187849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b253745fe74841d4e4ac09a0eed34aad0ba7063d50297a7ac47eb8be585eea20db98f849432cbf66b111e8394825ed2b837ecb7f41d1646f1eff21736b5334162036c70f758b841956492bc4af99d8efbd1d353b0c3fee9024829a2e590099869c4f25e0ba4cef2cea3b6ec94959895c35959601869ae03e3958a91f1c03550258dd36fcbc5833f2116232785f8359d2334dab78eaaf7968a293cd28690e1a457713086cb817451ad27927dcb997da99d54ccef967bb535cbdc21c283e71b2810b3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a011fa61982e2b6f68317022639803d34b77246f55edb312a16a3f306903906e71a020e0f379c6afd1ce89c72a018077c6b7055b546eaf0c9f694854c53457c4ccfb02f901f90102849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009ec5651812086878c3de7814746c26d8e3332ff4a9d154bdfb6455735a940a336dfe4989c3323bf53d08ac7450d979fc0a92f54dcbe3cdd51df8c6f66359bd94c0d3669e0708e9542665e4396247ba04d243c81b4479355ce2df33888c9e0b50504b5e992379c4d3ee343745728e612df3e9676a94c905a59be7b1f3beba3241d2fae1249bd417a9e9c0f5c8c8b276c9574b7637e276eaa4c8b23d4cba87debdd718652425e447279755764eaaf850248e059ef4df20260a4cfbfa72ca14c8fc9f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0fa1d65fd081a92db254aa12ad3da3735f6a5d5fbdbb4d088478302312e520eb5a023ab66e8c699073a0e98602ad3c591d69f45e183ee6a18e9b5cf641b06e6858802f9027a0168849502f9008505584dc0fc830f700994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b88025bf6196bd10000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b3fb64426b13305463a1e7a926399dcf62a972d906ddfd227039fd904059a84ef41f58390de847d8c359cf9bc0958c2dbcf36bbb7c3b7449cca8aef7a09e7d470c58392730cd2fdaa42c05cb894af03cdb84aea7f4a35c29424107fd76ae911c51a000bd0b2081757b50ed135366091621048c5fca2e33c7c55aa995176b4a3ce9d69e2074a60ca8082feca2cf790323dcaee36ca7d5bd5a5eff1c785118da0e6eb4e297c388289f40331a599c712a7b67b756f84b98b0cb207da5231d4d3d29b273f11011a9233ef1fd6c504fc555bce333eceb3d1e4e8f69874b75f6d856d531aff4e550b5383b62ece606f193a21eb2ea67c3d75bb0768790978af2b6773f0f834887345df96f4d60b5070d09e1a56b26f122a3ae81b1c3c2a06b6a29f64ff580156337097fc727277780dfea1f31e0683a283836f3d0b2f2f7411b3140ac82c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c080a07a64532afeca0bfe9affbbf65600ee754f80faf79f13b42dcd845cfd041f3bb0a075f8cae904b99f5b12bebc153bdf0c5edcb3d5fca4de86873258301129d4be7a02f901f90154849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab0560000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000097bf5a6885b2d7429c455eaf29234153bcda6c9c913ca79940b5e3e0807ebfa7ba16dab20521d795fe8776a851828db6d1bbe36d4a4b9d5733dacbc3d8fed57104fa671ec4975cb553cae2b81c79d8f9eb70cf1b92f092b041cfa4c3fbeafbeb33df3d1bbaeba880bb2bc076be02a1f7fa95773e25bad6614a5e98eab106fe498d33d04bcfa9df33b6b428b996bb338b17cf32cca2a3da2fc4b93b0db7860ddffb4e8f769bd74b61d12bab3416ebc55d41ddd709a109e5844c90d0b781d913615f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0111c7e84859964baa2edc06b6fa6f9defa1a5b8cb4e75e9d9d17fb38a79582aca07cc3430b9a84bf3319a23e686e76ed1f16f16cd00a8179865ec3c7e3930cbcad02f9023a0181a1849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ba9760e4c602dd27940a9e99dc6845e89b0da73b91a8d7d220c324e7ba5a5f6a71ca26df9e94975bc73fc213764ddc22622e039696c7e0c2b3175f1165580121b6c86c2577bd18956cfa3d48129259a335b791f89c04f972380e60dab571b82d28da992e0e4bddf6c591952171335ccbc862d64ea0db7ec6c4a3b5873c74107c1ebf7682281fdde94de5d590f2a34b1a7e7b5c33e28eb1990fdd48d066f3818d1276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0e49f7d431939d4121ade2c2d5c5305c92f233e5781a47e65f9b4c31b8e38c895a03ddd2dc7c4630f82d65292fe4e7a05c4a9015d0ab51477052b61e185e895e7ce02f902390134849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b9e195179ff8e95bc06232f4eab3a9922f9c9c159c617da70bcb1b8221df8c486a4e87da5f3cd6439c63304850ddc26ff8cccc529a66e950970caf825b9326a8e7e29d4406d11c42cf40059c11370050cd6a659e89a8d65ffbba46e8fde16f1c71e7e34397d88cddc049bc1c2deae4beb1f6daf6338d1f7aadb51269915be934dc2430025cf245b31b5c66f6eda28c3ea590926d25dfa31ae73b28855b3df709ce488754043fd18689e005d87631fdc5d396ed24155d873270a8c3afb2177c2487710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a04103cc62adc5906a5d18cbfb8982a327284b20426191c23e3db72e952f8f842fa0526dcf1f8463a6875680c4517e22f4b1ec3cd562c55114bd9efa55a112d740ce02f90219012c849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a144807997d6584b6db9d3906f3d8bf3ebb6e93cf62e2cc07768c72c054bac120bd5a9c7dd84d0729e06026bfc52156f2ff4c688d0772c189d0bb8130b6def69ce88ab91b9770265f314ca683a03bf93419fcd8923b5ce4c54c9b15825f1298efc8d7aa842f3c81ccb061b9d9632b65f1deea4736433ef39a978e12dacffc938f23975ade81aeb4467ee3d0584113a70343f675b0adc969dca2878343fe459c854e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0196504386e10f44541112713f28995c40ea74f541b6c9927313b4883e975559fa043391279b8dee99edf06dbee66429dbbb307579f2b6578690edb9ffde96b20d802f90238016d849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bd30b9bf48329cceda119dea3f01831bb46dfe652efb3a3a501e05b463670bac484c3612208a9287153c12a75652aae497d5869b9da4bc47cebc6281444352404f9675072b423cc404548873e99867b92451d173905857a768fbf970e6cba68da9d1a33d161c334d6e7df7d8adf86043cbd67cd85a9388bfa83278f5c59fedc020f157a3d5a8bafe568e0c270de02c72665df09162a5e9547ccac29b07e651a10cba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc0809fd49f5fdfd69de6d4c2cb9cdfb20975bfc3d92f06806b355984bbb83a99ac31a01ea36756d656fc6fda4b54fdec59854f3c8b9d4e77100ec45252ca171ede9eae02f9023b018201be849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be25ce5b536a10b9e486afdb7cca97b1a921a3ea54b5817b9fbd8a536b45eb47cbab1dd0ad0d08b855d05aacbacd947e55edfb7d15ca60fbb7a30bfa624b089b0a1245429ffb23db20d43d22d8434d8742d14972cd2d17aed427b80ae5de6603beb6a4d068fa1b355e107c9fe663b0b9ad4b8969b10015a663d3a7a1b28f5861205fc71e2a4dbc946baaa9c64de51e4a875f07d87aff0de8dfff4952e38844850276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0cd27da49876e8a7298803f75e679e931bf9cda7ecc296df9151577d5ff4da639a0440bd3675dd656dc0370b7221a70fe45464d14b562176c59bac5fc916988aa2b02f902190113849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a5bbf41a9b4e1e503a6b53639234ba2264b99c9376fac9b6d918af00a0d8a87171342eb4740c8ea34e3b16a7a539f40f960d2dde26312394459fd4f587b8fbe2258455ca6cc651f761854d33956ca947cfd79685afac2be6ae32a4ff488575c8dda2e154e736467bb805327a0d140d616f3ebb696172c0154db5c3258de5d90e5785172ed95a0482469ac7a2e8f9ca7401e3e0c0a071191f2fc3014def9f2d0a84e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0d0f5f5a0d6221c7e65de0a505bb4fae7d7a2cad2f9398889bad687dda6326a3ca02074726a54f228a9154ee0e33da41d170214f4f33ec809a990a2d87d5380167b02f9023a0181ca849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5b242e7609999d8767f55676cec40c89ea85b422dd7599b3c7122f8cd5d4b1533d03e0ac80904a0ad91ebc41879494e7e9b567e99214e2c9a8a11805b76648ef07cd927a8dd897d3afb0b2167f0d2e1de9f0d47fba857dda85be2db1dacd336884cb32035c62f3227097b2e8fa05af5711bf7bbaa756b24b9e4ca95852a216e0886dbe84053c9654ee909e5116cd040a38429581726d6d2334965bad30c3a657de5344c7ec1b2dcf94654052e24c8a40a725a9eab591376c8142f4dc6e5e7874286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0021a1c76ac35e3ff5396d5af9f461b674fc0ab8a42f273260e481338c8fc6776a00a3000b3486441540b09bac00ecce1be324d4eb90f9de9ddfb4a5cbbf5afd48b02f871018183849502f9008505584dc0fc8307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a0e5c17dd0a22eb6bd4376cc26b2247c405e7bd8e0ccc7b7b3f0609e4d0fb57203a00306e3f7f733db17dbe5120d6617198257a50f9d1ee8acdba2155db111c164c902f8720182055f849502f9008505584dc0fc8307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a00ec323c8e02ffa1c9662c9c95b3df3958600bf9e67bfbbecdd2cd46e13cc46c6a004c46d07cc33e41323d2fb37696c378b4f170b326ce75bf09ce64e0b2c98daef02f9023b01820200849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b87a39c8b7e5f9dc10afdc2c67e5257c43758017dae7c460d5daf57a7f6276c51c5065487e269107a966aa5e469d591ab9e888cd204059453a104ac2b99a55f110b2674d4bb65fd9b5b6199ac72e23eb70f63792e7d2fdaa0bf4b3e31c2ce8758f2ec770485d1e71d2dc41e25ac2456b254fde26eee6dcf840088fcee5fe4436e10369baffb808ab0ec10de3cb51ade4c7287c1fbcc49a59aa8bf99ac5332ccab699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0c04fbc0a30a02d0bcbab88d417b383f426ff1c84c55fc0d7073f0de3313638c9a01f7a083e503b89146d894206acfcf18a3cb88c16a5fd4cee6d80c5743532006102f902390108849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b80c569c46b856131b126382ba6e030f7b3f759b87f720a674456d6755d1f5a24b6468af925b2394a35e2e3e1ee54816800e9d592fe1f45068baf0a2f2228422ec6d7a20354f4eeb6b73bdd7e8cabb1dee44801633fae171c51fd7f40c077df116a333744a55fbb0f40b95162d3c365b4afbb957fe0e95b6cdbd17d9ff3b3e76c72b23902eff9f9f3eff1f8031299f27d86f44118fa4a7341e7549c672dd27d5870f42c97662bb0ff7f0ecd7778cc5f0e1d56b8231ca6676e0d6e65f3c9525fbf66c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a006750235c1714758eaa5f92ac1c46a5e6eeac6b5744934fd1cfa9d4cd3cf9d64a00ae579f278757158137146e5f4374ca9929ee9d340a54e1881ef1f213208ec1d02f90239014a849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfb3c2e9eeaaec41ae904aa3bc9d0d5b49eb063b20b3187ceb6297e37656abff1cf55d08484af822b9b13848571d27d615cd7ef5037b95394921ffb1358a730ec484774ff0d8e87f19daa4e351b6e2bbfc88e6c68633249b16ef63a48f262ba5d54101d8dd11bae84ddf0839e76b76987033ee3fc08b57f2a13e7358d9b5e48edd2608d423a97ead4a46eb0ad82db90b76ece811cd6ef62d5395c941d88881a33368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a079cf53b59af1bde296fdddd433dd7d8d10ce09a85691adde7c42ee7b79b8eb1fa03ec30a1d8af3787eb69b5f878002735f0a2f6ba81e7e1ea10991f47a25db66cc02f90239015e849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b445d9a8cdb60a5caa4fe1dfc012f339629f07313d4e1ef95511689da854b7d7fd28313a6cdada003c686f06769c868b2cdb91f87f1b82830e08688c06b5331b9b0505508a5c122df8bf221be2783edbb4ebdc48e50673ab0458e7daeecd3b68f93a5a2d61e1cc7af7e696d76a991d10d12d6b72995581c4a123acad70f715acfee087bfa77dd89688e6401f01d9e5ccd1bb547078d1439c8f63831b6d037c2ac7195d111bb22b6eae6d7433f44fd3ea2e2c4e9695bf742d66309e25cb78c1de5c71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a033b39680231187756758bfce5d51b30455c0ada11c099351001f2dae10a8fd5ca06d7505099177ede1c824bb28ce841963a1765f7cf2ef7c7ffb2da45682372d0602f901f9016e849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b90184d2cab0560000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000093e072547a5b4aba5cf10024062301bf2d85f5af06a86209355e7571168dc6f2f0408a04220411b5661403391c91d92d9cc01e9ff51a0a0590936f69bf61b09b2d6116199a876e0f6d79d285e590c7d49af25080d3baab95ec1be3822804a2304145d0284bbb5ff2ece72fdd4f2b20d42592616380af3ff67a5447f3efea78e537aff8cfbd92504b0719548d30b0853109a0defcf494e1d8995abfef7a4f90e269e0e6d9f8276753500114d8a5af3b36b875e5bc835dfa3671f71155bf7574694f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a049288147adc66f9bf053dad49cc659a94b3395f33424722aa27ebc8f78247faea01d92f00518c8cd8bfc8d68bafe76dc19d6ab622eeb0e273d4d248a7d7c56fc6702f90239013a849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b121c5cf848e027dc4cc22baf95c86e160ee1ea9e490540d179cdf7468b8d6e334947192f9142dce43243f17b43b28dd9a392d834c0b5baa715164776cb53b2c1a5ed327e5b0d2f0c92a617b1e4a3dc03a39eb466d58aebe8780063d1f3681cc4610a1669643bd11695c4fca06549f7317186c0d289ae01671ba762c9fe5a2b179ec43392ff7d810d670be3bc309602b40ba4f105c85f591fc1e59193a6595ea88f4dac38243fcd265b82a58d8be3b0802d037e437f0b91942e1337467b6d2f2f11686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a03b44ed94d2cc0f76919d88a47ff45b09da7dbd9459c613e73ca67b5266ab2679a0422de4b36bd07e286a830bd994648df9ebf83225307648d7127465ca86fa182b02f9023a0181a8849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bdf96c03502ac2021b76c4fac9c82d85c0d6cca4aa2fabfdc51c807e4216cd51663c72e95718f1f25037a78765bcc4ff89971f8088664e1c42337f305f36ebdafb74ffd93a9b87839b9f21bba96a3420c2c0b969d223e76c6945b874ed152f4f7001eec81d4e5eb47f135bba800c40b6097f54d508e5bee8ee40902f5214c2f9d4d0c10637c2bd2e82fa5c75b6df67103185e90efa5109adda52320fc643f5bbb340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a017da238e7e078dea7ddbdc976d270f817a00a3f8418a7a315f533f1d9f12b46da040de95b0622ddb1b53d8dac977a5a3b9676a73bb7bf4dd09e852c5ee96a2d2af02f9023b01820127849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b07ed1d2191bb7a3ed64e9a7bf1d97c797e8831671f088561a8b9df63639933bf0ab5ce7f3fb0eedc0e8a8b19ba15a4a4a473bf444c0152bdac13f8d607c3ae23d9f8497304a240064cbc015bd4e60ef746c9d8c811123971b71fad55fa7ac59a22c21be9e8ec837d6b51c0241688250fdcb57bdd0a6c733f0d3e562e841affcfb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0958f185c93c78720e376d511ecf23ad677bb69f8af47acc5af1a4e6ff4817fe5a057e677ea9a0ead3bb99716545976c4a5805b179b2571b1199512c7269a4fe19502f90239012d849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b182adc6a199c54c9ae1953e1d7f94b71f14e4a6cca6f8d2e2724024427b440bec479557d300f98e5daf459e6edacc62c9b85fbd9393c6bc8fbdfedb4f346f3b46aa8cb13e381a1d357d13fb1ec724f0deeb2e1d2f919d681564370c76af3a8b5372755e99db10d981dc9ae18b6f26f5dc24451a8e9565c4ae7cdb4570a55ac4506ef85f770c00923b222c953cd33151eb48190d06044fd1d00edbb3a835da2699262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a07eeb31bedfeef057b45995886bcb8ec80a68e0ce9a195bc856719c7d4061683aa023d7a0cd41e78ac6fd1dc9075e518766681624305125fc61b601bd7ce35bc4aa02f8710181f5849502f9008504dc3b2a048307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a0f5d783d2977e1a821ba8adff950a422a55c98999caa6600acc83631643df57fba003c29d02b81d86e1ac994402a27bcd7fa076345eb247a04848a151755846632402f90239011d849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b903c2f2eb04f87377c2ed1f36718548dcd779ae9eb8433560b246852ca4561625f5b251b1df35be913df44f06cfecf7323958074f1eb17f7905fcd80abc1f63054ba1434b8454e7240ae22bd2eb2f94ad43111ccd1e4a927be2c401b015e4c714e0056a69af46278ad2d07d86497fc282f38fddd53b343af77f221e2bb6dc660d82b8b3ec70cf0d4f465a96e1bc01553d96a727f5e0b70713858f708c2be46a39ca69f0224e93fb3d36e599453e3cc8205528fec829600637c27b4c7093ec8600815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0b36b32b2bb9d50b8f0c6909bce13c2432443a2da1eb82549ebd5b8bb9a8075dba003643bd8f21235959e80fcd5abff1748b7eda7caedb89b1ea3fc99f347ebee2702f9023b01820297849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b2f338c5d28e1561b5bfb18a55148735cc872d0659dbae842279876a05cccc647e6529ea5b0f07bb7369308d36d1525498b2c2bf2d0564ddce4c4080591389f5654d46cc8b368dcc4f62d1f9bc2f3d0e03e99c1f46974016e4495000b7b1d7d4a1c57f0d9a8a8c22575d9fabf4d02c345b6bd68689b09c0197c9be4aa8f1a239a10369baffb808ab0ec10de3cb51ade4c7287c1fbcc49a59aa8bf99ac5332ccab699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a050bf305663e7d2add233ceaadc88a5b8d435ac52181a1605dfe77d36065f64c4a022afb2213deb9e8cf5e610529da6fed4a7816f494cf6fa22fb04aab406d6c02b02f902790119849502f9008505584dc0fc830661bf94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8778cad1e25d0000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000bbfe0abbb6503ab9e7400b0e1d9de0cbc33573d8a75ed072cf8ef9e0fa457f6782e0ee19cdfd5d4aa2c43ae69617c5140d67c8a436e386a772665b3c8c645b8f9d6f3feccd53c36fdbad0f38877a3259633fcc7285f953ac34435d1891ad0ec0b7e8d1df7f43df2f18a71a9423fc8aae1d97ed63074e05600d58035407c78f776b783ed18591168ee2066e2263ed0aa999d8866ae3bb471d016b1dc974ea79021026c00d73f71e6c9fc7f99bef90d8e88819f37570ab36cc1b459abb88b7eabdd0e6c4248e722c42676cad5ee5a7c6bbcb02e787ea071ed40a7fa79765f9586ad39a965869d76c3a71d2c74451314f38fe0dfbaf382298368bccd25fec916d3c2a9926dfba95a258d32f0f4201888a0c5a15dc50be540b6d7a523548b99777c040b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a0149bc403ea4d7f45a9fcbdec9de8af93729a51ae6916674e0e1b1cd67dc2b79ea076573c5932f1fc7c38a87397e37d1aea8521a8a92114c01707c3fb2079302dd802f9023a0181a5849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bbc449c7f482ef82b60c6199326bc233264f7f61dc3ddfe7551644443cc120f06d69c380e8d324476f552f9186aa830fdd9b68115969aaf77ae986bc1788bed470fcaf63cb0ebb1f9309c24c6272b532256e7aab111beadecfc1be208875540b483457c9fdd59bf72e5a8498937baa8ddf6b5ee96f52a4bf5501a41bf841b2bf18dd36fcbc5833f2116232785f8359d2334dab78eaaf7968a293cd28690e1a457713086cb817451ad27927dcb997da99d54ccef967bb535cbdc21c283e71b2810b3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a06a55fffd626565d28c2b4d87b6e4fc20d67a18503c3eadb024688ff67d9c0f77a043e28d618ba114eeb3d17cb74818329d7c685513b66ad8cfc9e72c95d9c8342d02f9023b01820156849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b349a4cb6c7b7d13a09cb085ad411debc49e232955ce48341b5bf53880d7b284ee4369aedef96cb9e4bcd9ce8c25fa6ac0defdef4768bc27f10219326463f0bf208efedfce135b509d1544040fbd8ad348cfa31a309529c40ad4a63a36d89b5e0de660bbf169d37cf7a3bdd83d271cd20b2c421392a7637935935b099529f9bd532c3f251efba7755180cc77acc723082ba28ae0b8b8ebce06803c6af580fcf3870f42c97662bb0ff7f0ecd7778cc5f0e1d56b8231ca6676e0d6e65f3c9525fbf66c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0ccba7d31b17978f14c2db12715df3f8683887e7bea899ad91a27bf340c63aa11a03d1160421ef0ea7fcb04a792fd37855ce9ba54f0304e6b73fa8999bfd5ad85d002f90239010e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b906d2171853279c054680adb0b51f6eb7acbc322b770468f9893d89258e8d95ba817ae6b33c2ebff43503a6454463819e1cb790458fe2e2e1a3dc279b9c0a8ad763586ceb8a84db50636ac9e3378d9878a11b8cff4cecb6b9f77db2ca06065ec1b7b68d483e594b2b049a510108e8384fca4a98b0c9a65d353db714f1b624b53d2608d423a97ead4a46eb0ad82db90b76ece811cd6ef62d5395c941d88881a33368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a045bbd761da9478a5d76c6b39c22a17b9eae74c4d1570b3ebe2bc2ebb8164c4d4a06d58bbe87a425167aef39bcb91e2082ea8566bb172a198da777fd126712e0d1702f9023b01820106849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ba6c70f43acc6c77c6d1c8c345547474284cd18961c4a95598ffa397576a4ea527610130a406b6a70b336fe19c1a4ce2cd310b55fe5ff19a78fbe1d2b704307cf3cbb7597f785b6f0c91497ef907416efcaf5b490aa7f12240f77717abadbd7207f8689d4bc597cc9887d741dc8e640862294a68415d5a2641dbf00759a40bb1611d6828e7a5c6f653304a5dee69fdd27a099b0c728730fada01b02c727025310eae5aca00e946d9dca1871117bf328bd20f5f7bc1f5a7e9e3e497e42d3ace8080f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a08fa45af8d2f0e9dddeabfe8d81ffced0380cbe15cdd3e7ba8ddd59927d95b3f7a03883786120c155db5094be77e541b2fc8f0da6085bb3450b31adb0646c4d473202f9027a014e849502f9008505584dc0fc830d2c5194f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8801e32b4789740000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b02a6cb215e33e1ace2bf5fefb357cafec198261c800d0de79b03f3a9016fe0f71e8167db33748d73426a91f80336d1543be592d7ce5686ed10aea58c47e6ae7850f2a721c2fbd607e79faeca33c529e6b4f03a4afb8c9b03c5a94cd5e80bd0d6fc29ff3bc9ae3fba105e1a9d95f944928acadd6f9e512c4257e5a5107540b0104fb71290dcb90e62e0ff51648a36d91bab3e797e7b2309c7138af733769be539026c00d73f71e6c9fc7f99bef90d8e88819f37570ab36cc1b459abb88b7eabdd0e6c4248e722c42676cad5ee5a7c6bbcb02e787ea071ed40a7fa79765f9586ad39a965869d76c3a71d2c74451314f38fe0dfbaf382298368bccd25fec916d3c2a9926dfba95a258d32f0f4201888a0c5a15dc50be540b6d7a523548b99777c040b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a0f0974eb37bea7c578f65e0ace146885edb83a78f7f5ae95e72a8194085a84ba0a06ebb98f53dd851411042fbaa7e3890c408eb02646f875b8e344a088f182028c202f9023b0182028f849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b192be21af03104f15f698f408b92e792e97ac423bbc02e155eabb9e57f7ef5e777e86856ddec6369e6ac57dd3c7f9a3036a68642a2806f383ce9e773e79bdfe72effb8e3ad39dd45b385949aaf317896b146f8b2611470623acf370ef5634258ff0586c654c72f961025bfaa62fef509077af2d646ccb51f70f8d002a6f80bc74f248a910747a8e9eb820a3e41030a74a5c855ecf46f86dc3c7a59e806e65db57195d111bb22b6eae6d7433f44fd3ea2e2c4e9695bf742d66309e25cb78c1de5c71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a082d10bf9ad048e149f86b40d651716dc575c6e432dd33d65b0827288ade32b12a02008c069232d0f367e5d4f3fa64c5191029cc0a169107fef70331b552225283902f9027a0137849502f9008504dc3b2a048309c71194f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b88012dfb0cb5e88000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b77da39b06700604f523119bd39f568578f77ccc72088e81bc722cae56ce8cd74c5f21cd8516cae6f90935772532a1a0e8ff6e33be48caafd42d1099c75d4a1cc01f61c56d5e9b2b79da5bf140fc55961d3461d111d360b7cecc4ccd0f9e3049f483339e45ab04599c7014877befcb5ee8e104dbed25ade1f8f3fb5d4f30d8845a2a5aad360a307d5930a4ee4c6439586eb0878524380d9f92982536e1531b5059a79ca6f5b4a75edcc3eb4f5e0cd734ae0b06dca6f0e4b95ee9076cb4028b634807945d7130a37d08e2f6c812321d3c7b9b9196c153b32a4653efd3e98b99a8cc49b7ac174619d63a74f86cc044e6d9556ec1756f43a328339e0259ca88389d559fdebc1c39ad3128408e4db0d31128419114442f0a4b7310299e7265e628d02580156337097fc727277780dfea1f31e0683a283836f3d0b2f2f7411b3140ac82c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c080a084802fa9028a9ffda1c04ef8ea5785c9a555f5e98ed9e066595dffeac84e2f11a04c1d0aeb6ff3363cea419045fd34954e5ec9a3333f9dc7bccf61c76ccf99d6b302f9023b01820120849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b3bc11602583ea1e8bce07d80cef2263c106604925949cfff4d34905370b679bcc0ee7e9ca63710a4e4f7c23dd3a253bdb7a947f63f3dca5c2bcfe2e92c76a2ef84ab0613cb4f41e87211826ec6c92e1b737a990b879c6647c1035cb76ea886cbdd0ab522f6aedfcbc363c0e3286f3994b4b479a42c313e1c18da47923608bd8a06ef85f770c00923b222c953cd33151eb48190d06044fd1d00edbb3a835da2699262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0bc321e25f8cb3642f452c856e381d2c812e52e8eaafa363ab076205c254a3467a015d3a1e7d44b818fe33fc73c2c683ff3ffd3fc24dbdc77863da5a304973832bf02f9021b01820242849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ac59f4f2995f6d588b90f13e540db16b42e3b076cd9adf48fdb8db2032b3cd072d09efe4697ab684e6f23c9b25256c888b6361ae132a870919c84b8f8159fd37658455ca6cc651f761854d33956ca947cfd79685afac2be6ae32a4ff488575c8dda2e154e736467bb805327a0d140d616f3ebb696172c0154db5c3258de5d90e5785172ed95a0482469ac7a2e8f9ca7401e3e0c0a071191f2fc3014def9f2d0a84e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a075138cfc203596c94dd977ed1908c4f3212850541c8e9a2aae75424705a16e50a062cc6a08137356a9b2fa5e395121ef0311992d3b00e02185b9f5728003806a9702f902390108849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1e0effbeaaf8ba98a40ac381d452166279b828ff7b90308ac4fa2ad457997ab82f505b7ddedbf41636e46b5d284b4c2d74e8144df42f051e57ecdd4d7f76c5fa43d9140cb52d57bbdca10e423ec665012fd616959311e3f04e7db9acff9a9677a339276aa6533a11ba146862d6531fc5eabf801db09512a75f91a040303b5ec89ec43392ff7d810d670be3bc309602b40ba4f105c85f591fc1e59193a6595ea88f4dac38243fcd265b82a58d8be3b0802d037e437f0b91942e1337467b6d2f2f11686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a060184085bdaa3a53f80bfb450e4db8440febe9e4db096d09364506a2e0ac332ea01f0340a8e64668e837b9773ce1fc587aa9e3eb51108575c63d07cd7797c4851602f902190151849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a8f95379893e1e544874c9af7352ab8d23a4a613d1328f0952f87aa00685e08385995dc38d81db6576107b1165a2c9ec54e45da5b959ffcfa0345c288bb826605e8e63c8a5b994d2a2f50bace168e2305472e12c8d87fbac40a913f00a235b4825d88d7fd2b74ccea50449ad3d5dd2baa127ad9c2a856eb50e33387d689fc2d4978f72220d7b7561b7edac075fc4b95c418f9cbba0c5cbfb7332333436cd3a6866c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0404336da3e63daba9642c41a2d26d955067746a9778520da4487359aa3b799e4a05a57c7292c534b7b8a222bc7052fd96a686ea79ca419b792c6a412aa853bfe0602f9021a018197849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ab21226e3c37482fe1f91559edff3baae40232e8bc83e135fbcbd176b3f009711139dea2b6989b59d211ba65a8211521e49cc87c2eaffa6910bc5e904291c43050ec2ecaa5b4decfc7e9a38ebec4a393a8f65201cec3eae362fba0443cc6ef3b892c41e6f679c59d57c7581eef926e696483ec17096750a2c3afa8ab7710596637d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a03bc093607adf861fedf9b1aca5a5fc7e9861d417fe4d78f59b949c3fb8d0db26a01d67d8ffd86d8815cab990f5815ed8a602e7f95ce434686b0ac98d989cc7bd6302f9023a0181f0849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bd2b7fb55765a501a4710680a4795eb8a01c24982b604bdab082ff5c02ea70897b7f73103a1adbb8f5bab0cc68cba212dd9813caf38481af11cb574b92a9ebe0898ace2a74d0456126d1ad502c2de95af6b8eab564e6559fe8e9f3d1de106d6297cfd35718e2c586ff9a3fee7bead78c979e84815e678e9f0833f8782898c03fc228b2bb2d8a4db141f93f7373bfeb3ada6c15c980958d2bf5fab8a12e721de77041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0150a03b0516b4d0ad0dec2741983111bb5fd62844695a486317e992f3af29754a06f849d344f59948770db4eada4792f4bad0446033d2a0426d88a0b1c0f10af1002f9027a010c849502f9008504dc3b2a04830c1b1e94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8801a6c5de98458000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000bdd093885259c841906b374ab78dc35c8df0f79421494be35cb4f8bcc7166f4c03eccf109238d61e8c7a56af992a4fec84a8cad548e0cb781e6725635d6ae72c46515dbb867f6bd1be0f59b01c41c07885ea470503a55947c6fd33acd464df0f7efcd1056603d54d8d6c2c5b6aa2cdabb33a647aa45f6124f591e9229c6bcf6fcf259d00079dcafd62312e89d4a187eef89e7a1524f6dd0a0f56cc4389285cec3f97f469d19b96fe2d0a5a376f0c301d6ad73e09f0cb231b8e651a000542f655a99b39153d3a57af32bc15af4a9877513615fd178d4fd13c7f10b37e9e7f19cad2fe2469d715f1ebb609042d032d33b50954dacf341820702a87e360fb2f411c3b82cc07d0dfe58215e406ad5d7f66358ed9f78cb30a738d01574d8a1eb14f63c0b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c080a0b6829e905230aca49215aa176ff1b92cc6d7ba84f1800fa829ed26cc3b81e905a034e9ab04fc705c9d1526e50db43c25f21fcee00bdb44c1c76e39952ffa9e3aad02f90239012e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b9d9738ac6120e944eac178a41fab5b252b344476c6fcca672ec855d87f1f42395ada051bd4e2d99dc71b21c0a2665805d75e2ad9204c59542b65ab8a1ca71aa0a131e56abc67a32704e81a98b22d65a6276d2f9a8a03e325a0023c567bdd37cfc4c24f71a01124f80059778e0bd53eca71672f38ad5fb96a5b7c204d7497eff44f248a910747a8e9eb820a3e41030a74a5c855ecf46f86dc3c7a59e806e65db57195d111bb22b6eae6d7433f44fd3ea2e2c4e9695bf742d66309e25cb78c1de5c71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a071f8a808f043b3656740549cdab840623112a13661dedeadca10bd4eb8b8491ca015a2ab03256714732ed844a83febc459a5a2311cabb8488fc606bbddc25c98e202f902390110849502f9008505584dc0fc830536d494f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b873c6568f12e8000b901c4666c00aa00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009ed7bc36201af9831e4109b2e6ce7e1493ce77aff577696c33d8ffff15f9dcf8842028f4240045105605fbb13aa15f5f947efcde8bdea0344e203e3398428b88e12db2312bad4e1522b05ee87376c7d0423514c01d596d9fc711bbda3820948b86095f09fac1750a1299afd7756276b3c80164724261e191a286646bb11e788c487ed5aea3957d1025ab4bc6e0ad39d08c682797bd722f06c23d4bb7b7c572243ae1a9cca5c30e523b9d83f240dd6a0998bdda129161fd1ad97116058c11333295ee729d1fbc588c4c7c999c4766ad87c8480fe07dc39896712ab7d96a8917c1ff5059536d5011814734520e7d1edbeee0f311d266f22cc441348ccee2ab53f080442ba98e5c54c5b145def40caf6cd211fc48522e8496808d921817efe59c2ddc001a0c4a3012666544725c75a2f7448c808c786c488d7ec9ee571422dbf08eab37253a0503cb918fb568a42ddf25f07566d7d82f1aa45c587e16752d92819149152324c02f9023b01820191849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bb934d3848746045f8eb77f7f2fa5a85a26fe5ee3b85c3f390dd6020bf9d880d2a9eeaf286a1b3c42854a642ed1b42d38f6d5c47d5e3ca5d9d1a83eef4edc5d01562f0398d9622d9871ca546c9bc04fd3e0284fbf1d3e94e23b25d33908a3763b22c21be9e8ec837d6b51c0241688250fdcb57bdd0a6c733f0d3e562e841affcfb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0fe4d1ac3270a64e8209aed0225c974b3f43cd355f5450b347e9ce74ff0d1c80fa04af09b96678fa3bde5ea22eff7976847b07d35b16ec7a9742c827ef03901328002f902390181c7849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5319209db727a8495daf464b1b815b6fc4123d4bc203777eb7f69000eca2ff6ce9b9dec06f69dfbd8e4d0fb4f2de4337ac0959520b4fc922594291cc6bd4d8f85488786bf24a5c5d65828f0e06053bbc0996b736deb973aeeb23128bb88a104cf2ec770485d1e71d2dc41e25ac2456b254fde26eee6dcf840088fcee5fe4436e10369baffb808ab0ec10de3cb51ade4c7287c1fbcc49a59aa8bf99ac5332ccab699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc0809fc9dbc9ebb3452f1532532188a80b0da1466fce6e55faef3fc7fc3e6859648ca0393acc8e46da3886c3d8ab6e3d93d98e107ab4ec8bf26b5c43c9f3ad0ed3bcd702f902390140849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bf68b9be164945a666fe9793d30608f8ee3b7682d0f682e315e79bb9bd3df1713b6fa8af49a6e6a212f713126b5d571d8bb1329725f4ad32b9cfe44c591c68edfa4ec0a46ff472ddbb9141ac68bf1c628d9e46b79e1c4d6127710ab0b4aae94f2ed0b910363c1cb92eb8bef7ab5a06b234ad901e11395512d06a6f436276a2fbbd52ddb6ab5cb993ec2fa6565b6acb4a3b2be53c89889879b842a760e955428b1713086cb817451ad27927dcb997da99d54ccef967bb535cbdc21c283e71b2810b3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0cda9c23e9d7f3de188c87c56244fb9c6a6eff4fd080eeca282136b978aff0e22a0232b40efe9022f87e91c1ebbbcdf55ab154f2d34370c3ff488efe7292290a2b002f901fa0181e7849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009351c5292de890b0facc5fa47f9938926f413801f89aa99b6f5f59a74efe64a0b59706ce824bdc1e4eac690978c3d7d3e1c3e634f0619768fbc5c74c3fb9ef9efae950b037ec9cc2893a196a0c810ad707bd7c9c8e864d9c2106f2df0fd83998299f9964d85040a17ce86723df848a5d64c69a5125c129da9f5b6a2660d8ffb24c2ddb7742c518e54f59cb8da916bfb3a1dc82106a1494f7342262c6011ef8bc7b4e8f769bd74b61d12bab3416ebc55d41ddd709a109e5844c90d0b781d913615f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a07a0cd0ee4cbb596d86e0afab290833e5a2fba1cb87602ee19403b1dfe5e1571ea04f7be63df987dbd8ff75ab5904428546b1f435e5d74d3bad73f0fca8c5aec26e02f9023b01820189849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfbf0ad04c6fccb6a29b0738584ed3e6ea1663a7bbe8aa00c508a63f8f279d49a727b3eada6738b17c9859fb57e105bda160625bd5cc7b2cb16f9190fd90a0d61b8c2d912ca68c1667a783dea6a8b6f663a83e7de3a5141c90624c3d78bb674c23c5048ea9e29bc7acdd607d3ad8b52911abcbc85db3d3fd35bf96610f634fea8c082e8d3b33f23ead586b0c03120973438415a1548113ccdba2873096f347582a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a07ac8fba7fa155cede49fd2363e1079770cd98f140376c293bcdec38a2409eeeda034aeed13a0268a6772d056d7f2b66ffea08a4d1aea9478d39090f0ee465a22cd02f902390122849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bccb917dab0143e188980116c7a05385e7ccf1eff9950d1b6776155473e896d8010627d473a34792965a02c9e7c4c5712bd1d0a44045ded264f8bfe09d661ed6bb68fa861bb27e3c3399c8619ee1a6f852d98931285684283e2aeefe7797b2eaf0e3f9ff8fe75329deb87f104175beb3ed4b66bc012c401d52bb97b16679f9872b5519ca5ce8fdb54347e86127447cd1be7aed6e72f53e763ea703a78c9fa1df6ad846c4b85e9b0b88abfec7daf9b81d6d43d2fddd1fb323e1d8ab86d248a15a70a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a01c4900962deec1aec84926817e8f8493ab6164de562bfb576584b538e66c3a31a0604dc7da2bb010db10bd1175889c413aa02774c3c0cf463993f92699d94c9cdb02f902390156849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfc90e0f154815f8a29b179473b6d316b83162c600e0bcf697c8ae0b007705a204be1af990f7da72d6d4c11d2120eaae40397087c5b38e9dadedd8eb81c2727b69012c5df556034a8971588cb55eed2a1ee7a2f7e37e6692714f6d1e69b393893666dce4fcf2dce3d119683aadeb1ea17923ba5b074391c1491f124cadfe4b30ca443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0948fd52b3bb4082ed696bb68b36743496939723fdfca99da14fa78a37eb5c1c3a02f2dc0198ccaff741861c4ed494de5137f37f1c0dccf80c9dc0086bfb971014a02f902190118849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a8f8c55c60f1465e17cf0dac288a4ddee932c230befba393a6674d981c7fd809f920f8327edcf720fcd5044e0d802f6d5b0195deb3c23fe4c56eb007c601c074a61feec1dfa208108f700f4ceea1a55701ad87f2f8acb3a2d3d79143df7b1539bff5788af8ab01098f2cc2e8b1892fa4d883cfb0933cdaf370293de3706cda345ba63c0fdcdf7e22d3e03dd751a495249339c2dababf333a1bc91f55cf6fe5c84f2b47b4054697932573645ed95fecb78e83ad418cbe402f3f956712ce9a9e4f7129106175d04ef2a2bc0df351e0cb830acfd4971f7d420775d642efed0aca938ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0a5982f188c00f6fe238ca124447787cec83059eb5dc457d67034b85a0680cefca05306e8cef3b1e361b586c12d5a5d9b7b0d437f47a1c16419c4f9f99e1c37429402f901fb01820150849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009d81502470ff9c963887cb3ccea7b5acfad742c83e857c7e311063e7da391d9bebdab5e17a0b1bf73cdd6810a02572433be3297e1cca118d50b0578edcf8750864d074d7c288e87c7c83ac94218e37c893dfb593f959781d38ab7da5c144a1b24379c68415c9beac8697de0b6e09ecdeb979ed4dd950db7cea20f0ec251cc3c7fed220f04e94ab022c296ae9584d24eb79a4ba3b814090b8de476fac2b8526ce49e0e6d9f8276753500114d8a5af3b36b875e5bc835dfa3671f71155bf7574694f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a051ddf4512ca6ac4eefeeb4371fc6e22ecaaae0568bf5299d46234b271f5cf6e9a01e76ee6ff7cbf9955d26ec76f3c11f13c40db49f3b8db6f7888446abdb4e643502f870011c849502f9008504dc3b2a048305201494f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a027402e479c09139aa643cf629030dca513341df7f8aed926a4671a94305be9e3a01d0efe795b4f363453a17010972b1dc989af4595253f47764312ec92306ae65402f9023a0181c2849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5ea3874fe4f38f62b587b6a387ba3ac7e5a8459c1eef220a048b114fe536a3fb7ecd4f21d4d14a5495dadb3013699abc4b23cc362804c9b0768006aca4fbabd2c9987d969ed01cd8dd7208d69278752fae3eba6623cf0c331b9bdea857ade7ffc60dee45982f80139634eb48ef29ed510cef4194d126df3067e2e592b00ef4b381187fe6585a6e1ed377fa861ed2d0615b0fbe526edcdc9bed9e83a78efbf9664f3c100a34a9d04f39f150dc0c7a2479e84bc1da4d18e2bf86793f4743a11ed3990b76c3caa38cc2e3686d9b8a1e17a60d7bcb6b1cdbcbe95aacb3a51c6ee097ac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0a2d046417875b58e1e8e4018f746e2da1ccaa98ce6ff56a487f0ab617efd444fa04133041d67cd4e556610d0bf0ad31ec857084fdb332d942275edca83bc0b6d8c02f9023b018201d0849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b736dbcfa20cdb2287a9808ec7657b2829ba11837922d92cc4d9cf449ad132d52257a4b7defb1f68025d64911aba996240913d51dd59424fff1a9b5ff940fccaf8668f21a2b234d57bbba47e5900dff795a5171ad8b749533061769f9c69115d47eebf675830bd5eef572fdcc127dea194ac442b769290c529437c64309b792d220995c00279da55b3a30d1c36e178eabd25d8d11a07f7712acf5ff67bf8286d56efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0494d8fdb50d1d6db5c0106668bd8b3c679803c8022b8b5397fbfb535d88751e2a04f72f64424dba8d610462aebef88df50f53be5f0a1ba2b53ab7e71317807abd302f90239014e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bf6c4ae558ea247e2d39c65d9504d2be857558c9e65a60d85e0669d3eabfb7fb6d6a068bc71fd59a0cc303c5361a41df57cd13288543fdcef1a77afeb8efb3b8e43d9140cb52d57bbdca10e423ec665012fd616959311e3f04e7db9acff9a9677a339276aa6533a11ba146862d6531fc5eabf801db09512a75f91a040303b5ec89ec43392ff7d810d670be3bc309602b40ba4f105c85f591fc1e59193a6595ea88f4dac38243fcd265b82a58d8be3b0802d037e437f0b91942e1337467b6d2f2f11686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a008fdd77e6a8de0efa8572e06c22b4d752bdfa1d570ceddecd2e3b04f75740402a04d4b87f2fbd7d9d82a9e6643f312bb442b021b242f4196414104c1221c3a862302f9027a0127849502f9008505584dc0fc831ad24c94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8804b7ec32d7a20000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b524e6ac95d0f43dc896b4d4f29f6ff526a499658c0cf540957ae214378a346de08f74729278eb26028e28268641bbbe48d5a9f972ce14b57ceccd9853dfaddf8e75340e2d70f4554b36e06e8005e03e95923b326b6637618c83855fecb80f93b3e765426b5d9fec869098c107879201cfcf9b46b720c5e85a5900ca1cf34a08fd2b863cd9579699eb73ffe847b0b0ee2413ad8c50cf5aeb80ba2bc8d6ac6de5824f2c26fb0812e410cbe97f2a38f9ccdf5ab072620536597f08b6359d4eb25200e6c4248e722c42676cad5ee5a7c6bbcb02e787ea071ed40a7fa79765f9586ad39a965869d76c3a71d2c74451314f38fe0dfbaf382298368bccd25fec916d3c2a9926dfba95a258d32f0f4201888a0c5a15dc50be540b6d7a523548b99777c040b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a0862aa3a25af69faa7f8aaf4fca6b5649d326616ffa20063e63d5db19d7c31826a06d1354c38f2ae3ad87dd2ce1299ddd7f5b0dac27d9e1b97e2fdb71b7a9071ad302f9023a0181bd849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b6450ed43c962b62f21882083dc083ea34129b53516895cc878e9abb8f4c7f995726803b18ca30fae8c6d31f025acbc2ac026907d60b7bbd183337fe4c58a2a0309671924bd7d9303eb8d90875a726aa2335f1a0f5100e828c9fbd284ee480fe39ba171c6b404809e29f17bbded432e8fa50ed36a8081333b1061d9de19be10aeb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a03b614bb2ab9d417dda8626399e8383e0d2ddcaf1c1e1508d4c8bbb6f90a35659a02977592eb5d9e77e00a80e74cf0947c71489d1fe0808aec5e9f4be49bfd2c57902f902390129849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b509c479da1cfdf235cae03cb0b0d3f65eb4f0b81e9d5f9bf176e89ddfe0205fb71afccf5859da1ec66ed180067d01b1bff1b37177ca54c76134ef14358c3b2b20e7d13802b59b72ff9369a97c88269d8dce4974e7410ee6abc7b6ed84f3d711c84325a1d81ae2f3fd2bdf0229e3a0dd50c1d144af422ec4a8d45fff39dd25e503bb3c4fe085120402b6902790224fd459e47fa185d821e298e45366306a5b3bc4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0a411715b0310d63c2e2015955c50beddd02bb63a71bd95cb381bddcb42103eeaa03822d93e3a67994ce143e1acdffa1692610f2c497c8b5fd891d93e8f796e5a9002f902790108849502f9008505584dc0fc8307942c94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b87b5303ad38b8000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b3fc4ec8ed26a4da806635e13034c02e58eded96abb996b9c378bbd12dfe85885b899b30633e5a59c79eaec9be41238fd5938fedb5b26643c5a08504f3142d6511f65c1cda86ab20ac16bff32bfc16a23bd568ded29eea98811c181003a7183b1ebf64a9cc708236f9ffbb791461ca7d77ffa935b24505182f93c7e67f8713d70f6e010e4b636435cfd1e5b081a8db33357d2961259e7cca951e9cdaf9f907c98ebfce73347b9c0d0ed797e0607094760566e323866eed59825e458446db87d96807945d7130a37d08e2f6c812321d3c7b9b9196c153b32a4653efd3e98b99a8cc49b7ac174619d63a74f86cc044e6d9556ec1756f43a328339e0259ca88389d559fdebc1c39ad3128408e4db0d31128419114442f0a4b7310299e7265e628d02580156337097fc727277780dfea1f31e0683a283836f3d0b2f2f7411b3140ac82c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a0acda7974f1255551b94a6edbb072026a139576fb988acb7f76c1ff62b4229933a02c244fc0bdc72164dfe43293d23e3908689dd3bf54af36936282a44fe6177f4302f901f9015a849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009bab7ed09844354aaf2b5ecefc5b3fed83b28aabdab55b1dad2b333719b0753f4bbb4fc83a32450c0927d7845a17aee2da2b07a8d2fe5a8695e4615ddcf64202dfd536448de02a1fee53b81a5318442e9f21ed98a1618941e4ae9a5136a70d0e33df3d1bbaeba880bb2bc076be02a1f7fa95773e25bad6614a5e98eab106fe498d33d04bcfa9df33b6b428b996bb338b17cf32cca2a3da2fc4b93b0db7860ddffb4e8f769bd74b61d12bab3416ebc55d41ddd709a109e5844c90d0b781d913615f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0eb134968f04e38acda7583516443b4b3ce49d5fc59461ffda78ce403c4bbfbf7a0438230355f83ca37c2bb20dd210a7c879f48b2d934110c88bb46c8683efe0f9502f902390156849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0be536cca05d0ec8beea5909b8db5534336be4b122f8ae2d7dee8ca651eb86afb6fa8af49a6e6a212f713126b5d571d8bb1329725f4ad32b9cfe44c591c68edfa4ec0a46ff472ddbb9141ac68bf1c628d9e46b79e1c4d6127710ab0b4aae94f2ed0b910363c1cb92eb8bef7ab5a06b234ad901e11395512d06a6f436276a2fbbd52ddb6ab5cb993ec2fa6565b6acb4a3b2be53c89889879b842a760e955428b1713086cb817451ad27927dcb997da99d54ccef967bb535cbdc21c283e71b2810b3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a026e25f11ea30f8a5c29eb876eeac9627f4a5778341a1205bc0f652850d6cec42a008abe8404931508e96ee78d5c366a2eccbfff587afa41470eb1c7dd72110bd9602f9023a018188849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b59761a3fc7b062d7e4d0bb51ce3851ce409009fe6f3e76f00783444bd32d9fc1b63d4d4ae6f08bc5d0c6101a7f3e2f56cadebc9ad4e5922c98389e43a1b93cc3266492525f26594a7c067520bbabf46f290b737fab684e8225ae40285821205eeb4fe1a9535342be73740c26220f0516ad12b3493db131b40a035c35e1cd14867bdd4f986da04a30804d517d664066aba62bbdb168a98dd71cd421edac215be7699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a05f1de1956f80dab0d9a895ef0470bd5a39c9c72a69b8abe82fbf6514a8490ab5a070b548005ad200d8d01b845ae2fedf6a7065f7e50a5b785b8cab66212cfbe22702f902390172849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bf778df556336aa96bf90cd0bcc1db5b782067c734acdbb00baefe0be0c4436dadb89038ed2e4be4ff0a7bd5b26a8d6b9442e2f0cea03340a59ab725fda6a0002887ff3b26b70081e49de2b25e88b46087ecc29920eca9d892993071629981a113c0305a9a15f0a9def183e235512e7809d095ba5b9ba9f206885ca77a34e9a139c145f1a961634e373c2ac40dd183321dd17dbcb7435a0f7ba5c37f58120731c368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0edfc55aa8960aa6b3b74a27c7d75eadbb8fafce5f2ea32f99a22195dc69911a6a05bd8278b7f3de581348aceb5b80f6913dbf6076c1c3fb15b0c727592eccb202e02f9021b01820166849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000adee82f3eca686a45deb0428e255230b88713436d28f81bc58cc564de8024f7f8a4afb97385d60be26fb20508b005b79f196600e207dd8020fee2b3e4800cd9d95aa826e3c73d97b88e2619248158f4a463d2464bb5fe08f8d6b8f1f7e3e42b2e9467c640a6e37406a471a0963268dad95dd69341812624eff05606d06e436423785172ed95a0482469ac7a2e8f9ca7401e3e0c0a071191f2fc3014def9f2d0a84e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a06fc35001132e137c9fdb90a709d08a3cfad8a9443c857d4c1eccae93c21a7a0da07b29f07ada192801b1d8657cea7c2a5d2cd90dd5427a206171c203f78f8335bd02f902390140849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1311c79574584689df4b57ec9745e80a6febfc9ff45e46fd69737674759e7b66cbea32e7ceec669130e2b000490e0c91a766d3f479ae3b93aedb7360b405a9178189793f50123fdf85a781a87903dc36d53afb3eeb845913b10b17a98f3db77db18d55a2a32b5da789825058e61e42ce64907345e80c9f43440992add1ca403fc2430025cf245b31b5c66f6eda28c3ea590926d25dfa31ae73b28855b3df709ce488754043fd18689e005d87631fdc5d396ed24155d873270a8c3afb2177c2487710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a07f629eecbbc38fe541fa28c7e531f4d463d3c7bea79e50c80bc171e17c9144daa0099090aa3eaabff4a0fde909502146b9a1599dba4fcddc82c7d41fd8707582bd02f9023a010c849502f9008505584dc0fc83261b0e94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b880713e24c43730000b901c4666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000098f171a2febf7c27179dcc6cf3fe755d18455bcad44bbe255826427abf25a2c15a68a6d65c9a80a9e55912568c7361a0789af1a8614b2ec305d26598939d3d0ffca00a0336a48f0e9cfb42452bd8e4883bbf35daff3eacbf183f457d435a9b440881b41ac98715932fed6bb536e9fecce2b6fd626f2eb40b528f87877c61ea4f187ed5aea3957d1025ab4bc6e0ad39d08c682797bd722f06c23d4bb7b7c572243ae1a9cca5c30e523b9d83f240dd6a0998bdda129161fd1ad97116058c11333295ee729d1fbc588c4c7c999c4766ad87c8480fe07dc39896712ab7d96a8917c1ff5059536d5011814734520e7d1edbeee0f311d266f22cc441348ccee2ab53f080442ba98e5c54c5b145def40caf6cd211fc48522e8496808d921817efe59c2ddc001a050fb1adb94abeca0024eb9ba778f1d916403cf2b6365ed2c3c960a02e2c52e77a05588ca7c5664e33c92695919fb514471cbf408bd58384262f1ab98adf0504c8102f901f90101849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b90184d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000923f490eb23ddd66f6862187a400e492e7bba2ff805244aefd7b8e3e56784c34125af9e96b06640036ddd78c1e6d0ec30f3a763623ac343ea983b20871627af317ae4e6b3a1bb53f784c63e3a58c743e1f18732bedfe56436c1e38885759ba26642bdba744a1341265247401d61b2042a3bc73c956865e4d79d5dc88ad3d4bc40d33d04bcfa9df33b6b428b996bb338b17cf32cca2a3da2fc4b93b0db7860ddffb4e8f769bd74b61d12bab3416ebc55d41ddd709a109e5844c90d0b781d913615f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a049472a60cc620cd7957b1a7e94985d16bec210815e6c94d9b7cd944b71e6bfcfa04a925123d4b79586b61cc5bbd1a8c520224f8217c5992fd6145da251e603185602f8700104849502f9008505584dc0fc8307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a0d08259f90ed8b8699e39371303d62780104b2ab36186619d8c99b04e1c841b44a0694e444813402c3b1e23f1b9e3c9740741436c774e5f635a30a521339902b3bd02f902390118849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc52ade6b9ebf3f6ad99e8d215306e1c1f532e0322e4eb0cd5538305c024641dd4482189726d71797dc557529bc10b64e528d3ec056810223c968ba2e13c982e8afd03d070cef8f63156e3eac534f80d289a839519b83dee0cff57b1de6cfc17eb8585a5c614171d8d03e5e60e70694ef9790951848c59c2a758c9853c82996c60f157a3d5a8bafe568e0c270de02c72665df09162a5e9547ccac29b07e651a10cba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a00f1c773eeafdfa5972430c97f2e87e83f34cf99ce7b8c731467c15911593b2dfa020e575db1ca446556ca5d043aabab6e1ce2b9d20213fa4feabc1c8886a3ec6ee02f8700101849502f9008505584dc0fc8307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a0e333f05c7bf6e3b07395d349f3c715b2323f6c14b1efb04d24c665ea10cc7cfca02413561e3cba534f48194c8fa21f96ac0c7698f1ec5277c3539b5b90ca2a7ee502f901f9014c849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b90184d2cab0560000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000095967323982be49005b549d1db469f9b7f7f119a804ab53e6b8cfa32484ad604cea070676e81dbecfae1b4b1b57206e52e05df07fe86ad9c0e15cf2d21b4810c1b8c4aad4c362f530de98c8a9526ab71fed8ae6275787d36194afc71c9b119b2898866891ba30af5abbf95d3d9260bd7855e4c709dbbd89aa411cc91ccf95840a26a7da6f0e1172722ee464d05c66168d366100b630a9d7cd0476a6cfac2dd7d0f91aa3f0d43e208a12792ddcafece2badc6ceba90ece955620c9da8009eef264f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a02238008da3f678e80737a79f64868be874c25bc8c3727c158ec443901f58573ba002b22c22b3f57e84b5cde2afc320166749b0006fdd2d3f55fd06252a17afa1d202f902390151849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b4ed34f87037caff8506338cb74a5f49c70e74a51639a928ac147877ca27f3e64d4c5c0a26c55742d2f032abddbc0279611b8e7ccf187f092394bb11119105873be206a9ea50adff84152710d16ee2d0d89c690b6fbe4ce3638addf8988bfc8bf778cf8aa1828d5886bc232489c6bccb860d46a5104d76fefc2d676960131e50fb1e942ea83388a599e38673067c354f8610912cea7ce976c11d48ca902a8212cde5344c7ec1b2dcf94654052e24c8a40a725a9eab591376c8142f4dc6e5e7874286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0f1fb52f9fba5c236f495b00a4b1c2b9ef8445cb4e7698f98a0f26befd0a6a4a9a01a036f90da4836edd3d3268e74a26d6f2e365dbb98c7524d6bda3efd056ccb3f02f9023a018186849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bf248ce01cfa487aa5848e5ac064e27a9d0203e54164b21c39070cd8d82a382848340e1645a4d5e2e8fae3b55bb7779a6b8ea7e2348ea2ac567eba50d46a282e17d98660ee87c3eb44feed09be1f249cc11b94e2a31f0652f08d6b068be37e65f00f893cc4f4ed5b32f6bfad609c8f060048194d408d07223fa4b17ba45d4e6b76f8ee6aca360d4cc0b79651c7c4294544080e7c14c28b91bdf155de019bdec5663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a07a49e3b934dc406d33377b002f52e12e40346cf81db66e2ed7d5a4dfa9eafad9a06215be7d0fd5a4a8c55c68aad1bd29f82cd85e45e077d1231c86b38ca35082b702f90239011b849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b839b9ebd68fdd6e736780b1774867958d8d4d2eac0a207cc267e729d59976c8bc28040eb79a7c307f694cfdb4661dbd401b6b9a462e3ae36c6a198309b08cd4b9012a7f8e1bd49fda7f0f6c51d886769e22a185e2eb931f7abc6957aae2c796338034969baf1d1e8c9c022fffc490fb3802d1d1ea679f5f37649c1c224097616ad65cd20966275313620654e2c8f63b64a7b93a9e40ef5c911085942e32b31b8e488754043fd18689e005d87631fdc5d396ed24155d873270a8c3afb2177c2487710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0a28e65feb1183956f468144553fee27e3f0f57a3c742884e81bd4bbdab894184a01d9dded94c9e9769e9bcca78da0fbb862eae6b07d52a7e1133d86a4178bf4e8d02f9021b01820128849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a9a509b4f18edde75a26e0a8d7f9094c20dc29c12676068ab89bd95b12ed9f23e524bb2fe6dab20647015103bc58f58d5df9086495531f44b9cb12f5173b59b4fcf355d0cc68998dec60b8edb06d3c0ffac9c02616aa3fb323b39643a927960b7e8e6a9ec934b7159150528e2eaf915105561956da23ade19970cebf849b88211f0c2dafa306040fa2e8a7543fb78dc27bef039464182c5d4ef0f4932f43d03826c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0f5a72316e5706fb06bde178c0d5712e3bef8101a6b1b5a5f239f2f854fb0311ca06f3c0d6f988422bece5d273268309780b324475398e94c50523f0d190cc7a68902f902190136849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ae959ecee197642ea7d471c5ac06803dec1e3fe484506d7acf10edba51764146870d2c8c5930027dba224208ebd8187d460213302b63a1ad36bf4305744d5bfb4fe1aecf0628fe674ae6fe488996ea79cfabc606fb2ac788bf8a76ddbd479935172140766b6043d87467ef1106f42e726a9705a4f0c3448ded17ab9c2e090733523975ade81aeb4467ee3d0584113a70343f675b0adc969dca2878343fe459c854e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a092750f96c4ce786f36e871b18d0fa06c6e025ecda9af809da6e9c43bef9457c8a06d52576daf3a8b7c8cb74cde25dab367238f9a93cbe2c63c5552cbb3c1d1d3d502f901fb01820195849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009896f2f3a7674dcaff25ce15c5a6ede8afd0346fb77359c897e97f9e162743013ef248224f56d6fb586a53cc88c48937c4c7d1c99178795ec7a2a33270d6a2708ab096e3484ee818fdc65b9622c337185264097284b3ce8fa4ded216075d017b2145d0284bbb5ff2ece72fdd4f2b20d42592616380af3ff67a5447f3efea78e537aff8cfbd92504b0719548d30b0853109a0defcf494e1d8995abfef7a4f90e269e0e6d9f8276753500114d8a5af3b36b875e5bc835dfa3671f71155bf7574694f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0d8c845cd0cecddd93cad2f277903912291b62b47efaccf1ec973e1910eb35fe7a00f6dbf31efce1ab33723dc9f5896e0d027cd41843918b06a570ae095ff01941802f902390146849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b8661e6c57cf13a2b0f83b6f9d7bdde779f5bb071016d5339cdf51f83000f57e29fd3d94b10a0ff4867faa4003b189a34c85d92c35e28c63ef271ed42c8f679058c96706067d9a7b04ce7142497de5963519f45cf93c9fc25fd27444aefbdf0d9dd0ab522f6aedfcbc363c0e3286f3994b4b479a42c313e1c18da47923608bd8a06ef85f770c00923b222c953cd33151eb48190d06044fd1d00edbb3a835da2699262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a08a29b1df1b1f4d47f437ba897062bed50dbfb8713ac54981869e67f34830caf3a034ffdf248d1cf4c2d549dcbc12a540041a5e626b581a4119ab07faa475384f2902f90219010a849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000aed1526130c37fe1d9386713b7b114d4150d2a040342512ec9ccfbed80b9bd7a0849c7ae8ce68c47c684e8620b0337ca930c0701f18c3f11ed4ab61d2e2ef3ed20919477d74c1d7550eeff3a99ff35bd10fd7765d7adabf1cbaff7d82c75a9ff0931124e7b4fb045211ec89ee9447aeccbfd85801214fc4467ff39d2fe586259cc228998f83c3b0943263f8ae8c204a222eadf53e2e3e2cedbce275343bca06ada448110f2091d7638e548db2fb79d2a7a3856581d873bbacda0c0517f9390bb1edbf73ba52d75dafc0dba73303b8cc428ca1329021edc3675720c12464219f17ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a09e8984485d8904be29cecf30966a2c7872937a76588afd5ea460680b49dae733a07250bff4edc0f516246ecdbcee0814002844a20cc47df7bca9286cca6974a1d302f90239017b849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b4bc96c18e934d3b37b5cf20a2aa0f5cd624b76d48f3fcc1775b749ee4cf89f8861f64cd100cc7daae1b2de215970a40cdfa7ac34cf7ceff6220d1b34530daa8ba28b26e71464a3fbf0ca71a9d56c70e2ca6e230c5262e48154ba316f9449de8700b3cb6f3a3085d5ebdd919fb066b157039c7007696fbd595624218ab6b8559fe0b04471e38d438279becf0fc1aab239877a95f1629f992c2dc289eac8dc4760a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0b41a2768fb045f150c391d42de3dbbebe78806caf1fbda684d4c35417f25c32ba03c93fb91a8851f17397497c45eecb2c5afa8b4623378dc53206f696ff68a1c1c02f9023b01820154849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b05e5958385315e92e380504f4917331ac6728b96ecb59bccfeaea853fdedbeb009dc977e1819e9ad7b51a017ec0920a9e2762990705d31ffa0f1a534358ff00ae89ae0bcdcf8e426103f8b533c61f518bab94c603cf5405f48fbbe733cb7c3e3ffda629977354c31eec661b208d7ae2dfe36a7eed146ab62f714fa519ac2fa9f20995c00279da55b3a30d1c36e178eabd25d8d11a07f7712acf5ff67bf8286d56efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a072ee954f1fbd5c9eec75d5439e9e26b244a5c2d1e6602f0437bab551383dec24a021784554b0572865a93b1ce36ce7f0036e6e84e5196134fb06ff9794a85865ea02f9023a018195849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b188382a0f8b8a26665ee117e2068f7a4b8e4db005232fa66debfdbedea2e91f3013bb8eb3e5a9968f8ac6119e72e8241b900a9b19f621ddc2a185b85cecf462d7081e2de01d1009b4f9f059e4962e25a24345d826781ff49fb66bb37c92408a5cc27ab0f6a33b96dafacd306bd846cedb44ed82f83c88e936c0e0b57c9ac6e4676ae7090546b9b900712031e17f61e88b2e890601fca5cf23e4f2cca9a8bff62a4a8f898ead64765bb8edb3ec5b20f1b11b97dda9335cffd26f8bdcb63b46bb50a26c2c2ea9612e28d0aa9296f80336adcd076167b3fc1705448942052a9852eac5912b920cfe151d881ecdfbd81e708a6fc74fa4215f00e3511a5ac0f229adafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0a88da5c2f4f5fdd154dbee033eaa526ca777c2d117af3aae5c90407fa1cc839ca02322b6c900df5573e528c3fee52bfb4cb3568fb3abcf80733e7a98d5d7f5e75c02f902390168849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0d08828da3b451ff48506bec0aa56083b7ae8d434df628582028e91b73e7f34a5b85bd0a8dc95e34b5a4d2dc272933cfa8a678202754716b9eca267e9ebb19604c8d914b9a598c3dc8e3319ed0cd63cb12dd82ec33e72d493a5b12ac4e9f18817eebf675830bd5eef572fdcc127dea194ac442b769290c529437c64309b792d220995c00279da55b3a30d1c36e178eabd25d8d11a07f7712acf5ff67bf8286d56efbea7d2d2578bae5493b49fb41af08c91de3e9cf196bf50d3b51a49e36aefe0815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0eb7e3f0ebf5d62518954a69228f3037a798923642e87b11bd53a4da98f691e3ba052b8cd41feecc347f2c20889faf4b5cc897e92233c1dc5e891335d33d0d917dd02f9021a018195849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ad207893211540d533608d502859464056f84adba1d44014fd11b4b6e649a40c4f642adfdfbcaac1ff4cc2cf39c7764cbba46f08a95dfab60e494cccdb1f63fdc8951400eaf551dda3ba916dccba7742f8634a39335b7db03a5c8988508c33c6ca4b09b3f6f27ceb5ccd8b90b73ce2ef6199907132ee96c2e518ff9a3f14ff0e4e873e09ef631ec01f39cef9854e7433f9774f38b3edcf74463e36a28b36d6dc4cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a023661fdfe8d72ef21339e31eefc462086922e37af9384267d45162cc45642985a0427ef02a363a2585e59a42cf9069bf0b26b5c07719245d615a99e76a3b93bbf402f9023a01819f849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1edd981d61540d1900849710dce55ef7ad964107f1ea19cea3fa844d99270044a79e5653eefc8300c47758bdc1a74f1a8b1fbbf93b282ec0c8ed62331e6b91f46f1b1c964c50f29a2310a3864059850ae92f1344be7b4f6d7c62e882a29d822e6d4d9af745f27cf211a19fa48b97871fb09b98ecdc5bb9c1f417a44549b5aa973888260316e2ba5dd17da490f9c83b1a213b971b71d6f7032c601e799cf3c3e1a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0df163ad87d1b58587e5944a3f2733e7210b6c791d5d17467df92a4f1c1c5d3f0a06d801bbae254dc33fa4eeca7cfbeefa31ee436f6826d194abcaacc7313adb51c02f9021b018205bb849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a518bb2d6072ff208e712c84d1c296739e7f794118db976a9e1d425b63419c6c165d46e8fe50622b8da5171e53087706f2dae4bebc9d268fb3538527c9669e3eb90247a5d13af88db5834bcc5393da74a52b72162ba8e1d66fbf5bd86b97b4836da2e154e736467bb805327a0d140d616f3ebb696172c0154db5c3258de5d90e5785172ed95a0482469ac7a2e8f9ca7401e3e0c0a071191f2fc3014def9f2d0a84e8d984320f8a4ac96d98628cad8909074f97538c2fbaed8c12d0a045e7424af04d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a06d7a4c596869ae9ce3a4cd8de61b2228626b9156c53049f3123c5b3261516c01a07ca600df6570d17cf87cb90ab6a97e03e3af94653b73db92857e5f03aff0226302f9021a0181c4849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a4dd43a382bd01e7698278e33a3c6d2c3d41b4cf41c2084725f510b58b3f7b937b0bf5d1eca343426b7e040243d921e7a1e825c6a683cb7898bfc0222aadc8356cfc1efdcdd0a0c19a30dd4a7d006be80f6d35fa5366c85c786359fb59355b014d0fd9cf2238c6694f8c18daf5cb0b5a49d03c42f312bded35f8e663d340669fc7d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a02d2f2a1cbd1156d1a312410e423d96e472f62cfaf7820c5bcb2a6e50e848c2cba01998d1a97ca259e91c4ccad70289427a6c9b44ab657fd6259d45b801918155d402f9023b01820297849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b6047f226e7be513c4cd5a36d9dd1f2d9616734f386bbd4d11528d62dc058268c2de506fbf7cfcf4061218f14d6de2fccc409aecdf4a627d96adac11060187ecd9e1869e7f0bb66f8dd85cf4cc76296db012afa883787b984d97a5a7b96c63669b5c0d57981397e009f796ac78aa78f15703ccf1873c532fe560eadc8ddeda6de886dbe84053c9654ee909e5116cd040a38429581726d6d2334965bad30c3a657de5344c7ec1b2dcf94654052e24c8a40a725a9eab591376c8142f4dc6e5e7874286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0cd6785a6c3493515b0cce9979deb195ba332289d40e7569d6af7f7581af3b52ea04ced26a87e5b56c9d0f53bea69303c4030b02ca28cb2f71c7edabb1a58c5316102f902390161849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b6aeb584d45a4f012d704a7caee8e613ebecb8a097fc9a49af8a2bf5f4e5b5d961f55e799e95ac031a40a304009e31de78a36d8cf452508634abe7588c369c118cb382107536f84e832ca9e4ff206d001d0b2b1ed702bd7f4c29f0aab421b771f8ba0acf0e93da5d52b1b6f30b27ac7664995047d613e83565c05305f2cdce8007bdd4f986da04a30804d517d664066aba62bbdb168a98dd71cd421edac215be7699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0c7dd85766e71faf1aba9438dccc3da35d039193b0fff6a514f9dc8e13ac4ca36a040ffd2637724b944c7a1dd9f478f0ec5ce527fa68ce8336d7370b78b0bc606e702f902390127849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bcd1ee71aabef82c553d2ac785ca7de18f10dbab36e30ac5ee98f344d2754e4d115c0667cf0b5ad79923cda38a2ba9dcda84b34eff778bb3d389149a8cba5e1981d6521e32daf5ae7924960a4ffd182371c3d38f508eb32a58613e3c7fbfee374c8bdb39ad6528fdd7ad571f514664b9f2dab9990abed698d88ee18f9799c681232c3f251efba7755180cc77acc723082ba28ae0b8b8ebce06803c6af580fcf3870f42c97662bb0ff7f0ecd7778cc5f0e1d56b8231ca6676e0d6e65f3c9525fbf66c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a018fb03af5b92221d6764c2be7528635c9603b262bb7a867a7a2bf116a53eb50fa0608ecbdee61129bccbfd0fb2afc28da771204c2d46ee1234ec70654979a515db02f90279010e849502f9008505584dc0fc830661dd94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8778cad1e25d0000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000bdfaa115f7a1e4cd2ef96829f5f86bae1718722e834dc65e92a33d366ae217d343b1976018f89407c59f519bf0ac219ffd8dce400eaab1410f982fcf297a26dd947daa3dc28186504ec4753e556518f0788ccd68bf4f8d5d3de3e0dc455f82f9383d8bb2f45e6a685545cf0164a5a14d7e95ae5f34e87bfb20784e8586e8fd300f16c0f0d955335c82a267dd53ae3de98131a2549b7c01b83efbfd839fcbe7bdddf33050c843ba4e57566a12338065848ca80675399ae46265650d472bd5ee3e6c10096c71c7e35f393ef615a134ee1ee5a18adc0fbd607ea21cd7e98a493beb00bd4d2fdc72bde8f269e91b18b74fc4dbcc35f4b182127e8bfc1fd7dd5678140f834887345df96f4d60b5070d09e1a56b26f122a3ae81b1c3c2a06b6a29f64ff580156337097fc727277780dfea1f31e0683a283836f3d0b2f2f7411b3140ac82c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a07a4063f1efaa189828c9f3ffb6b70023e557c283300c0cf5be4f3c3eb6efeeffa0790e0491e2cf968ad813e6053b2a71645d57268d319b624b7f4fbab9dd8bfd3702f902390156849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be36a12959c5046b9c240180cd56d6d636ec93be85fcd5f9e34c42ced450114cd01e90a628ec97d4193d8f11f75ed1356179d660af0b336f19c09ffea003329ff8f06948d9a3cd6c79ac5e38b4610a9a17c0662f02223defe332a483155a68f0baf135b77dee36d2d1080cc1fbb2750151cb9c68c226f7a142efbd04241071b55a443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0c96a61cec59ff3e46d1569dd338cb3ddace483e446db6d5dbc9d367bb550471ca0025400ece2dcf0cbf03b0604d7b4846f3e7778019d8d297620cd23e279fc9c9102f9023b018201d5849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000beff1576922656b11e3cd4d5eccf7d925112698b84a2d3ae0bb208e1496112c0bc84b88390c160f9ed43ece70a743f5b27286a4a2b78ec486cb5d6c3380ff841b813af59b6419dd7b38a65e138e25281630740551ec83fe7f967bcc78dcd5b59d93900c311c2a6c6186367f2cbef3f47e86101f96cb9acbbb1f40f84db9ca84a8b06980f95496918ab0dd23927efec5665be9ddc0bf707cd004a6319845cafacf9ca69f0224e93fb3d36e599453e3cc8205528fec829600637c27b4c7093ec8600815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a082bb4e35ebd37fc05014a8104003f63b8163a1e749825561f2533722b730227fa0320bca9df318fa3148648ae2d865c5c2bf36e491dfdca6503f677101054761fb02f902390120849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc591bdb99854bffa605089251aa11f1580606bc948a4619a0bb1d7f416c09fd6b1f365eabbb5196338309a2d30ee0e03f05f13ab1cec1f92bd43e4e870f49f5f8189793f50123fdf85a781a87903dc36d53afb3eeb845913b10b17a98f3db77db18d55a2a32b5da789825058e61e42ce64907345e80c9f43440992add1ca403fc2430025cf245b31b5c66f6eda28c3ea590926d25dfa31ae73b28855b3df709ce488754043fd18689e005d87631fdc5d396ed24155d873270a8c3afb2177c2487710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a05a2573fcd83754fb5eeac75c701a5e3116f1c758afb4d1e0b4fbad170458c531a0185efed498ccc93ef4bd24de9f48e6d7769a515557a830f0cfe1dfdf3343f09002f901f9016d849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000920bf62ff97907c25fcbdb84a450718c8e7e5c6f7c8e541cdb24b5348d2f7864c81ea3c62d00c564802083bb1fdef5fa435e276e4780d543bf0cd3f21825282d2b8c4aad4c362f530de98c8a9526ab71fed8ae6275787d36194afc71c9b119b2898866891ba30af5abbf95d3d9260bd7855e4c709dbbd89aa411cc91ccf95840a26a7da6f0e1172722ee464d05c66168d366100b630a9d7cd0476a6cfac2dd7d0f91aa3f0d43e208a12792ddcafece2badc6ceba90ece955620c9da8009eef264f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0ea048e55174cb0d1a65210ae13f5fe3114709ebed9381dd0f86d398f9f83e04ca01c83ea62f3aaf0c9f3f7073137b48ee8351ea77cbd560780be22d0c739872b8e02f9023a0181bc849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5f0547d10854fbb95dd3d2e6fff5fbe5e73838af7016f090ff4a99cdd5ad4a7324e93441c761cf8775c4802619ceb01d51d8d635c866447cab66fa5127880a253dcbe248a07bc8251b951cd4b3b6faa63cab80808d456c6b71ff7db1a87af0433ac73431e9c2315f4a9e75c67b1d4a5214ae452ef8227b0c7cf5364e6747ac4fad79cc52adfec7aa2fea4409beed20fb4253bb683a7cf772aa821178dd4e4b64a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0d01717709e2097a0db9dd5fc790789df9334717084aed5057b8c5f74217fe2cda037d35e90f2ab813f31cc4b43909ca88e04e308fd5403f7d6f23aa20d2b2c7d0002f90239012f849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bda149a3e36e5dab90e69c65eb92eae622b6fa99f596370fedbb7dd786db9a7a562792a9ce6f3d3bec916b3659de1bc3a4ef6eda7fbac672cdb2272d7ae41edcb8aba446591ae08855364c9d460ceafdd8d340fe95f8a6ff333be53b1201272cbbe4d8eb489952ada4af205161cdc6be7994f2a8d5b598428e55f356485482cdbde7773b4b8f3d83ccef36bf1845e47781d828cd6d607e5b5b290c0e0947c4644eae5aca00e946d9dca1871117bf328bd20f5f7bc1f5a7e9e3e497e42d3ace8080f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a05b4c101bfe26c92485a646a5a98354ed5bba19e280b5b75ae938764d804392aca04e5699e95b77bb71a524626b27c4e85f7d08916050e100ba9983f27963b8875302f902190116849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ab38b88c468fa35276be986980608d830e1a4f0929d2dc01ed0a8aa544f4b9ad9d369ee813d019c4fdfed10231c8a352216c732923c1b80a7dff155cfa6e45bf17750ad06f7c3255aba180fc177d12975f243bb1211dd0ba2baf214e2a5509d4cd0fd9cf2238c6694f8c18daf5cb0b5a49d03c42f312bded35f8e663d340669fc7d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0c27f283508879af2bfa2b3223a2fc05aa162753011248dc5d1823e37b06ad894a0454a8d2bd6acedf67ece33b8bebd17593e51ad2d1fe147a6974231b26dd4cdd802f8b1011a849502f9008505584dc0fc8301620c94a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844095ea7b3000000000000000000000000c8871267e07408b89aa5aecc58adca5e574557f80000000000000000000000000000000000000000000000000000000165a0bc00c001a044054675cbb65c0357136b7279b81146608b672e47f59c2e6bbae38957bcf7d1a07c1657f96629f2637ef363f9801c2bf94beaa6e8f06ab392188b0be4bd650e6c02f90239014b849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b92afc7c5636e754cb271223071cb919b889df32a98cb1c88a939f2f92fc165f1fdd3bd202c862e82486dbb36d4b351ec71a058691026f1e242217e6323cb05fe0fcaf63cb0ebb1f9309c24c6272b532256e7aab111beadecfc1be208875540b483457c9fdd59bf72e5a8498937baa8ddf6b5ee96f52a4bf5501a41bf841b2bf18dd36fcbc5833f2116232785f8359d2334dab78eaaf7968a293cd28690e1a457713086cb817451ad27927dcb997da99d54ccef967bb535cbdc21c283e71b2810b3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a092781076d98a5fcbaddceef505f2b8f1e9ab3f87fd657678b7e477531e732feda055609d6a1a3084698c6b541867e40a27c0d842ba39f90292e175baaaa092032d02f9023b0182010d849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b2a9bead16570655876cf3c319f997826ce0b82cdc68d43de21d3f7bcb9bc0d5715ad696d366a123ef5f1dd04239926fd01038f864cb3aa77f5c098c09de011b3a96382f1ee45d142fec35455c2e50aec5fd7db9ccc9b46106ca2ac37179489c037058c9696528f3eaa5609367ac44d72a2cc100eb104eee1f2ab7cbe3bbcc0517bdbeb05da7d57e84e50bf7e5dafd47934ba427f12621d440dcbaf391c39dba26164a4a24b1402a57e8a1204a10b73460677f293ee7bde90d60a0533387d3a96e1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0828955a823a88729f1aa9140562a0826ae78c6755fc7e5cfff1628e7b0fef1d1a00a2d86518baef3953a8fa11d54ebe61f740b94dc03c4157138e458f61aced54d02f90239013d849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bec2c811d5daf80dfb8d8e7ddfae60369316ed6d9e539e17dbca2b9414c4a290f7a3474f1fc569aa3fb5454ec294e3bce7555ffcd6b4dcd69293a7b8fab75ddebb34fe61434f36bfb4d8c3d1d20d5ef0d908fdb51aee91e8514e07a65e0f27e391c57f0d9a8a8c22575d9fabf4d02c345b6bd68689b09c0197c9be4aa8f1a239a10369baffb808ab0ec10de3cb51ade4c7287c1fbcc49a59aa8bf99ac5332ccab699e64a1e835523b3c4155d0fd43d55d2385dd269e9b1e1bde75322dade668d366c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a06d6d370c586cffbb981cf2860ab71ed6d393d81fb4e3134eeeaeee54a26efc18a060e281d112a8e39b7cb7652a5f17b43d5cf298468ed760bc8d02b2b7f21e89f902f9023b01820132849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b27fdfec8b13c31e435f70fdd44a37dfba8adb384c484a4b686acd44e1e8d47e4bdc81bb1053c1554ac47d48ad474a3b2f400b7f875d2b03cd75b189093bc716fa2471078d689ac7f53bdba4a182b884d6c58284a7328073b21df8f2283b7c570d2f8e9cb25a0c039f4213639428ec514dccfd49c1ff6caca559f9af4f948e89ee04a0a4e50daa4141cbfb0658f625b72ddfbc600881b4257e454b0cdbfcd45a402dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0a5d63110b57789243f7302a7ac1b3abaf905cbc876bf2d6cc00d6904af7a4d37a06ab07133bca91f898fedc1aacd4054d33c5d1d9c592a865df7dc3a3bac9d0d7f02f9023b01820164849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b146ce1b74d5e687513365cb8bc73789ed6a337d93edfa894e8e8781c7b426d86800554800128bdcdf583001c83d4b61c480484c305e27fb7fa59731ffc5c1207b8c2d912ca68c1667a783dea6a8b6f663a83e7de3a5141c90624c3d78bb674c23c5048ea9e29bc7acdd607d3ad8b52911abcbc85db3d3fd35bf96610f634fea8c082e8d3b33f23ead586b0c03120973438415a1548113ccdba2873096f347582a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0860fd950c509638ec6fec84d4ee58f7ef339e5bed865d6d24e8a1a0c788871e5a01db4bf617925466e453cdc93c370bf28a0575a321edf1e26d885a862db2ea7da02f9023b0182015b849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0700d96cf9a04621383af3bb03403e17bed823b7700c64e4687ba0bdca9d8590a660a4e2adc5c5ae150851e30af78b7bdc2497f46d488b5f556352310824088c0e7d13802b59b72ff9369a97c88269d8dce4974e7410ee6abc7b6ed84f3d711c84325a1d81ae2f3fd2bdf0229e3a0dd50c1d144af422ec4a8d45fff39dd25e503bb3c4fe085120402b6902790224fd459e47fa185d821e298e45366306a5b3bc4a357aa0e28aed6f9b0ead588f38d4a27d852dbb4b90ccbf35d7d49dc3bfed3c7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0a81693ec5857aabfde7fff9cd3f6e69d6f6150c54989b4d52b714ab3ecf2c02ba06cdb14b08c4ba30ca67a3ebfad20c06d7e67443066fd290046682e77d24b7bc902f902390114849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1886509080dd056af6b391145a577ae1bd4dee6d2fe96a00c312aef25c142b77f92946d9223739432576035c4c2b4a9e99d558ab24faf94ac0f12ffe74a23830813af59b6419dd7b38a65e138e25281630740551ec83fe7f967bcc78dcd5b59d93900c311c2a6c6186367f2cbef3f47e86101f96cb9acbbb1f40f84db9ca84a8b06980f95496918ab0dd23927efec5665be9ddc0bf707cd004a6319845cafacf9ca69f0224e93fb3d36e599453e3cc8205528fec829600637c27b4c7093ec8600815bcf88bffbf15080bc98a4d89ff43e0a6f9cecdd7c0745f45210fad4a5f1073bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a011020bbea96a331f4f0d8b5b8e78617e48dab82fea9ff554a5da0537c6e2644ea0771e968b3669e8c13a5260c2fe06759d0c9ba6678d45542051c12c0e3ae4e70202f90239016a849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfada20644b14e3288edcd7f2f4a3b7c0b95bca3d6fce1421b8c9518c66dda735f8ba877a6bfbce30c4ce611502a1ccff9f19be6a492ef7046816fc9fd0b39a182c63114896df964394ce28735228db04db1fb7d9e53b4cbca4ccb2f0f1af530ebfae2cc4a04753549155cda73390e77a0898157e5da2ae4acd9089f1539067351c343328645fb4d3145cc0675f6c7e3777aec91f5b1c338fe6fa0795041809d780eccfd9aa7d59e9987455ceee93c47cf30839c9e2242c79288ac5e451a96d567710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0860258b9d4abb9c1e8c76cf001b69c6be91685af87b76f8c3856f5135901c70da02a6e1208457ec2eb4fc74cef0135030913ae902f73745cebc26a0c9c608f231902f9023b0182010e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bccee4eb9a2218470e55110440a1fd07d36ad7e04812d2f63ba183ac4b36b2642b5c9fb285b5f48ede753c8bd3e039f5cb6f9bd3ef96c3d5c1ffc73f2169a1eeba29b4b3faf8626e616209d7eae7e34a343f547c39a97765395c3c0aca891d42299b0a0a6c94eddb43658e0025e02c677d57337ea13820b6f06b08b3008a6e2a0ad65cd20966275313620654e2c8f63b64a7b93a9e40ef5c911085942e32b31b8e488754043fd18689e005d87631fdc5d396ed24155d873270a8c3afb2177c2487710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a06eb9dfb62d53253a002672fddf71869b82bba33db76395d212acf8a3bdeef011a076455ff2fc72a7d6a186b4e84a9eb12151a7db58480b037e8598d044e054ff8602f902190174849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a28e531cb1c1eb28e91d0ce1cb181d4ab263060332debbec80f528c932545d5fe9de0e64b74342b482e8e73cff64f3363fa8371cada91802c2fe4b235000e65c6f50ad5527e97b35406314cb872ad6f064e957b9f8312a5f07d86eb96f9e0a6a6decf5e4e1131c590645de42305afd041f1b15fbb4e5c53fca066d72efc15d863e873e09ef631ec01f39cef9854e7433f9774f38b3edcf74463e36a28b36d6dc4cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a06e2fd4da92dd2c50590a83e7020398d82504b156c64a087ae588b033e64e29b5a046a193d11d234ff20d1421f7d20e5de181a51237f8752ef9d0ccf917271c6f1602f8700109849502f9008505584dc0fc8307741094f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc080a0c86c17b0872cb1eac219028976612898fdc66a596e1c3f7388f5663af6aa234da07d00b7ad6841e3149a5b8eeaa4e00f7e35a919f94a2a33b8f2dc1df20996526302f9027c01820108849502f9008505584dc0fc830e4e3894f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b88021f90b07aa28000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b6eea621a31d557f53864c5cd1c2f64f0497844f8a54c7d0c82421040f5264fb92138918e4848c0aea1d9aaca364bdc2f08a20b26555adc2fd70f795a2dfe910a6357d8b5536718d263d0ae705227a389d3a0e5db2fca320f8ea349a5b612b468645d80d34963f6c5f03cc8dd1ac1e74c9f5f2c8da158e3aedba2f8aeb4aa131e2b20ce259ab0cb0632330ecd89e8624683f7b669061b4de775a47178f576b9a19eeee8887e94b31146d1cc79ec0149374366bc57dd45786582687a85bdc4bffc46aff3c63caf1d6db3458d94251408fabe4dc4d385e5fa102e82c426963b69ec2fe2469d715f1ebb609042d032d33b50954dacf341820702a87e360fb2f411c3b82cc07d0dfe58215e406ad5d7f66358ed9f78cb30a738d01574d8a1eb14f63c0b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c080a063cf25040c2c43a549220a750c6c70b66286f1bafd95b0b7b41caeb14554f9e8a02a698cb2dadb1183f508ddf9d6fb3083269f1969b3f064bb3aa43ecb99c0127602f902390141849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b8c1d3e8c1eb8bf453bf677cc88089499c627a067b0828f8145b1a1fa56d3416c886f8dc29c7254cdfe8e97e03bd7f7fffa6008bbd628b77d98b397201ae9f5a252b748b5bfe02a654f4f1f9904f6c73536a61eacd016e09cff9dc6f4dba5cb3be29ab2b0e017d31537acd9944de544d6b03768af1443851845043fe36e79e06611d6828e7a5c6f653304a5dee69fdd27a099b0c728730fada01b02c727025310eae5aca00e946d9dca1871117bf328bd20f5f7bc1f5a7e9e3e497e42d3ace8080f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a01170ad236e39c7e34b8a68119a35a0eca91aeeaed22b64c0081efee708cece8da06b5207f70150737b129845a553340d2f98ea2066bf3522d4f258451329e1571602f901f9015e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000980163a563f9d648e1a4ccf691620acef51be250c487ddc38a70a75b14c977a2d4746fff53fff88abee3d2159b14f641b7213978e824c33cd24e99421f98a142e2e9f6dc48afacfdd8cb7719e559a73484a1b4c7f4228aeeb62ce93c6d65b21a698866891ba30af5abbf95d3d9260bd7855e4c709dbbd89aa411cc91ccf95840a26a7da6f0e1172722ee464d05c66168d366100b630a9d7cd0476a6cfac2dd7d0f91aa3f0d43e208a12792ddcafece2badc6ceba90ece955620c9da8009eef264f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0386d8be98ed5d98cdc1c8e4007a5d22bcbf66550e520fa309e1030b9e2b2da3da07e4101c847748438885a3d3d1233d64403c7aa5fb0c1977f491140265bedaadb02f90239011e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bef9226c68acfb69b32f87393453e9a62b37165e87df9740041729ff5d658dd07385a27eb74af451c905108c255a3fbea5ba430d1c898028c18f862a4dd2d97208c96706067d9a7b04ce7142497de5963519f45cf93c9fc25fd27444aefbdf0d9dd0ab522f6aedfcbc363c0e3286f3994b4b479a42c313e1c18da47923608bd8a06ef85f770c00923b222c953cd33151eb48190d06044fd1d00edbb3a835da2699262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a03e6338014436686a4b4751668fe534a468810b6803b4a06dbf8c9fbbb6ce3e30a04d70e94009a5f7307c1099b8090d4ba69bac5f1ca517c0fbe0f4264e4abaeb1202f902190160849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a632a01f7aca4110af985d1110c5b158f8fcb7c921e3d143e92475029f609c81d18c50f0f9c5c959daa06bd028be0bb7a0bb332062dd0f9c8f4f611b2fa1d16bdad2eef155d5d80d8b962acc037e2743e5c76027144b1f707e2b4ab94e67946c00c23e9299fe55bc6d288074093fa5edec958df0a758b364a52ba1938aae54e8d8b241b25901d96ff75624149cf8483acc852d4680d915625802016f2f8600296181ef6e8b3dd65904baaf7e994510197d8c197f50381e40c345d598f9cc7344e2e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0ba702b0a92ac0a3fe80ad733bf239150b57c8135cc550a3778cb9dc2509a08c8a0315de20c244085680f3ca860b2d276b5c84b2f9e9deac67cd5212cd59f1ccf3b02f9023b0182010b849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc2b4f6a4e941b0436a01ba81bf28d112cf8386ed1ab9ab3ca964bfaf53a721feb03b9d74944f5b3dad3ebc9fda919ed203510246a6616b58397305f87d6b8215362ea5b975c3fd8e5a1c4fa997855cd1d8dbb377b9005630b0e8d8971abca64256ff198866201a39698895f89da74571687399f4d0e805cd7475289b2b151386a4f398452c98223a824d143041a0f7d210b0da1200d216838ebfe8383cb2b59c9262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a096c8ba294c3b20de69a51e020313d1aa8240af62c89933239f31248f61bee13ca008531e0347a1b559666d25aa2cd0e0ecfc848de41cf7d915bdb1cc14fd411a4e02f902390138849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b8b853dea75c281dcea2cc230c6cbdecbd98824a39363bdb3ed1835c759d5bbd099031f6b04528465c381b6227794bc4e908cd10ee940bb016b7bcc1e4a3b76b21d6521e32daf5ae7924960a4ffd182371c3d38f508eb32a58613e3c7fbfee374c8bdb39ad6528fdd7ad571f514664b9f2dab9990abed698d88ee18f9799c681232c3f251efba7755180cc77acc723082ba28ae0b8b8ebce06803c6af580fcf3870f42c97662bb0ff7f0ecd7778cc5f0e1d56b8231ca6676e0d6e65f3c9525fbf66c3d0ec215d48641dc722bbc1415fc0d471cb99d87e39fedfb1e15ef24a4b92e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a00e18dea71ae0d94685e4dcbe5cb46ec2d3654288d9fd2f56c69594eef69e5f58a07f43e6324cc8843635e034b43a6d8bd8f7d211bc707a05d7b5bd7367cf541aee02f90239011e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b64563f20d6035f86632ae11ab42f4975d4d17db44e6e4fcbe9d9e252738b3c23338430f7b29410907b75a3c79aac55c5c28b36abbe267691ef4208971881fbb014d3ea2e1a96c637b30cd1ee39102afc6a8f2ee38e4b2cc0d01e14f44c1758507cfd35718e2c586ff9a3fee7bead78c979e84815e678e9f0833f8782898c03fc228b2bb2d8a4db141f93f7373bfeb3ada6c15c980958d2bf5fab8a12e721de77041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0fb72db308d2d3ca53ae6d2e451f48ec0b15ed1dc3a1c2c1f68604ad59ba599a4a0679d4a59cb92e99375ff3c7b3af14deea0f2dcbd725efe36b3f6f89004771bd802f902190167849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a3e11e16283f5f9fd0b8df967931363efa6321acde24be4a52a9c1ead5cca4cfa3267a88a754e7c7dff4b7f7d2c1d9a64ceee4c781888f4c5d56ed96c5d9d3755d688cbe37e98361e903ea9470e44477b88189da0f86c8f48df88f722df1699afb63095b985d501ad553dd01dac2096522e38b834a23b0d6aa7565dfd02e9aa4c16d440f5ac5df0a14514f2cf5bd0ff41a17eaf0518b6d90f3b3a564e1d82c3aca448110f2091d7638e548db2fb79d2a7a3856581d873bbacda0c0517f9390bb1edbf73ba52d75dafc0dba73303b8cc428ca1329021edc3675720c12464219f17ffaed7b71e17931c50b3e99ec666f5a3bc118c101dc25a2118045b917db2f8ef46007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a032f6c3c033b7901bc58a9e39f18dac220090ed2cad9702f1218b704df007ea45a00250f1477542d44babb037b7de1816244f0b74a99ab41282385e9c94e52c16e202f902390109849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b422f0f67b1941909ea2b9e6d7f55a3e8c84ddebe05c4b2eb10e325a5f3f29140e3058cda09d4654bea424891deaa42aeb3658df3043ace4436b4424864de9193515b39bb6f3dbe4591751f072fb568ce7f686b0bf3739fab2a73a5d2258beb0f8c476bb69dcbe9a4af4cd65265b53a8a1c5df99123653e583ae510e81341fefcad79cc52adfec7aa2fea4409beed20fb4253bb683a7cf772aa821178dd4e4b64a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0e001f2abf794fd823b6a64ab5729dd28029b6e833eada8b40a0f5fe397c7a413a06025c034c3a485302602ab9d571f8796f54c759be241c47a8fd1d48f5bc17a1b02f9023a0113849502f9008505584dc0fc830e557e94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b88021f90b07aa28000b901c4666c00aa000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000094faeb91a513f418161f43a1b41bb72f9706bbcd57fa4d592828c0eabcde5e349676b4ff882887c4420b3f334d279e718b62e4eecde831686ff2b85d4079fca90156269608ac8c9801c6276132dc79e7459f44ac599021990c2b51c034e02511b9a391c1b6622c3c04974261af13eaa64e458eb496069baba7e01f721f11332900db90eaf1bbb910e4ba48fd7f57af014f9c40d87a622629259ac5467f43a4998a0d041a10b45c160b9c53664871c7b10a02f343590dc45fe288099d4fca2d3678dcfbe23f194f8364b9e34635ecd5b474ed7836af67dd6cc5a39dc59202e3aacf5059536d5011814734520e7d1edbeee0f311d266f22cc441348ccee2ab53f080442ba98e5c54c5b145def40caf6cd211fc48522e8496808d921817efe59c2ddc001a0300434a93ca5c057cc54740a0e7f448e673fe7e93f936d3f42141083933c53bba07953419109feb2869d1e131f9a29251f889542724bbe657fd4cd45900d54b4f902f90239014a849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000be4354598d645950bf6c63fc0373b02841d214e092ff05d592577bb4e23c4c9484fe8e5eea18c084228f6fd61334ae61f0198944c267f57a1303ff796a88d51c71dbe194217f3123519b79844a57567bef96f842a6cd8e42f05ce9524f2b2ff306ff849b380ebca34b170fa318f2b395b3c270eb9d9a44bba9dc4619411563d354d0c10637c2bd2e82fa5c75b6df67103185e90efa5109adda52320fc643f5bbb340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a08604a5bc03df8d223f942159f9a46c623706a6a00aaaa64f8740fc4fd4cdc31fa032468176fc3184bc843103cf8ca2f423cdeede7c8c2290d457ca1d54da284b7302f902390158849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b259e81e4e86ff30d27e4fca7f0fa26ecb3f60bb4e84ce1361e8df9a79cb76bbacf55d08484af822b9b13848571d27d615cd7ef5037b95394921ffb1358a730ec484774ff0d8e87f19daa4e351b6e2bbfc88e6c68633249b16ef63a48f262ba5d54101d8dd11bae84ddf0839e76b76987033ee3fc08b57f2a13e7358d9b5e48edd2608d423a97ead4a46eb0ad82db90b76ece811cd6ef62d5395c941d88881a33368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a003fd3dfaefc712046b00c564d74f6185c54abaeb786332466cc6bc9024839735a0407dd57c84462f70e23ba3ef69b089eb9b4627246b7342d86f8040a423d6978c02f902190170849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a3ecae468c94e6be0bacc16f7d5984f25924b9a2d0abea2cc300f7eb744c7b394106e9e3d586a4a880e27dbf6a71074131ef244df4e544d424d77d4cb2535ee35923ecbf9ead1801b891c8a634852c5ec2ab3b4e6e4b7d1cd7a1c9e4722154d039dfa13d077a86e730de6b5eeec072cc8791d5c39427b1ee4e804f1472d6439256985f69b584dbfd1dd4767d34614eb1fe52f627bd020b850ab86a00c7d95e0a8181ef6e8b3dd65904baaf7e994510197d8c197f50381e40c345d598f9cc7344e2e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0c88b0e4e2bad08ba9d88487ae4d470e66ec6039ffbb7ba6344d19699504e7561a0148638279b9510e51307faae40fdf4a5f4c7a41e26ad4a6afab4e59977dc630702f90239015f849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bcd9ff59d96ea9cb4c7b360d162b9cd49dedd2f30abb7d166fb01d732207fe8cff62e41169f954e875f18e4061ecf546b3ada32def88ecb0c50effb3cddbe98b2b9cfee1bfe1fd33ea4b6e0da93c059e23e4a85c6fbc9cece1c16184f090f4537a959c361794f5a19aa16f439e5da49cec5cdb477cbef871e7f4008c9c369841aca2fac698de5d0738c7811fd3124aebc22e52367beb65d084d958cc9cb9eba252ac86ebd96f96f7eab60daea44eba5a2a551436123ec6af1858d6c9c9e421d98b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0560b7e01c6a6b46ddc68b8e73db2f0c8e7b4d8957d4df6171b47fc2649ac11b9a002a997f48c3c78f65f015dccd6472110c88e7b487451ed56aaf0b75a97289bf502f9023a01818e849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bfac421bc3130c7fe9c8c33eab3d0f65395a12d2f5dce6043ea2964468ec33e63e8c7585e43abf49d73247799cdb2af950d77bb044a7660ee30bfe3a227ffcd8dab138c9d5b45c6ece7a9a2ffc869ca8d60dd94ea9dec8b19c668a98875b772105bcda5c06911aeb660abba38421c0ff56a426281f61fb53eb000f147f02d9a151c343328645fb4d3145cc0675f6c7e3777aec91f5b1c338fe6fa0795041809d780eccfd9aa7d59e9987455ceee93c47cf30839c9e2242c79288ac5e451a96d567710182dc92f72c8293f10e2a921bc970f5bbc836aa36868eac1ddc8a894c17b9ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0df5f22de3a9ead188267fd8ee5d1dbc947c790aaf9cc2c63d53e5ac452067a9fa00f81cea80752b64b4d9d0e7b3fb4b6f46d2d14b2b9152de9c4f5f92b1a6d2e3c02f902390133849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b1af8ca38ba541fb0427eb306db622f0b04701fdd9da4ae87722b15123dde084a6c80b72f7fac6ef27baca35481bb77740c893e5ae83d6827614bcd2c1b949712f8a7476b2279bf091e119d53faad7aa724d6994f74495da46dd5825c59727fd20d56bbc4d044d270179d1803b85650d8d5fa97d9ca10a80270ce6f2db93219aad57d1c816b23d9e33e8a635ab9df8a95f1c394a9e44b4dad0095db5096d1639b2ac86ebd96f96f7eab60daea44eba5a2a551436123ec6af1858d6c9c9e421d98b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a07716794d74eee6ba6553939624596f8cd3e39ddd3fa4f15dfdfd1eedc4c29ea7a029ec2cf3aeba1fb828c3870ce9172ec2955d7e9467a7aba64fbe91528277caf502f89201820327849502f9008505598d0c648301241394c1eb766366e3c0916eca6b6160a372c742971cd480a4755edd17000000000000000000000000ed5c00132ae1ec02a4efae1b4dd8fb84e969e347c080a06ab8edb04b3f5424c8eb9769985622178e9b3b1a5a1edbe2579136396febf4bfa03285ae1622b1369010c209c4984a0ee369509eea58d39be288dfa5c98edb5b8502f902790146849502f9008505584dc0fc8305405b94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b873c6568f12e8000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000ba09eb53fe4a4d546b0f877bafc40acdecc5e42351a81edd95ebf6f52ce6e392c8bbc00dd4dd9cf53a31a6fd7c8393224e2e29e04a0b1b990120c4a64236eecab94ac6002d224952cd98a693f73b8dfe9c397ee3956485c32a751bd32538c05c0992b37cce9d1cfc962441ad7c330e89fd0a4b8d30d5de12b30ff16653661cdd8f47d4d2983175b9545d55cd7942df7234050884b3c91b4b1a3ea473f8a5a08dff3a4e6c30dee90ccb3d334795ce9bc9ee201691020e87c7a25e3177a40d4089bc345577c6cb0a8cd1251c96094cbc4337e4a67344fa6993838e43abc3f60586e1aff4e550b5383b62ece606f193a21eb2ea67c3d75bb0768790978af2b6773f0f834887345df96f4d60b5070d09e1a56b26f122a3ae81b1c3c2a06b6a29f64ff580156337097fc727277780dfea1f31e0683a283836f3d0b2f2f7411b3140ac82c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a054b89e51ca65462c1bafb2ae64b1faca06ef4640510781f1f9fd8518f97e3e24a013eaac50a8a9dbb8ef34131df297e19da4d6734313daa68023c7656b6199e26f02f902390123849502f9008505598d0c648304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bb47903ecdbfb9c2379fca96ebaf5d1924edfde83c422d90e43e5d836d79e85380f947e2920086274197649c1092b26a22c06af8b85f5e79b93f5752e77e1eb7ced833eb1d944639dac5d6576acac9527fd5e50b3be3a5f0bdffac713d622b81ca8c9ce932c51b7569e70fad2b348efbfded890dfaf8a8b43f9ef9780c40e47316f8ee6aca360d4cc0b79651c7c4294544080e7c14c28b91bdf155de019bdec5663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0c800a7af45c6f3811235299a770e407088f1146d89e81e7aca762f857b7fc09ea04e77c20303f3501167f173e3d577b495bb194a49169de73a4f3f330ad8583ff802f9021b01820267849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000a870cb2539958325415d2d00e75ae260ccdcf25a05512fe371d96e687dd6f6fce5b9b7de407d3dadb41f5c6c3de92244961cc0f322bd71456c76d643876f6c1d2a33837566fb054d78a3870d660451d5a9c6bf3b841e52769087e1104a9587f0392c41e6f679c59d57c7581eef926e696483ec17096750a2c3afa8ab7710596637d5e5d9de9a0bc0b6593d1b691e70105216de2cbc5e8dae6e4bc358c4c0c6951cf2c918cfd27b5174f472afde4336532960910721cd7680ab2a8634341ec8cd62e80a0dde9daae04d4d607589027b4a67ac9ce47b5db4026e967ccd38695dcec4c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a05896498421d289d051b7740013e122c2b0db1e4f365dca0232fef07aa3ee0c1ba06d88b20b1ac4720ad30cf935b399982c96d8921baad8da360c953d52ec7c6af102f902790115849502f9008505584dc0fc8307940894f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b87b5303ad38b8000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000b87f28d450469ca40d68b214a8107ed9b3edd18d115298001ada5baf1ea0a296f7e951212cb74c893b0ff0091745eb8a73127077fe4de80d9c4fedd01653dba00e796087c9034e87201e8b11ba62a935b6417af09120707df2a72486c49e1aaec8e976b37a00984b38ad93aa0f8e3d5d4887415ae96a95a544acfac9077b262e3e1a5af6305d5ace328c07ff321f9b2f7788e0633950c49099f4828f06a005a984e5367de50b41d4429acb810b3d1a3aca860c1b8bffce92eb491adbccd8d96238e6a5ca93fe49390189db8a219f9cf7a18df3aa68c0f80ac67a4a6a1d2d7fe72ac29f282a4a0a93f7e72ebbefd663d620e6691c6b6b9917b89722b1a8cdc4226b82cc07d0dfe58215e406ad5d7f66358ed9f78cb30a738d01574d8a1eb14f63c0b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c080a0d4faf69b48527bdeac694d47b7107565b04aa9e74ae4363025e3c1757d96df8ba0308ae86b7d46e3796985d907afd25e4ac2162736e3c4bda7e844f56d95288f9202f9023a018199849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b2828f7c9fd939b43c663236267fe6783d7bd5730ded7608d1a52acf9df083e6f8d400102777d6ca3e525d68cdd475f80393d932e84eaab89632565a1fdfa7ad48f06948d9a3cd6c79ac5e38b4610a9a17c0662f02223defe332a483155a68f0baf135b77dee36d2d1080cc1fbb2750151cb9c68c226f7a142efbd04241071b55a443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a068385999581b69256bd531cef665e98fd47c9298884471edca9fd711a812f9e9a067c0fd0c8315a541df6f194538eab93243b4ebdf8425e2facbd44acd794107a602f90239017d849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bbcbf6577a0ad813b1dda2ef03abe024cc5ef43157a51e3e1ec05041db63acfa8d6a068bc71fd59a0cc303c5361a41df57cd13288543fdcef1a77afeb8efb3b8e43d9140cb52d57bbdca10e423ec665012fd616959311e3f04e7db9acff9a9677a339276aa6533a11ba146862d6531fc5eabf801db09512a75f91a040303b5ec89ec43392ff7d810d670be3bc309602b40ba4f105c85f591fc1e59193a6595ea88f4dac38243fcd265b82a58d8be3b0802d037e437f0b91942e1337467b6d2f2f11686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a028699f5393b159a057fe050280fcb59ebc13887280b78128e9bb020755d3d64ba0369139a0294796ae56c241c8d1a48655e61514c9c8cec15e4b58bb5d25978eda02f901fa01818c849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000996ed02fa98825bc71a294397b5540893321cd914757817689cf750555985a03a78d4213308735bfc5cedc3dbe9ff4f8152438f79c153ec156fffa56403d9b348a6120707845473c21df36c848a6a1093c83e798b52b8d8fab8f21dade91f642d698de7bc58de4b95086202361c53dbf05d3d523768f78dac4f6ee41ad9da78dff94d209e5c9915122dedd3d314363cdd949dd6b4ed2a6eaa201ec16f081bbaad718652425e447279755764eaaf850248e059ef4df20260a4cfbfa72ca14c8fc9f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0e1a92a4d813816fac36a60e7f1bc4b182070049d20cccfe52fbecbab91294feaa018877cf51e75bed52b242ad79d7f234413f47eeceef8c17eb9528c11674670fb02f901f90106849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009cdd9221a6aee417e5aaf62d27c1f7334392a8e9ffd6b44404f2c5863524d706ddfe4989c3323bf53d08ac7450d979fc0a92f54dcbe3cdd51df8c6f66359bd94c0d3669e0708e9542665e4396247ba04d243c81b4479355ce2df33888c9e0b50504b5e992379c4d3ee343745728e612df3e9676a94c905a59be7b1f3beba3241d2fae1249bd417a9e9c0f5c8c8b276c9574b7637e276eaa4c8b23d4cba87debdd718652425e447279755764eaaf850248e059ef4df20260a4cfbfa72ca14c8fc9f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c080a0dc3dd58a4c48357a6a13df8609b5b9bcf483e0065a7b085e3545daadf9878802a025ba78f1bd74bf057acb403721d552c4b8717ee637a42c6ba88ed2b384ef020902f8720182014f849502f9008505584dc0fc8307639994f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a035951660be9d05c721d1c7fb207885a9fb4919b89988b7c26e4046a6cbafa53aa01cacb266ad889dd48d1d96cd975212be94e78fd3e27514b5e34534e1bddf01a402f9023b0182013f849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b2dabbc492dd7b3ab54579785a50425fa520aabe52876de05f02e245d907717519cc9fef179ac2697fb16da35800f6e3c2d1e694adbf0ab496b1ee7ef9f41fed3d1d2e3656c8917a08f03cd7fc624737d71ef72e1a9c878f6d2b8c14120dd01db1b7b68d483e594b2b049a510108e8384fca4a98b0c9a65d353db714f1b624b53d2608d423a97ead4a46eb0ad82db90b76ece811cd6ef62d5395c941d88881a33368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0e141f6cfb4bdc4b6a6baa937a08db6154a0db1793bfa431b39254fd1cf7dfb46a01ad32a03bd09e0a31430773e16b19a00b3e1d65d35fe14aef7ef32ae1bfbada702f90239016c849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bafb20f5c1395a42f8d3143cbeef9c3e19e40efaa68f06513d63773e3f123db231b730f2eafafd79af940ee7bcce5c87a49c3b7f84aee2602d39ab50477116731b7b14df794dcbd96f536f2b970303a27eb5ba2f35971c3e746fc8acaa750b4a4610a1669643bd11695c4fca06549f7317186c0d289ae01671ba762c9fe5a2b179ec43392ff7d810d670be3bc309602b40ba4f105c85f591fc1e59193a6595ea88f4dac38243fcd265b82a58d8be3b0802d037e437f0b91942e1337467b6d2f2f11686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0e1d90897bdafd53bfeedee54db0a06b92249f971c5cb3e43b1339350a97d2cbea070f825b54920d4d228a70c4d66b54d7b918439aa06cf86a64bfc524caffd162402f9023a0181ad849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b5ee211edd36beb0ed1627ab99989bd9654d3d854f7f9473e56c2a94e8aa5c116c21bcb4f9ecbd8391fd7a9118ce480ddaad6d67e15f7e14884072844c88fe9d609671924bd7d9303eb8d90875a726aa2335f1a0f5100e828c9fbd284ee480fe39ba171c6b404809e29f17bbded432e8fa50ed36a8081333b1061d9de19be10aeb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0f6ced6e990458ba89c88059ee542f270ebfa721595180bcc47f778a78a20def6a006022a2268cae4133cdaf8ae64bd8230701cd9e24851fec19b329e934afd685002f870012e849502f9008504e63b75708307741094f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a019f2ec1a1471a52e79438163b026c545cb0a933823023893c5f7828b89e02546a05123043e9f4470e5635e095b9f3a23112a6e5db01f82a4141244db4d04f7c2bd02f9023a0181a8849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b3fe2b1c261b23c6db8d041dce4a6054c03d31f7f372d903397f62ed35d17132c9cc9fef179ac2697fb16da35800f6e3c2d1e694adbf0ab496b1ee7ef9f41fed3d1d2e3656c8917a08f03cd7fc624737d71ef72e1a9c878f6d2b8c14120dd01db1b7b68d483e594b2b049a510108e8384fca4a98b0c9a65d353db714f1b624b53d2608d423a97ead4a46eb0ad82db90b76ece811cd6ef62d5395c941d88881a33368fce61468f4a0753b448f8ce82ddcdf2fd716375a1fb922d174d111abd5fc40f91a6423dba6a49f6944b28d8c41eb2a0acc874bd9cf07636081f448771b2f66ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a084b33cd09962caca06ed38a42ee5c2231fc4e1b89647770ebac1ccdeb7189a13a07510d86e07061fe1f5813bafa0064f53cb0d489fd2421cfcd724a526b804b39c02f901f9011f849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90184d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009559dad900cc3ad4d01e7f191095e1ce31b2956edf367fae9f54995c596e4a5ab8bee5cb9a81d63d0d7c34a1d93554ef5401aad078404b80cc823916ba299c3e5fd536448de02a1fee53b81a5318442e9f21ed98a1618941e4ae9a5136a70d0e33df3d1bbaeba880bb2bc076be02a1f7fa95773e25bad6614a5e98eab106fe498d33d04bcfa9df33b6b428b996bb338b17cf32cca2a3da2fc4b93b0db7860ddffb4e8f769bd74b61d12bab3416ebc55d41ddd709a109e5844c90d0b781d913615f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a024bcf783994f58587c1df76ce7f6b21a83d1329735f69e26dde59802d6ea3949a068581d4abafccc2da42c05c67694be27d532b06171cbeadc13b04936c46dbc0802f9023b018201f0849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bedea750191b0cfad53b645cc98ac996d3208b576fb528db3d7f62b214850c758167bce595700e5b2c974044ca0255281e8d7a4b169b44bf5146cb6b6bd38bc89d9f8497304a240064cbc015bd4e60ef746c9d8c811123971b71fad55fa7ac59a22c21be9e8ec837d6b51c0241688250fdcb57bdd0a6c733f0d3e562e841affcfb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a03beb94d8969544fc794fd1601257a17016f743f0c454fab409aafa7ab9ce9841a06b2ab554d706bb7d96cc410cad6402d76160279013fb45086a3e02f9566fd6a102f901990133849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b90124d2cab056000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000006877f8605a46ebe8a5b2e643a1dc49c6621647363a64caadf4e8c4a9d2c03f097f94d209e5c9915122dedd3d314363cdd949dd6b4ed2a6eaa201ec16f081bbaad718652425e447279755764eaaf850248e059ef4df20260a4cfbfa72ca14c8fc9f6be2e818d23222100f5a2543c3e0133f0dcf5dc686d4918328b86adeb6d38594a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0fd8bcd687dbbc437d0e95b7b62909cd2613c43823425200f1e6989252fccbfc6a016ebaf9558a6ac3cd7707c32352668dcb5cfc7abdab163fbf12650e8974a266002f9023a0181bd849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b28c2441231191a521621fae6b54b06211b381f4f11ca019e2c3c0814929ed78fa9eeaf286a1b3c42854a642ed1b42d38f6d5c47d5e3ca5d9d1a83eef4edc5d01562f0398d9622d9871ca546c9bc04fd3e0284fbf1d3e94e23b25d33908a3763b22c21be9e8ec837d6b51c0241688250fdcb57bdd0a6c733f0d3e562e841affcfb2f94beea3333b68d58665b81219090d402da83803917a75e14745a56542d87e041322757cc78e51497468c963c098e84ae8e960e15f8345672c6d16f97f9cad7d510e7bd2727bec626207f433b8ee3cb02172d7e3d463087da63edb1aa1e51173bb78fd28784e7e398a646408ffcbe410c7c3aa32abe694422aa038737ab8e1960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a0e9aece63cc4761ff65a3409e186d8aadd16d9242706cca6095a8c7aa42ea4257a053e1f08d68493d93e7c6b7b5a535155a62b676097127b8b98754cabbec6aae7202f9023a015e849502f9008505584dc0fc83261b1194f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b880713e24c43730000b901c4666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009f1bfd2e804e37b157fec5f2beb37277786a21e94fba1e1ae209cae9b285a78ef9cb91b54428bb28230c788dc6428c9d09124390fbb232076c46049386ae81a43172bda82613d8daf5b7b091d0949f9e2c58e9b189343484e395b4dbe8e9deb74a26807b00bfe996ea4517a9b7d1cbbb82d23f99487d7a362b5dda38fc611fb589418cf0aa125bb90b1b6e29a65436f8c87fe17f014c8f38fc1f8ff16609b834ea0d041a10b45c160b9c53664871c7b10a02f343590dc45fe288099d4fca2d3678dcfbe23f194f8364b9e34635ecd5b474ed7836af67dd6cc5a39dc59202e3aacf5059536d5011814734520e7d1edbeee0f311d266f22cc441348ccee2ab53f080442ba98e5c54c5b145def40caf6cd211fc48522e8496808d921817efe59c2ddc001a0363aa457d18ffaeed29b608301e59ec2a25a763bf78ea07361ebaf355d3eee33a01e6765b34d8c09ab4979df081c92090b726c25083c08e48a4039571e8d1b9e9902f90219011e849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901a4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000ae085cdf73d637ea3f73699d9bdbf81fc9dc55f10d8c5e09eb811f5ffe9bd455fa060fbe0f5f49580bd235aa4c61a66ef8dafd2f661320122b5bc83dfcab63a725df56dec48daab045ac60c8897583b114d20f0705a155f40935e7628d2a66cf60d14b0fb6ba649284d60ca4d5fd018f80ac8d8bc3108362d3522aa5768049b6bf0c2dafa306040fa2e8a7543fb78dc27bef039464182c5d4ef0f4932f43d03826c6b4d327b382242e1b3d577ebb1f93bd3d4012fd971b348f0128322b993929904d2d3a1c94affb7c27d59d54a29768078ad34fe118e46fde6b86664dc8fe3a84c8ab6db31ab033d06beea33dd8498047eb5289f8070b67317d0ab84be9cd2e146007c73e4c61c23a0372aa51067443d1dd9c56727c361acc833cff4e81be4d797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a09091ababe9d0f3c9667ac9faf36b4a10fcb88dec054002539eb8afaf2005c15ea04fbc7ca665c123a9065dd3618c5bc19d54250c7fe354019b749f057e640b4ce702f8700113849502f9008505584dc0fc8306ff6794f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a02830be0700c4eb240dcf8aacfb2d49ad5c6cfa525bb336b9b369aa2292975839a028789b6be45c3a514b3a0461dcc298b2871102e3eaedec9f0b2780af5c8f091402f90239010c849502f9008505584dc0fc830536ef94f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b873c6568f12e8000b901c4666c00aa00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009d978ba13bebdd0420e808733923129d2e9197f382a25e3e61e17f1e1e46f2334de30cd3388d1a773d9f5fb4b333ce98a39458e4bfdfe81ee6bc9ff9d3bbf42f35787958f37f09e017ae48de338e391323eb3817a09fcd2f084d782df532ef768ae58c0659cecf668d4c1616de550c04e7dface8b951552580a4b77f0c3e70a87b742dd0b61e086f3e5aef31170384d31ccc1ca8fb75f2913a6abf4f697440622293fc46b92e942d8a1028494f50020d102f9dcc6bb705418fd2e8988337c3fddebd0bc0b8740df88074b3a73716023fc3e52ac02392e091a51f18872732a9c2d47af97052ed8026631afaf913ba92ead0f95804b5afbda8ac5f3e10f5d27fb240442ba98e5c54c5b145def40caf6cd211fc48522e8496808d921817efe59c2ddc080a04f7accfa0658d70c7bfc91e2f6f51387929e70f90802e49f762e15afcc0384eda0690f1971a83e3c7b456cbe62f9cab8dbab58a6fad6e1199b834233555f8eabcf02f8700108849502f9008505584dc0fc8305201494f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b8084118aa48bc001a023de1824fa281e2bacdea39955e4addeaa6525d9b9cf995358c65fe00996bc89a04d28b03452ce2f2a00c59cc68b7267fa6d67341a6be08a86292ced61c6e9d38a02f9023b01820107849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b03550d7e417ebc11f274e7ba4d9a1df4351fb264416ae7a230103bcde0396cf4185b06f0c0178b1756f08470c4f1e29b4d692383a7dd5d508ccd24aaa2dc4b17ba3d9ee7ad6b914ef0c6ce0fd3f76ea8f318b169143e0d2aa47d2ec5e9c3c3db1cda86aca26bbb0c88b68d60cc2dbfeee0ee5e5568e40c2485ca4c0979f0a798361792b864ec7193e05e2902dbb5c1a1dc97b11e6ba2425cd85fc70bfef0ed26340bd88da4d4ad7f678cdfacab3a163c4495be773b266761f072b5775a65c45e6cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a07cf5f24567f7d7f2adb3b3468ac43dc3c4b1ae1e82dc966eb0c98737f181f49ca05b07fe8da9b9a78f5fc5f0c7a3a27d9ae2156a707049cf64d4f6b0dd6505c16002f9023a0181de849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bc1731cdaa718634fa805240267f2c7440a5eeec4bbba1ae2f3b8291c67e5ddc124e93441c761cf8775c4802619ceb01d51d8d635c866447cab66fa5127880a253dcbe248a07bc8251b951cd4b3b6faa63cab80808d456c6b71ff7db1a87af0433ac73431e9c2315f4a9e75c67b1d4a5214ae452ef8227b0c7cf5364e6747ac4fad79cc52adfec7aa2fea4409beed20fb4253bb683a7cf772aa821178dd4e4b64a27e7fe6f5688081767d606e6a17c36d1a8271eab6b7fc2ef47c2306091e40a611686f88b4b1efb63f3d02102a0bd8872ab8be325fef22fb6b35d9cf8ea8d81a6ed352b50d8fdd21d343f34aa3f2d0fcff433113ec491b8bb01dffc7c66ed40c36eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a03624d5079f9fac64143cbf62369da65a7402eae7995e79234ed0cb71a86ed78ba027cad9bc4b671c5d439d0f6e115d98535c34416698d65ac2b167d4cbc0b904ad02f90239014c849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bdfe2b215ed990fd933ff40aa6b470585f712e79735a97ef89b9c8ecf2415e27c2aa4f180d6dc6ed3c2ed9e30994fa6bca520f657ada9277d5887af4e541f56e964da476bb44d716ad470b26b162a0e8bd5712c18b4252332643b04aeaecf01b95fdf04e0e639d8cd931bced7b63973c735f3660262ab7b674b3670c146bafc32a9fc4c9d4861258f42a4bedb4d17e6a531f66b6a3818b42c08d1a89d1b58222002dc47c65737484bd364c502208fde3574be9e39776789488a862806ea233907b7db17258af1aa1618665bc04645d5b0c2a5bc6c16c7285a4c645fc0aa5dd222e09090a4240cef86f6c4a7c1c98ca76ce47f9feec130fb3dae8c9442a3806c505160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a033d7f0c877c912f7edac349de00d14e03233fd9a21d2d5c5bd4ddb421ada9bd6a055fa14e1fd2f39023eae7367a9ec771a3bdb1a1ae6d1b2e99525b5d79e78d24c02f9023a018195849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bab7e481a825814e1d99b6d0bd7692187dab1917ae407c716b4638f4d4e562738840be20d182aa21dfd006d26115b64992ee21d17170be77ffea4f0da024e64f93f995517d9918c997427f568372b554c650a64996ca538fd6ae05a40e3b8db54c0c7dc6e84432959d8e88803edc5529acc63827aa479a7b01237ae1fc8084425f64ad079f9b914d3b18d1ccbf9d36bc01b0aa22b1fc63b73832d5c38bba2acd65549830ec99dcc2b6df63619d785251c172ebed1665f42fef02ebf9f4a7b13d926f0f301aede479d9f73194da3e2012a164881fb2915f20b57a4dd7e29f06928367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a09d39e25ae8fe84578b32809cf3c1925f92292147d95d3676a2d5b466d8530de3a05bc65c01d1dc7881d77bc81ca6ad3d9a075eecb8a3b1dcb3e643fb076d677eba02f9023a0181ad849502f9008505584dc0fc8308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b4e03a29942868b08ab540dac1c97e5de3281dd97e72f236cdc6ad209fbd0341bdb0b46751528d1967c5f486d1f5b4fe2da99316bcf738158c3fd8835473af580cb5980f67eb63cb64d3144878c512fe3214732c45dc44006369691eca9adffda3a90319b36295833e302342943d4433a8373940bf96835db19a51a35071046dbe0b04471e38d438279becf0fc1aab239877a95f1629f992c2dc289eac8dc4760a5bcf21393057e42dff5aa97a7f8618fa048885409eee239831ea4875d50215eb3d774843f2f704d4b9b833718e9a22df645e4ff2dae134c84af306237fba47b5d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a00bd9c1947f7649852eaf4032b0afbe77da99cf0dc6c18957a72eab83845f249fa02a419c78bade4eb153716c44d95f5de13025e801418d3447abdc13d48d494ca402f90279011a849502f9008505584dc0fc8308b5d094f776e9a9642dbeb9ee7de20c25224e4a0bfcc03b87f195a3c4ba0000b90204666c00aa0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000bb3f22ad8ea9e70b75e62c8c7adc4b3aa1c542e94606d7d02c75da23272fb5ecc82dd8cac11e3d7bc1d01e1640731e11b861a49146ab9cb959da31e932927e3fd5f93669e431fa3c499b9b5c25e2f72e6ad64574690362a7ee400fdbf00d0f846205af47fcd96dc251a27724191a73b720f71687f53dd2bb8a9dc98f8c370b56147260205584b26bcdf5dd2d486db74ab07f8c095d5106db78bede89a7b4acb6124f2c26fb0812e410cbe97f2a38f9ccdf5ab072620536597f08b6359d4eb25200e6c4248e722c42676cad5ee5a7c6bbcb02e787ea071ed40a7fa79765f9586ad39a965869d76c3a71d2c74451314f38fe0dfbaf382298368bccd25fec916d3c2a9926dfba95a258d32f0f4201888a0c5a15dc50be540b6d7a523548b99777c040b3111eaf415427ac74f8b8b68fc93df5caeb67ec32d8e4105883e2869c067a52c305353be4f2b4e862d9e60a89fb2a46014d041bd09a0460049099bbedbc290c001a0c08adc09c4b97ffb7e6a118baffaf79ceee1af1795fe5c1ae2c039c57d04436ea008cbe599f7a2d07927394f448114dc63aeba9e7aae59b9deb30dac7c5f3a050402f9023a0181fc849502f9008505598d0c648308b29094998715a4ed2c41bbf4c9181120bb5857627816aa87d529ae9e860000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000bd959aa18190ca73b6f20946e6c39526535466345af139cc486b23a54e635eda0e6d9017ce4be3ac29eb61a31ff9bc35ae8f20c08b5d811769f885068a4c1ca91b41573e21ef2fcec436d0b504aecea78583bbec5bb6873a250be7412e598b4f09401bb360c3100175b46dd722a8088d82e60b0c71a59d301b81d7bb7cf7e87c7dd7082d9a12cd7326d3355e6cec5fcf4ffe87221a5341f03a3238bf4e741432b59534ee891dc7b31c689f26da2d42c9d5169363891bad63efa35390a39433d4a1e43257fca161d36bc6af6f7c08e0a55f2644a631163b9c16f0d890de7f223cb367afe706187da4fe6bf16d387642c381960f3653635af3e01da5990a7715edafc56676746ed65e7e0cb2e85384b58a35f638c6fefa696d8c6ee519b7fcaaa602c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc080a085829d1f49cf4cb058f60744bd9d7fcc2b10d38f86aa8879f6cf9a051d16b88ba037a29c86225563115070f41835ac3b1e2cfed81fd19ee15d125a456c619cf25002f9023a0181d8849502f9008505598d0c648304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b9626291ccde3e734b4679c678b26cba8551f1f92f84f44963f65d3a9608e9598be5f50e28b64e7927c627ceaa2925c95a594888586ab6257d68b64d898034b0c7c5122a241bb52d7f8b94632b4dcc7641e7f1ecaa157628d50ace2d436554028372755e99db10d981dc9ae18b6f26f5dc24451a8e9565c4ae7cdb4570a55ac4506ef85f770c00923b222c953cd33151eb48190d06044fd1d00edbb3a835da2699262acbd5671bdafcc4346e225a5eb7e7f6ee95e8f415b78be64ea5425a447a46cb6b274fbc933d5161d201b57d1968b3e548233d94e0918b29e3246db24af10874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0ddf7b3d4bae285aff45140358532c0caa833bd6cbc22af7319c3b032d4e79690a052d967da6febb2f71da8fecb3779db1c737d870f13275982f5fd58fed1bd6b5202f90239014c849502f9008505598d0c648304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b22b7d1d09123f187b961e90452aba6b711c5cb2ac6f3bddf1c721519ac4757c701e90a628ec97d4193d8f11f75ed1356179d660af0b336f19c09ffea003329ff8f06948d9a3cd6c79ac5e38b4610a9a17c0662f02223defe332a483155a68f0baf135b77dee36d2d1080cc1fbb2750151cb9c68c226f7a142efbd04241071b55a443a479748450adaee230a98fae2a7f7e83ae41978a4a06a7e255ba23e5c90663e97180e1ab3461725b593d992b80fe8cdbc1939562c814a2ca5d174874eb00286b4cf4b050097d14205264d5de8efa790b96fbc3b6ba991b67d2becd1cb8d3874004b0d4dbf1a5628989d6ced262a681f9ec8f3b8621cefd1321156f3ff6f15160bf7b0fa952413adb2f439cfe5d870e2233e79d654ac0bd3c3328ed6955a3cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a091c47b1d5a8628fd66e9bcadf2ee9a36d5d4b5d0c9cfd61196181166e3a6d843a077198438f7ef8165dbbda66042502c3c08316c3178039618337214f2780a1b4502f901f90140849502f9008505598d0c648304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b90184d2cab0560000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000094be7351f4a14055d843b16ae7fcc27c3496667e7367ab20bdccbdb1e95bfd96ee1c70ab57dcc574ca41295e85942c1febe38801a116c77e84fcf248815400feb842c9280afc0eb24b286473dc6ec60d11dc9d669bcb9bd45d7cb86000d354cdffadfa13b13e721ec7ad89696360858786b5cda930ad671e871f67f2811dc4b08e2fb2c969a8ab4ad0920f0ef7558bd32d4685bb0fbe1121fbe8988e7774d7162f91aa3f0d43e208a12792ddcafece2badc6ceba90ece955620c9da8009eef264f86865b103c0cf0b5920cf393a4fac1135bb0bf631f329af77a883162d3c97dd4a82199b3530ac248b1c97de1e8cd1c656efab7035d8bca80a6e135c4bcb077797c2b088717282e29e95f0b8a3b1aecec6dd3de61e189d8657b0d4ab4cf53e62c001a0a2c51935413a14c1516e7da63eae5907d2c8a0270ae04e9c7c76731615c27c31a0477b14756d724bce01b3a812bc50a805c5dd1698236d35c851ea99f3056da27d02f9023b01820152849502f9008505598d0c648304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b635d6ce272c67784b2a789dba352d1091ae1848c2627562e6e109435868c9bf29d441e8cf9a8de7fdba6fe0d5bf524a3e125737e2c92c22a4cfb684fe3ab61e1a10c20800d2e62bcba930bb7d7249c1811fd1c97a3bd8c0415649adb3ca013433725ffdc3de47ceefe163f13df54cfa7b398c9171b53598ebf0596009bfb0cfceed0bdf6114248d7bb901fb3d726a53ea3ed244bf39c5029b03d6828cc96f37ccba6d2ef29cccca4fa453062395770480d56588111f94b9615f951fec3a4c57be1c095839abad83fbb732382ce6110e4767327b558f173562c36e0250ac9f3e15d2432a83ea939195e27b727120240cc6fa83ed6cf36a74616a2e8b2e72d5f4736eacb4ee9de81df876b4907dee3e9d520891b93054fc527c8a1c10df1e63b492c67ecbdb1864004b58ffe6434d304ee0d107818a7df2fc160d72a5842ecfb8d517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a0a93f711a68db52306b540c14e254585fb97554533d8b33eec198a3c8456b9d2ba0425143660da44f3f923f50fd9e76b3b8ab1c64e8276d7f10f745f31fe91b6c5302f902390132849502f9008505584dc0fc8304594894998715a4ed2c41bbf4c9181120bb5857627816aa876a94d74f430000b901c4d2cab05600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0605f27dbbe18355420e4129e59bfc8797ed2cfaf7a3953455ee4ec8d2df9c82ddc04e9555712c5218211a6e2a3aac3de33748c82e25d5e27125132b171fdc57095ef874f8e9a4a0502b188a1ae9ccf6adfb75c4395db8eb64ca83a9666502a68da992e0e4bddf6c591952171335ccbc862d64ea0db7ec6c4a3b5873c74107c1ebf7682281fdde94de5d590f2a34b1a7e7b5c33e28eb1990fdd48d066f3818d1276ba5cfa6cacebec4c356545aabbfebd9579bede1231b857e2baa7c83e7f22cc71f093cbc8098834ba48fe552a93d701ec4d36b6a097a496858db8cdcd208379ac0c400cb6837182b745f26fac1e87ff6eded4c92365a9807bb2150ecbc7121960103dbfb3f007c1cd34801920b74b6caaf25e675db113b857d3a4f48e79a04cbc5a45ec0e2417bc5723c92647fa7527040f47a4e5cb2b0ab564200dc455242517dde32034f4ae3e7ee6590c33db7c94f0834eb80f436f51a6462d41c4374ecc001a06abb0d828df4d875ca00ac844ea72a8ab4ae05d88ef7ddcfeba9934f3f9c8acda074137e3ce2e0802e52e851f662832449a9d536053440fc7c60883506a3090e9902f902590115849502f9008505584dc0fc830262d294c25eafabd9946abeabe1c9452da046b40f19e39a8738d7ea4c680000b901e4642d4214000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000b8faba8b4774687858ca53f09ff6fd09bdad48ab731f767f73bf9535c7b16bbc68585d1cd2e4cf3a290b99b0fa866029bb020cb02b9b3b4f38ee5927e00b368db18cc69da7ef11b054756d2d0dba25fdbf269d2ad737eedb6851329a1ca122cb3e359f2767acff23f4ac5b11a89695789ff9f98ae2bccd02b43dcf8f50f03302dd7025e76856698c2ee9b1ba2e95cb2cb3b765ce2313865a342fd8558d62175d9a32778ef89742118439766b311ef4cf5a4d328b7344f327ae67a32f9afb6a940e05cc2e7b9f793229b8317ef221cbf7465640e0d6e3c5f1ec209c037b10ec8420f1bc8161156421a1c19b9a708112433f6cb4942a2cecb9b36f438ccafc359d9605bc8da2b6c8af52a524e946d908add61fae75d500db43f777b5bcd16606d9e883a7f36377aca34b243adbfd004defb867d541a7c419d1aa74c1239a30b1660cd373cb958eada7a64fd9d464606bdb8bf66011af70051e2d352eb416396163ac001a07856e71d865715d934eb14d62c5472ac3ec70f9ca91221427aa20a55968fdc5ca05ae37f7629482234d1bd23a0d7f738fe4a9bb446ff8a7e81fc039a33e787a98e02f90112016b8502887fb6dc85033cae1ce3830151f594485d17a6f1b8780392d53d64751824253011a26080b8a44000aea00000000000000000000000003b198e26e473b8fab2085b37978e36c9de5d7f6800000000000000000000000000000000000000000000000000000001dc63df20000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000038c001a09bf7ac9e214e9430d9652aeb310db34e28ce2eaabb2786976ba1d92e04328824a04b40c7315d265588aeeef4b27aaa52311477284fd7da7571aca243a267d5843702f87201058477359400850342770c00825208942c35f9a466d1cae63d46174120b1d9edd648648687fd7e3b419a96b280c080a0220d14ab163184e70a277e133db3945bebccc2d14e0983b1b02255eaab030046a03de8350f06c5505bd81fae05a46e6a347f2b0b14541fa9f247fda4fb3fa65eeb02f8b00114847735940085041a25574482b62394da4dd9586d27202a338843dd6b9824d26700678380b844095ea7b3000000000000000000000000881d40237659c251811cec9c364ef91dc08d300cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc001a0f7066f9fa30c3104a9ca1f3710625dc4165214d6b799eb1e677de212691addeaa050a85e0730ef42da12278ae6f0c332c5de60a433b02d05f786be0acbfb6d9e0602f87201808477359400850342770c008252089461b260db23f294330adadcc9f7b25e23496bfa89870f867de9ab200080c080a0a704102fb39810e0dc1d8bf7e116cd6868961c9e7b2de57f6f123f2b644fb53ba019defcebbb78d4b7fbf033b8bb42dc91b0814746fe154d07869a224badebe08802f872018301a53e808502ae870322825208944675c7e5baafbffbca748158becba61ef3b0a2638807d04a00cfc7c3f180c080a01584761166d6fea147f4dcf8e544319c8d664dd394ccd66f47a2fb691081a974a04b7a4d9552d1a224aa2abad57a422e96c059c195e5b71498c20e0be4ea31b15c")
diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go
index 0833437e734..e5faa8e2eb8 100644
--- a/cl/phase1/core/state/accessors.go
+++ b/cl/phase1/core/state/accessors.go
@@ -2,16 +2,16 @@ package state
import (
"fmt"
- "github.com/ledgerwatch/erigon/cl/abstract"
"github.com/Giulio2002/bls"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+
+ "github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/core/types"
)
const PreAllocatedRewardsAndPenalties = 8192
@@ -191,7 +191,7 @@ func ComputeTimestampAtSlot(b abstract.BeaconState, slot uint64) uint64 {
}
// ExpectedWithdrawals calculates the expected withdrawals that can be made by validators in the current epoch
-func ExpectedWithdrawals(b abstract.BeaconState) []*types.Withdrawal {
+func ExpectedWithdrawals(b abstract.BeaconState) []*cltypes.Withdrawal {
// Get the current epoch, the next withdrawal index, and the next withdrawal validator index
currentEpoch := Epoch(b)
nextWithdrawalIndex := b.NextWithdrawalIndex()
@@ -201,7 +201,7 @@ func ExpectedWithdrawals(b abstract.BeaconState) []*types.Withdrawal {
maxValidators := uint64(b.ValidatorLength())
maxValidatorsPerWithdrawalsSweep := b.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
bound := utils.Min64(maxValidators, maxValidatorsPerWithdrawalsSweep)
- withdrawals := make([]*types.Withdrawal, 0, bound)
+ withdrawals := make([]*cltypes.Withdrawal, 0, bound)
// Loop through the validators to calculate expected withdrawals
for validatorCount := uint64(0); validatorCount < bound && len(withdrawals) != int(b.BeaconConfig().MaxWithdrawalsPerPayload); validatorCount++ {
@@ -213,7 +213,7 @@ func ExpectedWithdrawals(b abstract.BeaconState) []*types.Withdrawal {
// Check if the validator is fully withdrawable
if isFullyWithdrawableValidator(b.BeaconConfig(), currentValidator, currentBalance, currentEpoch) {
// Add a new withdrawal with the validator's withdrawal credentials and balance
- newWithdrawal := &types.Withdrawal{
+ newWithdrawal := &cltypes.Withdrawal{
Index: nextWithdrawalIndex,
Validator: nextWithdrawalValidatorIndex,
Address: libcommon.BytesToAddress(wd[12:]),
@@ -223,7 +223,7 @@ func ExpectedWithdrawals(b abstract.BeaconState) []*types.Withdrawal {
nextWithdrawalIndex++
} else if isPartiallyWithdrawableValidator(b.BeaconConfig(), currentValidator, currentBalance) { // Check if the validator is partially withdrawable
// Add a new withdrawal with the validator's withdrawal credentials and balance minus the maximum effective balance
- newWithdrawal := &types.Withdrawal{
+ newWithdrawal := &cltypes.Withdrawal{
Index: nextWithdrawalIndex,
Validator: nextWithdrawalValidatorIndex,
Address: libcommon.BytesToAddress(wd[12:]),
diff --git a/cl/phase1/core/state/cache.go b/cl/phase1/core/state/cache.go
index 791d2b0a0c7..fcd410f080d 100644
--- a/cl/phase1/core/state/cache.go
+++ b/cl/phase1/core/state/cache.go
@@ -3,6 +3,9 @@ package state
import (
"crypto/sha256"
"encoding/binary"
+ "io"
+ "math"
+
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/raw"
@@ -14,6 +17,11 @@ import (
"github.com/ledgerwatch/erigon/cl/utils"
)
+const (
+ shuffledSetsCacheSize = 5
+ activeValidatorsCacheSize = 5
+)
+
type HashFunc func([]byte) ([32]byte, error)
// CachingBeaconState is a cached wrapper around a raw CachingBeaconState provider
@@ -24,20 +32,20 @@ type CachingBeaconState struct {
// Internals
publicKeyIndicies map[[48]byte]uint64
// Caches
- activeValidatorsCache *lru.Cache[uint64, []uint64]
- shuffledSetsCache *lru.Cache[common.Hash, []uint64]
+ activeValidatorsCache *lru.Cache[uint64, []uint64]
+ shuffledSetsCache *lru.Cache[common.Hash, []uint64]
+
totalActiveBalanceCache *uint64
totalActiveBalanceRootCache uint64
proposerIndex *uint64
previousStateRoot common.Hash
- // Configs
}
func New(cfg *clparams.BeaconChainConfig) *CachingBeaconState {
state := &CachingBeaconState{
BeaconState: raw.New(cfg),
}
- state.initBeaconState()
+ state.InitBeaconState()
return state
}
@@ -45,7 +53,7 @@ func NewFromRaw(r *raw.BeaconState) *CachingBeaconState {
state := &CachingBeaconState{
BeaconState: r,
}
- state.initBeaconState()
+ state.InitBeaconState()
return state
}
@@ -74,7 +82,6 @@ func (b *CachingBeaconState) _updateProposerIndex() (err error) {
seed := hash.Sum(nil)
indices := b.GetActiveValidatorsIndices(epoch)
-
// Write the seed to an array.
seedArray := [32]byte{}
copy(seedArray[:], seed)
@@ -199,22 +206,24 @@ func (b *CachingBeaconState) _refreshActiveBalances() {
func (b *CachingBeaconState) initCaches() error {
var err error
- if b.activeValidatorsCache, err = lru.New[uint64, []uint64]("beacon_active_validators_cache", 5); err != nil {
+ if b.activeValidatorsCache, err = lru.New[uint64, []uint64]("beacon_active_validators_cache", activeValidatorsCacheSize); err != nil {
return err
}
- if b.shuffledSetsCache, err = lru.New[common.Hash, []uint64]("beacon_shuffled_sets_cache", 5); err != nil {
+ if b.shuffledSetsCache, err = lru.New[common.Hash, []uint64]("beacon_shuffled_sets_cache", shuffledSetsCacheSize); err != nil {
return err
}
+
return nil
}
-func (b *CachingBeaconState) initBeaconState() error {
+func (b *CachingBeaconState) InitBeaconState() error {
b._refreshActiveBalances()
b.publicKeyIndicies = make(map[[48]byte]uint64)
b.ForEachValidator(func(validator solid.Validator, i, total int) bool {
b.publicKeyIndicies[validator.PublicKey()] = uint64(i)
+
return true
})
@@ -229,3 +238,218 @@ func (b *CachingBeaconState) initBeaconState() error {
return nil
}
+
+// EncodeCaches, encodes the beacon state caches into a byte slice
+func (b *CachingBeaconState) EncodeCaches(w io.Writer) error {
+ num := make([]byte, 8)
+ // activeValidatorsCaches
+ if err := b.encodeActiveValidatorsCache(w, num); err != nil {
+ return err
+ }
+ // shuffledSetsCache
+ if err := b.encodeShuffledSetsCache(w, num); err != nil {
+ return err
+ }
+ // Now do the extra caches
+ if b.totalActiveBalanceCache == nil {
+ if err := binary.Write(w, binary.BigEndian, uint64(math.MaxUint64)); err != nil {
+ return err
+ }
+ } else {
+ if err := binary.Write(w, binary.BigEndian, *b.totalActiveBalanceCache); err != nil {
+ return err
+ }
+ }
+ if err := binary.Write(w, binary.BigEndian, b.totalActiveBalanceRootCache); err != nil {
+ return err
+ }
+ if b.proposerIndex == nil {
+ if err := binary.Write(w, binary.BigEndian, uint64(math.MaxUint64)); err != nil {
+ return err
+ }
+ } else {
+ if err := binary.Write(w, binary.BigEndian, *b.proposerIndex); err != nil {
+ return err
+ }
+ }
+ if _, err := w.Write(b.previousStateRoot[:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *CachingBeaconState) DecodeCaches(r io.Reader) error {
+ num := make([]byte, 8)
+ // activeValidatorsCaches
+ if err := b.decodeActiveValidatorsCache(r, num); err != nil {
+ return err
+ }
+ // shuffledSetsCache
+ if err := b.decodeShuffledSetsCache(r, num); err != nil {
+ return err
+ }
+ // Now do the extra caches
+ var totalActiveBalanceCache uint64
+ if err := binary.Read(r, binary.BigEndian, &totalActiveBalanceCache); err != nil {
+ return err
+ }
+ if totalActiveBalanceCache == math.MaxUint64 {
+ b.totalActiveBalanceCache = nil
+ } else {
+ b.totalActiveBalanceCache = &totalActiveBalanceCache
+ }
+ if err := binary.Read(r, binary.BigEndian, &b.totalActiveBalanceRootCache); err != nil {
+ return err
+ }
+ var proposerIndex uint64
+ if err := binary.Read(r, binary.BigEndian, &proposerIndex); err != nil {
+ return err
+ }
+ if proposerIndex == math.MaxUint64 {
+ b.proposerIndex = nil
+ } else {
+ b.proposerIndex = &proposerIndex
+ }
+ if _, err := r.Read(b.previousStateRoot[:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+func writeUint64WithBuffer(w io.Writer, num uint64, buf []byte) error {
+ binary.BigEndian.PutUint64(buf, num)
+ if _, err := w.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readUint64WithBuffer(r io.Reader, buf []byte, out *uint64) error {
+ if _, err := r.Read(buf); err != nil {
+ return err
+ }
+ *out = binary.BigEndian.Uint64(buf)
+ return nil
+}
+
+// internal encoding/decoding algos
+func (b *CachingBeaconState) encodeActiveValidatorsCache(w io.Writer, num []byte) error {
+ keys := b.activeValidatorsCache.Keys()
+ lists := make([][]uint64, len(keys))
+
+ for i, key := range keys {
+ lists[i], _ = b.activeValidatorsCache.Get(key)
+ }
+ // Write the total length
+ if err := writeUint64WithBuffer(w, uint64(len(keys)), num); err != nil {
+ return err
+ }
+
+ for i, key := range keys {
+ if err := writeUint64WithBuffer(w, uint64(len(lists[i])), num); err != nil {
+ return err
+ }
+ if err := writeUint64WithBuffer(w, key, num); err != nil {
+ return err
+ }
+ for _, v := range lists[i] {
+ if err := writeUint64WithBuffer(w, v, num); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (b *CachingBeaconState) decodeActiveValidatorsCache(r io.Reader, num []byte) error {
+ var err error
+ b.activeValidatorsCache, err = lru.New[uint64, []uint64]("beacon_active_validators_cache", activeValidatorsCacheSize)
+ if err != nil {
+ return err
+ }
+ var l uint64
+
+ if err := readUint64WithBuffer(r, num, &l); err != nil {
+ return err
+ }
+ for i := 0; i < int(l); i++ {
+ var l uint64
+
+ if err := readUint64WithBuffer(r, num, &l); err != nil {
+ return err
+ }
+ var key uint64
+ if err := readUint64WithBuffer(r, num, &key); err != nil {
+ return err
+ }
+ list := make([]uint64, l)
+ for i := 0; i < int(l); i++ {
+ if err := readUint64WithBuffer(r, num, &list[i]); err != nil {
+ return err
+ }
+ }
+ b.activeValidatorsCache.Add(key, list)
+ }
+ return nil
+}
+
+// internal encoding/decoding algos
+func (b *CachingBeaconState) encodeShuffledSetsCache(w io.Writer, num []byte) error {
+ keys := b.shuffledSetsCache.Keys()
+ lists := make([][]uint64, len(keys))
+
+ for i, key := range keys {
+ lists[i], _ = b.shuffledSetsCache.Get(key)
+ }
+ // Write the total length
+ if err := writeUint64WithBuffer(w, uint64(len(keys)), num); err != nil {
+ return err
+ }
+ for i, key := range keys {
+ if err := writeUint64WithBuffer(w, uint64(len(lists[i])), num); err != nil {
+ return err
+ }
+ if _, err := w.Write(key[:]); err != nil {
+ return err
+ }
+ for _, v := range lists[i] {
+ if err := writeUint64WithBuffer(w, v, num); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (b *CachingBeaconState) decodeShuffledSetsCache(r io.Reader, num []byte) error {
+ var err error
+ b.shuffledSetsCache, err = lru.New[common.Hash, []uint64]("beacon_shuffled_sets_cache", shuffledSetsCacheSize)
+ if err != nil {
+ return err
+ }
+
+ var l uint64
+ if err := readUint64WithBuffer(r, num, &l); err != nil {
+ return err
+ }
+ for i := 0; i < int(l); i++ {
+ var l uint64
+ if err := readUint64WithBuffer(r, num, &l); err != nil {
+ return err
+ }
+ var key common.Hash
+ if _, err := r.Read(key[:]); err != nil {
+ return err
+ }
+ list := make([]uint64, l)
+ for i := 0; i < int(l); i++ {
+ if err := readUint64WithBuffer(r, num, &list[i]); err != nil {
+ return err
+ }
+ }
+ b.shuffledSetsCache.Add(key, list)
+ }
+
+ return nil
+}
diff --git a/cl/phase1/core/state/cache_accessors.go b/cl/phase1/core/state/cache_accessors.go
index 38834689206..99f2f17a9bb 100644
--- a/cl/phase1/core/state/cache_accessors.go
+++ b/cl/phase1/core/state/cache_accessors.go
@@ -6,10 +6,10 @@ import (
"math"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/cl/phase1/cache"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
"github.com/Giulio2002/bls"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
@@ -18,19 +18,24 @@ import (
// these are view functions for the beacon state cache
// GetActiveValidatorsIndices returns the list of validator indices active for the given epoch.
-func (b *CachingBeaconState) GetActiveValidatorsIndices(epoch uint64) (indicies []uint64) {
+func (b *CachingBeaconState) GetActiveValidatorsIndices(epoch uint64) []uint64 {
+ var indicies []uint64
if cachedIndicies, ok := b.activeValidatorsCache.Get(epoch); ok && len(cachedIndicies) > 0 {
return cachedIndicies
}
+
+ size := 0
b.ForEachValidator(func(v solid.Validator, i, total int) bool {
if !v.Active(epoch) {
return true
}
+ size++
indicies = append(indicies, uint64(i))
return true
})
b.activeValidatorsCache.Add(epoch, indicies)
- return
+
+ return indicies[:size]
}
// GetTotalActiveBalance return the sum of all balances within active validators.
@@ -55,12 +60,15 @@ func (b *CachingBeaconState) ComputeCommittee(indicies []uint64, slot uint64, in
// Input for the seed hash.
mix := b.GetRandaoMix(int(mixPosition))
seed := shuffling.GetSeed(b.BeaconConfig(), mix, epoch, b.BeaconConfig().DomainBeaconAttester)
+
if shuffledIndicesInterface, ok := b.shuffledSetsCache.Get(seed); ok {
shuffledIndicies = shuffledIndicesInterface
} else {
- shuffledIndicies = shuffling.ComputeShuffledIndicies(b.BeaconConfig(), mix, indicies, slot)
+ shuffledIndicies = make([]uint64, lenIndicies)
+ shuffledIndicies = shuffling.ComputeShuffledIndicies(b.BeaconConfig(), mix, shuffledIndicies, indicies, slot)
b.shuffledSetsCache.Add(seed, shuffledIndicies)
}
+
return shuffledIndicies[start:end], nil
}
@@ -127,7 +135,8 @@ func (b *CachingBeaconState) CommitteeCount(epoch uint64) uint64 {
return committeCount
}
-func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(data solid.AttestationData, inclusionDelay uint64) ([]uint8, error) {
+func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) {
+
var justifiedCheckpoint solid.Checkpoint
// get checkpoint from epoch
if data.Target().Epoch() == Epoch(b) {
@@ -136,8 +145,17 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(data solid.
justifiedCheckpoint = b.PreviousJustifiedCheckpoint()
}
// Matching roots
- if !data.Source().Equal(justifiedCheckpoint) {
- return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match")
+ if !data.Source().Equal(justifiedCheckpoint) && !skipAssert {
+ // jsonify the data.Source and justifiedCheckpoint
+ jsonSource, err := data.Source().MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+ jsonJustifiedCheckpoint, err := justifiedCheckpoint.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match. source: %s, justifiedCheckpoint: %s", jsonSource, jsonJustifiedCheckpoint)
}
targetRoot, err := GetBlockRoot(b, data.Target().Epoch())
if err != nil {
@@ -164,14 +182,15 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(data solid.
// GetBeaconCommitee grabs beacon committee using cache first
func (b *CachingBeaconState) GetBeaconCommitee(slot, committeeIndex uint64) ([]uint64, error) {
- var cacheKey [16]byte
- binary.BigEndian.PutUint64(cacheKey[:], slot)
- binary.BigEndian.PutUint64(cacheKey[8:], committeeIndex)
+ // var cacheKey [16]byte
+ // binary.BigEndian.PutUint64(cacheKey[:], slot)
+ // binary.BigEndian.PutUint64(cacheKey[8:], committeeIndex)
epoch := GetEpochAtSlot(b.BeaconConfig(), slot)
committeesPerSlot := b.CommitteeCount(epoch)
+ indicies := b.GetActiveValidatorsIndices(epoch)
committee, err := b.ComputeCommittee(
- b.GetActiveValidatorsIndices(epoch),
+ indicies,
slot,
(slot%b.BeaconConfig().SlotsPerEpoch)*committeesPerSlot+committeeIndex,
committeesPerSlot*b.BeaconConfig().SlotsPerEpoch,
@@ -184,7 +203,7 @@ func (b *CachingBeaconState) GetBeaconCommitee(slot, committeeIndex uint64) ([]u
func (b *CachingBeaconState) ComputeNextSyncCommittee() (*solid.SyncCommittee, error) {
beaconConfig := b.BeaconConfig()
- optimizedHashFunc := utils.OptimizedKeccak256NotThreadSafe()
+ optimizedHashFunc := utils.OptimizedSha256NotThreadSafe()
epoch := Epoch(b) + 1
//math.MaxUint8
activeValidatorIndicies := b.GetActiveValidatorsIndices(epoch)
@@ -195,7 +214,7 @@ func (b *CachingBeaconState) ComputeNextSyncCommittee() (*solid.SyncCommittee, e
mix := b.GetRandaoMix(int(mixPosition))
seed := shuffling.GetSeed(b.BeaconConfig(), mix, epoch, beaconConfig.DomainSyncCommittee)
i := uint64(0)
- syncCommitteePubKeys := make([][48]byte, 0, cltypes.SyncCommitteeSize)
+ syncCommitteePubKeys := make([]libcommon.Bytes48, 0, cltypes.SyncCommitteeSize)
preInputs := shuffling.ComputeShuffledIndexPreInputs(b.BeaconConfig(), seed)
for len(syncCommitteePubKeys) < cltypes.SyncCommitteeSize {
shuffledIndex, err := shuffling.ComputeShuffledIndex(
@@ -214,7 +233,7 @@ func (b *CachingBeaconState) ComputeNextSyncCommittee() (*solid.SyncCommittee, e
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, i/32)
input := append(seed[:], buf...)
- randomByte := uint64(utils.Keccak256(input)[i%32])
+ randomByte := uint64(utils.Sha256(input)[i%32])
// retrieve validator.
validator, err := b.ValidatorForValidatorIndex(int(candidateIndex))
if err != nil {
@@ -235,7 +254,7 @@ func (b *CachingBeaconState) ComputeNextSyncCommittee() (*solid.SyncCommittee, e
if err != nil {
return nil, err
}
- var aggregate [48]byte
+ var aggregate libcommon.Bytes48
copy(aggregate[:], aggregatePublicKeyBytes)
return solid.NewSyncCommitteeFromParameters(syncCommitteePubKeys, aggregate), nil
@@ -244,9 +263,9 @@ func (b *CachingBeaconState) ComputeNextSyncCommittee() (*solid.SyncCommittee, e
// GetAttestingIndicies retrieves attesting indicies for a specific attestation. however some tests will not expect the aggregation bits check.
// thus, it is a flag now.
func (b *CachingBeaconState) GetAttestingIndicies(attestation solid.AttestationData, aggregationBits []byte, checkBitsLength bool) ([]uint64, error) {
- if cached, ok := cache.LoadAttestatingIndicies(&attestation, aggregationBits); ok {
- return cached, nil
- }
+ // if cached, ok := cache.LoadAttestatingIndicies(&attestation, aggregationBits); ok {
+ // return cached, nil
+ // }
committee, err := b.GetBeaconCommitee(attestation.Slot(), attestation.ValidatorIndex())
if err != nil {
return nil, err
@@ -267,7 +286,7 @@ func (b *CachingBeaconState) GetAttestingIndicies(attestation solid.AttestationD
attestingIndices = append(attestingIndices, member)
}
}
- cache.StoreAttestation(&attestation, aggregationBits, attestingIndices)
+ // cache.StoreAttestation(&attestation, aggregationBits, attestingIndices)
return attestingIndices, nil
}
diff --git a/cl/phase1/core/state/cache_mutators.go b/cl/phase1/core/state/cache_mutators.go
index 8d4f8a1f9ed..05f9080c91a 100644
--- a/cl/phase1/core/state/cache_mutators.go
+++ b/cl/phase1/core/state/cache_mutators.go
@@ -44,7 +44,8 @@ func (b *CachingBeaconState) SlashValidator(slashedInd uint64, whistleblowerInd
if err != nil {
return err
}
- b.IncrementSlashingSegmentAt(slashingsIndex, currentEffectiveBalance)
+
+ b.SetSlashingSegmentAt(slashingsIndex, b.SlashingSegmentAt(slashingsIndex)+currentEffectiveBalance)
newEffectiveBalance, err := b.ValidatorEffectiveBalance(int(slashedInd))
if err != nil {
return err
diff --git a/cl/phase1/core/state/copy.go b/cl/phase1/core/state/copy.go
index ebf0a111b53..de4ecd63538 100644
--- a/cl/phase1/core/state/copy.go
+++ b/cl/phase1/core/state/copy.go
@@ -22,7 +22,7 @@ func (b *CachingBeaconState) CopyInto(bs *CachingBeaconState) (err error) {
func (b *CachingBeaconState) copyCachesInto(bs *CachingBeaconState) error {
if b.Version() == clparams.Phase0Version {
- return bs.initBeaconState()
+ return bs.InitBeaconState()
}
if bs.publicKeyIndicies == nil {
bs.publicKeyIndicies = make(map[[48]byte]uint64)
diff --git a/cl/phase1/core/state/lru/lru.go b/cl/phase1/core/state/lru/lru.go
index 7784c2ddd35..b1da5285616 100644
--- a/cl/phase1/core/state/lru/lru.go
+++ b/cl/phase1/core/state/lru/lru.go
@@ -3,8 +3,8 @@ package lru
import (
"fmt"
- "github.com/VictoriaMetrics/metrics"
lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/ledgerwatch/erigon-lib/metrics"
)
// Cache is a wrapper around hashicorp lru but with metric for Get
diff --git a/cl/phase1/core/state/raw/copy.go b/cl/phase1/core/state/raw/copy.go
index 4e547c71763..86aa9e5e75a 100644
--- a/cl/phase1/core/state/raw/copy.go
+++ b/cl/phase1/core/state/raw/copy.go
@@ -1,8 +1,6 @@
package raw
import (
- "fmt"
-
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
@@ -18,12 +16,11 @@ func (b *BeaconState) CopyInto(dst *BeaconState) error {
b.stateRoots.CopyTo(dst.stateRoots)
b.historicalRoots.CopyTo(dst.historicalRoots)
dst.eth1Data = b.eth1Data.Copy()
- dst.eth1DataVotes = solid.NewDynamicListSSZ[*cltypes.Eth1Data](int(b.beaconConfig.Eth1DataVotesLength()))
+ dst.eth1DataVotes = solid.NewStaticListSSZ[*cltypes.Eth1Data](int(b.beaconConfig.Eth1DataVotesLength()), 72)
b.eth1DataVotes.Range(func(index int, value *cltypes.Eth1Data, length int) bool {
dst.eth1DataVotes.Append(value.Copy())
return true
})
-
dst.eth1DepositIndex = b.eth1DepositIndex
b.validators.CopyTo(dst.validators)
b.balances.CopyTo(dst.balances)
@@ -31,9 +28,20 @@ func (b *BeaconState) CopyInto(dst *BeaconState) error {
b.slashings.CopyTo(dst.slashings)
b.previousEpochParticipation.CopyTo(dst.previousEpochParticipation)
b.currentEpochParticipation.CopyTo(dst.currentEpochParticipation)
+ dst.currentEpochAttestations.Clear()
+ dst.previousEpochAttestations.Clear()
+ b.currentEpochAttestations.Range(func(index int, value *solid.PendingAttestation, length int) bool {
+ dst.currentEpochAttestations.Append(value)
+ return true
+ })
+ b.previousEpochAttestations.Range(func(index int, value *solid.PendingAttestation, length int) bool {
+ dst.previousEpochAttestations.Append(value)
+ return true
+ })
dst.finalizedCheckpoint = b.finalizedCheckpoint.Copy()
dst.currentJustifiedCheckpoint = b.currentJustifiedCheckpoint.Copy()
dst.previousJustifiedCheckpoint = b.previousJustifiedCheckpoint.Copy()
+ dst.justificationBits = b.justificationBits.Copy()
if b.version == clparams.Phase0Version {
dst.init()
return nil
@@ -41,7 +49,6 @@ func (b *BeaconState) CopyInto(dst *BeaconState) error {
dst.currentSyncCommittee = b.currentSyncCommittee.Copy()
dst.nextSyncCommittee = b.nextSyncCommittee.Copy()
b.inactivityScores.CopyTo(dst.inactivityScores)
- dst.justificationBits = b.justificationBits.Copy()
if b.version >= clparams.BellatrixVersion {
dst.latestExecutionPayloadHeader = b.latestExecutionPayloadHeader.Copy()
@@ -65,6 +72,5 @@ func (b *BeaconState) CopyInto(dst *BeaconState) error {
func (b *BeaconState) Copy() (*BeaconState, error) {
copied := New(b.BeaconConfig())
- fmt.Println(copied.slashings)
return copied, b.CopyInto(copied)
}
diff --git a/cl/phase1/core/state/raw/events.go b/cl/phase1/core/state/raw/events.go
new file mode 100644
index 00000000000..ba63353d59f
--- /dev/null
+++ b/cl/phase1/core/state/raw/events.go
@@ -0,0 +1,28 @@
+package raw
+
+import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+)
+
+type Events struct {
+ OnNewBlockRoot func(index int, root libcommon.Hash) error
+ OnNewStateRoot func(index int, root libcommon.Hash) error
+ OnRandaoMixChange func(index int, mix [32]byte) error
+ OnNewValidator func(index int, v solid.Validator, balance uint64) error
+ OnNewValidatorBalance func(index int, balance uint64) error
+ OnNewValidatorEffectiveBalance func(index int, balance uint64) error
+ OnNewValidatorActivationEpoch func(index int, epoch uint64) error
+ OnNewValidatorExitEpoch func(index int, epoch uint64) error
+ OnNewValidatorWithdrawableEpoch func(index int, epoch uint64) error
+ OnNewValidatorSlashed func(index int, slashed bool) error
+ OnNewValidatorActivationEligibilityEpoch func(index int, epoch uint64) error
+ OnNewValidatorWithdrawalCredentials func(index int, wc []byte) error
+ OnNewSlashingSegment func(index int, segment uint64) error
+ OnEpochBoundary func(epoch uint64) error
+ OnNewNextSyncCommittee func(committee *solid.SyncCommittee) error
+ OnNewCurrentSyncCommittee func(committee *solid.SyncCommittee) error
+ OnAppendEth1Data func(data *cltypes.Eth1Data) error
+ OnResetParticipation func(previousParticipation *solid.BitList) error
+}
diff --git a/cl/phase1/core/state/raw/getters.go b/cl/phase1/core/state/raw/getters.go
index 9d1d512025b..a1676714b28 100644
--- a/cl/phase1/core/state/raw/getters.go
+++ b/cl/phase1/core/state/raw/getters.go
@@ -68,6 +68,18 @@ func (b *BeaconState) Eth1DataVotes() *solid.ListSSZ[*cltypes.Eth1Data] {
return b.eth1DataVotes
}
+func (b *BeaconState) Slashings() solid.Uint64VectorSSZ {
+ return b.slashings
+}
+
+func (b *BeaconState) Balances() solid.Uint64ListSSZ {
+ return b.balances
+}
+
+func (b *BeaconState) InactivityScores() solid.Uint64ListSSZ {
+ return b.inactivityScores
+}
+
func (b *BeaconState) Eth1DepositIndex() uint64 {
return b.eth1DepositIndex
}
@@ -106,6 +118,13 @@ func (b *BeaconState) ValidatorBalance(index int) (uint64, error) {
return b.balances.Get(index), nil
}
+func (b *BeaconState) ValidatorPublicKey(index int) (libcommon.Bytes48, error) {
+ if index >= b.balances.Length() {
+ return libcommon.Bytes48{}, ErrInvalidValidatorIndex
+ }
+ return b.validators.Get(index).PublicKey(), nil
+}
+
func (b *BeaconState) ValidatorExitEpoch(index int) (uint64, error) {
if index >= b.validators.Length() {
return 0, ErrInvalidValidatorIndex
diff --git a/cl/phase1/core/state/raw/hashing.go b/cl/phase1/core/state/raw/hashing.go
index 6ae869bf53f..72c840fbf35 100644
--- a/cl/phase1/core/state/raw/hashing.go
+++ b/cl/phase1/core/state/raw/hashing.go
@@ -1,10 +1,13 @@
package raw
import (
+ "time"
+
"github.com/ledgerwatch/erigon-lib/common"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/merkle_tree"
+ "github.com/ledgerwatch/log/v3"
)
func (b *BeaconState) HashSSZ() (out [32]byte, err error) {
@@ -82,6 +85,8 @@ func (b *BeaconState) computeDirtyLeaves() error {
b.updateLeaf(StateRootsLeafIndex, root)
}
+ begin := time.Now()
+
// Field(7): HistoricalRoots
if b.isLeafDirty(HistoricalRootsLeafIndex) {
root, err := b.historicalRoots.HashSSZ()
@@ -90,6 +95,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(HistoricalRootsLeafIndex, root)
}
+ log.Trace("HistoricalRoots hashing", "elapsed", time.Since(begin))
// Field(8): Eth1Data
if b.isLeafDirty(Eth1DataLeafIndex) {
@@ -114,6 +120,8 @@ func (b *BeaconState) computeDirtyLeaves() error {
b.updateLeaf(Eth1DepositIndexLeafIndex, merkle_tree.Uint64Root(b.eth1DepositIndex))
}
+ begin = time.Now()
+
// Field(11): Validators
if b.isLeafDirty(ValidatorsLeafIndex) {
root, err := b.validators.HashSSZ()
@@ -123,7 +131,9 @@ func (b *BeaconState) computeDirtyLeaves() error {
b.updateLeaf(ValidatorsLeafIndex, root)
}
+ log.Trace("ValidatorSet hashing", "elapsed", time.Since(begin))
+ begin = time.Now()
// Field(12): Balances
if b.isLeafDirty(BalancesLeafIndex) {
root, err := b.balances.HashSSZ()
@@ -132,7 +142,9 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(BalancesLeafIndex, root)
}
+ log.Trace("Balances hashing", "elapsed", time.Since(begin))
+ begin = time.Now()
// Field(13): RandaoMixes
if b.isLeafDirty(RandaoMixesLeafIndex) {
root, err := b.randaoMixes.HashSSZ()
@@ -141,7 +153,9 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(RandaoMixesLeafIndex, root)
}
+ log.Trace("RandaoMixes hashing", "elapsed", time.Since(begin))
+ begin = time.Now()
// Field(14): Slashings
if b.isLeafDirty(SlashingsLeafIndex) {
root, err := b.slashings.HashSSZ()
@@ -150,8 +164,10 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(SlashingsLeafIndex, root)
}
+ log.Trace("Slashings hashing", "elapsed", time.Since(begin))
// Field(15) and Field(16) are special due to the fact that they have different format in Phase0.
+ begin = time.Now()
// Field(15): PreviousEpochParticipation
if b.isLeafDirty(PreviousEpochParticipationLeafIndex) {
var root libcommon.Hash
@@ -167,6 +183,9 @@ func (b *BeaconState) computeDirtyLeaves() error {
b.updateLeaf(PreviousEpochParticipationLeafIndex, root)
}
+ log.Trace("PreviousEpochParticipation hashing", "elapsed", time.Since(begin))
+
+ begin = time.Now()
// Field(16): CurrentEpochParticipation
if b.isLeafDirty(CurrentEpochParticipationLeafIndex) {
@@ -182,6 +201,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(CurrentEpochParticipationLeafIndex, root)
}
+ log.Trace("CurrentEpochParticipation hashing", "elapsed", time.Since(begin))
// Field(17): JustificationBits
if b.isLeafDirty(JustificationBitsLeafIndex) {
@@ -218,6 +238,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
if b.version == clparams.Phase0Version {
return nil
}
+ begin = time.Now()
// Field(21): Inactivity Scores
if b.isLeafDirty(InactivityScoresLeafIndex) {
root, err := b.inactivityScores.HashSSZ()
@@ -226,6 +247,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(InactivityScoresLeafIndex, root)
}
+ log.Trace("InactivityScores hashing", "elapsed", time.Since(begin))
// Field(22): CurrentSyncCommitte
if b.isLeafDirty(CurrentSyncCommitteeLeafIndex) {
@@ -271,6 +293,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
b.updateLeaf(NextWithdrawalValidatorIndexLeafIndex, merkle_tree.Uint64Root(b.nextWithdrawalValidatorIndex))
}
+ begin = time.Now()
// Field(27): HistoricalSummaries
if b.isLeafDirty(HistoricalSummariesLeafIndex) {
root, err := b.historicalSummaries.HashSSZ()
@@ -279,6 +302,7 @@ func (b *BeaconState) computeDirtyLeaves() error {
}
b.updateLeaf(HistoricalSummariesLeafIndex, root)
}
+ log.Trace("HistoricalSummaries hashing", "elapsed", time.Since(begin))
return nil
}
diff --git a/cl/phase1/core/state/raw/setters.go b/cl/phase1/core/state/raw/setters.go
index 8753b816948..5abb514333f 100644
--- a/cl/phase1/core/state/raw/setters.go
+++ b/cl/phase1/core/state/raw/setters.go
@@ -13,6 +13,10 @@ func (b *BeaconState) SetVersion(version clparams.StateVersion) {
func (b *BeaconState) SetSlot(slot uint64) {
b.slot = slot
+ if b.events.OnEpochBoundary != nil && b.slot%b.beaconConfig.SlotsPerEpoch == 0 {
+ b.events.OnEpochBoundary(b.slot / b.beaconConfig.SlotsPerEpoch)
+ }
+
b.markLeaf(SlotLeafIndex)
}
@@ -27,47 +31,74 @@ func (b *BeaconState) SetLatestBlockHeader(header *cltypes.BeaconBlockHeader) {
}
func (b *BeaconState) SetBlockRootAt(index int, root libcommon.Hash) {
+ if b.events.OnNewBlockRoot != nil {
+ b.events.OnNewBlockRoot(index, root)
+ }
b.markLeaf(BlockRootsLeafIndex)
b.blockRoots.Set(index, root)
}
func (b *BeaconState) SetStateRootAt(index int, root libcommon.Hash) {
+ if b.events.OnNewStateRoot != nil {
+ b.events.OnNewStateRoot(index, root)
+ }
b.markLeaf(StateRootsLeafIndex)
b.stateRoots.Set(index, root)
}
func (b *BeaconState) SetWithdrawalCredentialForValidatorAtIndex(index int, creds libcommon.Hash) {
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetWithdrawalCredentials(creds)
+ if b.events.OnNewValidatorWithdrawalCredentials != nil {
+ b.events.OnNewValidatorWithdrawalCredentials(index, creds[:])
+ }
+ b.validators.SetWithdrawalCredentialForValidatorAtIndex(index, creds)
}
func (b *BeaconState) SetExitEpochForValidatorAtIndex(index int, epoch uint64) {
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetExitEpoch(epoch)
+ if b.events.OnNewValidatorExitEpoch != nil {
+ b.events.OnNewValidatorExitEpoch(index, epoch)
+ }
+ b.validators.SetExitEpochForValidatorAtIndex(index, epoch)
}
func (b *BeaconState) SetWithdrawableEpochForValidatorAtIndex(index int, epoch uint64) error {
if index >= b.balances.Length() {
return ErrInvalidValidatorIndex
}
+ if b.events.OnNewValidatorWithdrawableEpoch != nil {
+ b.events.OnNewValidatorWithdrawableEpoch(index, epoch)
+ }
+
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetWithdrawableEpoch(epoch)
+ b.validators.SetWithdrawableEpochForValidatorAtIndex(index, epoch)
return nil
}
func (b *BeaconState) SetEffectiveBalanceForValidatorAtIndex(index int, balance uint64) {
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetEffectiveBalance(balance)
+ if b.events.OnNewValidatorEffectiveBalance != nil {
+ b.events.OnNewValidatorEffectiveBalance(index, balance)
+ }
+ b.validators.SetEffectiveBalanceForValidatorAtIndex(index, balance)
}
func (b *BeaconState) SetActivationEpochForValidatorAtIndex(index int, epoch uint64) {
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetActivationEpoch(epoch)
+ if b.events.OnNewValidatorActivationEpoch != nil {
+ b.events.OnNewValidatorActivationEpoch(index, epoch)
+ }
+
+ b.validators.SetActivationEpochForValidatorAtIndex(index, epoch)
}
func (b *BeaconState) SetActivationEligibilityEpochForValidatorAtIndex(index int, epoch uint64) {
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetActivationEligibilityEpoch(epoch)
+ if b.events.OnNewValidatorActivationEligibilityEpoch != nil {
+ b.events.OnNewValidatorActivationEligibilityEpoch(index, epoch)
+ }
+
+ b.validators.SetActivationEligibilityEpochForValidatorAtIndex(index, epoch)
}
func (b *BeaconState) SetEth1Data(eth1Data *cltypes.Eth1Data) {
@@ -75,7 +106,15 @@ func (b *BeaconState) SetEth1Data(eth1Data *cltypes.Eth1Data) {
b.eth1Data = eth1Data
}
+func (b *BeaconState) SetEth1DataVotes(votes *solid.ListSSZ[*cltypes.Eth1Data]) {
+ b.markLeaf(Eth1DataVotesLeafIndex)
+ b.eth1DataVotes = votes
+}
+
func (b *BeaconState) AddEth1DataVote(vote *cltypes.Eth1Data) {
+ if b.events.OnAppendEth1Data != nil {
+ b.events.OnAppendEth1Data(vote)
+ }
b.markLeaf(Eth1DataVotesLeafIndex)
b.eth1DataVotes.Append(vote)
}
@@ -90,12 +129,33 @@ func (b *BeaconState) SetEth1DepositIndex(eth1DepositIndex uint64) {
b.eth1DepositIndex = eth1DepositIndex
}
+func (b *BeaconState) SetValidators(validators *solid.ValidatorSet) {
+ b.markLeaf(ValidatorsLeafIndex)
+ b.validators = validators
+}
+
+func (b *BeaconState) SetRandaoMixes(mixes solid.HashVectorSSZ) {
+ b.markLeaf(RandaoMixesLeafIndex)
+ b.randaoMixes = mixes
+}
+
+func (b *BeaconState) SetHistoricalRoots(hRoots solid.HashListSSZ) {
+ b.markLeaf(HistoricalRootsLeafIndex)
+ b.historicalRoots = hRoots
+}
+
func (b *BeaconState) SetValidatorSlashed(index int, slashed bool) error {
if index >= b.balances.Length() {
return ErrInvalidValidatorIndex
}
b.markLeaf(ValidatorsLeafIndex)
- b.validators.Get(index).SetSlashed(slashed)
+ if b.events.OnNewValidatorSlashed != nil {
+ if err := b.events.OnNewValidatorSlashed(index, slashed); err != nil {
+ return err
+ }
+ }
+
+ b.validators.SetValidatorSlashed(index, slashed)
return nil
}
@@ -170,12 +230,20 @@ func (b *BeaconState) SetValidatorBalance(index int, balance uint64) error {
if index >= b.balances.Length() {
return ErrInvalidValidatorIndex
}
+ if b.events.OnNewValidatorBalance != nil {
+ if err := b.events.OnNewValidatorBalance(index, balance); err != nil {
+ return err
+ }
+ }
b.markLeaf(BalancesLeafIndex)
b.balances.Set(index, balance)
return nil
}
func (b *BeaconState) AddValidator(validator solid.Validator, balance uint64) {
+ if b.events.OnNewValidator != nil {
+ b.events.OnNewValidator(b.validators.Length(), validator, balance)
+ }
b.validators.Append(validator)
b.balances.Append(balance)
@@ -184,18 +252,20 @@ func (b *BeaconState) AddValidator(validator solid.Validator, balance uint64) {
}
func (b *BeaconState) SetRandaoMixAt(index int, mix libcommon.Hash) {
+ if b.events.OnRandaoMixChange != nil {
+ b.events.OnRandaoMixChange(index, mix)
+ }
b.markLeaf(RandaoMixesLeafIndex)
b.randaoMixes.Set(index, mix)
}
func (b *BeaconState) SetSlashingSegmentAt(index int, segment uint64) {
+ if b.events.OnNewSlashingSegment != nil {
+ b.events.OnNewSlashingSegment(index, segment)
+ }
b.markLeaf(SlashingsLeafIndex)
b.slashings.Set(index, segment)
}
-func (b *BeaconState) IncrementSlashingSegmentAt(index int, delta uint64) {
- b.markLeaf(SlashingsLeafIndex)
- b.slashings.Set(index, b.SlashingSegmentAt(index)+delta)
-}
func (b *BeaconState) SetEpochParticipationForValidatorIndex(isCurrentEpoch bool, index int, flags cltypes.ParticipationFlags) {
if isCurrentEpoch {
@@ -214,6 +284,9 @@ func (b *BeaconState) SetValidatorAtIndex(index int, validator solid.Validator)
func (b *BeaconState) ResetEpochParticipation() {
b.previousEpochParticipation = b.currentEpochParticipation
+ if b.events.OnResetParticipation != nil {
+ b.events.OnResetParticipation(b.previousEpochParticipation)
+ }
b.currentEpochParticipation = solid.NewBitList(b.validators.Length(), int(b.beaconConfig.ValidatorRegistryLimit))
b.markLeaf(CurrentEpochParticipationLeafIndex)
b.markLeaf(PreviousEpochParticipationLeafIndex)
@@ -240,11 +313,17 @@ func (b *BeaconState) SetFinalizedCheckpoint(finalizedCheckpoint solid.Checkpoin
}
func (b *BeaconState) SetCurrentSyncCommittee(currentSyncCommittee *solid.SyncCommittee) {
+ if b.events.OnNewCurrentSyncCommittee != nil {
+ b.events.OnNewCurrentSyncCommittee(currentSyncCommittee)
+ }
b.currentSyncCommittee = currentSyncCommittee
b.markLeaf(CurrentSyncCommitteeLeafIndex)
}
func (b *BeaconState) SetNextSyncCommittee(nextSyncCommittee *solid.SyncCommittee) {
+ if b.events.OnNewNextSyncCommittee != nil {
+ b.events.OnNewNextSyncCommittee(nextSyncCommittee)
+ }
b.nextSyncCommittee = nextSyncCommittee
b.markLeaf(NextSyncCommitteeLeafIndex)
}
@@ -269,6 +348,11 @@ func (b *BeaconState) ResetHistoricalSummaries() {
b.markLeaf(HistoricalSummariesLeafIndex)
}
+func (b *BeaconState) SetHistoricalSummaries(l *solid.ListSSZ[*cltypes.HistoricalSummary]) {
+ b.historicalSummaries = l
+ b.markLeaf(HistoricalSummariesLeafIndex)
+}
+
func (b *BeaconState) AddHistoricalSummary(summary *cltypes.HistoricalSummary) {
b.historicalSummaries.Append(summary)
b.markLeaf(HistoricalSummariesLeafIndex)
@@ -287,6 +371,11 @@ func (b *BeaconState) SetInactivityScores(scores []uint64) {
b.markLeaf(InactivityScoresLeafIndex)
}
+func (b *BeaconState) SetInactivityScoresRaw(scores solid.Uint64VectorSSZ) {
+ b.inactivityScores = scores
+ b.markLeaf(InactivityScoresLeafIndex)
+}
+
func (b *BeaconState) AddInactivityScore(score uint64) {
b.inactivityScores.Append(score)
b.markLeaf(InactivityScoresLeafIndex)
@@ -349,12 +438,62 @@ func (b *BeaconState) ResetCurrentEpochAttestations() {
b.currentEpochAttestations = solid.NewDynamicListSSZ[*solid.PendingAttestation](int(b.beaconConfig.PreviousEpochAttestationsLength()))
}
+func (b *BeaconState) SetCurrentEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) {
+ b.markLeaf(CurrentEpochParticipationLeafIndex)
+ b.currentEpochAttestations = attestations
+}
+
func (b *BeaconState) SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) {
b.markLeaf(PreviousEpochParticipationLeafIndex)
b.previousEpochAttestations = attestations
}
+func (b *BeaconState) SetCurrentEpochParticipation(participation *solid.BitList) {
+ b.markLeaf(CurrentEpochParticipationLeafIndex)
+ b.currentEpochParticipation = participation
+}
+
+func (b *BeaconState) SetPreviousEpochParticipation(participation *solid.BitList) {
+ b.markLeaf(PreviousEpochParticipationLeafIndex)
+ b.previousEpochParticipation = participation
+}
+
func (b *BeaconState) ResetPreviousEpochAttestations() {
b.markLeaf(PreviousEpochParticipationLeafIndex)
b.previousEpochAttestations = solid.NewDynamicListSSZ[*solid.PendingAttestation](int(b.beaconConfig.PreviousEpochAttestationsLength()))
}
+
+// SetGenesisTime sets the genesis time of the BeaconState.
+func (b *BeaconState) SetGenesisTime(time uint64) {
+ b.markLeaf(GenesisTimeLeafIndex)
+ b.genesisTime = time
+}
+
+// SetGenesisValidatorsRoot sets the genesis validators root of the BeaconState.
+func (b *BeaconState) SetGenesisValidatorsRoot(root libcommon.Hash) {
+ b.markLeaf(GenesisValidatorsRootLeafIndex)
+ b.genesisValidatorsRoot = root
+}
+
+// SetBlockRoots sets the block roots of the BeaconState.
+func (b *BeaconState) SetBlockRoots(roots solid.HashVectorSSZ) {
+ b.markLeaf(BlockRootsLeafIndex)
+ b.blockRoots = roots
+}
+
+// SetStateRoots sets the state roots of the BeaconState.
+func (b *BeaconState) SetStateRoots(roots solid.HashVectorSSZ) {
+ b.markLeaf(StateRootsLeafIndex)
+ b.stateRoots = roots
+}
+
+// SetBlockRoots sets the block roots of the BeaconState.
+func (b *BeaconState) SetBalances(balances solid.Uint64VectorSSZ) {
+ b.markLeaf(BalancesLeafIndex)
+ b.balances = balances
+}
+
+func (b *BeaconState) SetSlashings(slashings solid.Uint64VectorSSZ) {
+ b.markLeaf(SlashingsLeafIndex)
+ b.slashings = slashings
+}
diff --git a/cl/phase1/core/state/raw/setters_test.go b/cl/phase1/core/state/raw/setters_test.go
index 98ea9fcf7e7..166e5d10ce6 100644
--- a/cl/phase1/core/state/raw/setters_test.go
+++ b/cl/phase1/core/state/raw/setters_test.go
@@ -330,15 +330,6 @@ func TestBeaconState_SetSlashingSegmentAt(t *testing.T) {
assert.Equal(t, segment, state.slashings.Get(index))
}
-func TestBeaconState_IncrementSlashingSegmentAt(t *testing.T) {
- state := GetTestState()
- index := 0
- delta := uint64(10)
- state.SetSlashingSegmentAt(index, 100)
- state.IncrementSlashingSegmentAt(index, delta)
- assert.Equal(t, uint64(110), state.slashings.Get(index))
-}
-
func TestBeaconState_SetEpochParticipationForValidatorIndex(t *testing.T) {
state := GetTestState()
isCurrentEpoch := true
diff --git a/cl/phase1/core/state/raw/state.go b/cl/phase1/core/state/raw/state.go
index dc8c92b849a..3f6533e4e92 100644
--- a/cl/phase1/core/state/raw/state.go
+++ b/cl/phase1/core/state/raw/state.go
@@ -1,6 +1,8 @@
package raw
import (
+ "encoding/json"
+
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
@@ -8,10 +10,10 @@ import (
)
const (
- blockRootsLength = 8192
- stateRootsLength = 8192
- randoMixesLength = 65536
- slashingsLength = 8192
+ BlockRootsLength = 8192
+ StateRootsLength = 8192
+ RandoMixesLength = 65536
+ SlashingsLength = 8192
)
type BeaconState struct {
@@ -58,6 +60,7 @@ type BeaconState struct {
// cl version
version clparams.StateVersion // State version
beaconConfig *clparams.BeaconChainConfig
+ events Events
}
func New(cfg *clparams.BeaconChainConfig) *BeaconState {
@@ -76,7 +79,7 @@ func New(cfg *clparams.BeaconChainConfig) *BeaconState {
balances: solid.NewUint64ListSSZ(int(cfg.ValidatorRegistryLimit)),
previousEpochParticipation: solid.NewBitList(0, int(cfg.ValidatorRegistryLimit)),
currentEpochParticipation: solid.NewBitList(0, int(cfg.ValidatorRegistryLimit)),
- slashings: solid.NewUint64VectorSSZ(slashingsLength),
+ slashings: solid.NewUint64VectorSSZ(SlashingsLength),
currentEpochAttestations: solid.NewDynamicListSSZ[*solid.PendingAttestation](int(cfg.CurrentEpochAttestationsLength())),
previousEpochAttestations: solid.NewDynamicListSSZ[*solid.PendingAttestation](int(cfg.PreviousEpochAttestationsLength())),
historicalRoots: solid.NewHashList(int(cfg.HistoricalRootsLimit)),
@@ -99,3 +102,98 @@ func (b *BeaconState) init() error {
}
return nil
}
+
+func (b *BeaconState) MarshalJSON() ([]byte, error) {
+ obj := map[string]interface{}{
+ "genesis_time": b.genesisTime,
+ "genesis_validators_root": b.genesisValidatorsRoot,
+ "slot": b.slot,
+ "fork": b.fork,
+ "latest_block_header": b.latestBlockHeader,
+ "block_roots": b.blockRoots,
+ "state_roots": b.stateRoots,
+ "historical_roots": b.historicalRoots,
+ "eth1_data": b.eth1Data,
+ "eth1_data_votes": b.eth1DataVotes,
+ "eth1_deposit_index": b.eth1DepositIndex,
+ "validators": b.validators,
+ "balances": b.balances,
+ "randao_mixes": b.randaoMixes,
+ "slashings": b.slashings,
+ "previous_epoch_participation": b.previousEpochParticipation,
+ "current_epoch_participation": b.currentEpochParticipation,
+ "justification_bits": b.justificationBits,
+ "previous_justified_checkpoint": b.previousJustifiedCheckpoint,
+ "current_justified_checkpoint": b.currentJustifiedCheckpoint,
+ "finalized_checkpoint": b.finalizedCheckpoint,
+ }
+ if b.version == clparams.Phase0Version {
+ obj["previous_epoch_attestations"] = b.previousEpochAttestations
+ obj["current_epoch_attestations"] = b.currentEpochAttestations
+ }
+
+ if b.version >= clparams.AltairVersion {
+ obj["inactivity_scores"] = b.inactivityScores
+ obj["current_sync_committee"] = b.currentSyncCommittee
+ obj["next_sync_committee"] = b.nextSyncCommittee
+ }
+ if b.version >= clparams.BellatrixVersion {
+ obj["latest_execution_payload_header"] = b.latestExecutionPayloadHeader
+ }
+ if b.version >= clparams.CapellaVersion {
+ obj["next_withdrawal_index"] = b.nextWithdrawalIndex
+ obj["next_withdrawal_validator_index"] = b.nextWithdrawalValidatorIndex
+ obj["historical_summaries"] = b.historicalSummaries
+ }
+ return json.Marshal(obj)
+}
+
+// Get validators field
+func (b *BeaconState) Validators() *solid.ValidatorSet {
+ return b.validators
+}
+
+func (b *BeaconState) SetEvents(events Events) {
+ b.events = events
+}
+
+func (b *BeaconState) HistoricalSummariesLength() uint64 {
+ return uint64(b.historicalSummaries.Len())
+}
+
+func (b *BeaconState) HistoricalRootsLength() uint64 {
+ return uint64(b.historicalRoots.Length())
+}
+
+// Dangerous
+func (b *BeaconState) RawInactivityScores() []byte {
+ return b.inactivityScores.Bytes()
+}
+
+func (b *BeaconState) RawBalances() []byte {
+ return b.balances.Bytes()
+}
+
+func (b *BeaconState) RawValidatorSet() []byte {
+ return b.validators.Bytes()
+}
+
+func (b *BeaconState) RawPreviousEpochParticipation() []byte {
+ return b.previousEpochParticipation.Bytes()
+}
+
+func (b *BeaconState) RawCurrentEpochParticipation() []byte {
+ return b.currentEpochParticipation.Bytes()
+}
+
+func (b *BeaconState) HistoricalRoot(index int) common.Hash {
+ return b.historicalRoots.Get(index)
+}
+
+func (b *BeaconState) HistoricalSummary(index int) *cltypes.HistoricalSummary {
+ return b.historicalSummaries.Get(index)
+}
+
+func (b *BeaconState) RawSlashings() []byte {
+ return b.slashings.Bytes()
+}
diff --git a/cl/phase1/core/state/shuffling/shuffling.go b/cl/phase1/core/state/shuffling/shuffling.go
index 4bec3242a4b..ed7d7322d80 100644
--- a/cl/phase1/core/state/shuffling/shuffling.go
+++ b/cl/phase1/core/state/shuffling/shuffling.go
@@ -19,7 +19,7 @@ func ComputeProposerIndex(b *raw.BeaconState, indices []uint64, seed [32]byte) (
input := make([]byte, 40)
preInputs := ComputeShuffledIndexPreInputs(b.BeaconConfig(), seed)
for {
- shuffled, err := ComputeShuffledIndex(b.BeaconConfig(), i%total, total, seed, preInputs, utils.Keccak256)
+ shuffled, err := ComputeShuffledIndex(b.BeaconConfig(), i%total, total, seed, preInputs, utils.Sha256)
if err != nil {
return 0, err
}
@@ -29,7 +29,7 @@ func ComputeProposerIndex(b *raw.BeaconState, indices []uint64, seed [32]byte) (
}
copy(input, seed[:])
binary.LittleEndian.PutUint64(input[32:], i/32)
- randomByte := uint64(utils.Keccak256(input)[i%32])
+ randomByte := uint64(utils.Sha256(input)[i%32])
validator, err := b.ValidatorForValidatorIndex(int(candidateIndex))
if err != nil {
return 0, err
diff --git a/cl/phase1/core/state/shuffling/shuffling_test.go b/cl/phase1/core/state/shuffling/shuffling_test.go
index 2a80c1295b1..a664e929095 100644
--- a/cl/phase1/core/state/shuffling/shuffling_test.go
+++ b/cl/phase1/core/state/shuffling/shuffling_test.go
@@ -4,10 +4,11 @@ import (
_ "embed"
"testing"
+ "github.com/ledgerwatch/erigon-lib/common/eth2shuffle"
+
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/raw"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
- "github.com/ledgerwatch/erigon/common/eth2shuffle"
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/cl/clparams"
@@ -15,7 +16,7 @@ import (
)
func BenchmarkLambdaShuffledIndex(b *testing.B) {
- keccakOptimized := utils.OptimizedKeccak256()
+ keccakOptimized := utils.OptimizedSha256NotThreadSafe()
eth2ShuffleHash := func(data []byte) []byte {
hashed := keccakOptimized(data)
return hashed[:]
@@ -30,7 +31,7 @@ func BenchmarkLambdaShuffledIndex(b *testing.B) {
// Faster by ~40%, the effects of it will be felt mostly on computation of the proposer index.
func BenchmarkErigonShuffledIndex(b *testing.B) {
s := state.New(&clparams.MainnetBeaconConfig)
- keccakOptimized := utils.OptimizedKeccak256NotThreadSafe()
+ keccakOptimized := utils.OptimizedSha256NotThreadSafe()
seed := [32]byte{2, 35, 6}
preInputs := shuffling.ComputeShuffledIndexPreInputs(s.BeaconConfig(), seed)
diff --git a/cl/phase1/core/state/shuffling/util.go b/cl/phase1/core/state/shuffling/util.go
index 0eb6e9e67ce..afd16e67c47 100644
--- a/cl/phase1/core/state/shuffling/util.go
+++ b/cl/phase1/core/state/shuffling/util.go
@@ -4,10 +4,11 @@ import (
"encoding/binary"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/eth2shuffle"
+
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common/eth2shuffle"
)
func ComputeShuffledIndex(conf *clparams.BeaconChainConfig, ind, ind_count uint64, seed [32]byte, preInputs [][32]byte, hashFunc utils.HashFunc) (uint64, error) {
@@ -49,7 +50,7 @@ func ComputeShuffledIndex(conf *clparams.BeaconChainConfig, ind, ind_count uint6
func ComputeShuffledIndexPreInputs(conf *clparams.BeaconChainConfig, seed [32]byte) [][32]byte {
ret := make([][32]byte, conf.ShuffleRoundCount)
for i := range ret {
- ret[i] = utils.Keccak256(append(seed[:], byte(i)))
+ ret[i] = utils.Sha256(append(seed[:], byte(i)))
}
return ret
}
@@ -59,19 +60,18 @@ func GetSeed(beaconConfig *clparams.BeaconChainConfig, mix common.Hash, epoch ui
binary.LittleEndian.PutUint64(epochByteArray, epoch)
input := append(domain[:], epochByteArray...)
input = append(input, mix[:]...)
- return utils.Keccak256(input)
+ return utils.Sha256(input)
}
-func ComputeShuffledIndicies(beaconConfig *clparams.BeaconChainConfig, mix common.Hash, indicies []uint64, slot uint64) []uint64 {
- shuffledIndicies := make([]uint64, len(indicies))
- copy(shuffledIndicies, indicies)
- hashFunc := utils.OptimizedKeccak256NotThreadSafe()
+func ComputeShuffledIndicies(beaconConfig *clparams.BeaconChainConfig, mix common.Hash, out, indicies []uint64, slot uint64) []uint64 {
+ copy(out, indicies)
+ hashFunc := utils.OptimizedSha256NotThreadSafe()
epoch := slot / beaconConfig.SlotsPerEpoch
seed := GetSeed(beaconConfig, mix, epoch, beaconConfig.DomainBeaconAttester)
eth2ShuffleHashFunc := func(data []byte) []byte {
hashed := hashFunc(data)
return hashed[:]
}
- eth2shuffle.UnshuffleList(eth2ShuffleHashFunc, shuffledIndicies, uint8(beaconConfig.ShuffleRoundCount), seed)
- return shuffledIndicies
+ eth2shuffle.UnshuffleList(eth2ShuffleHashFunc, out, uint8(beaconConfig.ShuffleRoundCount), seed)
+ return out
}
diff --git a/cl/phase1/core/state/ssz.go b/cl/phase1/core/state/ssz.go
index 12c817c570b..057666abfa9 100644
--- a/cl/phase1/core/state/ssz.go
+++ b/cl/phase1/core/state/ssz.go
@@ -1,32 +1,31 @@
package state
import (
- "github.com/ledgerwatch/erigon/metrics/methelp"
-
+ "github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon-lib/types/clonable"
)
func (b *CachingBeaconState) EncodeSSZ(buf []byte) ([]byte, error) {
- h := methelp.NewHistTimer("encode_ssz_beacon_state_dur")
+ h := metrics.NewHistTimer("encode_ssz_beacon_state_dur")
bts, err := b.BeaconState.EncodeSSZ(buf)
if err != nil {
return nil, err
}
h.PutSince()
- sz := methelp.NewHistTimer("encode_ssz_beacon_state_size")
- sz.Update(float64(len(bts)))
+ sz := metrics.NewHistTimer("encode_ssz_beacon_state_size")
+ sz.Observe(float64(len(bts)))
return bts, err
}
func (b *CachingBeaconState) DecodeSSZ(buf []byte, version int) error {
- h := methelp.NewHistTimer("decode_ssz_beacon_state_dur")
+ h := metrics.NewHistTimer("decode_ssz_beacon_state_dur")
if err := b.BeaconState.DecodeSSZ(buf, version); err != nil {
return err
}
- sz := methelp.NewHistTimer("decode_ssz_beacon_state_size")
- sz.Update(float64(len(buf)))
+ sz := metrics.NewHistTimer("decode_ssz_beacon_state_size")
+ sz.Observe(float64(len(buf)))
h.PutSince()
- return b.initBeaconState()
+ return b.InitBeaconState()
}
// SSZ size of the Beacon State
diff --git a/cl/phase1/core/state/ssz_test.go b/cl/phase1/core/state/ssz_test.go
index 9327d357e7a..1bfffa37173 100644
--- a/cl/phase1/core/state/ssz_test.go
+++ b/cl/phase1/core/state/ssz_test.go
@@ -1,11 +1,10 @@
-package state_test
+package state
import (
+ "bytes"
_ "embed"
"testing"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
-
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/clparams"
@@ -20,21 +19,35 @@ var capellaBeaconSnappyTest []byte
var phase0BeaconSnappyTest []byte
func TestBeaconStateCapellaEncodingDecoding(t *testing.T) {
- state := state.New(&clparams.MainnetBeaconConfig)
+ state := New(&clparams.MainnetBeaconConfig)
decodedSSZ, err := utils.DecompressSnappy(capellaBeaconSnappyTest)
require.NoError(t, err)
require.NoError(t, state.DecodeSSZ(decodedSSZ, int(clparams.CapellaVersion)))
root, err := state.HashSSZ()
require.NoError(t, err)
require.Equal(t, libcommon.Hash(root), libcommon.HexToHash("0xb3012b73c02ab66b2779d996f9d33d36e58bf71ffc8f3e12e07024606617a9c0"))
+
}
func TestBeaconStatePhase0EncodingDecoding(t *testing.T) {
- state := state.New(&clparams.MainnetBeaconConfig)
+ state := New(&clparams.MainnetBeaconConfig)
decodedSSZ, err := utils.DecompressSnappy(phase0BeaconSnappyTest)
require.NoError(t, err)
state.DecodeSSZ(decodedSSZ, int(clparams.Phase0Version))
root, err := state.HashSSZ()
require.NoError(t, err)
require.Equal(t, libcommon.Hash(root), libcommon.HexToHash("0xf23b6266af40567516afeee250c1f8c06e9800f34a990a210604c380b506e053"))
+ // Lets test the caches too
+ var w bytes.Buffer
+ require.NoError(t, state.EncodeCaches(&w))
+ values1 := state.activeValidatorsCache.Values()
+ keys1 := state.activeValidatorsCache.Keys()
+ values2 := state.shuffledSetsCache.Values()
+ keys2 := state.shuffledSetsCache.Keys()
+
+ require.NoError(t, state.DecodeCaches(&w))
+ require.Equal(t, values1, state.activeValidatorsCache.Values())
+ require.Equal(t, keys1, state.activeValidatorsCache.Keys())
+ require.Equal(t, values2, state.shuffledSetsCache.Values())
+ require.Equal(t, keys2, state.shuffledSetsCache.Keys())
}
diff --git a/cl/phase1/core/state/upgrade.go b/cl/phase1/core/state/upgrade.go
index 2270fe37113..caa12f7ead3 100644
--- a/cl/phase1/core/state/upgrade.go
+++ b/cl/phase1/core/state/upgrade.go
@@ -25,7 +25,7 @@ func (b *CachingBeaconState) UpgradeToAltair() error {
// Fill in previous epoch participation from the pre state's pending attestations
if err := solid.RangeErr[*solid.PendingAttestation](b.PreviousEpochAttestations(), func(i1 int, pa *solid.PendingAttestation, i2 int) error {
attestationData := pa.AttestantionData()
- flags, err := b.GetAttestationParticipationFlagIndicies(attestationData, pa.InclusionDelay())
+ flags, err := b.GetAttestationParticipationFlagIndicies(attestationData, pa.InclusionDelay(), false)
if err != nil {
return err
}
diff --git a/cl/phase1/core/state/util.go b/cl/phase1/core/state/util.go
index ba34e86a66c..3706283ec6b 100644
--- a/cl/phase1/core/state/util.go
+++ b/cl/phase1/core/state/util.go
@@ -29,7 +29,7 @@ func GetIndexedAttestation(attestation *solid.Attestation, attestingIndicies []u
return attestingIndicies[i] < attestingIndicies[j]
})
return &cltypes.IndexedAttestation{
- AttestingIndices: solid.NewUint64ListSSZFromSlice(2048, attestingIndicies),
+ AttestingIndices: solid.NewRawUint64List(2048, attestingIndicies),
Data: attestation.AttestantionData(),
Signature: attestation.Signature(),
}
diff --git a/cl/phase1/execution_client/execution_client.go b/cl/phase1/execution_client/execution_client.go
deleted file mode 100644
index fdfbb3aa6c3..00000000000
--- a/cl/phase1/execution_client/execution_client.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package execution_client
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "math/big"
- "time"
-
- "github.com/c2h5oh/datasize"
- "github.com/holiman/uint256"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/gointerfaces"
- "github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
- types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
- "github.com/ledgerwatch/log/v3"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/keepalive"
-
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client/rpc_helper"
- "github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/turbo/engineapi/engine_types"
-)
-
-const fcuTimeout = 12 * time.Second
-
-// ExecutionClient interfaces with the Erigon-EL component consensus side.
-type ExecutionClient struct {
- client execution.ExecutionClient
- ctx context.Context
-}
-
-func HeaderRpcToHeader(header *execution.Header) (*types.Header, error) {
- var blockNonce types.BlockNonce
- binary.BigEndian.PutUint64(blockNonce[:], header.Nonce)
- h := &types.Header{
- ParentHash: gointerfaces.ConvertH256ToHash(header.ParentHash),
- UncleHash: gointerfaces.ConvertH256ToHash(header.OmmerHash),
- Coinbase: gointerfaces.ConvertH160toAddress(header.Coinbase),
- Root: gointerfaces.ConvertH256ToHash(header.StateRoot),
- TxHash: gointerfaces.ConvertH256ToHash(header.TransactionHash),
- ReceiptHash: gointerfaces.ConvertH256ToHash(header.ReceiptRoot),
- Bloom: gointerfaces.ConvertH2048ToBloom(header.LogsBloom),
- Difficulty: gointerfaces.ConvertH256ToUint256Int(header.Difficulty).ToBig(),
- Number: big.NewInt(int64(header.BlockNumber)),
- GasLimit: header.GasLimit,
- GasUsed: header.GasUsed,
- Time: header.Timestamp,
- Extra: header.ExtraData,
- MixDigest: gointerfaces.ConvertH256ToHash(header.PrevRandao),
- Nonce: blockNonce,
- }
- if header.BaseFeePerGas != nil {
- h.BaseFee = gointerfaces.ConvertH256ToUint256Int(header.BaseFeePerGas).ToBig()
- }
- if header.WithdrawalHash != nil {
- h.WithdrawalsHash = new(libcommon.Hash)
- *h.WithdrawalsHash = gointerfaces.ConvertH256ToHash(header.WithdrawalHash)
- }
- blockHash := gointerfaces.ConvertH256ToHash(header.BlockHash)
- if blockHash != h.Hash() {
- return nil, fmt.Errorf("block %d, %x has invalid hash. expected: %x", header.BlockNumber, h.Hash(), blockHash)
- }
- return h, nil
-}
-
-func HeaderToHeaderRPC(header *types.Header) *execution.Header {
- difficulty := new(uint256.Int)
- difficulty.SetFromBig(header.Difficulty)
-
- var baseFeeReply *types2.H256
- if header.BaseFee != nil {
- var baseFee uint256.Int
- baseFee.SetFromBig(header.BaseFee)
- baseFeeReply = gointerfaces.ConvertUint256IntToH256(&baseFee)
- }
- var withdrawalHashReply *types2.H256
- if header.WithdrawalsHash != nil {
- withdrawalHashReply = gointerfaces.ConvertHashToH256(*header.WithdrawalsHash)
- }
- return &execution.Header{
- ParentHash: gointerfaces.ConvertHashToH256(header.ParentHash),
- Coinbase: gointerfaces.ConvertAddressToH160(header.Coinbase),
- StateRoot: gointerfaces.ConvertHashToH256(header.Root),
- TransactionHash: gointerfaces.ConvertHashToH256(header.TxHash),
- LogsBloom: gointerfaces.ConvertBytesToH2048(header.Bloom[:]),
- ReceiptRoot: gointerfaces.ConvertHashToH256(header.ReceiptHash),
- PrevRandao: gointerfaces.ConvertHashToH256(header.MixDigest),
- BlockNumber: header.Number.Uint64(),
- Nonce: header.Nonce.Uint64(),
- GasLimit: header.GasLimit,
- GasUsed: header.GasUsed,
- Timestamp: header.Time,
- ExtraData: header.Extra,
- Difficulty: gointerfaces.ConvertUint256IntToH256(difficulty),
- BlockHash: gointerfaces.ConvertHashToH256(header.Hash()),
- OmmerHash: gointerfaces.ConvertHashToH256(header.UncleHash),
- BaseFeePerGas: baseFeeReply,
- WithdrawalHash: withdrawalHashReply,
- }
-
-}
-
-// NewExecutionClient establishes a client-side connection with Erigon-EL
-func NewExecutionClient(ctx context.Context, addr string) (*ExecutionClient, error) {
- // Set up dial options for the gRPC client connection
- var dialOpts []grpc.DialOption
- dialOpts = []grpc.DialOption{
- grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(16 * datasize.MB))),
- grpc.WithKeepaliveParams(keepalive.ClientParameters{
- Time: 5 * time.Minute,
- Timeout: 10 * time.Minute,
- PermitWithoutStream: true,
- }),
- }
-
- // Add transport credentials to the dial options
- dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
-
- // Create the gRPC client connection
- conn, err := grpc.DialContext(ctx, addr, dialOpts...)
- if err != nil {
- // Return an error if the connection fails
- return nil, fmt.Errorf("creating client connection to execution client: %w", err)
- }
-
- // Return a new ExecutionClient struct with the gRPC client and context set as fields
- return &ExecutionClient{
- client: execution.NewExecutionClient(conn),
- ctx: ctx,
- }, nil
-}
-
-// InsertHeaders will send block bodies to execution client
-func (ec *ExecutionClient) InsertHeaders(headers []*types.Header) error {
- grpcHeaders := make([]*execution.Header, 0, len(headers))
- for _, header := range headers {
- grpcHeaders = append(grpcHeaders, HeaderToHeaderRPC(header))
- }
- _, err := ec.client.InsertHeaders(ec.ctx, &execution.InsertHeadersRequest{Headers: grpcHeaders})
- return err
-}
-
-// InsertBodies will send block bodies to execution client
-func (ec *ExecutionClient) InsertBodies(bodies []*types.RawBody, blockHashes []libcommon.Hash, blockNumbers []uint64) error {
- if len(bodies) != len(blockHashes) || len(bodies) != len(blockNumbers) {
- return fmt.Errorf("unbalanced inputs")
- }
- grpcBodies := make([]*execution.BlockBody, 0, len(bodies))
- for i, body := range bodies {
- grpcBodies = append(grpcBodies, &execution.BlockBody{
- BlockHash: gointerfaces.ConvertHashToH256(blockHashes[i]),
- BlockNumber: blockNumbers[i],
- Transactions: body.Transactions,
- Withdrawals: engine_types.ConvertWithdrawalsToRpc(body.Withdrawals),
- })
- }
- _, err := ec.client.InsertBodies(ec.ctx, &execution.InsertBodiesRequest{Bodies: grpcBodies})
- return err
-}
-
-// InsertExecutionPayloads insert a segment of execution payloads
-func (ec *ExecutionClient) InsertExecutionPayloads(payloads []*cltypes.Eth1Block) error {
- headers := make([]*types.Header, 0, len(payloads))
- bodies := make([]*types.RawBody, 0, len(payloads))
- blockHashes := make([]libcommon.Hash, 0, len(payloads))
- blockNumbers := make([]uint64, 0, len(payloads))
-
- for _, payload := range payloads {
- rlpHeader, err := payload.RlpHeader()
- if err != nil {
- return err
- }
- headers = append(headers, rlpHeader)
- bodies = append(bodies, payload.Body())
- blockHashes = append(blockHashes, payload.BlockHash)
- blockNumbers = append(blockNumbers, payload.BlockNumber)
- }
-
- if err := ec.InsertHeaders(headers); err != nil {
- return err
- }
- return ec.InsertBodies(bodies, blockHashes, blockNumbers)
-}
-
-func (ec *ExecutionClient) ForkChoiceUpdate(headHash libcommon.Hash) (*execution.ForkChoiceReceipt, error) {
- log.Debug("[ExecutionClientRpc] Calling EL", "method", rpc_helper.ForkChoiceUpdatedV1)
-
- return ec.client.UpdateForkChoice(ec.ctx, &execution.ForkChoice{
- HeadBlockHash: gointerfaces.ConvertHashToH256(headHash),
- Timeout: uint64(fcuTimeout.Milliseconds()),
- })
-}
-
-func (ec *ExecutionClient) IsCanonical(hash libcommon.Hash) (bool, error) {
- resp, err := ec.client.IsCanonicalHash(ec.ctx, gointerfaces.ConvertHashToH256(hash))
- if err != nil {
- return false, err
- }
- return resp.Canonical, nil
-}
-
-func (ec *ExecutionClient) ReadHeader(number uint64, blockHash libcommon.Hash) (*types.Header, error) {
- resp, err := ec.client.GetHeader(ec.ctx, &execution.GetSegmentRequest{
- BlockNumber: &number,
- BlockHash: gointerfaces.ConvertHashToH256(blockHash),
- })
- if err != nil {
- return nil, err
- }
-
- return HeaderRpcToHeader(resp.Header)
-}
-
-func (ec *ExecutionClient) ReadExecutionPayload(number uint64, blockHash libcommon.Hash) (*cltypes.Eth1Block, error) {
- header, err := ec.ReadHeader(number, blockHash)
- if err != nil {
- return nil, err
- }
- body, err := ec.ReadBody(number, blockHash)
- if err != nil {
- return nil, err
- }
- return cltypes.NewEth1BlockFromHeaderAndBody(header, body), nil
-}
-
-func (ec *ExecutionClient) ReadBody(number uint64, blockHash libcommon.Hash) (*types.RawBody, error) {
- resp, err := ec.client.GetBody(ec.ctx, &execution.GetSegmentRequest{
- BlockNumber: &number,
- BlockHash: gointerfaces.ConvertHashToH256(blockHash),
- })
- if err != nil {
- return nil, err
- }
- uncles := make([]*types.Header, 0, len(resp.Body.Uncles))
- for _, uncle := range resp.Body.Uncles {
- h, err := HeaderRpcToHeader(uncle)
- if err != nil {
- return nil, err
- }
- uncles = append(uncles, h)
- }
- return &types.RawBody{
- Transactions: resp.Body.Transactions,
- Uncles: uncles,
- Withdrawals: engine_types.ConvertWithdrawalsFromRpc(resp.Body.Withdrawals),
- }, nil
-}
diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go
index da21fdf3d2b..9f8eb1f50a7 100644
--- a/cl/phase1/execution_client/execution_client_direct.go
+++ b/cl/phase1/execution_client/execution_client_direct.go
@@ -3,26 +3,23 @@ package execution_client
import (
"context"
"fmt"
- "math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/common/hexutil"
- "github.com/ledgerwatch/erigon/turbo/engineapi"
- "github.com/ledgerwatch/erigon/turbo/engineapi/engine_types"
- "github.com/ledgerwatch/log/v3"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/turbo/execution/eth1/eth1_chain_reader.go"
)
type ExecutionClientDirect struct {
- api engineapi.EngineAPI
- ctx context.Context
+ chainRW eth1_chain_reader.ChainReaderWriterEth1
+ ctx context.Context
}
-func NewExecutionClientDirect(ctx context.Context, api engineapi.EngineAPI) (*ExecutionClientDirect, error) {
+func NewExecutionClientDirect(ctx context.Context, chainRW eth1_chain_reader.ChainReaderWriterEth1) (*ExecutionClientDirect, error) {
return &ExecutionClientDirect{
- api: api,
- ctx: ctx,
+ chainRW: chainRW,
+ ctx: ctx,
}, nil
}
@@ -31,81 +28,74 @@ func (cc *ExecutionClientDirect) NewPayload(payload *cltypes.Eth1Block, beaconPa
return
}
- reversedBaseFeePerGas := libcommon.Copy(payload.BaseFeePerGas[:])
- for i, j := 0, len(reversedBaseFeePerGas)-1; i < j; i, j = i+1, j-1 {
- reversedBaseFeePerGas[i], reversedBaseFeePerGas[j] = reversedBaseFeePerGas[j], reversedBaseFeePerGas[i]
- }
- baseFee := new(big.Int).SetBytes(reversedBaseFeePerGas)
-
- request := engine_types.ExecutionPayload{
- ParentHash: payload.ParentHash,
- FeeRecipient: payload.FeeRecipient,
- StateRoot: payload.StateRoot,
- ReceiptsRoot: payload.ReceiptsRoot,
- LogsBloom: payload.LogsBloom[:],
- PrevRandao: payload.PrevRandao,
- BlockNumber: hexutil.Uint64(payload.BlockNumber),
- GasLimit: hexutil.Uint64(payload.GasLimit),
- GasUsed: hexutil.Uint64(payload.GasUsed),
- Timestamp: hexutil.Uint64(payload.Time),
- ExtraData: payload.Extra.Bytes(),
- BlockHash: payload.BlockHash,
+ header, err := payload.RlpHeader()
+ if err != nil {
+ return true, err
}
- request.BaseFeePerGas = new(hexutil.Big)
- *request.BaseFeePerGas = hexutil.Big(*baseFee)
- payloadBody := payload.Body()
- // Setup transactionbody
- request.Withdrawals = payloadBody.Withdrawals
-
- for _, bytesTransaction := range payloadBody.Transactions {
- request.Transactions = append(request.Transactions, bytesTransaction)
- }
- // Process Deneb
- if payload.Version() >= clparams.DenebVersion {
- request.BlobGasUsed = new(hexutil.Uint64)
- request.ExcessBlobGas = new(hexutil.Uint64)
- *request.BlobGasUsed = hexutil.Uint64(payload.BlobGasUsed)
- *request.ExcessBlobGas = hexutil.Uint64(payload.ExcessBlobGas)
+ body := payload.Body()
+ txs, err := types.DecodeTransactions(body.Transactions)
+ if err != nil {
+ return true, err
}
- payloadStatus := &engine_types.PayloadStatus{} // As it is done in the rpcdaemon
-
- // determine the engine method
- switch payload.Version() {
- case clparams.BellatrixVersion:
- payloadStatus, err = cc.api.NewPayloadV1(cc.ctx, &request)
- case clparams.CapellaVersion:
- payloadStatus, err = cc.api.NewPayloadV2(cc.ctx, &request)
- case clparams.DenebVersion:
- //TODO: Add 4844 and 4788 fields correctly
- payloadStatus, err = cc.api.NewPayloadV3(cc.ctx, &request, nil, beaconParentRoot)
- default:
- err = fmt.Errorf("invalid payload version")
+ if err := cc.chainRW.InsertBlockAndWait(types.NewBlockFromStorage(payload.BlockHash, header, txs, nil, body.Withdrawals)); err != nil {
+ return false, err
}
+
+ status, _, err := cc.chainRW.ValidateChain(payload.BlockHash, payload.BlockNumber)
if err != nil {
- err = fmt.Errorf("execution Client RPC failed to retrieve the NewPayload status response, err: %w", err)
- return
+ return false, err
}
+ invalid = status == execution.ExecutionStatus_BadBlock
- invalid = payloadStatus.Status == engine_types.InvalidStatus || payloadStatus.Status == engine_types.InvalidBlockHashStatus
- err = checkPayloadStatus(payloadStatus)
- if payloadStatus.Status == engine_types.AcceptedStatus {
- log.Info("[ExecutionClientRpc] New block accepted")
- }
return
}
func (cc *ExecutionClientDirect) ForkChoiceUpdate(finalized libcommon.Hash, head libcommon.Hash) error {
- forkChoiceRequest := engine_types.ForkChoiceState{
- HeadHash: head,
- SafeBlockHash: head,
- FinalizedBlockHash: finalized,
- }
-
- forkChoiceResp, err := cc.api.ForkchoiceUpdatedV2(cc.ctx, &forkChoiceRequest, nil)
+ status, _, err := cc.chainRW.UpdateForkChoice(head, head, finalized)
if err != nil {
return fmt.Errorf("execution Client RPC failed to retrieve ForkChoiceUpdate response, err: %w", err)
}
- return checkPayloadStatus(forkChoiceResp.PayloadStatus)
+ if status == execution.ExecutionStatus_InvalidForkchoice {
+ return fmt.Errorf("forkchoice was invalid")
+ }
+ if status == execution.ExecutionStatus_BadBlock {
+ return fmt.Errorf("bad block as forkchoice")
+ }
+ return nil
+}
+
+func (cc *ExecutionClientDirect) SupportInsertion() bool {
+ return true
+}
+
+func (cc *ExecutionClientDirect) InsertBlocks(blks []*types.Block) error {
+ return cc.chainRW.InsertBlocksAndWait(blks)
+}
+
+func (cc *ExecutionClientDirect) InsertBlock(blk *types.Block) error {
+ return cc.chainRW.InsertBlockAndWait(blk)
+}
+
+func (cc *ExecutionClientDirect) IsCanonicalHash(hash libcommon.Hash) (bool, error) {
+ return cc.chainRW.IsCanonicalHash(hash)
+}
+
+func (cc *ExecutionClientDirect) Ready() (bool, error) {
+ return cc.chainRW.Ready()
+}
+
+// GetBodiesByRange gets block bodies in given block range
+func (cc *ExecutionClientDirect) GetBodiesByRange(start, count uint64) ([]*types.RawBody, error) {
+ return cc.chainRW.GetBodiesByRange(start, count)
+}
+
+// GetBodiesByHashes gets block bodies with given hashes
+func (cc *ExecutionClientDirect) GetBodiesByHashes(hashes []libcommon.Hash) ([]*types.RawBody, error) {
+ return cc.chainRW.GetBodiesByHashes(hashes)
+}
+
+func (cc *ExecutionClientDirect) FrozenBlocks() uint64 {
+ return cc.chainRW.FrozenBlocks()
}
diff --git a/cl/phase1/execution_client/execution_client_rpc.go b/cl/phase1/execution_client/execution_client_rpc.go
index 87a488fb1ef..82515a2f5fb 100644
--- a/cl/phase1/execution_client/execution_client_rpc.go
+++ b/cl/phase1/execution_client/execution_client_rpc.go
@@ -3,6 +3,7 @@ package execution_client
import (
"context"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/big"
"net/http"
"strings"
@@ -12,7 +13,7 @@ import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/phase1/execution_client/rpc_helper"
- "github.com/ledgerwatch/erigon/common/hexutil"
+ "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/engineapi/engine_types"
"github.com/ledgerwatch/log/v3"
@@ -163,3 +164,67 @@ func checkPayloadStatus(payloadStatus *engine_types.PayloadStatus) error {
}
return nil
}
+
+func (cc *ExecutionClientRpc) SupportInsertion() bool {
+ return false
+}
+
+func (cc *ExecutionClientRpc) InsertBlocks([]*types.Block) error {
+ panic("unimplemented")
+}
+
+func (cc *ExecutionClientRpc) InsertBlock(*types.Block) error {
+ panic("unimplemented")
+}
+
+func (cc *ExecutionClientRpc) IsCanonicalHash(libcommon.Hash) (bool, error) {
+ panic("unimplemented")
+}
+
+func (cc *ExecutionClientRpc) Ready() (bool, error) {
+ return true, nil // Engine API is always ready
+}
+
+// Range methods
+
+// GetBodiesByRange gets block bodies in given block range
+func (cc *ExecutionClientRpc) GetBodiesByRange(start, count uint64) ([]*types.RawBody, error) {
+ result := []*engine_types.ExecutionPayloadBodyV1{}
+
+ if err := cc.client.CallContext(cc.ctx, &result, rpc_helper.GetPayloadBodiesByRangeV1, hexutil.Uint64(start), hexutil.Uint64(count)); err != nil {
+ return nil, err
+ }
+ ret := make([]*types.RawBody, len(result))
+ for i := range result {
+ ret[i] = &types.RawBody{
+ Withdrawals: result[i].Withdrawals,
+ }
+ for _, txn := range result[i].Transactions {
+ ret[i].Transactions = append(ret[i].Transactions, txn)
+ }
+ }
+ return ret, nil
+}
+
+// GetBodiesByHashes gets block bodies with given hashes
+func (cc *ExecutionClientRpc) GetBodiesByHashes(hashes []libcommon.Hash) ([]*types.RawBody, error) {
+ result := []*engine_types.ExecutionPayloadBodyV1{}
+
+ if err := cc.client.CallContext(cc.ctx, &result, rpc_helper.GetPayloadBodiesByHashV1, hashes); err != nil {
+ return nil, err
+ }
+ ret := make([]*types.RawBody, len(result))
+ for i := range result {
+ ret[i] = &types.RawBody{
+ Withdrawals: result[i].Withdrawals,
+ }
+ for _, txn := range result[i].Transactions {
+ ret[i].Transactions = append(ret[i].Transactions, txn)
+ }
+ }
+ return ret, nil
+}
+
+func (cc *ExecutionClientRpc) FrozenBlocks() uint64 {
+ panic("unimplemented")
+}
diff --git a/cl/phase1/execution_client/insert_batch.go b/cl/phase1/execution_client/insert_batch.go
deleted file mode 100644
index 5f75096757a..00000000000
--- a/cl/phase1/execution_client/insert_batch.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package execution_client
-
-import (
- "sync"
-
- "github.com/ledgerwatch/erigon/cl/cltypes"
-)
-
-const batchSize = 10000
-
-// InsertBatch is a struct for batching and inserting execution payloads.
-type InsertBatch struct {
- ec *ExecutionClient // The execution client to use for inserting payloads.
- payloadBuf []*cltypes.Eth1Block // A buffer for storing execution payloads before they are inserted.
- mu sync.Mutex // A mutex for synchronizing access to the payload buffer.
-}
-
-// NewInsertBatch creates a new InsertBatch struct with the given execution client.
-func NewInsertBatch(ec *ExecutionClient) *InsertBatch {
- return &InsertBatch{
- ec: ec,
- payloadBuf: make([]*cltypes.Eth1Block, 0, batchSize),
- }
-}
-
-// WriteExecutionPayload adds an execution payload to the payload buffer. If the buffer
-// has reached the batch size, the payloads in the buffer are inserted using the
-// execution client.
-func (b *InsertBatch) WriteExecutionPayload(payload *cltypes.Eth1Block) error {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- b.payloadBuf = append(b.payloadBuf, payload)
- if len(b.payloadBuf) >= batchSize {
- if err := b.Flush(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Flush inserts the execution payloads in the payload buffer using the execution client.
-func (b *InsertBatch) Flush() error {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.payloadBuf) == 0 {
- return nil
- }
- if err := b.ec.InsertExecutionPayloads(b.payloadBuf); err != nil {
- return err
- }
- b.payloadBuf = b.payloadBuf[:0] // Clear the payload buffer.
- return nil
-}
diff --git a/cl/phase1/execution_client/interface.go b/cl/phase1/execution_client/interface.go
index e896b29f722..a83c60c9489 100644
--- a/cl/phase1/execution_client/interface.go
+++ b/cl/phase1/execution_client/interface.go
@@ -4,6 +4,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/core/types"
)
var errContextExceeded = "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
@@ -13,4 +14,14 @@ var errContextExceeded = "rpc error: code = DeadlineExceeded desc = context dead
type ExecutionEngine interface {
NewPayload(payload *cltypes.Eth1Block, beaconParentRoot *libcommon.Hash) (bool, error)
ForkChoiceUpdate(finalized libcommon.Hash, head libcommon.Hash) error
+ SupportInsertion() bool
+ InsertBlocks([]*types.Block) error
+ InsertBlock(*types.Block) error
+ IsCanonicalHash(libcommon.Hash) (bool, error)
+ Ready() (bool, error)
+ // Range methods
+ GetBodiesByRange(start, count uint64) ([]*types.RawBody, error)
+ GetBodiesByHashes(hashes []libcommon.Hash) ([]*types.RawBody, error)
+ // Snapshots
+ FrozenBlocks() uint64
}
diff --git a/cl/phase1/execution_client/rpc_helper/methods.go b/cl/phase1/execution_client/rpc_helper/methods.go
index 68f039729e0..558dd6f0117 100644
--- a/cl/phase1/execution_client/rpc_helper/methods.go
+++ b/cl/phase1/execution_client/rpc_helper/methods.go
@@ -7,3 +7,6 @@ const EngineNewPayloadV3 = "engine_newPayloadV3"
const ForkChoiceUpdatedV1 = "engine_forkchoiceUpdatedV1"
const ForkChoiceUpdatedV2 = "engine_forkchoiceUpdatedV2"
const ForkChoiceUpdatedV3 = "engine_forkchoiceUpdatedV3"
+
+const GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
+const GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
diff --git a/cl/phase1/forkchoice/checkpoint_state.go b/cl/phase1/forkchoice/checkpoint_state.go
index 14a5ad90493..e5937fc3cc5 100644
--- a/cl/phase1/forkchoice/checkpoint_state.go
+++ b/cl/phase1/forkchoice/checkpoint_state.go
@@ -8,6 +8,7 @@ import (
"github.com/Giulio2002/bls"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
@@ -16,61 +17,86 @@ import (
const randaoMixesLength = 65536
-// Active returns if validator is active for given epoch
-func (cv *checkpointValidator) active(epoch uint64) bool {
- return cv.activationEpoch <= epoch && epoch < cv.exitEpoch
-}
-
-type checkpointValidator struct {
- publicKey [48]byte
- activationEpoch uint64
- exitEpoch uint64
- balance uint64
- slashed bool
-}
-
-type shuffledSet struct {
- set []uint64
- lenActive uint64
-}
-
-// We only keep in memory a fraction of the beacon state
+// We only keep in memory a fraction of the beacon state when it comes to checkpoint.
type checkpointState struct {
- beaconConfig *clparams.BeaconChainConfig
- randaoMixes solid.HashVectorSSZ
- shuffledSetsCache map[uint64]*shuffledSet // Map each epoch to its shuffled index
- // public keys list
- validators []*checkpointValidator
+ beaconConfig *clparams.BeaconChainConfig
+ randaoMixes solid.HashVectorSSZ
+ shuffledSet []uint64 // shuffled set of active validators
+ // validator data
+ balances []uint64
+ // These are flattened to save memory and anchor public keys are static and shared.
+ anchorPublicKeys []byte // flattened base public keys
+ publicKeys []byte // flattened public keys
+ actives []byte
+ slasheds []byte
+
+ validatorSetSize int
// fork data
genesisValidatorsRoot libcommon.Hash
fork *cltypes.Fork
activeBalance, epoch uint64 // current active balance and epoch
}
-func newCheckpointState(beaconConfig *clparams.BeaconChainConfig, validatorSet []solid.Validator, randaoMixes solid.HashVectorSSZ,
+func writeToBitset(bitset []byte, i int, value bool) {
+ bitIndex := i % 8
+ sliceIndex := i / 8
+ if value {
+ bitset[sliceIndex] = ((1 << bitIndex) | bitset[sliceIndex])
+ } else {
+ bitset[sliceIndex] &= ^(1 << uint(bitIndex))
+ }
+}
+
+func readFromBitset(bitset []byte, i int) bool {
+ bitIndex := i % 8
+ sliceIndex := i / 8
+ return (bitset[sliceIndex] & (1 << uint(bitIndex))) > 0
+}
+
+func newCheckpointState(beaconConfig *clparams.BeaconChainConfig, anchorPublicKeys []byte, validatorSet []solid.Validator, randaoMixes solid.HashVectorSSZ,
genesisValidatorsRoot libcommon.Hash, fork *cltypes.Fork, activeBalance, epoch uint64) *checkpointState {
- validators := make([]*checkpointValidator, len(validatorSet))
+ publicKeys := make([]byte, (len(validatorSet)-(len(anchorPublicKeys)/length.Bytes48))*length.Bytes48)
+ balances := make([]uint64, len(validatorSet))
+
+ bitsetSize := (len(validatorSet) + 7) / 8
+ actives := make([]byte, bitsetSize)
+ slasheds := make([]byte, bitsetSize)
for i := range validatorSet {
- validators[i] = &checkpointValidator{
- publicKey: validatorSet[i].PublicKey(),
- activationEpoch: validatorSet[i].ActivationEpoch(),
- exitEpoch: validatorSet[i].ExitEpoch(),
- balance: validatorSet[i].EffectiveBalance(),
- slashed: validatorSet[i].Slashed(),
- }
+ balances[i] = validatorSet[i].EffectiveBalance()
+ writeToBitset(actives, i, validatorSet[i].Active(epoch))
+ writeToBitset(slasheds, i, validatorSet[i].Slashed())
}
+ // Add the post-anchor public keys as surplus
+ for i := len(anchorPublicKeys) / length.Bytes48; i < len(validatorSet); i++ {
+ pos := i - len(anchorPublicKeys)/length.Bytes48
+ copy(publicKeys[pos*length.Bytes48:], validatorSet[i].PublicKeyBytes())
+ }
+
mixes := solid.NewHashVector(randaoMixesLength)
randaoMixes.CopyTo(mixes)
- return &checkpointState{
+
+ // bitsets size
+ c := &checkpointState{
beaconConfig: beaconConfig,
randaoMixes: mixes,
- validators: validators,
+ balances: balances,
+ anchorPublicKeys: anchorPublicKeys,
+ publicKeys: publicKeys,
genesisValidatorsRoot: genesisValidatorsRoot,
fork: fork,
- shuffledSetsCache: map[uint64]*shuffledSet{},
activeBalance: activeBalance,
- epoch: epoch,
+ slasheds: slasheds,
+ actives: actives,
+ validatorSetSize: len(validatorSet),
+
+ epoch: epoch,
}
+ mixPosition := (epoch + beaconConfig.EpochsPerHistoricalVector - beaconConfig.MinSeedLookahead - 1) %
+ beaconConfig.EpochsPerHistoricalVector
+ activeIndicies := c.getActiveIndicies(epoch)
+ c.shuffledSet = make([]uint64, len(activeIndicies))
+ c.shuffledSet = shuffling.ComputeShuffledIndicies(c.beaconConfig, c.randaoMixes.Get(int(mixPosition)), c.shuffledSet, activeIndicies, epoch*beaconConfig.SlotsPerEpoch)
+ return c
}
// getAttestingIndicies retrieves the beacon committee.
@@ -79,30 +105,14 @@ func (c *checkpointState) getAttestingIndicies(attestation *solid.AttestationDat
slot := attestation.Slot()
epoch := c.epochAtSlot(slot)
// Compute shuffled indicies
- var shuffledIndicies []uint64
- var lenIndicies uint64
-
- beaconConfig := c.beaconConfig
- mixPosition := (epoch + beaconConfig.EpochsPerHistoricalVector - beaconConfig.MinSeedLookahead - 1) %
- beaconConfig.EpochsPerHistoricalVector
- // Input for the seed hash.
-
- if shuffledIndicesCached, ok := c.shuffledSetsCache[epoch]; ok {
- shuffledIndicies = shuffledIndicesCached.set
- lenIndicies = shuffledIndicesCached.lenActive
- } else {
- activeIndicies := c.getActiveIndicies(epoch)
- lenIndicies = uint64(len(activeIndicies))
- shuffledIndicies = shuffling.ComputeShuffledIndicies(c.beaconConfig, c.randaoMixes.Get(int(mixPosition)), activeIndicies, slot)
- c.shuffledSetsCache[epoch] = &shuffledSet{set: shuffledIndicies, lenActive: uint64(len(activeIndicies))}
- }
+ lenIndicies := uint64(len(c.shuffledSet))
committeesPerSlot := c.committeeCount(epoch, lenIndicies)
count := committeesPerSlot * c.beaconConfig.SlotsPerEpoch
index := (slot%c.beaconConfig.SlotsPerEpoch)*committeesPerSlot + attestation.ValidatorIndex()
start := (lenIndicies * index) / count
end := (lenIndicies * (index + 1)) / count
- committee := shuffledIndicies[start:end]
+ committee := c.shuffledSet[start:end]
attestingIndices := []uint64{}
for i, member := range committee {
@@ -119,8 +129,8 @@ func (c *checkpointState) getAttestingIndicies(attestation *solid.AttestationDat
}
func (c *checkpointState) getActiveIndicies(epoch uint64) (activeIndicies []uint64) {
- for i, validator := range c.validators {
- if !validator.active(epoch) {
+ for i := 0; i < c.validatorSetSize; i++ {
+ if !readFromBitset(c.actives, i) {
continue
}
activeIndicies = append(activeIndicies, uint64(i))
@@ -156,8 +166,12 @@ func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestat
pks := [][]byte{}
inds.Range(func(_ int, v uint64, _ int) bool {
- publicKey := c.validators[v].publicKey
- pks = append(pks, publicKey[:])
+ if v < uint64(len(c.anchorPublicKeys)) {
+ pks = append(pks, c.anchorPublicKeys[v*length.Bytes48:(v+1)*length.Bytes48])
+ } else {
+ offset := uint64(len(c.anchorPublicKeys) / length.Bytes48)
+ pks = append(pks, c.publicKeys[(v-offset)*length.Bytes48:])
+ }
return true
})
diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go
index 76f2153faac..4b5c5d81975 100644
--- a/cl/phase1/forkchoice/fork_choice_test.go
+++ b/cl/phase1/forkchoice/fork_choice_test.go
@@ -1,12 +1,16 @@
package forkchoice_test
import (
+ "context"
_ "embed"
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "github.com/spf13/afero"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -36,7 +40,7 @@ func TestForkChoiceBasic(t *testing.T) {
expectedCheckpoint := solid.NewCheckpointFromParameters(libcommon.HexToHash("0x564d76d91f66c1fb2977484a6184efda2e1c26dd01992e048353230e10f83201"), 0)
// Decode test blocks
- block0x3a, block0xc2, block0xd4 := &cltypes.SignedBeaconBlock{}, &cltypes.SignedBeaconBlock{}, &cltypes.SignedBeaconBlock{}
+ block0x3a, block0xc2, block0xd4 := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig), cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig), cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(block0x3a, block3aEncoded, int(clparams.AltairVersion)))
require.NoError(t, utils.DecodeSSZSnappy(block0xc2, blockc2Encoded, int(clparams.AltairVersion)))
require.NoError(t, utils.DecodeSSZSnappy(block0xd4, blockd4Encoded, int(clparams.AltairVersion)))
@@ -46,7 +50,8 @@ func TestForkChoiceBasic(t *testing.T) {
// Initialize forkchoice store
anchorState := state.New(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(anchorState, anchorStateEncoded, int(clparams.AltairVersion)))
- store, err := forkchoice.NewForkChoiceStore(anchorState, nil, nil, false)
+ pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig)
+ store, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs()))
require.NoError(t, err)
// first steps
store.OnTick(0)
@@ -85,4 +90,20 @@ func TestForkChoiceBasic(t *testing.T) {
require.Equal(t, headRoot, libcommon.HexToHash("0x744cc484f6503462f0f3a5981d956bf4fcb3e57ab8687ed006467e05049ee033"))
// lastly do attestation
require.NoError(t, store.OnAttestation(testAttestation, false))
+ // Try processing a voluntary exit
+ err = store.OnVoluntaryExit(&cltypes.SignedVoluntaryExit{
+ VoluntaryExit: &cltypes.VoluntaryExit{
+ Epoch: 0,
+ ValidatorIndex: 0,
+ },
+ }, true)
+ require.NoError(t, err)
+ // Try processing a bls execution change exit
+ err = store.OnBlsToExecutionChange(&cltypes.SignedBLSToExecutionChange{
+ Message: &cltypes.BLSToExecutionChange{
+ ValidatorIndex: 0,
+ },
+ }, true)
+ require.NoError(t, err)
+ require.Equal(t, len(pool.VoluntaryExistsPool.Raw()), 1)
}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph.go b/cl/phase1/forkchoice/fork_graph/fork_graph.go
deleted file mode 100644
index 5c957b63406..00000000000
--- a/cl/phase1/forkchoice/fork_graph/fork_graph.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package fork_graph
-
-import (
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/transition"
- "github.com/ledgerwatch/log/v3"
- "golang.org/x/exp/slices"
-)
-
-type ChainSegmentInsertionResult uint
-
-const (
- Success ChainSegmentInsertionResult = 0
- InvalidBlock ChainSegmentInsertionResult = 1
- MissingSegment ChainSegmentInsertionResult = 2
- BelowAnchor ChainSegmentInsertionResult = 3
- LogisticError ChainSegmentInsertionResult = 4
- PreValidated ChainSegmentInsertionResult = 5
-)
-
-const snapshotStateEverySlot = 64
-
-/*
-* The state store process is related to graph theory in the sense that the Ethereum blockchain can be thought of as a directed graph,
-* where each block represents a node and the links between blocks represent directed edges.
-* In this context, rolling back the state of Ethereum to a previous state can be thought of as traversing the graph in reverse,
-* from the current state to a previous state.
-* The process of reverting the state involves undoing the changes made in the blocks that have been added to the blockchain since the previous state.
-* This can be thought of as "reversing the edges" in the graph, effectively undoing the changes made to the state of Ethereum.
-* By thinking of the Ethereum blockchain as a graph, we can use graph theory concepts, such as traversal algorithms,
-* to analyze and manipulate the state of the blockchain.
- */
-
-// ForkGraph is our graph for ETH 2.0 consensus forkchoice. Each node is a (block root, changes) pair and
-// each edge is the path described as (prevBlockRoot, currBlockRoot). if we want to go forward we use blocks.
-type ForkGraph struct {
- // Alternate beacon states
- currentReferenceState *state.CachingBeaconState
- nextReferenceState *state.CachingBeaconState
- blocks map[libcommon.Hash]*cltypes.SignedBeaconBlock // set of blocks
- headers map[libcommon.Hash]*cltypes.BeaconBlockHeader // set of headers
- badBlocks map[libcommon.Hash]struct{} // blocks that are invalid and that leads to automatic fail of extension.
- // current state data
- currentState *state.CachingBeaconState
- currentStateBlockRoot libcommon.Hash
- // childrens maps each block roots to its children block roots
- childrens map[libcommon.Hash][]libcommon.Hash
- // for each block root we also keep track of te equivalent current justified and finalized checkpoints for faster head retrieval.
- currentJustifiedCheckpoints map[libcommon.Hash]solid.Checkpoint
- finalizedCheckpoints map[libcommon.Hash]solid.Checkpoint
- // Disable for tests
- enabledPruning bool
- // configurations
- beaconCfg *clparams.BeaconChainConfig
- genesisTime uint64
- // highest block seen
- highestSeen uint64
-}
-
-func (f *ForkGraph) AnchorSlot() uint64 {
- return f.currentReferenceState.Slot()
-}
-
-// Initialize fork graph with a new state
-func New(anchorState *state.CachingBeaconState, enabledPruning bool) *ForkGraph {
- farthestExtendingPath := make(map[libcommon.Hash]bool)
- anchorRoot, err := anchorState.BlockRoot()
- if err != nil {
- panic(err)
- }
- headers := make(map[libcommon.Hash]*cltypes.BeaconBlockHeader)
- anchorHeader := anchorState.LatestBlockHeader()
- if anchorHeader.Root, err = anchorState.HashSSZ(); err != nil {
- panic(err)
- }
- headers[anchorRoot] = &anchorHeader
-
- farthestExtendingPath[anchorRoot] = true
- currentStateReference, err := anchorState.Copy()
- if err != nil {
- panic(err)
- }
- nextStateReference, err := anchorState.Copy()
- if err != nil {
- panic(err)
- }
- return &ForkGraph{
- currentReferenceState: currentStateReference,
- nextReferenceState: nextStateReference,
- // storage
- blocks: make(map[libcommon.Hash]*cltypes.SignedBeaconBlock),
- headers: headers,
- badBlocks: make(map[libcommon.Hash]struct{}),
- // current state data
- currentState: anchorState,
- currentStateBlockRoot: anchorRoot,
- // childrens
- childrens: make(map[libcommon.Hash][]libcommon.Hash),
- // checkpoints trackers
- currentJustifiedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint),
- finalizedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint),
- enabledPruning: enabledPruning,
- // configuration
- beaconCfg: anchorState.BeaconConfig(),
- genesisTime: anchorState.GenesisTime(),
- }
-}
-
-// Add a new node and edge to the graph
-func (f *ForkGraph) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, fullValidation bool) (*state.CachingBeaconState, ChainSegmentInsertionResult, error) {
- block := signedBlock.Block
- blockRoot, err := block.HashSSZ()
- if err != nil {
- return nil, LogisticError, err
- }
-
- if _, ok := f.headers[blockRoot]; ok {
- return nil, PreValidated, nil
- }
- // Blocks below anchors are invalid.
- if block.Slot <= f.currentReferenceState.Slot() {
- log.Debug("block below anchor slot", "slot", block.Slot, "hash", libcommon.Hash(blockRoot))
- f.badBlocks[blockRoot] = struct{}{}
- return nil, BelowAnchor, nil
- }
- // Check if block being process right now was marked as invalid.
- if _, ok := f.badBlocks[blockRoot]; ok {
- log.Debug("block has invalid parent", "slot", block.Slot, "hash", libcommon.Hash(blockRoot))
- f.badBlocks[blockRoot] = struct{}{}
- return nil, InvalidBlock, nil
- }
-
- newState, didLongRecconnection, err := f.GetState(block.ParentRoot, false)
- if err != nil {
- return nil, InvalidBlock, err
- }
- if newState == nil {
- log.Debug("AddChainSegment: missing segment", "block", libcommon.Hash(blockRoot))
- return nil, MissingSegment, nil
- }
- // if we did so by long recconection, i am afraid we need to discard the current state.
- if didLongRecconnection {
- log.Debug("AddChainSegment: Resetting state reference as it was orphaned")
- f.currentReferenceState.CopyInto(f.nextReferenceState)
- }
-
- // Execute the state
- if invalidBlockErr := transition.TransitionState(newState, signedBlock, fullValidation); invalidBlockErr != nil {
- // Add block to list of invalid blocks
- log.Debug("Invalid beacon block", "reason", invalidBlockErr)
- f.badBlocks[blockRoot] = struct{}{}
- f.nextReferenceState.CopyInto(f.currentState)
- f.currentStateBlockRoot, err = f.nextReferenceState.BlockRoot()
- if err != nil {
- log.Error("[Caplin] Could not recover from invalid block")
- }
- return nil, InvalidBlock, invalidBlockErr
- }
-
- f.blocks[blockRoot] = signedBlock
- bodyRoot, err := signedBlock.Block.Body.HashSSZ()
- if err != nil {
- return nil, LogisticError, err
- }
- f.headers[blockRoot] = &cltypes.BeaconBlockHeader{
- Slot: block.Slot,
- ProposerIndex: block.ProposerIndex,
- ParentRoot: block.ParentRoot,
- Root: block.StateRoot,
- BodyRoot: bodyRoot,
- }
- // Update the children of the parent
- f.updateChildren(block.ParentRoot, blockRoot)
- // Lastly add checkpoints to caches as well.
- f.currentJustifiedCheckpoints[blockRoot] = newState.CurrentJustifiedCheckpoint().Copy()
- f.finalizedCheckpoints[blockRoot] = newState.FinalizedCheckpoint().Copy()
- if newState.Slot() > f.highestSeen {
- f.highestSeen = newState.Slot()
- f.currentState = newState
- f.currentStateBlockRoot = blockRoot
- if newState.Slot()%snapshotStateEverySlot == 0 && f.nextReferenceState.Slot() > f.beaconCfg.SlotsPerEpoch && f.enabledPruning {
- if err := f.removeOldData(); err != nil {
- return nil, LogisticError, err
- }
- }
- }
- return newState, Success, nil
-}
-
-func (f *ForkGraph) GenesisTime() uint64 {
- return f.genesisTime
-}
-
-func (f *ForkGraph) Config() *clparams.BeaconChainConfig {
- return f.beaconCfg
-}
-
-func (f *ForkGraph) GetHeader(blockRoot libcommon.Hash) (*cltypes.BeaconBlockHeader, bool) {
- obj, has := f.headers[blockRoot]
- return obj, has
-}
-
-func (f *ForkGraph) getBlock(blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlock, bool) {
- obj, has := f.blocks[blockRoot]
- return obj, has
-}
-
-func (f *ForkGraph) GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, bool, error) {
- // collect all blocks beetwen greatest extending node path and block.
- blocksInTheWay := []*cltypes.SignedBeaconBlock{}
- // Use the parent root as a reverse iterator.
- currentIteratorRoot := blockRoot
- // use the current reference state root as reconnectio
- reconnectionRootLong, err := f.currentReferenceState.BlockRoot()
- if err != nil {
- return nil, false, err
- }
- reconnectionRootShort, err := f.nextReferenceState.BlockRoot()
- if err != nil {
- return nil, false, err
- }
- // try and find the point of recconection
- for currentIteratorRoot != reconnectionRootLong && currentIteratorRoot != reconnectionRootShort {
- block, isSegmentPresent := f.getBlock(currentIteratorRoot)
- if !isSegmentPresent {
- log.Debug("Could not retrieve state: Missing header", "missing", currentIteratorRoot,
- "longRecconection", libcommon.Hash(reconnectionRootLong), "shortRecconection", libcommon.Hash(reconnectionRootShort))
- return nil, false, nil
- }
- blocksInTheWay = append(blocksInTheWay, block)
- currentIteratorRoot = block.Block.ParentRoot
- }
-
- var copyReferencedState *state.CachingBeaconState
- didLongRecconnection := currentIteratorRoot == reconnectionRootLong && reconnectionRootLong != reconnectionRootShort
- if f.currentStateBlockRoot == blockRoot {
- if alwaysCopy {
- s, err := f.currentState.Copy()
- return s, didLongRecconnection, err
- }
- return f.currentState, didLongRecconnection, nil
- }
- // Take a copy to the reference state.
- if currentIteratorRoot == reconnectionRootLong {
- copyReferencedState, err = f.currentReferenceState.Copy()
- if err != nil {
- return nil, true, err
- }
- } else {
- copyReferencedState, err = f.nextReferenceState.Copy()
- if err != nil {
- return nil, false, err
- }
- }
-
- // Traverse the blocks from top to bottom.
- for i := len(blocksInTheWay) - 1; i >= 0; i-- {
- if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], false); err != nil {
- return nil, didLongRecconnection, err
- }
- }
- return copyReferencedState, didLongRecconnection, nil
-}
-
-// updateChildren adds a new child to the parent node hash.
-func (f *ForkGraph) updateChildren(parent, child libcommon.Hash) {
- childrens := f.childrens[parent]
- if slices.Contains(childrens, child) {
- return
- }
- childrens = append(childrens, child)
- f.childrens[parent] = childrens
-}
-
-// GetChildren retrieves the children block root of the given block root.
-func (f *ForkGraph) GetChildren(parent libcommon.Hash) []libcommon.Hash {
- return f.childrens[parent]
-}
-
-func (f *ForkGraph) GetCurrentJustifiedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) {
- obj, has := f.currentJustifiedCheckpoints[blockRoot]
- return obj, has
-}
-
-func (f *ForkGraph) GetFinalizedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) {
- obj, has := f.finalizedCheckpoints[blockRoot]
- return obj, has
-}
-
-func (f *ForkGraph) MarkHeaderAsInvalid(blockRoot libcommon.Hash) {
- f.badBlocks[blockRoot] = struct{}{}
-}
-
-func (f *ForkGraph) removeOldData() (err error) {
- pruneSlot := f.nextReferenceState.Slot() - f.beaconCfg.SlotsPerEpoch
- oldRoots := make([]libcommon.Hash, 0, len(f.blocks))
- for hash, signedBlock := range f.blocks {
- if signedBlock.Block.Slot >= pruneSlot {
- continue
- }
- oldRoots = append(oldRoots, hash)
- }
- for _, root := range oldRoots {
- delete(f.badBlocks, root)
- delete(f.blocks, root)
- delete(f.childrens, root)
- delete(f.currentJustifiedCheckpoints, root)
- delete(f.finalizedCheckpoints, root)
- delete(f.headers, root)
- }
- // Lastly snapshot the state
- err = f.nextReferenceState.CopyInto(f.currentReferenceState)
- if err != nil {
- panic(err) // dead at this point
- }
- err = f.currentState.CopyInto(f.nextReferenceState)
- if err != nil {
- panic(err) // dead at this point
- }
- // use the current reference state root as reconnectio
- reconnectionRootLong, err := f.currentReferenceState.BlockRoot()
- if err != nil {
- panic(err)
- }
- reconnectionRootShort, err := f.nextReferenceState.BlockRoot()
- if err != nil {
- panic(err)
- }
- log.Debug("Pruned old blocks", "pruneSlot", pruneSlot, "longRecconection", libcommon.Hash(reconnectionRootLong), "shortRecconection", libcommon.Hash(reconnectionRootShort))
- return
-}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
new file mode 100644
index 00000000000..d22d99905f3
--- /dev/null
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
@@ -0,0 +1,390 @@
+package fork_graph
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/transition"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/pierrec/lz4"
+ "github.com/spf13/afero"
+ "golang.org/x/exp/slices"
+)
+
+var lz4PoolWriterPool = sync.Pool{
+ New: func() interface{} {
+ return lz4.NewWriter(nil)
+ },
+}
+
+var lz4PoolReaderPool = sync.Pool{
+ New: func() interface{} {
+ return lz4.NewReader(nil)
+ },
+}
+
+var ErrStateNotFound = errors.New("state not found")
+
+type ChainSegmentInsertionResult uint
+
+const (
+ Success ChainSegmentInsertionResult = 0
+ InvalidBlock ChainSegmentInsertionResult = 1
+ MissingSegment ChainSegmentInsertionResult = 2
+ BelowAnchor ChainSegmentInsertionResult = 3
+ LogisticError ChainSegmentInsertionResult = 4
+ PreValidated ChainSegmentInsertionResult = 5
+)
+
+type savedStateRecord struct {
+ slot uint64
+}
+
+// ForkGraph is our graph for ETH 2.0 consensus forkchoice. Each node is a (block root, changes) pair and
+// each edge is the path described as (prevBlockRoot, currBlockRoot). if we want to go forward we use blocks.
+type forkGraphDisk struct {
+ // Alternate beacon states
+ fs afero.Fs
+ blocks map[libcommon.Hash]*cltypes.SignedBeaconBlock // set of blocks
+ headers map[libcommon.Hash]*cltypes.BeaconBlockHeader // set of headers
+ badBlocks map[libcommon.Hash]struct{} // blocks that are invalid and that leads to automatic fail of extension.
+
+ // TODO: this leaks, but it isn't a big deal since it's only ~24 bytes per block.
+ // the dirty solution is to just make it an LRU with max size of like 128 epochs or something probably?
+ stateRoots map[libcommon.Hash]libcommon.Hash // set of stateHash -> blockHash
+
+ // current state data
+ currentState *state.CachingBeaconState
+ currentStateBlockRoot libcommon.Hash
+
+ // saveStates are indexed by block index
+ saveStates map[libcommon.Hash]savedStateRecord
+
+ // for each block root we also keep track of te equivalent current justified and finalized checkpoints for faster head retrieval.
+ currentJustifiedCheckpoints map[libcommon.Hash]solid.Checkpoint
+ finalizedCheckpoints map[libcommon.Hash]solid.Checkpoint
+
+ // configurations
+ beaconCfg *clparams.BeaconChainConfig
+ genesisTime uint64
+ // highest block seen
+ highestSeen, anchorSlot uint64
+
+ // reusable buffers
+ sszBuffer bytes.Buffer
+ sszSnappyBuffer bytes.Buffer
+}
+
+// Initialize fork graph with a new state
+func NewForkGraphDisk(anchorState *state.CachingBeaconState, aferoFs afero.Fs) ForkGraph {
+ farthestExtendingPath := make(map[libcommon.Hash]bool)
+ anchorRoot, err := anchorState.BlockRoot()
+ if err != nil {
+ panic(err)
+ }
+ headers := make(map[libcommon.Hash]*cltypes.BeaconBlockHeader)
+ anchorHeader := anchorState.LatestBlockHeader()
+ if anchorHeader.Root, err = anchorState.HashSSZ(); err != nil {
+ panic(err)
+ }
+ headers[anchorRoot] = &anchorHeader
+
+ farthestExtendingPath[anchorRoot] = true
+
+ f := &forkGraphDisk{
+ fs: aferoFs,
+ // storage
+ blocks: make(map[libcommon.Hash]*cltypes.SignedBeaconBlock),
+ headers: headers,
+ badBlocks: make(map[libcommon.Hash]struct{}),
+ stateRoots: make(map[libcommon.Hash]libcommon.Hash),
+ // current state data
+ currentState: anchorState,
+ currentStateBlockRoot: anchorRoot,
+ saveStates: make(map[libcommon.Hash]savedStateRecord),
+ // checkpoints trackers
+ currentJustifiedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint),
+ finalizedCheckpoints: make(map[libcommon.Hash]solid.Checkpoint),
+ // configuration
+ beaconCfg: anchorState.BeaconConfig(),
+ genesisTime: anchorState.GenesisTime(),
+ anchorSlot: anchorState.Slot(),
+ }
+ f.dumpBeaconStateOnDisk(anchorState, anchorRoot)
+ return f
+}
+
+func (f *forkGraphDisk) AnchorSlot() uint64 {
+ return f.anchorSlot
+}
+
+// Add a new node and edge to the graph
+func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, fullValidation bool) (*state.CachingBeaconState, ChainSegmentInsertionResult, error) {
+ block := signedBlock.Block
+ blockRoot, err := block.HashSSZ()
+ if err != nil {
+ return nil, LogisticError, err
+ }
+
+ if _, ok := f.headers[blockRoot]; ok {
+ return nil, PreValidated, nil
+ }
+ // Blocks below anchors are invalid.
+ if block.Slot <= f.anchorSlot {
+ log.Debug("block below anchor slot", "slot", block.Slot, "hash", libcommon.Hash(blockRoot))
+ f.badBlocks[blockRoot] = struct{}{}
+ return nil, BelowAnchor, nil
+ }
+ // Check if block being process right now was marked as invalid.
+ if _, ok := f.badBlocks[blockRoot]; ok {
+ log.Debug("block has invalid parent", "slot", block.Slot, "hash", libcommon.Hash(blockRoot))
+ f.badBlocks[blockRoot] = struct{}{}
+ return nil, InvalidBlock, nil
+ }
+
+ newState, err := f.GetState(block.ParentRoot, false)
+ if err != nil {
+ return nil, InvalidBlock, err
+ }
+ if newState == nil {
+ log.Debug("AddChainSegment: missing segment", "block", libcommon.Hash(blockRoot))
+ return nil, MissingSegment, nil
+ }
+
+ // Execute the state
+ if invalidBlockErr := transition.TransitionState(newState, signedBlock, fullValidation); invalidBlockErr != nil {
+ // Add block to list of invalid blocks
+ log.Debug("Invalid beacon block", "reason", invalidBlockErr)
+ f.badBlocks[blockRoot] = struct{}{}
+ f.currentStateBlockRoot = libcommon.Hash{}
+ f.currentState, err = f.GetState(block.ParentRoot, true)
+ if err != nil {
+ log.Error("[Caplin] Could not recover from invalid block", "err", err)
+ } else {
+ f.currentStateBlockRoot = block.ParentRoot
+ }
+
+ return nil, InvalidBlock, invalidBlockErr
+ }
+
+ f.blocks[blockRoot] = signedBlock
+ bodyRoot, err := signedBlock.Block.Body.HashSSZ()
+ if err != nil {
+ return nil, LogisticError, err
+ }
+ f.headers[blockRoot] = &cltypes.BeaconBlockHeader{
+ Slot: block.Slot,
+ ProposerIndex: block.ProposerIndex,
+ ParentRoot: block.ParentRoot,
+ Root: block.StateRoot,
+ BodyRoot: bodyRoot,
+ }
+
+ // add the state root
+ stateRoot, err := newState.HashSSZ()
+ if err != nil {
+ return nil, LogisticError, err
+ }
+ f.stateRoots[stateRoot] = blockRoot
+
+ if newState.Slot()%f.beaconCfg.SlotsPerEpoch == 0 {
+ if err := f.dumpBeaconStateOnDisk(newState, blockRoot); err != nil {
+ return nil, LogisticError, err
+ }
+ f.saveStates[blockRoot] = savedStateRecord{slot: newState.Slot()}
+ }
+
+ // Lastly add checkpoints to caches as well.
+ f.currentJustifiedCheckpoints[blockRoot] = newState.CurrentJustifiedCheckpoint().Copy()
+ f.finalizedCheckpoints[blockRoot] = newState.FinalizedCheckpoint().Copy()
+ if newState.Slot() > f.highestSeen {
+ f.highestSeen = newState.Slot()
+ f.currentState = newState
+ f.currentStateBlockRoot = blockRoot
+ }
+ return newState, Success, nil
+}
+
+func (f *forkGraphDisk) GetHeader(blockRoot libcommon.Hash) (*cltypes.BeaconBlockHeader, bool) {
+ obj, has := f.headers[blockRoot]
+ return obj, has
+}
+
+func (f *forkGraphDisk) getBlock(blockRoot libcommon.Hash) (*cltypes.SignedBeaconBlock, bool) {
+ obj, has := f.blocks[blockRoot]
+ return obj, has
+}
+
+// GetStateAtSlot is for getting a state based off the slot number
+// NOTE: all this does is call GetStateAtSlot using the stateRoots index and existing blocks.
+func (f *forkGraphDisk) GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) {
+ blockRoot, ok := f.stateRoots[root]
+ if !ok {
+ return nil, ErrStateNotFound
+ }
+ blockSlot, ok := f.blocks[blockRoot]
+ if !ok {
+ return nil, ErrStateNotFound
+ }
+ return f.GetStateAtSlot(blockSlot.Block.Slot, alwaysCopy)
+
+}
+
+// GetStateAtSlot is for getting a state based off the slot number
+// TODO: this is rather inefficient. we could create indices that make it faster
+func (f *forkGraphDisk) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) {
+ // fast path for if the slot is the current slot
+ if f.currentState.Slot() == slot {
+ // always copy.
+ if alwaysCopy {
+ ret, err := f.currentState.Copy()
+ return ret, err
+ }
+ return f.currentState, nil
+ }
+ // if the slot requested is larger than the current slot, we know it is not found, so another fast path
+ if slot > f.currentState.Slot() {
+ return nil, ErrStateNotFound
+ }
+ if len(f.saveStates) == 0 {
+ return nil, ErrStateNotFound
+ }
+ bestSlot := uint64(0)
+ startHash := libcommon.Hash{}
+ // iterate over all savestates. there should be less than 10 of these, so this should be safe.
+ for blockHash, v := range f.saveStates {
+ // make sure the slot is smaller than the target slot
+ // (equality case caught by short circuit)
+ // and that the slot is larger than the current best found starting slot
+ if v.slot < slot && v.slot > bestSlot {
+ bestSlot = v.slot
+ startHash = blockHash
+ }
+ }
+ // no snapshot old enough to honor this request :(
+ if bestSlot == 0 {
+ return nil, ErrStateNotFound
+ }
+ copyReferencedState, err := f.readBeaconStateFromDisk(startHash)
+ if err != nil {
+ return nil, err
+ }
+ // cache lied? return state not found
+ if copyReferencedState == nil {
+ return nil, ErrStateNotFound
+ }
+
+ // what we need to do is grab every block in our block store that is between the target slot and the current slot
+ // this is linear time from the distance to our last snapshot.
+ blocksInTheWay := []*cltypes.SignedBeaconBlock{}
+ for _, v := range f.blocks {
+ if v.Block.Slot <= f.currentState.Slot() && v.Block.Slot >= slot {
+ blocksInTheWay = append(blocksInTheWay, v)
+ }
+ }
+
+ // sort the slots from low to high
+ slices.SortStableFunc(blocksInTheWay, func(a, b *cltypes.SignedBeaconBlock) int {
+ return int(a.Block.Slot) - int(b.Block.Slot)
+ })
+
+ // Traverse the blocks from top to bottom.
+ for _, block := range blocksInTheWay {
+ if err := transition.TransitionState(copyReferencedState, block, false); err != nil {
+ return nil, err
+ }
+ }
+ return copyReferencedState, nil
+}
+
+func (f *forkGraphDisk) GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) {
+ if f.currentStateBlockRoot == blockRoot {
+ if alwaysCopy {
+ ret, err := f.currentState.Copy()
+ return ret, err
+ }
+ return f.currentState, nil
+ }
+
+ // collect all blocks beetwen greatest extending node path and block.
+ blocksInTheWay := []*cltypes.SignedBeaconBlock{}
+ // Use the parent root as a reverse iterator.
+ currentIteratorRoot := blockRoot
+
+ // try and find the point of recconection
+ for {
+ block, isSegmentPresent := f.getBlock(currentIteratorRoot)
+ if !isSegmentPresent {
+ // check if it is in the header
+ bHeader, ok := f.GetHeader(currentIteratorRoot)
+ if ok && bHeader.Slot%f.beaconCfg.SlotsPerEpoch == 0 {
+ break
+ }
+ log.Debug("Could not retrieve state: Missing header", "missing", currentIteratorRoot)
+ return nil, nil
+ }
+ if block.Block.Slot%f.beaconCfg.SlotsPerEpoch == 0 {
+ break
+ }
+ blocksInTheWay = append(blocksInTheWay, block)
+ currentIteratorRoot = block.Block.ParentRoot
+ }
+ copyReferencedState, err := f.readBeaconStateFromDisk(currentIteratorRoot)
+ if err != nil {
+ return nil, err
+ }
+ if copyReferencedState == nil {
+ return nil, ErrStateNotFound
+ }
+
+ // Traverse the blocks from top to bottom.
+ for i := len(blocksInTheWay) - 1; i >= 0; i-- {
+ if err := transition.TransitionState(copyReferencedState, blocksInTheWay[i], false); err != nil {
+ return nil, err
+ }
+ }
+ return copyReferencedState, nil
+}
+
+func (f *forkGraphDisk) GetCurrentJustifiedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) {
+ obj, has := f.currentJustifiedCheckpoints[blockRoot]
+ return obj, has
+}
+
+func (f *forkGraphDisk) GetFinalizedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool) {
+ obj, has := f.finalizedCheckpoints[blockRoot]
+ return obj, has
+}
+
+func (f *forkGraphDisk) MarkHeaderAsInvalid(blockRoot libcommon.Hash) {
+ f.badBlocks[blockRoot] = struct{}{}
+}
+
+func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) {
+ pruneSlot -= f.beaconCfg.SlotsPerEpoch * 2
+ oldRoots := make([]libcommon.Hash, 0, len(f.blocks))
+ for hash, signedBlock := range f.blocks {
+ if signedBlock.Block.Slot >= pruneSlot {
+ continue
+ }
+ oldRoots = append(oldRoots, hash)
+ }
+ for _, root := range oldRoots {
+ delete(f.badBlocks, root)
+ delete(f.blocks, root)
+ delete(f.currentJustifiedCheckpoints, root)
+ delete(f.finalizedCheckpoints, root)
+ delete(f.headers, root)
+ delete(f.saveStates, root)
+ f.fs.Remove(getBeaconStateFilename(root))
+ f.fs.Remove(getBeaconStateCacheFilename(root))
+ }
+ log.Debug("Pruned old blocks", "pruneSlot", pruneSlot)
+ return
+}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
new file mode 100644
index 00000000000..e0ebf2a80f2
--- /dev/null
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
@@ -0,0 +1,153 @@
+package fork_graph
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+
+ "github.com/golang/snappy"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/pierrec/lz4"
+ "github.com/spf13/afero"
+)
+
+func getBeaconStateFilename(blockRoot libcommon.Hash) string {
+ return fmt.Sprintf("%x.snappy_ssz", blockRoot)
+}
+
+func getBeaconStateCacheFilename(blockRoot libcommon.Hash) string {
+ return fmt.Sprintf("%x.cache", blockRoot)
+}
+
+func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *state.CachingBeaconState, err error) {
+ var file afero.File
+ file, err = f.fs.Open(getBeaconStateFilename(blockRoot))
+
+ if err != nil {
+ return
+ }
+ defer file.Close()
+ // Read the version
+ v := []byte{0}
+ if _, err := file.Read(v); err != nil {
+ return nil, err
+ }
+ // Read the length
+ lengthBytes := make([]byte, 8)
+ _, err = file.Read(lengthBytes)
+ if err != nil {
+ return
+ }
+ // Grow the snappy buffer
+ f.sszSnappyBuffer.Grow(int(binary.BigEndian.Uint64(lengthBytes)))
+ // Read the snappy buffer
+ sszSnappyBuffer := f.sszSnappyBuffer.Bytes()
+ sszSnappyBuffer = sszSnappyBuffer[:cap(sszSnappyBuffer)]
+ var n int
+ n, err = file.Read(sszSnappyBuffer)
+ if err != nil {
+ return
+ }
+
+ decLen, err := snappy.DecodedLen(sszSnappyBuffer[:n])
+ if err != nil {
+ return
+ }
+ // Grow the plain ssz buffer
+ f.sszBuffer.Grow(decLen)
+ sszBuffer := f.sszBuffer.Bytes()
+ sszBuffer, err = snappy.Decode(sszBuffer, sszSnappyBuffer[:n])
+ if err != nil {
+ return
+ }
+ bs = state.New(f.beaconCfg)
+ err = bs.DecodeSSZ(sszBuffer, int(v[0]))
+ // decode the cache file
+ cacheFile, err := f.fs.Open(getBeaconStateCacheFilename(blockRoot))
+ if err != nil {
+ return
+ }
+ defer cacheFile.Close()
+
+ lz4Reader := lz4PoolReaderPool.Get().(*lz4.Reader)
+ defer lz4PoolReaderPool.Put(lz4Reader)
+
+ lz4Reader.Reset(cacheFile)
+
+ if err := bs.DecodeCaches(lz4Reader); err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+// dumpBeaconStateOnDisk dumps a beacon state on disk in ssz snappy format
+func (f *forkGraphDisk) dumpBeaconStateOnDisk(bs *state.CachingBeaconState, blockRoot libcommon.Hash) (err error) {
+ // Truncate and then grow the buffer to the size of the state.
+ encodingSizeSSZ := bs.EncodingSizeSSZ()
+ f.sszBuffer.Grow(encodingSizeSSZ)
+ f.sszBuffer.Reset()
+
+ sszBuffer := f.sszBuffer.Bytes()
+ sszBuffer, err = bs.EncodeSSZ(sszBuffer)
+ if err != nil {
+ return
+ }
+ // Grow the snappy buffer
+ f.sszSnappyBuffer.Grow(snappy.MaxEncodedLen(len(sszBuffer)))
+ // Compress the ssz buffer
+ sszSnappyBuffer := f.sszSnappyBuffer.Bytes()
+ sszSnappyBuffer = sszSnappyBuffer[:cap(sszSnappyBuffer)]
+ sszSnappyBuffer = snappy.Encode(sszSnappyBuffer, sszBuffer)
+ var dumpedFile afero.File
+ dumpedFile, err = f.fs.OpenFile(getBeaconStateFilename(blockRoot), os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0o755)
+ if err != nil {
+ return
+ }
+ defer dumpedFile.Close()
+ // First write the hard fork version
+ _, err = dumpedFile.Write([]byte{byte(bs.Version())})
+ if err != nil {
+ return
+ }
+ // Second write the length
+ length := make([]byte, 8)
+ binary.BigEndian.PutUint64(length, uint64(len(sszSnappyBuffer)))
+ _, err = dumpedFile.Write(length)
+ if err != nil {
+ return
+ }
+ // Lastly dump the state
+ _, err = dumpedFile.Write(sszSnappyBuffer)
+ if err != nil {
+ return
+ }
+
+ err = dumpedFile.Sync()
+ if err != nil {
+ return
+ }
+
+ cacheFile, err := f.fs.OpenFile(getBeaconStateCacheFilename(blockRoot), os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0o755)
+ if err != nil {
+ return
+ }
+ defer cacheFile.Close()
+
+ lz4Writer := lz4PoolWriterPool.Get().(*lz4.Writer)
+ defer lz4PoolWriterPool.Put(lz4Writer)
+
+ lz4Writer.CompressionLevel = 5
+ lz4Writer.Reset(cacheFile)
+
+ if err := bs.EncodeCaches(lz4Writer); err != nil {
+ return err
+ }
+ if err = lz4Writer.Flush(); err != nil {
+ return
+ }
+ err = cacheFile.Sync()
+
+ return
+}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_test.go b/cl/phase1/forkchoice/fork_graph/fork_graph_test.go
index 38754bb3b7e..67820471a8d 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_test.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/spf13/afero"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
@@ -21,14 +22,15 @@ var block2 []byte
//go:embed test_data/anchor_state.ssz_snappy
var anchor []byte
-func TestForkGraph(t *testing.T) {
- blockA, blockB, blockC := &cltypes.SignedBeaconBlock{}, &cltypes.SignedBeaconBlock{}, &cltypes.SignedBeaconBlock{}
+func TestForkGraphInDisk(t *testing.T) {
+ blockA, blockB, blockC := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig),
+ cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig), cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
anchorState := state.New(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(blockA, block1, int(clparams.Phase0Version)))
require.NoError(t, utils.DecodeSSZSnappy(blockB, block2, int(clparams.Phase0Version)))
require.NoError(t, utils.DecodeSSZSnappy(blockC, block2, int(clparams.Phase0Version)))
require.NoError(t, utils.DecodeSSZSnappy(anchorState, anchor, int(clparams.Phase0Version)))
- graph := New(anchorState, false)
+ graph := NewForkGraphDisk(anchorState, afero.NewMemMapFs())
_, status, err := graph.AddChainSegment(blockA, true)
require.NoError(t, err)
require.Equal(t, status, Success)
@@ -45,5 +47,4 @@ func TestForkGraph(t *testing.T) {
_, status, err = graph.AddChainSegment(blockB, true)
require.NoError(t, err)
require.Equal(t, status, PreValidated)
- graph.removeOldData()
}
diff --git a/cl/phase1/forkchoice/fork_graph/interface.go b/cl/phase1/forkchoice/fork_graph/interface.go
new file mode 100644
index 00000000000..66a2edd0e83
--- /dev/null
+++ b/cl/phase1/forkchoice/fork_graph/interface.go
@@ -0,0 +1,33 @@
+package fork_graph
+
+import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+)
+
+/*
+* The state store process is related to graph theory in the sense that the Ethereum blockchain can be thought of as a directed graph,
+* where each block represents a node and the links between blocks represent directed edges.
+* In this context, rolling back the state of Ethereum to a previous state can be thought of as traversing the graph in reverse,
+* from the current state to a previous state.
+* The process of reverting the state involves undoing the changes made in the blocks that have been added to the blockchain since the previous state.
+* This can be thought of as "reversing the edges" in the graph, effectively undoing the changes made to the state of Ethereum.
+* By thinking of the Ethereum blockchain as a graph, we can use graph theory concepts, such as traversal algorithms,
+* to analyze and manipulate the state of the blockchain.
+ */
+type ForkGraph interface {
+ AddChainSegment(signedBlock *cltypes.SignedBeaconBlock, fullValidation bool) (*state.CachingBeaconState, ChainSegmentInsertionResult, error)
+ GetHeader(blockRoot libcommon.Hash) (*cltypes.BeaconBlockHeader, bool)
+ GetState(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+ GetCurrentJustifiedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool)
+ GetFinalizedCheckpoint(blockRoot libcommon.Hash) (solid.Checkpoint, bool)
+ MarkHeaderAsInvalid(blockRoot libcommon.Hash)
+ AnchorSlot() uint64
+ Prune(uint64) error
+
+ // extra methods for validator api
+ GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error)
+ GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+}
diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go
index b85053db109..bbe79bc4549 100644
--- a/cl/phase1/forkchoice/forkchoice.go
+++ b/cl/phase1/forkchoice/forkchoice.go
@@ -1,26 +1,39 @@
package forkchoice
import (
+ "context"
"sync"
+ "github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/freezer"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
state2 "github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/execution_client"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "golang.org/x/exp/slices"
lru "github.com/hashicorp/golang-lru/v2"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
)
type checkpointComparable string
const (
checkpointsPerCache = 1024
- allowedCachedStates = 4
+ allowedCachedStates = 8
)
+type preverifiedAppendListsSizes struct {
+ validatorLength uint64
+ historicalRootsLength uint64
+ historicalSummariesLength uint64
+}
+
type ForkChoiceStore struct {
+ ctx context.Context
time uint64
highestSeen uint64
justifiedCheckpoint solid.Checkpoint
@@ -28,19 +41,31 @@ type ForkChoiceStore struct {
unrealizedJustifiedCheckpoint solid.Checkpoint
unrealizedFinalizedCheckpoint solid.Checkpoint
proposerBoostRoot libcommon.Hash
+ headHash libcommon.Hash
+ headSlot uint64
+ genesisTime uint64
+ childrens map[libcommon.Hash]childrens
+
// Use go map because this is actually an unordered set
equivocatingIndicies map[uint64]struct{}
- forkGraph *fork_graph.ForkGraph
+ forkGraph fork_graph.ForkGraph
// I use the cache due to the convenient auto-cleanup feauture.
- checkpointStates *lru.Cache[checkpointComparable, *checkpointState] // We keep ssz snappy of it as the full beacon state is full of rendundant data.
+ checkpointStates map[checkpointComparable]*checkpointState // We keep ssz snappy of it as the full beacon state is full of rendundant data.
latestMessages map[uint64]*LatestMessage
+ anchorPublicKeys []byte
// We keep track of them so that we can forkchoice with EL.
eth2Roots *lru.Cache[libcommon.Hash, libcommon.Hash] // ETH2 root -> ETH1 hash
- mu sync.Mutex
+ // preverifid sizes
+ preverifiedSizes *lru.Cache[libcommon.Hash, preverifiedAppendListsSizes]
+
+ mu sync.Mutex
// EL
engine execution_client.ExecutionEngine
// freezer
recorder freezer.Freezer
+ // operations pool
+ operationsPool pool.OperationsPool
+ beaconCfg *clparams.BeaconChainConfig
}
type LatestMessage struct {
@@ -48,8 +73,13 @@ type LatestMessage struct {
Root libcommon.Hash
}
+type childrens struct {
+ childrenHashes []libcommon.Hash
+ parentSlot uint64 // we keep this one for pruning
+}
+
// NewForkChoiceStore initialize a new store from the given anchor state, either genesis or checkpoint sync state.
-func NewForkChoiceStore(anchorState *state2.CachingBeaconState, engine execution_client.ExecutionEngine, recorder freezer.Freezer, enabledPruning bool) (*ForkChoiceStore, error) {
+func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconState, engine execution_client.ExecutionEngine, recorder freezer.Freezer, operationsPool pool.OperationsPool, forkGraph fork_graph.ForkGraph) (*ForkChoiceStore, error) {
anchorRoot, err := anchorState.BlockRoot()
if err != nil {
return nil, err
@@ -58,28 +88,51 @@ func NewForkChoiceStore(anchorState *state2.CachingBeaconState, engine execution
anchorRoot,
state2.Epoch(anchorState.BeaconState),
)
- checkpointStates, err := lru.New[checkpointComparable, *checkpointState](allowedCachedStates)
+
+ eth2Roots, err := lru.New[libcommon.Hash, libcommon.Hash](checkpointsPerCache)
if err != nil {
return nil, err
}
- eth2Roots, err := lru.New[libcommon.Hash, libcommon.Hash](checkpointsPerCache)
+ anchorPublicKeys := make([]byte, anchorState.ValidatorLength()*length.Bytes48)
+ for idx := 0; idx < anchorState.ValidatorLength(); idx++ {
+ pk, err := anchorState.ValidatorPublicKey(idx)
+ if err != nil {
+ return nil, err
+ }
+ copy(anchorPublicKeys[idx*length.Bytes48:], pk[:])
+ }
+
+ preverifiedSizes, err := lru.New[libcommon.Hash, preverifiedAppendListsSizes](checkpointsPerCache * 10)
if err != nil {
return nil, err
}
+ preverifiedSizes.Add(anchorRoot, preverifiedAppendListsSizes{
+ validatorLength: uint64(anchorState.ValidatorLength()),
+ historicalRootsLength: anchorState.HistoricalRootsLength(),
+ historicalSummariesLength: anchorState.HistoricalSummariesLength(),
+ })
+
return &ForkChoiceStore{
+ ctx: ctx,
highestSeen: anchorState.Slot(),
time: anchorState.GenesisTime() + anchorState.BeaconConfig().SecondsPerSlot*anchorState.Slot(),
justifiedCheckpoint: anchorCheckpoint.Copy(),
finalizedCheckpoint: anchorCheckpoint.Copy(),
unrealizedJustifiedCheckpoint: anchorCheckpoint.Copy(),
unrealizedFinalizedCheckpoint: anchorCheckpoint.Copy(),
- forkGraph: fork_graph.New(anchorState, enabledPruning),
+ forkGraph: forkGraph,
equivocatingIndicies: map[uint64]struct{}{},
latestMessages: map[uint64]*LatestMessage{},
- checkpointStates: checkpointStates,
+ checkpointStates: make(map[checkpointComparable]*checkpointState),
eth2Roots: eth2Roots,
engine: engine,
recorder: recorder,
+ operationsPool: operationsPool,
+ anchorPublicKeys: anchorPublicKeys,
+ genesisTime: anchorState.GenesisTime(),
+ beaconCfg: anchorState.BeaconConfig(),
+ childrens: make(map[libcommon.Hash]childrens),
+ preverifiedSizes: preverifiedSizes,
}, nil
}
@@ -90,6 +143,36 @@ func (f *ForkChoiceStore) HighestSeen() uint64 {
return f.highestSeen
}
+func (f *ForkChoiceStore) children(parent libcommon.Hash) []libcommon.Hash {
+ children, ok := f.childrens[parent]
+ if !ok {
+ return nil
+ }
+ return children.childrenHashes
+}
+
+// updateChildren adds a new child to the parent node hash.
+func (f *ForkChoiceStore) updateChildren(parentSlot uint64, parent, child libcommon.Hash) {
+ c, ok := f.childrens[parent]
+ if !ok {
+ c = childrens{}
+ }
+ c.parentSlot = parentSlot // can be innacurate.
+ if slices.Contains(c.childrenHashes, child) {
+ return
+ }
+ c.childrenHashes = append(c.childrenHashes, child)
+ f.childrens[parent] = c
+}
+
+// AdvanceHighestSeen advances the highest seen block by n and returns the new slot after the change
+func (f *ForkChoiceStore) AdvanceHighestSeen(n uint64) uint64 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.highestSeen += n
+ return f.highestSeen
+}
+
// Time returns current time
func (f *ForkChoiceStore) Time() uint64 {
f.mu.Lock()
@@ -111,6 +194,13 @@ func (f *ForkChoiceStore) JustifiedCheckpoint() solid.Checkpoint {
return f.justifiedCheckpoint
}
+// FinalizedCheckpoint returns justified checkpoint
+func (f *ForkChoiceStore) JustifiedSlot() uint64 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.computeStartSlotAtEpoch(f.justifiedCheckpoint.Epoch())
+}
+
// FinalizedCheckpoint returns justified checkpoint
func (f *ForkChoiceStore) FinalizedCheckpoint() solid.Checkpoint {
f.mu.Lock()
@@ -146,3 +236,40 @@ func (f *ForkChoiceStore) AnchorSlot() uint64 {
defer f.mu.Unlock()
return f.forkGraph.AnchorSlot()
}
+
+func (f *ForkChoiceStore) GetStateAtBlockRoot(blockRoot libcommon.Hash, alwaysCopy bool) (*state2.CachingBeaconState, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.forkGraph.GetState(blockRoot, alwaysCopy)
+}
+func (f *ForkChoiceStore) GetStateAtStateRoot(stateRoot libcommon.Hash, alwaysCopy bool) (*state2.CachingBeaconState, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.forkGraph.GetState(stateRoot, alwaysCopy)
+}
+func (f *ForkChoiceStore) GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.forkGraph.GetStateAtSlot(slot, alwaysCopy)
+}
+
+func (f *ForkChoiceStore) PreverifiedValidator(blockRoot libcommon.Hash) uint64 {
+ if ret, ok := f.preverifiedSizes.Get(blockRoot); ok {
+ return ret.validatorLength
+ }
+ return 0
+}
+
+func (f *ForkChoiceStore) PreverifiedHistoricalRoots(blockRoot libcommon.Hash) uint64 {
+ if ret, ok := f.preverifiedSizes.Get(blockRoot); ok {
+ return ret.historicalRootsLength
+ }
+ return 0
+}
+
+func (f *ForkChoiceStore) PreverifiedHistoricalSummaries(blockRoot libcommon.Hash) uint64 {
+ if ret, ok := f.preverifiedSizes.Get(blockRoot); ok {
+ return ret.historicalSummariesLength
+ }
+ return 0
+}
diff --git a/cl/phase1/forkchoice/forkchoice_slot.go b/cl/phase1/forkchoice/forkchoice_slot.go
new file mode 100644
index 00000000000..ef71778dcad
--- /dev/null
+++ b/cl/phase1/forkchoice/forkchoice_slot.go
@@ -0,0 +1 @@
+package forkchoice
diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go
index ce821f1db6c..e1300c2c022 100644
--- a/cl/phase1/forkchoice/get_head.go
+++ b/cl/phase1/forkchoice/get_head.go
@@ -17,19 +17,22 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) {
}
func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
+ if f.headHash != (libcommon.Hash{}) {
+ return f.headHash, f.headSlot, nil
+ }
// Retrieve att
- head := f.justifiedCheckpoint.BlockRoot()
- blocks := f.getFilteredBlockTree(head)
+ f.headHash = f.justifiedCheckpoint.BlockRoot()
+ blocks := f.getFilteredBlockTree(f.headHash)
// See which validators can be used for attestation score
justificationState, err := f.getCheckpointState(f.justifiedCheckpoint)
if err != nil {
return libcommon.Hash{}, 0, err
}
// Filter all validators deemed as bad
- filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState.validators, justificationState.epoch)
+ filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState, justificationState.epoch)
for {
// Filter out current head children.
- unfilteredChildren := f.forkGraph.GetChildren(head)
+ unfilteredChildren := f.children(f.headHash)
children := []libcommon.Hash{}
for _, child := range unfilteredChildren {
if _, ok := blocks[child]; ok {
@@ -38,15 +41,16 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
}
// Stop if we dont have any more children
if len(children) == 0 {
- header, hasHeader := f.forkGraph.GetHeader(head)
+ header, hasHeader := f.forkGraph.GetHeader(f.headHash)
if !hasHeader {
return libcommon.Hash{}, 0, fmt.Errorf("no slot for head is stored")
}
- return head, header.Slot, nil
+ f.headSlot = header.Slot
+ return f.headHash, f.headSlot, nil
}
// Average case scenario.
if len(children) == 1 {
- head = children[0]
+ f.headHash = children[0]
continue
}
// Sort children by lexigographical order
@@ -57,13 +61,13 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
})
// After sorting is done determine best fit.
- head = children[0]
+ f.headHash = children[0]
maxWeight := f.getWeight(children[0], filteredIndicies, justificationState)
for i := 1; i < len(children); i++ {
weight := f.getWeight(children[i], filteredIndicies, justificationState)
// Lexicographical order is king.
if weight >= maxWeight {
- head = children[i]
+ f.headHash = children[i]
maxWeight = weight
}
}
@@ -71,10 +75,10 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) {
}
// filterValidatorSetForAttestationScores preliminarly filter the validator set obliging to consensus rules.
-func (f *ForkChoiceStore) filterValidatorSetForAttestationScores(validatorSet []*checkpointValidator, epoch uint64) []uint64 {
- filtered := make([]uint64, 0, len(validatorSet))
- for validatorIndex, validator := range validatorSet {
- if !validator.active(epoch) || validator.slashed {
+func (f *ForkChoiceStore) filterValidatorSetForAttestationScores(c *checkpointState, epoch uint64) []uint64 {
+ filtered := make([]uint64, 0, c.validatorSetSize)
+ for validatorIndex := 0; validatorIndex < c.validatorSetSize; validatorIndex++ {
+ if !readFromBitset(c.actives, validatorIndex) || readFromBitset(c.slasheds, validatorIndex) {
continue
}
if _, hasLatestMessage := f.latestMessages[uint64(validatorIndex)]; !hasLatestMessage {
@@ -94,14 +98,13 @@ func (f *ForkChoiceStore) getWeight(root libcommon.Hash, indicies []uint64, stat
if !has {
return 0
}
- validators := state.validators
// Compute attestation score
var attestationScore uint64
for _, validatorIndex := range indicies {
if f.Ancestor(f.latestMessages[validatorIndex].Root, header.Slot) != root {
continue
}
- attestationScore += validators[validatorIndex].balance
+ attestationScore += state.balances[validatorIndex]
}
if f.proposerBoostRoot == (libcommon.Hash{}) {
return attestationScore
@@ -130,7 +133,7 @@ func (f *ForkChoiceStore) getFilterBlockTree(blockRoot libcommon.Hash, blocks ma
if !has {
return false
}
- children := f.forkGraph.GetChildren(blockRoot)
+ children := f.children(blockRoot)
// If there are children iterate down recursively and see which branches are viable.
if len(children) > 0 {
isAnyViable := false
@@ -153,7 +156,7 @@ func (f *ForkChoiceStore) getFilterBlockTree(blockRoot libcommon.Hash, blocks ma
return false
}
- genesisEpoch := f.forkGraph.Config().GenesisEpoch
+ genesisEpoch := f.beaconCfg.GenesisEpoch
if (f.justifiedCheckpoint.Epoch() == genesisEpoch || currentJustifiedCheckpoint.Equal(f.justifiedCheckpoint)) &&
(f.finalizedCheckpoint.Epoch() == genesisEpoch || finalizedJustifiedCheckpoint.Equal(f.finalizedCheckpoint)) {
blocks[blockRoot] = header
diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go
new file mode 100644
index 00000000000..96d34abd561
--- /dev/null
+++ b/cl/phase1/forkchoice/interface.go
@@ -0,0 +1,42 @@
+package forkchoice
+
+import (
+ "github.com/ledgerwatch/erigon-lib/common"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+)
+
+type ForkChoiceStorage interface {
+ ForkChoiceStorageWriter
+ ForkChoiceStorageReader
+}
+
+type ForkChoiceStorageReader interface {
+ Ancestor(root common.Hash, slot uint64) common.Hash
+ AnchorSlot() uint64
+ Engine() execution_client.ExecutionEngine
+ FinalizedCheckpoint() solid.Checkpoint
+ FinalizedSlot() uint64
+ GetEth1Hash(eth2Root common.Hash) common.Hash
+ GetHead() (common.Hash, uint64, error)
+ HighestSeen() uint64
+ JustifiedCheckpoint() solid.Checkpoint
+ JustifiedSlot() uint64
+ ProposerBoostRoot() common.Hash
+ GetStateAtBlockRoot(blockRoot libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+ Slot() uint64
+ Time() uint64
+
+ GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error)
+ GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error)
+}
+
+type ForkChoiceStorageWriter interface {
+ OnAttestation(attestation *solid.Attestation, fromBlock bool) error
+ OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error
+ OnBlock(block *cltypes.SignedBeaconBlock, newPayload bool, fullValidation bool) error
+ OnTick(time uint64)
+}
diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go
index c5fc7ac3583..ed4b0ce674c 100644
--- a/cl/phase1/forkchoice/on_attestation.go
+++ b/cl/phase1/forkchoice/on_attestation.go
@@ -2,22 +2,30 @@ package forkchoice
import (
"fmt"
+ "time"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/cache"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/log/v3"
libcommon "github.com/ledgerwatch/erigon-lib/common"
)
-// OnAttestation processes incoming attestations. TODO(Giulio2002): finish it with forward changesets.
+// OnAttestation processes incoming attestations.
func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBlock bool) error {
f.mu.Lock()
defer f.mu.Unlock()
+ f.headHash = libcommon.Hash{}
data := attestation.AttestantionData()
if err := f.validateOnAttestation(attestation, fromBlock); err != nil {
return err
}
+ // Schedule for later processing.
+ if f.Slot() < attestation.AttestantionData().Slot()+1 {
+ f.scheduleAttestationForLaterProcessing(attestation, fromBlock)
+ return nil
+ }
target := data.Target()
if cachedIndicies, ok := cache.LoadAttestatingIndicies(&data, attestation.AggregationBits()); ok {
f.processAttestingIndicies(attestation, cachedIndicies)
@@ -50,11 +58,33 @@ func (f *ForkChoiceStore) OnAttestation(attestation *solid.Attestation, fromBloc
return fmt.Errorf("invalid attestation")
}
}
+ cache.StoreAttestation(&data, attestation.AggregationBits(), attestationIndicies)
// Lastly update latest messages.
f.processAttestingIndicies(attestation, attestationIndicies)
return nil
}
+// scheduleAttestationForLaterProcessing scheudules an attestation for later processing
+func (f *ForkChoiceStore) scheduleAttestationForLaterProcessing(attestation *solid.Attestation, fromBlock bool) {
+ go func() {
+ logInterval := time.NewTicker(50 * time.Millisecond)
+ for {
+ select {
+ case <-f.ctx.Done():
+ return
+ case <-logInterval.C:
+ if f.Slot() < attestation.AttestantionData().Slot()+1 {
+ continue
+ }
+ if err := f.OnAttestation(attestation, false); err != nil {
+ log.Trace("could not process scheduled attestation", "reason", err)
+ }
+ return
+ }
+ }
+ }()
+}
+
func (f *ForkChoiceStore) processAttestingIndicies(attestation *solid.Attestation, indicies []uint64) {
beaconBlockRoot := attestation.AttestantionData().BeaconBlockRoot()
target := attestation.AttestantionData().Target()
@@ -99,9 +129,7 @@ func (f *ForkChoiceStore) validateOnAttestation(attestation *solid.Attestation,
if ancestorRoot != target.BlockRoot() {
return fmt.Errorf("ancestor root mismatches with target")
}
- if f.Slot() < attestation.AttestantionData().Slot()+1 {
- return fmt.Errorf("future attestation")
- }
+
return nil
}
@@ -111,8 +139,8 @@ func (f *ForkChoiceStore) validateTargetEpochAgainstCurrentTime(attestation *sol
currentEpoch := f.computeEpochAtSlot(f.Slot())
// Use GENESIS_EPOCH for previous when genesis to avoid underflow
previousEpoch := currentEpoch - 1
- if currentEpoch <= f.forkGraph.Config().GenesisEpoch {
- previousEpoch = f.forkGraph.Config().GenesisEpoch
+ if currentEpoch <= f.beaconCfg.GenesisEpoch {
+ previousEpoch = f.beaconCfg.GenesisEpoch
}
if target.Epoch() == currentEpoch || target.Epoch() == previousEpoch {
return nil
diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go
index a51d702ffae..1c4ea9d5dc3 100644
--- a/cl/phase1/forkchoice/on_attester_slashing.go
+++ b/cl/phase1/forkchoice/on_attester_slashing.go
@@ -3,48 +3,122 @@ package forkchoice
import (
"fmt"
+ "github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/pool"
"github.com/ledgerwatch/erigon/cl/cltypes"
)
-func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing) error {
+func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterSlashing, test bool) error {
+ if f.operationsPool.AttesterSlashingsPool.Has(pool.ComputeKeyForAttesterSlashing(attesterSlashing)) {
+ return nil
+ }
f.mu.Lock()
- defer f.mu.Unlock()
- // Check if these attestation is even slashable.
+ // Check if this attestation is even slashable.
attestation1 := attesterSlashing.Attestation_1
attestation2 := attesterSlashing.Attestation_2
if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) {
+ f.mu.Unlock()
return fmt.Errorf("attestation data is not slashable")
}
// Retrieve justified state
- s, _, err := f.forkGraph.GetState(f.justifiedCheckpoint.BlockRoot(), false)
+ s, err := f.forkGraph.GetState(f.justifiedCheckpoint.BlockRoot(), false)
if err != nil {
+ f.mu.Unlock()
return err
}
if s == nil {
+ f.mu.Unlock()
return fmt.Errorf("justified checkpoint state not accessible")
}
- // Verify validity of slashings
- valid, err := state.IsValidIndexedAttestation(s, attestation1)
+ attestation1PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation1)
if err != nil {
- return fmt.Errorf("error calculating indexed attestation 1 validity: %v", err)
+ f.mu.Unlock()
+ return err
}
- if !valid {
- return fmt.Errorf("invalid indexed attestation 1")
+ attestation2PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation2)
+ if err != nil {
+ f.mu.Unlock()
+ return err
}
-
- valid, err = state.IsValidIndexedAttestation(s, attestation2)
+ domain1, err := s.GetDomain(s.BeaconConfig().DomainBeaconAttester, attestation1.Data.Target().Epoch())
+ if err != nil {
+ return fmt.Errorf("unable to get the domain: %v", err)
+ }
+ domain2, err := s.GetDomain(s.BeaconConfig().DomainBeaconAttester, attestation2.Data.Target().Epoch())
if err != nil {
- return fmt.Errorf("error calculating indexed attestation 2 validity: %v", err)
+ return fmt.Errorf("unable to get the domain: %v", err)
}
- if !valid {
- return fmt.Errorf("invalid indexed attestation 2")
+ f.mu.Unlock()
+
+ if !test {
+ // Verify validity of slashings (1)
+ signingRoot, err := fork.ComputeSigningRoot(attestation1.Data, domain1)
+ if err != nil {
+ return fmt.Errorf("unable to get signing root: %v", err)
+ }
+
+ valid, err := bls.VerifyAggregate(attestation1.Signature[:], signingRoot[:], attestation1PublicKeys)
+ if err != nil {
+ return fmt.Errorf("error while validating signature: %v", err)
+ }
+ if !valid {
+ return fmt.Errorf("invalid aggregate signature")
+ }
+ // Verify validity of slashings (2)
+ signingRoot, err = fork.ComputeSigningRoot(attestation2.Data, domain2)
+ if err != nil {
+ return fmt.Errorf("unable to get signing root: %v", err)
+ }
+
+ valid, err = bls.VerifyAggregate(attestation2.Signature[:], signingRoot[:], attestation2PublicKeys)
+ if err != nil {
+ return fmt.Errorf("error while validating signature: %v", err)
+ }
+ if !valid {
+ return fmt.Errorf("invalid aggregate signature")
+ }
}
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ var anySlashed bool
for _, index := range solid.IntersectionOfSortedSets(attestation1.AttestingIndices, attestation2.AttestingIndices) {
f.equivocatingIndicies[index] = struct{}{}
+ if !anySlashed {
+ v, err := s.ValidatorForValidatorIndex(int(index))
+ if err != nil {
+ return fmt.Errorf("unable to retrieve state: %v", err)
+ }
+ if v.IsSlashable(state.Epoch(s)) {
+ anySlashed = true
+ }
+ }
+ }
+ if anySlashed {
+ f.operationsPool.AttesterSlashingsPool.Insert(pool.ComputeKeyForAttesterSlashing(attesterSlashing), attesterSlashing)
}
- // add attestation indicies to equivocating indicies.
return nil
}
+
+func getIndexedAttestationPublicKeys(b *state.CachingBeaconState, att *cltypes.IndexedAttestation) ([][]byte, error) {
+ inds := att.AttestingIndices
+ if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) {
+ return nil, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null")
+ }
+ pks := make([][]byte, 0, inds.Length())
+ if err := solid.RangeErr[uint64](inds, func(_ int, v uint64, _ int) error {
+ val, err := b.ValidatorForValidatorIndex(int(v))
+ if err != nil {
+ return err
+ }
+ pk := val.PublicKey()
+ pks = append(pks, pk[:])
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return pks, nil
+}
diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go
index 7435dadf4cb..2e709f7b0c1 100644
--- a/cl/phase1/forkchoice/on_block.go
+++ b/cl/phase1/forkchoice/on_block.go
@@ -2,7 +2,9 @@ package forkchoice
import (
"fmt"
+ "time"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/cl/cltypes"
@@ -14,6 +16,8 @@ import (
func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload, fullValidation bool) error {
f.mu.Lock()
defer f.mu.Unlock()
+ f.headHash = libcommon.Hash{}
+ start := time.Now()
blockRoot, err := block.Block.HashSSZ()
if err != nil {
return err
@@ -27,7 +31,17 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
return nil
}
- config := f.forkGraph.Config()
+ var invalidBlock bool
+ if newPayload && f.engine != nil {
+ if invalidBlock, err = f.engine.NewPayload(block.Block.Body.ExecutionPayload, &block.Block.ParentRoot); err != nil {
+ if invalidBlock {
+ f.forkGraph.MarkHeaderAsInvalid(blockRoot)
+ }
+ log.Warn("newPayload failed", "err", err)
+ return err
+ }
+ }
+
lastProcessedState, status, err := f.forkGraph.AddChainSegment(block, fullValidation)
if err != nil {
return err
@@ -36,6 +50,7 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
case fork_graph.PreValidated:
return nil
case fork_graph.Success:
+ f.updateChildren(block.Block.Slot-1, block.Block.ParentRoot, blockRoot) // parent slot can be innacurate
case fork_graph.BelowAnchor:
log.Debug("replay block", "code", status)
return nil
@@ -45,31 +60,26 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
if block.Block.Body.ExecutionPayload != nil {
f.eth2Roots.Add(blockRoot, block.Block.Body.ExecutionPayload.BlockHash)
}
- var invalidBlock bool
- if newPayload && f.engine != nil {
- if invalidBlock, err = f.engine.NewPayload(block.Block.Body.ExecutionPayload, &block.Block.ParentRoot); err != nil {
- log.Warn("newPayload failed", "err", err)
- return err
- }
- }
- if invalidBlock {
- f.forkGraph.MarkHeaderAsInvalid(blockRoot)
- }
if block.Block.Slot > f.highestSeen {
f.highestSeen = block.Block.Slot
}
// Add proposer score boost if the block is timely
- timeIntoSlot := (f.time - f.forkGraph.GenesisTime()) % lastProcessedState.BeaconConfig().SecondsPerSlot
- isBeforeAttestingInterval := timeIntoSlot < config.SecondsPerSlot/config.IntervalsPerSlot
- if f.Slot() == block.Block.Slot && isBeforeAttestingInterval {
+ timeIntoSlot := (f.time - f.genesisTime) % lastProcessedState.BeaconConfig().SecondsPerSlot
+ isBeforeAttestingInterval := timeIntoSlot < f.beaconCfg.SecondsPerSlot/f.beaconCfg.IntervalsPerSlot
+ if f.Slot() == block.Block.Slot && isBeforeAttestingInterval && f.proposerBoostRoot == (libcommon.Hash{}) {
f.proposerBoostRoot = blockRoot
}
- if lastProcessedState.Slot()%f.forkGraph.Config().SlotsPerEpoch == 0 {
+ if lastProcessedState.Slot()%f.beaconCfg.SlotsPerEpoch == 0 {
if err := freezer.PutObjectSSZIntoFreezer("beaconState", "caplin_core", lastProcessedState.Slot(), lastProcessedState, f.recorder); err != nil {
return err
}
}
+ f.preverifiedSizes.Add(blockRoot, preverifiedAppendListsSizes{
+ validatorLength: uint64(lastProcessedState.ValidatorLength()),
+ historicalRootsLength: lastProcessedState.HistoricalRootsLength(),
+ historicalSummariesLength: lastProcessedState.HistoricalSummariesLength(),
+ })
// Update checkpoints
f.updateCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy())
// First thing save previous values of the checkpoints (avoid memory copy of all states and ensure easy revert)
@@ -80,9 +90,10 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
justificationBits = lastProcessedState.JustificationBits().Copy()
)
// Eagerly compute unrealized justification and finality
- if err := statechange.ProcessJustificationBitsAndFinality(lastProcessedState); err != nil {
+ if err := statechange.ProcessJustificationBitsAndFinality(lastProcessedState, nil); err != nil {
return err
}
+ f.operationsPool.NotifyBlock(block.Block)
f.updateUnrealizedCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy())
// Set the changed value pre-simulation
lastProcessedState.SetPreviousJustifiedCheckpoint(previousJustifiedCheckpoint)
@@ -95,5 +106,6 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload,
if blockEpoch < currentEpoch {
f.updateCheckpoints(lastProcessedState.CurrentJustifiedCheckpoint().Copy(), lastProcessedState.FinalizedCheckpoint().Copy())
}
+ log.Debug("OnBlock", "elapsed", time.Since(start))
return nil
}
diff --git a/cl/phase1/forkchoice/on_operations.go b/cl/phase1/forkchoice/on_operations.go
new file mode 100644
index 00000000000..8679fb6a905
--- /dev/null
+++ b/cl/phase1/forkchoice/on_operations.go
@@ -0,0 +1,216 @@
+package forkchoice
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/Giulio2002/bls"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/fork"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "github.com/ledgerwatch/erigon/cl/utils"
+)
+
+// NOTE: This file implements non-official handlers for other types of iterations. what it does is,using the forkchoices
+// and verify external operations and eventually push them in the operations pool.
+
+// OnVoluntaryExit is a non-official handler for voluntary exit operations. it pushes the voluntary exit in the pool.
+func (f *ForkChoiceStore) OnVoluntaryExit(signedVoluntaryExit *cltypes.SignedVoluntaryExit, test bool) error {
+ voluntaryExit := signedVoluntaryExit.VoluntaryExit
+ if f.operationsPool.VoluntaryExistsPool.Has(voluntaryExit.ValidatorIndex) {
+ return nil
+ }
+ f.mu.Lock()
+
+ headHash, _, err := f.getHead()
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+ s, err := f.forkGraph.GetState(headHash, false)
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+
+ val, err := s.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex))
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+
+ if val.ExitEpoch() != f.beaconCfg.FarFutureEpoch {
+ f.mu.Unlock()
+ return nil
+ }
+
+ pk := val.PublicKey()
+ f.mu.Unlock()
+
+ domain, err := s.GetDomain(s.BeaconConfig().DomainVoluntaryExit, voluntaryExit.Epoch)
+ if err != nil {
+ return err
+ }
+ signingRoot, err := fork.ComputeSigningRoot(voluntaryExit, domain)
+ if err != nil {
+ return err
+ }
+ if !test {
+ valid, err := bls.Verify(signedVoluntaryExit.Signature[:], signingRoot[:], pk[:])
+ if err != nil {
+ return err
+ }
+ if !valid {
+ return errors.New("ProcessVoluntaryExit: BLS verification failed")
+ }
+ }
+ f.operationsPool.VoluntaryExistsPool.Insert(voluntaryExit.ValidatorIndex, signedVoluntaryExit)
+ return nil
+}
+
+// OnProposerSlashing is a non-official handler for proposer slashing operations. it pushes the proposer slashing in the pool.
+func (f *ForkChoiceStore) OnProposerSlashing(proposerSlashing *cltypes.ProposerSlashing, test bool) (err error) {
+ if f.operationsPool.ProposerSlashingsPool.Has(pool.ComputeKeyForProposerSlashing(proposerSlashing)) {
+ return nil
+ }
+ h1 := proposerSlashing.Header1.Header
+ h2 := proposerSlashing.Header2.Header
+
+ if h1.Slot != h2.Slot {
+ return fmt.Errorf("non-matching slots on proposer slashing: %d != %d", h1.Slot, h2.Slot)
+ }
+
+ if h1.ProposerIndex != h2.ProposerIndex {
+ return fmt.Errorf("non-matching proposer indices proposer slashing: %d != %d", h1.ProposerIndex, h2.ProposerIndex)
+ }
+
+ if *h1 == *h2 {
+ return fmt.Errorf("proposee slashing headers are the same")
+ }
+
+ // Take lock as we interact with state.
+ f.mu.Lock()
+ headHash, _, err := f.getHead()
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+ s, err := f.forkGraph.GetState(headHash, false)
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+ proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex))
+ if err != nil {
+ f.mu.Unlock()
+ return fmt.Errorf("unable to retrieve state: %v", err)
+ }
+ if !proposer.IsSlashable(state.Epoch(s)) {
+ f.mu.Unlock()
+ return fmt.Errorf("proposer is not slashable: %v", proposer)
+ }
+ domain1, err := s.GetDomain(s.BeaconConfig().DomainBeaconProposer, state.GetEpochAtSlot(s.BeaconConfig(), h1.Slot))
+ if err != nil {
+ return fmt.Errorf("unable to get domain: %v", err)
+ }
+ domain2, err := s.GetDomain(s.BeaconConfig().DomainBeaconProposer, state.GetEpochAtSlot(s.BeaconConfig(), h2.Slot))
+ if err != nil {
+ return fmt.Errorf("unable to get domain: %v", err)
+ }
+ pk := proposer.PublicKey()
+ f.mu.Unlock()
+ if test {
+ f.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(proposerSlashing), proposerSlashing)
+ return nil
+ }
+ signingRoot, err := fork.ComputeSigningRoot(h1, domain1)
+ if err != nil {
+ return fmt.Errorf("unable to compute signing root: %v", err)
+ }
+ valid, err := bls.Verify(proposerSlashing.Header1.Signature[:], signingRoot[:], pk[:])
+ if err != nil {
+ return fmt.Errorf("unable to verify signature: %v", err)
+ }
+ if !valid {
+ return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", proposerSlashing.Header1.Signature[:], signingRoot[:], pk)
+ }
+ signingRoot, err = fork.ComputeSigningRoot(h2, domain2)
+ if err != nil {
+ return fmt.Errorf("unable to compute signing root: %v", err)
+ }
+
+ valid, err = bls.Verify(proposerSlashing.Header2.Signature[:], signingRoot[:], pk[:])
+ if err != nil {
+ return fmt.Errorf("unable to verify signature: %v", err)
+ }
+ if !valid {
+ return fmt.Errorf("invalid signature: signature %v, root %v, pubkey %v", proposerSlashing.Header2.Signature[:], signingRoot[:], pk)
+ }
+ f.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(proposerSlashing), proposerSlashing)
+
+ return nil
+}
+
+func (f *ForkChoiceStore) OnBlsToExecutionChange(signedChange *cltypes.SignedBLSToExecutionChange, test bool) error {
+ if f.operationsPool.BLSToExecutionChangesPool.Has(signedChange.Signature) {
+ return nil
+ }
+ change := signedChange.Message
+
+ // Take lock as we interact with state.
+ f.mu.Lock()
+
+ headHash, _, err := f.getHead()
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+ s, err := f.forkGraph.GetState(headHash, false)
+ if err != nil {
+ f.mu.Unlock()
+ return err
+ }
+ validator, err := s.ValidatorForValidatorIndex(int(change.ValidatorIndex))
+ if err != nil {
+ f.mu.Unlock()
+ return fmt.Errorf("unable to retrieve state: %v", err)
+ }
+ wc := validator.WithdrawalCredentials()
+
+ if wc[0] != f.beaconCfg.BLSWithdrawalPrefixByte {
+ f.mu.Unlock()
+ return fmt.Errorf("invalid withdrawal credentials prefix")
+ }
+ genesisValidatorRoot := s.GenesisValidatorsRoot()
+ f.mu.Unlock()
+ // Perform full validation if requested.
+ if !test {
+ // Check the validator's withdrawal credentials against the provided message.
+ hashedFrom := utils.Sha256(change.From[:])
+ if !bytes.Equal(hashedFrom[1:], wc[1:]) {
+ return fmt.Errorf("invalid withdrawal credentials")
+ }
+
+ // Compute the signing domain and verify the message signature.
+ domain, err := fork.ComputeDomain(f.beaconCfg.DomainBLSToExecutionChange[:], utils.Uint32ToBytes4(f.beaconCfg.GenesisForkVersion), genesisValidatorRoot)
+ if err != nil {
+ return err
+ }
+ signedRoot, err := fork.ComputeSigningRoot(change, domain)
+ if err != nil {
+ return err
+ }
+ valid, err := bls.Verify(signedChange.Signature[:], signedRoot[:], change.From[:])
+ if err != nil {
+ return err
+ }
+ if !valid {
+ return fmt.Errorf("invalid signature")
+ }
+ }
+
+ f.operationsPool.BLSToExecutionChangesPool.Insert(signedChange.Signature, signedChange)
+ return nil
+}
diff --git a/cl/phase1/forkchoice/on_tick.go b/cl/phase1/forkchoice/on_tick.go
index 4f59528d25c..6020e979900 100644
--- a/cl/phase1/forkchoice/on_tick.go
+++ b/cl/phase1/forkchoice/on_tick.go
@@ -6,9 +6,9 @@ import libcommon "github.com/ledgerwatch/erigon-lib/common"
func (f *ForkChoiceStore) OnTick(time uint64) {
f.mu.Lock()
defer f.mu.Unlock()
- tickSlot := (time - f.forkGraph.GenesisTime()) / f.forkGraph.Config().SecondsPerSlot
+ tickSlot := (time - f.genesisTime) / f.beaconCfg.SecondsPerSlot
for f.Slot() < tickSlot {
- previousTime := f.forkGraph.GenesisTime() + (f.Slot()+1)*f.forkGraph.Config().SecondsPerSlot
+ previousTime := f.genesisTime + (f.Slot()+1)*f.beaconCfg.SecondsPerSlot
f.onTickPerSlot(previousTime)
}
f.onTickPerSlot(time)
@@ -22,6 +22,7 @@ func (f *ForkChoiceStore) onTickPerSlot(time uint64) {
if currentSlot <= previousSlot {
return
}
+ f.headHash = libcommon.Hash{}
// If this is a new slot, reset store.proposer_boost_root
f.proposerBoostRoot = libcommon.Hash{}
if f.computeSlotsSinceEpochStart(currentSlot) == 0 {
diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go
index ada018f16f7..b3eaca58da7 100644
--- a/cl/phase1/forkchoice/utils.go
+++ b/cl/phase1/forkchoice/utils.go
@@ -2,6 +2,7 @@ package forkchoice
import (
"fmt"
+
"github.com/ledgerwatch/erigon/cl/transition"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -12,7 +13,7 @@ import (
// Slot calculates the current slot number using the time and genesis slot.
func (f *ForkChoiceStore) Slot() uint64 {
- return f.forkGraph.Config().GenesisSlot + ((f.time - f.forkGraph.GenesisTime()) / f.forkGraph.Config().SecondsPerSlot)
+ return f.beaconCfg.GenesisSlot + ((f.time - f.genesisTime) / f.beaconCfg.SecondsPerSlot)
}
// updateCheckpoints updates the justified and finalized checkpoints if new checkpoints have higher epochs.
@@ -21,8 +22,29 @@ func (f *ForkChoiceStore) updateCheckpoints(justifiedCheckpoint, finalizedCheckp
f.justifiedCheckpoint = justifiedCheckpoint
}
if finalizedCheckpoint.Epoch() > f.finalizedCheckpoint.Epoch() {
+ f.onNewFinalized(finalizedCheckpoint)
f.finalizedCheckpoint = finalizedCheckpoint
+
+ }
+}
+
+func (f *ForkChoiceStore) onNewFinalized(newFinalized solid.Checkpoint) {
+ // get rid of checkpoint states
+ for k := range f.checkpointStates {
+ checkpoint := solid.Checkpoint(k)
+ if checkpoint.Epoch() <= newFinalized.Epoch() {
+ delete(f.checkpointStates, k)
+ continue
+ }
+ }
+ // get rid of children
+ for k, children := range f.childrens {
+ if children.parentSlot <= newFinalized.Epoch()*f.beaconCfg.SlotsPerEpoch {
+ delete(f.childrens, k)
+ continue
+ }
}
+ f.forkGraph.Prune(newFinalized.Epoch() * f.beaconCfg.SlotsPerEpoch)
}
// updateCheckpoints updates the justified and finalized checkpoints if new checkpoints have higher epochs.
@@ -37,12 +59,12 @@ func (f *ForkChoiceStore) updateUnrealizedCheckpoints(justifiedCheckpoint, final
// computeEpochAtSlot calculates the epoch at a given slot number.
func (f *ForkChoiceStore) computeEpochAtSlot(slot uint64) uint64 {
- return slot / f.forkGraph.Config().SlotsPerEpoch
+ return slot / f.beaconCfg.SlotsPerEpoch
}
// computeStartSlotAtEpoch calculates the starting slot of a given epoch.
func (f *ForkChoiceStore) computeStartSlotAtEpoch(epoch uint64) uint64 {
- return epoch * f.forkGraph.Config().SlotsPerEpoch
+ return epoch * f.beaconCfg.SlotsPerEpoch
}
// computeSlotsSinceEpochStart calculates the number of slots since the start of the epoch of a given slot.
@@ -69,11 +91,11 @@ func (f *ForkChoiceStore) Ancestor(root libcommon.Hash, slot uint64) libcommon.H
// getCheckpointState computes and caches checkpoint states.
func (f *ForkChoiceStore) getCheckpointState(checkpoint solid.Checkpoint) (*checkpointState, error) {
// check if it can be found in cache.
- if state, ok := f.checkpointStates.Get(checkpointComparable(checkpoint)); ok {
+ if state, ok := f.checkpointStates[checkpointComparable(checkpoint)]; ok {
return state, nil
}
// If it is not in cache compute it and then put in cache.
- baseState, _, err := f.forkGraph.GetState(checkpoint.BlockRoot(), true)
+ baseState, err := f.forkGraph.GetState(checkpoint.BlockRoot(), true)
if err != nil {
return nil, err
}
@@ -95,9 +117,9 @@ func (f *ForkChoiceStore) getCheckpointState(checkpoint solid.Checkpoint) (*chec
validators[idx] = v
return true
})
- checkpointState := newCheckpointState(f.forkGraph.Config(), validators,
+ checkpointState := newCheckpointState(f.beaconCfg, f.anchorPublicKeys, validators,
mixes, baseState.GenesisValidatorsRoot(), baseState.Fork(), baseState.GetTotalActiveBalance(), state.Epoch(baseState.BeaconState))
// Cache in memory what we are left with.
- f.checkpointStates.Add(checkpointComparable(checkpoint), checkpointState)
+ f.checkpointStates[checkpointComparable(checkpoint)] = checkpointState
return checkpointState, nil
}
diff --git a/cl/phase1/main.go b/cl/phase1/main.go
index e64c2ccabfa..10f4c7070f6 100644
--- a/cl/phase1/main.go
+++ b/cl/phase1/main.go
@@ -8,22 +8,22 @@ func main() {}
// "fmt"
// "os"
-// "github.com/ledgerwatch/erigon/cl/phase1/core"
-// "github.com/ledgerwatch/erigon/cl/phase1/core/state"
-// "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
-// "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
-// network2 "github.com/ledgerwatch/erigon/cl/phase1/network"
-// stages2 "github.com/ledgerwatch/erigon/cl/phase1/stages"
-// rawdb2 "github.com/ledgerwatch/erigon/cl/phase4/rawdb"
+// "github.com/ledgerwatch/erigon-lib/cl/phase1/core"
+// "github.com/ledgerwatch/erigon-lib/cl/phase1/core/state"
+// "github.com/ledgerwatch/erigon-lib/cl/phase1/execution_client"
+// "github.com/ledgerwatch/erigon-lib/cl/phase1/forkchoice"
+// network2 "github.com/ledgerwatch/erigon-lib/cl/phase1/network"
+// stages2 "github.com/ledgerwatch/erigon-lib/cl/phase1/stages"
+// rawdb2 "github.com/ledgerwatch/erigon-lib/cl/phase4/rawdb"
// sentinelrpc "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
// "github.com/ledgerwatch/erigon-lib/kv"
// "github.com/ledgerwatch/erigon-lib/kv/mdbx"
-// "github.com/ledgerwatch/erigon/cl/clparams"
-// "github.com/ledgerwatch/erigon/cl/clparams/initial_state"
-// "github.com/ledgerwatch/erigon/cl/cltypes"
-// "github.com/ledgerwatch/erigon/cl/fork"
-// "github.com/ledgerwatch/erigon/cl/rpc"
+// "github.com/ledgerwatch/erigon-lib/cl/clparams"
+// "github.com/ledgerwatch/erigon-lib/cl/clparams/initial_state"
+// "github.com/ledgerwatch/erigon-lib/cl/cltypes"
+// "github.com/ledgerwatch/erigon-lib/cl/fork"
+// "github.com/ledgerwatch/erigon-lib/cl/rpc"
// lcCli "github.com/ledgerwatch/erigon/cmd/sentinel/cli"
// "github.com/ledgerwatch/erigon/cmd/sentinel/cli/flags"
diff --git a/cl/phase1/network/backward_beacon_downloader.go b/cl/phase1/network/backward_beacon_downloader.go
index 612406d5f05..08cf7aa80f6 100644
--- a/cl/phase1/network/backward_beacon_downloader.go
+++ b/cl/phase1/network/backward_beacon_downloader.go
@@ -2,12 +2,16 @@ package network
import (
"sync"
+ "sync/atomic"
+ "time"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
"golang.org/x/net/context"
"github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
"github.com/ledgerwatch/erigon/cl/rpc"
)
@@ -21,17 +25,30 @@ type BackwardBeaconDownloader struct {
rpc *rpc.BeaconRpcP2P
onNewBlock OnNewBlock
finished bool
+ reqInterval *time.Ticker
+ db kv.RwDB
+ neverSkip bool
mu sync.Mutex
}
-func NewBackwardBeaconDownloader(ctx context.Context, rpc *rpc.BeaconRpcP2P) *BackwardBeaconDownloader {
+func NewBackwardBeaconDownloader(ctx context.Context, rpc *rpc.BeaconRpcP2P, db kv.RwDB) *BackwardBeaconDownloader {
return &BackwardBeaconDownloader{
- ctx: ctx,
- rpc: rpc,
+ ctx: ctx,
+ rpc: rpc,
+ db: db,
+ reqInterval: time.NewTicker(300 * time.Millisecond),
+ neverSkip: true,
}
}
+// SetThrottle sets the throttle.
+func (b *BackwardBeaconDownloader) SetThrottle(throttle time.Duration) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.reqInterval.Reset(throttle)
+}
+
// SetSlotToDownload sets slot to download.
func (b *BackwardBeaconDownloader) SetSlotToDownload(slot uint64) {
b.mu.Lock()
@@ -46,6 +63,13 @@ func (b *BackwardBeaconDownloader) SetExpectedRoot(root libcommon.Hash) {
b.expectedRoot = root
}
+// SetExpectedRoot sets the expected root we expect to download.
+func (b *BackwardBeaconDownloader) SetNeverSkip(neverSkip bool) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.neverSkip = neverSkip
+}
+
// SetShouldStopAtFn sets the stop condition.
func (b *BackwardBeaconDownloader) SetOnNewBlock(onNewBlock OnNewBlock) {
b.mu.Lock()
@@ -78,21 +102,51 @@ func (b *BackwardBeaconDownloader) Peers() (uint64, error) {
// It then processes the response by iterating over the blocks in reverse order and calling a provided callback function onNewBlock on each block.
// If the callback returns an error or signals that the download should be finished, the function will exit.
// If the block's root hash does not match the expected root hash, it will be rejected and the function will continue to the next block.
-func (b *BackwardBeaconDownloader) RequestMore(ctx context.Context) {
- count := uint64(64)
+func (b *BackwardBeaconDownloader) RequestMore(ctx context.Context) error {
+ count := uint64(32)
start := b.slotToDownload - count + 1
// Overflow? round to 0.
if start > b.slotToDownload {
start = 0
}
- responses, _, err := b.rpc.SendBeaconBlocksByRangeReq(ctx, start, count)
- if err != nil {
- return
+ var atomicResp atomic.Value
+ atomicResp.Store([]*cltypes.SignedBeaconBlock{})
+
+Loop:
+ for {
+ select {
+ case <-b.reqInterval.C:
+ go func() {
+ if len(atomicResp.Load().([]*cltypes.SignedBeaconBlock)) > 0 {
+ return
+ }
+ responses, peerId, err := b.rpc.SendBeaconBlocksByRangeReq(ctx, start, count)
+ if err != nil {
+ return
+ }
+ if responses == nil {
+ return
+ }
+ if len(responses) == 0 {
+ b.rpc.BanPeer(peerId)
+ return
+ }
+ atomicResp.Store(responses)
+ }()
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ if len(atomicResp.Load().([]*cltypes.SignedBeaconBlock)) > 0 {
+ break Loop
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
}
+ responses := atomicResp.Load().([]*cltypes.SignedBeaconBlock)
// Import new blocks, order is forward so reverse the whole packet
for i := len(responses) - 1; i >= 0; i-- {
if b.finished {
- return
+ return nil
}
segment := responses[i]
// is this new block root equal to the expected root?
@@ -103,6 +157,7 @@ func (b *BackwardBeaconDownloader) RequestMore(ctx context.Context) {
}
// No? Reject.
if blockRoot != b.expectedRoot {
+ log.Debug("Gotten unexpected root", "got", blockRoot, "expected", b.expectedRoot)
continue
}
// Yes? then go for the callback.
@@ -115,4 +170,35 @@ func (b *BackwardBeaconDownloader) RequestMore(ctx context.Context) {
b.expectedRoot = segment.Block.ParentRoot
b.slotToDownload = segment.Block.Slot - 1 // update slot (might be inexact but whatever)
}
+ if b.neverSkip {
+ return nil
+ }
+ // try skipping if the next slot is in db
+ tx, err := b.db.BeginRw(b.ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // it will stop if we end finding a gap or if we reach the maxIterations
+ for {
+ // check if the expected root is in db
+ slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, b.expectedRoot)
+ if err != nil {
+ return err
+ }
+ if slot == nil || *slot == 0 {
+ break
+ }
+ b.slotToDownload = *slot - 1
+ if err := beacon_indicies.MarkRootCanonical(b.ctx, tx, *slot, b.expectedRoot); err != nil {
+ return err
+ }
+ b.expectedRoot, err = beacon_indicies.ReadParentBlockRoot(b.ctx, tx, b.expectedRoot)
+ if err != nil {
+ return err
+ }
+ }
+
+ return tx.Commit()
}
diff --git a/cl/phase1/network/gossip_manager.go b/cl/phase1/network/gossip_manager.go
index 5d6b9c7f11d..abc33f3d6c4 100644
--- a/cl/phase1/network/gossip_manager.go
+++ b/cl/phase1/network/gossip_manager.go
@@ -2,49 +2,91 @@ package network
import (
"context"
- "runtime"
+ "fmt"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "sync"
- "github.com/VictoriaMetrics/metrics"
- "github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/freezer"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/log/v3"
)
// Gossip manager is sending all messages to fork choice or others
type GossipManager struct {
- ctx context.Context
-
recorder freezer.Freezer
forkChoice *forkchoice.ForkChoiceStore
sentinel sentinel.SentinelClient
// configs
beaconConfig *clparams.BeaconChainConfig
genesisConfig *clparams.GenesisConfig
+
+ mu sync.RWMutex
+ subs map[int]chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]
+ totalSubs int
}
-func NewGossipReceiver(ctx context.Context, s sentinel.SentinelClient, forkChoice *forkchoice.ForkChoiceStore,
+func NewGossipReceiver(s sentinel.SentinelClient, forkChoice *forkchoice.ForkChoiceStore,
beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, recorder freezer.Freezer) *GossipManager {
return &GossipManager{
sentinel: s,
forkChoice: forkChoice,
- ctx: ctx,
beaconConfig: beaconConfig,
genesisConfig: genesisConfig,
recorder: recorder,
+ subs: make(map[int]chan *peers.PeeredObject[*cltypes.SignedBeaconBlock]),
+ }
+}
+
+// this subscribes to signed beacon blocks..... i wish this was better
+func (g *GossipManager) SubscribeSignedBeaconBlocks(ctx context.Context) <-chan *peers.PeeredObject[*cltypes.SignedBeaconBlock] {
+ // a really big limit because why not....
+ out := make(chan *peers.PeeredObject[*cltypes.SignedBeaconBlock], 512)
+ g.mu.Lock()
+ g.totalSubs++
+ idx := g.totalSubs
+ g.subs[idx] = out
+ g.mu.Unlock()
+ go func() {
+ <-ctx.Done()
+ g.mu.Lock()
+ delete(g.subs, idx)
+ g.mu.Unlock()
+ }()
+ return out
+}
+
+func operationsContract[T ssz.EncodableSSZ](ctx context.Context, g *GossipManager, l log.Ctx, data *sentinel.GossipData, version int, name string, fn func(T, bool) error) error {
+ var t T
+ object := t.Clone().(T)
+ if err := object.DecodeSSZ(common.CopyBytes(data.Data), version); err != nil {
+ g.sentinel.BanPeer(ctx, data.Peer)
+ l["at"] = fmt.Sprintf("decoding %s", name)
+ return err
}
+ if err := fn(object /*test=*/, false); err != nil {
+ l["at"] = fmt.Sprintf("verify %s", name)
+ return err
+ }
+ if _, err := g.sentinel.PublishGossip(ctx, data); err != nil {
+ log.Debug("failed publish gossip", "err", err)
+ }
+ return nil
}
-func (g *GossipManager) onRecv(data *sentinel.GossipData, l log.Ctx) error {
+func (g *GossipManager) onRecv(ctx context.Context, data *sentinel.GossipData, l log.Ctx) (err error) {
+ defer func() {
+ r := recover()
+ if r != nil {
+ err = fmt.Errorf("%v", r)
+ }
+ }()
currentEpoch := utils.GetCurrentEpoch(g.genesisConfig.GenesisTime, g.beaconConfig.SecondsPerSlot, g.beaconConfig.SlotsPerEpoch)
version := g.beaconConfig.GetCurrentStateVersion(currentEpoch)
@@ -56,9 +98,9 @@ func (g *GossipManager) onRecv(data *sentinel.GossipData, l log.Ctx) error {
var object ssz.Unmarshaler
switch data.Type {
case sentinel.GossipType_BeaconBlockGossipType:
- object = &cltypes.SignedBeaconBlock{}
+ object = cltypes.NewSignedBeaconBlock(g.beaconConfig)
if err := object.DecodeSSZ(common.CopyBytes(data.Data), int(version)); err != nil {
- g.sentinel.BanPeer(g.ctx, data.Peer)
+ g.sentinel.BanPeer(ctx, data.Peer)
l["at"] = "decoding block"
return err
}
@@ -71,119 +113,57 @@ func (g *GossipManager) onRecv(data *sentinel.GossipData, l log.Ctx) error {
return nil
}
if block.Block.Slot == currentSlotByTime {
- if _, err := g.sentinel.PublishGossip(g.ctx, data); err != nil {
+ if _, err := g.sentinel.PublishGossip(ctx, data); err != nil {
log.Debug("failed publish gossip", "err", err)
}
}
- count, err := g.sentinel.GetPeers(g.ctx, &sentinel.EmptyMessage{})
+ count, err := g.sentinel.GetPeers(ctx, &sentinel.EmptyMessage{})
if err != nil {
l["at"] = "sentinel peer count"
return err
}
- var m runtime.MemStats
- dbg.ReadMemStats(&m)
log.Debug("Received block via gossip",
"peers", count.Amount,
"slot", block.Block.Slot,
- "alloc/sys", libcommon.ByteCount(m.Alloc)+"/"+libcommon.ByteCount(m.Sys),
- "numGC", m.NumGC,
)
if err := freezer.PutObjectSSZIntoFreezer("signedBeaconBlock", "caplin_core", block.Block.Slot, block, g.recorder); err != nil {
return err
}
- peers := metrics.GetOrCreateGauge("caplin_peer_count", func() float64 {
- return float64(count.Amount)
- })
-
- peers.Get()
-
- if err := g.forkChoice.OnBlock(block, true, true); err != nil {
- // if we are within a quarter of an epoch within chain tip we ban it
- if currentSlotByTime < g.forkChoice.HighestSeen()+(g.beaconConfig.SlotsPerEpoch/4) {
- g.sentinel.BanPeer(g.ctx, data.Peer)
- }
- l["at"] = "block process"
- return err
- }
- block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool {
- if err = g.forkChoice.OnAttestation(a, true); err != nil {
- return false
- }
- return true
- })
- if err != nil {
- l["at"] = "attestation process"
- return err
- }
- // Now check the head
- headRoot, headSlot, err := g.forkChoice.GetHead()
- if err != nil {
- l["slot"] = block.Block.Slot
- l["at"] = "fetch head data"
- return err
- }
- // Do forkchoice if possible
- if g.forkChoice.Engine() != nil {
- finalizedCheckpoint := g.forkChoice.FinalizedCheckpoint()
- log.Info("Caplin is sending forkchoice")
- // Run forkchoice
- if err := g.forkChoice.Engine().ForkChoiceUpdate(
- g.forkChoice.GetEth1Hash(finalizedCheckpoint.BlockRoot()),
- g.forkChoice.GetEth1Hash(headRoot),
- ); err != nil {
- log.Warn("Could not set forkchoice", "err", err)
- l["at"] = "sending forkchoice"
- return err
+ g.mu.RLock()
+ for _, v := range g.subs {
+ select {
+ case v <- &peers.PeeredObject[*cltypes.SignedBeaconBlock]{Data: block, Peer: data.Peer.Pid}:
+ default:
}
}
- // Log final result
- log.Debug("New gossip block imported",
- "slot", block.Block.Slot,
- "head", headSlot,
- "headRoot", headRoot,
- )
+ g.mu.RUnlock()
+
case sentinel.GossipType_VoluntaryExitGossipType:
- object = &cltypes.SignedVoluntaryExit{}
- if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
- g.sentinel.BanPeer(g.ctx, data.Peer)
- l["at"] = "decode exit"
+ if err := operationsContract[*cltypes.SignedVoluntaryExit](ctx, g, l, data, int(version), "voluntary exit", g.forkChoice.OnVoluntaryExit); err != nil {
return err
}
case sentinel.GossipType_ProposerSlashingGossipType:
- object = &cltypes.ProposerSlashing{}
- if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
- l["at"] = "decode proposer slash"
- g.sentinel.BanPeer(g.ctx, data.Peer)
+ if err := operationsContract[*cltypes.ProposerSlashing](ctx, g, l, data, int(version), "proposer slashing", g.forkChoice.OnProposerSlashing); err != nil {
return err
}
case sentinel.GossipType_AttesterSlashingGossipType:
- object = &cltypes.AttesterSlashing{}
- if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
- l["at"] = "decode attester slash"
- g.sentinel.BanPeer(g.ctx, data.Peer)
- return err
- }
- if err := g.forkChoice.OnAttesterSlashing(object.(*cltypes.AttesterSlashing)); err != nil {
- l["at"] = "on attester slash"
+ if err := operationsContract[*cltypes.AttesterSlashing](ctx, g, l, data, int(version), "attester slashing", g.forkChoice.OnAttesterSlashing); err != nil {
return err
}
- case sentinel.GossipType_AggregateAndProofGossipType:
- object = &cltypes.SignedAggregateAndProof{}
- if err := object.DecodeSSZ(data.Data, int(version)); err != nil {
- l["at"] = "decoding proof"
- g.sentinel.BanPeer(g.ctx, data.Peer)
+ case sentinel.GossipType_BlsToExecutionChangeGossipType:
+ if err := operationsContract[*cltypes.SignedBLSToExecutionChange](ctx, g, l, data, int(version), "bls to execution change", g.forkChoice.OnBlsToExecutionChange); err != nil {
return err
}
}
return nil
}
-func (g *GossipManager) Start() {
- subscription, err := g.sentinel.SubscribeGossip(g.ctx, &sentinel.EmptyMessage{})
+func (g *GossipManager) Start(ctx context.Context) {
+ subscription, err := g.sentinel.SubscribeGossip(ctx, &sentinel.EmptyMessage{})
if err != nil {
return
}
@@ -198,7 +178,7 @@ func (g *GossipManager) Start() {
for k := range l {
delete(l, k)
}
- err = g.onRecv(data, l)
+ err = g.onRecv(ctx, data, l)
if err != nil {
l["err"] = err
log.Debug("[Beacon Gossip] Recoverable Error", l)
diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go
new file mode 100644
index 00000000000..4d3f7f188cc
--- /dev/null
+++ b/cl/phase1/stages/clstages.go
@@ -0,0 +1,635 @@
+package stages
+
+import (
+ "context"
+ "errors"
+ "runtime"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/cl/antiquary"
+ "github.com/ledgerwatch/erigon/cl/beacon/synced_data"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/clstages"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/persistence/db_config"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+
+ network2 "github.com/ledgerwatch/erigon/cl/phase1/network"
+ "github.com/ledgerwatch/erigon/cl/rpc"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type Cfg struct {
+ rpc *rpc.BeaconRpcP2P
+ genesisCfg *clparams.GenesisConfig
+ beaconCfg *clparams.BeaconChainConfig
+ executionClient execution_client.ExecutionEngine
+ state *state.CachingBeaconState
+ gossipManager *network2.GossipManager
+ forkChoice *forkchoice.ForkChoiceStore
+ beaconDB persistence.BeaconChainDatabase
+ indiciesDB kv.RwDB
+ tmpdir string
+ dbConfig db_config.DatabaseConfiguration
+ sn *freezeblocks.CaplinSnapshots
+ antiquary *antiquary.Antiquary
+ syncedData *synced_data.SyncedDataManager
+
+ hasDownloaded, backfilling bool
+}
+
+type Args struct {
+ peers uint64
+
+ targetEpoch, seenEpoch uint64
+ targetSlot, seenSlot uint64
+
+ hasDownloaded bool
+}
+
+func ClStagesCfg(
+ rpc *rpc.BeaconRpcP2P,
+ antiquary *antiquary.Antiquary,
+ genesisCfg *clparams.GenesisConfig,
+ beaconCfg *clparams.BeaconChainConfig,
+ state *state.CachingBeaconState,
+ executionClient execution_client.ExecutionEngine,
+ gossipManager *network2.GossipManager,
+ forkChoice *forkchoice.ForkChoiceStore,
+ beaconDB persistence.BeaconChainDatabase,
+ indiciesDB kv.RwDB,
+ sn *freezeblocks.CaplinSnapshots,
+ tmpdir string,
+ dbConfig db_config.DatabaseConfiguration,
+ backfilling bool,
+ syncedData *synced_data.SyncedDataManager,
+) *Cfg {
+ return &Cfg{
+ rpc: rpc,
+ antiquary: antiquary,
+ genesisCfg: genesisCfg,
+ beaconCfg: beaconCfg,
+ state: state,
+ executionClient: executionClient,
+ gossipManager: gossipManager,
+ forkChoice: forkChoice,
+ tmpdir: tmpdir,
+ beaconDB: beaconDB,
+ indiciesDB: indiciesDB,
+ dbConfig: dbConfig,
+ sn: sn,
+ backfilling: backfilling,
+ syncedData: syncedData,
+ }
+}
+
+type StageName = string
+
+const (
+ WaitForPeers StageName = "WaitForPeers"
+ CatchUpEpochs StageName = "CatchUpEpochs"
+ CatchUpBlocks StageName = "CatchUpBlocks"
+ ForkChoice StageName = "ForkChoice"
+ ListenForForks StageName = "ListenForForks"
+ CleanupAndPruning StageName = "CleanupAndPruning"
+ SleepForSlot StageName = "SleepForSlot"
+ DownloadHistoricalBlocks StageName = "DownloadHistoricalBlocks"
+)
+
+const (
+ minPeersForDownload = uint64(4)
+)
+
+func MetaCatchingUp(args Args) StageName {
+ if args.peers < minPeersForDownload {
+ return WaitForPeers
+ }
+ if !args.hasDownloaded {
+ return DownloadHistoricalBlocks
+ }
+ if args.seenEpoch < args.targetEpoch {
+ return CatchUpEpochs
+ }
+ if args.seenSlot < args.targetSlot {
+ return CatchUpBlocks
+ }
+
+ return ""
+}
+
+/*
+
+this graph describes the state transitions for cl
+
+digraph {
+ compound=true;
+ subgraph cluster_0 {
+ label="syncing";
+ WaitForPeers;
+ CatchUpBlocks;
+ CatchUpEpochs;
+ }
+
+ subgraph cluster_3 {
+ label="if behind (transition function)"
+ MetaCatchingUp;
+ }
+
+ subgraph cluster_1 {
+ label="head";
+ ForkChoice; CleanupAndPruning; ListenForForks; SleepForSlot;
+ }
+
+ MetaCatchingUp -> WaitForPeers
+ MetaCatchingUp -> CatchUpEpochs
+ MetaCatchingUp -> CatchUpBlocks
+
+ WaitForPeers -> MetaCatchingUp[lhead=cluster_3]
+ CatchUpEpochs -> MetaCatchingUp[lhead=cluster_3]
+ CatchUpBlocks -> MetaCatchingUp[lhead=cluster_3]
+ CleanupAndPruning -> MetaCatchingUp[lhead=cluster_3]
+ ListenForForks -> MetaCatchingUp[lhead=cluster_3]
+ ForkChoice -> MetaCatchingUp[lhead=cluster_3]
+
+ CatchUpBlocks -> ForkChoice
+ ForkChoice -> ListenForForks
+
+ SleepForSlot -> WaitForPeers
+
+ ListenForForks -> ForkChoice
+ ListenForForks -> SleepForSlot
+ ListenForForks -> CleanupAndPruning
+ CleanupAndPruning -> SleepForSlot
+}
+*/
+
+// ConsensusClStages creates a stage loop container to be used to run caplin
+func ConsensusClStages(ctx context.Context,
+ cfg *Cfg,
+) *clstages.StageGraph[*Cfg, Args] {
+
+ rpcSource := persistence.NewBeaconRpcSource(cfg.rpc)
+ gossipSource := persistence.NewGossipSource(ctx, cfg.gossipManager)
+ processBlock := func(tx kv.RwTx, block *cltypes.SignedBeaconBlock, newPayload, fullValidation bool) error {
+ if err := cfg.forkChoice.OnBlock(block, newPayload, fullValidation); err != nil {
+ log.Warn("fail to process block", "reason", err, "slot", block.Block.Slot)
+ return err
+ }
+ if err := beacon_indicies.WriteHighestFinalized(tx, cfg.forkChoice.FinalizedSlot()); err != nil {
+ return err
+ }
+ // Write block to database optimistically if we are very behind.
+ return cfg.beaconDB.WriteBlock(ctx, tx, block, false)
+ }
+
+ // TODO: this is an ugly hack, but it works! Basically, we want shared state in the clstages.
+ // Probably the correct long term solution is to create a third generic parameter that defines shared state
+ // but for now, all it would have are the two gossip sources and the forkChoicesSinceReorg, so i don't think its worth it (yet).
+ shouldForkChoiceSinceReorg := false
+
+ // clstages run in a single thread - so we don't need to worry about any synchronization.
+ return &clstages.StageGraph[*Cfg, Args]{
+ // the ArgsFunc is run after every stage. It is passed into the transition function, and the same args are passed into the next stage.
+ ArgsFunc: func(ctx context.Context, cfg *Cfg) (args Args) {
+ var err error
+ args.peers, err = cfg.rpc.Peers()
+ if err != nil {
+ log.Error("failed to get sentinel peer count", "err", err)
+ args.peers = 0
+ }
+ args.hasDownloaded = cfg.hasDownloaded
+ args.seenSlot = cfg.forkChoice.HighestSeen()
+ args.seenEpoch = args.seenSlot / cfg.beaconCfg.SlotsPerEpoch
+ args.targetSlot = utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot)
+ // Note that the target epoch is always one behind. this is because we are always behind in the current epoch, so it would not be very useful
+ args.targetEpoch = utils.GetCurrentEpoch(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot, cfg.beaconCfg.SlotsPerEpoch) - 1
+ return
+ },
+ Stages: map[string]clstages.Stage[*Cfg, Args]{
+ WaitForPeers: {
+ Description: `wait for enough peers. This is also a safe stage to go to when unsure of what stage to use`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return CatchUpBlocks
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ peersCount, err := cfg.rpc.Peers()
+ if err != nil {
+ return nil
+ }
+ waitWhenNotEnoughPeers := 3 * time.Second
+ for {
+ if peersCount >= minPeersForDownload {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ logger.Info("[Caplin] Waiting For Peers", "have", peersCount, "needed", minPeersForDownload, "retryIn", waitWhenNotEnoughPeers)
+ time.Sleep(waitWhenNotEnoughPeers)
+ peersCount, err = cfg.rpc.Peers()
+ if err != nil {
+ peersCount = 0
+ }
+ }
+ return nil
+ },
+ },
+ DownloadHistoricalBlocks: {
+ Description: "Download historical blocks",
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return CatchUpBlocks
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ cfg.hasDownloaded = true
+ startingRoot, err := cfg.state.BlockRoot()
+ if err != nil {
+ return err
+ }
+ // This stage is special so use context.Background() TODO(Giulio2002): make the context be passed in
+ startingSlot := cfg.state.LatestBlockHeader().Slot
+ downloader := network2.NewBackwardBeaconDownloader(context.Background(), cfg.rpc, cfg.indiciesDB)
+
+ if err := SpawnStageHistoryDownload(StageHistoryReconstruction(downloader, cfg.antiquary, cfg.sn, cfg.beaconDB, cfg.indiciesDB, cfg.executionClient, cfg.genesisCfg, cfg.beaconCfg, cfg.backfilling, false, startingRoot, startingSlot, cfg.tmpdir, logger), context.Background(), logger); err != nil {
+ cfg.hasDownloaded = false
+ return err
+ }
+ return nil
+ },
+ },
+ CatchUpEpochs: {
+ Description: `if we are 1 or more epochs behind, we download in parallel by epoch`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return CatchUpBlocks
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ logger.Info("[Caplin] Downloading epochs from reqresp", "from", args.seenEpoch, "to", args.targetEpoch)
+ currentEpoch := args.seenEpoch
+ blockBatch := []*types.Block{}
+ shouldInsert := cfg.executionClient != nil && cfg.executionClient.SupportInsertion()
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ MainLoop:
+ for currentEpoch <= args.targetEpoch+1 {
+ startBlock := currentEpoch * cfg.beaconCfg.SlotsPerEpoch
+ blocks, err := rpcSource.GetRange(ctx, tx, startBlock, cfg.beaconCfg.SlotsPerEpoch)
+ if err != nil {
+ return err
+ }
+ // If we got an empty packet ban the peer
+ if len(blocks.Data) == 0 {
+ cfg.rpc.BanPeer(blocks.Peer)
+ log.Debug("no data received from peer in epoch download")
+ continue MainLoop
+ }
+
+ logger.Info("[Caplin] Epoch downloaded", "epoch", currentEpoch)
+ for _, block := range blocks.Data {
+
+ if shouldInsert && block.Version() >= clparams.BellatrixVersion {
+ executionPayload := block.Block.Body.ExecutionPayload
+ body := executionPayload.Body()
+ txs, err := types.DecodeTransactions(body.Transactions)
+ if err != nil {
+ log.Warn("bad blocks segment received", "err", err)
+ cfg.rpc.BanPeer(blocks.Peer)
+ currentEpoch = utils.Max64(args.seenEpoch, currentEpoch-1)
+ continue MainLoop
+ }
+ header, err := executionPayload.RlpHeader()
+ if err != nil {
+ log.Warn("bad blocks segment received", "err", err)
+ cfg.rpc.BanPeer(blocks.Peer)
+ currentEpoch = utils.Max64(args.seenEpoch, currentEpoch-1)
+ continue MainLoop
+ }
+ blockBatch = append(blockBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals))
+ }
+ if err := processBlock(tx, block, false, true); err != nil {
+ log.Warn("bad blocks segment received", "err", err)
+ cfg.rpc.BanPeer(blocks.Peer)
+ currentEpoch = utils.Max64(args.seenEpoch, currentEpoch-1)
+ continue MainLoop
+ }
+ }
+ if len(blockBatch) > 0 {
+ if err := cfg.executionClient.InsertBlocks(blockBatch); err != nil {
+ log.Warn("bad blocks segment received", "err", err)
+ currentEpoch = utils.Max64(args.seenEpoch, currentEpoch-1)
+ blockBatch = blockBatch[:0]
+ continue MainLoop
+ }
+ blockBatch = blockBatch[:0]
+ }
+ currentEpoch++
+ }
+ return tx.Commit()
+ },
+ },
+ CatchUpBlocks: {
+ Description: `if we are within the epoch but not at head, we run catchupblocks`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return ForkChoice
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ totalRequest := args.targetSlot - args.seenSlot
+ logger.Debug("waiting for blocks...",
+ "seenSlot", args.seenSlot,
+ "targetSlot", args.targetSlot,
+ "requestedSlots", totalRequest,
+ )
+ respCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock])
+ errCh := make(chan error)
+ sources := []persistence.BlockSource{gossipSource}
+
+ // if we are more than one block behind, we request the rpc source as well
+ if totalRequest > 2 {
+ sources = append(sources, rpcSource)
+ }
+ // 15 seconds is a good timeout for this
+ ctx, cn := context.WithTimeout(ctx, 15*time.Second)
+ defer cn()
+
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // we go ask all the sources and see who gets back to us first. whoever does is the winner!!
+ for _, v := range sources {
+ sourceFunc := v.GetRange
+ go func() {
+ blocks, err := sourceFunc(ctx, tx, args.seenSlot+1, totalRequest)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ respCh <- blocks
+ }()
+ }
+ logTimer := time.NewTicker(30 * time.Second)
+ defer logTimer.Stop()
+ select {
+ case err := <-errCh:
+ return err
+ case blocks := <-respCh:
+ for _, block := range blocks.Data {
+ if err := processBlock(tx, block, true, true); err != nil {
+ return err
+ }
+ }
+ case <-logTimer.C:
+ logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenEpoch, "to", args.targetSlot)
+ }
+ return tx.Commit()
+ },
+ },
+ ForkChoice: {
+ Description: `fork choice stage. We will send all fork choise things here
+ also, we will wait up to delay seconds to deal with attestations + side forks`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return ListenForForks
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+
+ // TODO: we need to get the last run block in order to process attestations here
+ ////////block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool {
+ //////// if err = g.forkChoice.OnAttestation(a, true); err != nil {
+ //////// return false
+ //////// }
+ //////// return true
+ ////////})
+ ////////if err != nil {
+ //////// return err
+ ////////}
+
+ // Now check the head
+ headRoot, headSlot, err := cfg.forkChoice.GetHead()
+ if err != nil {
+ return err
+ }
+
+ // Do forkchoice if possible
+ if cfg.forkChoice.Engine() != nil {
+ finalizedCheckpoint := cfg.forkChoice.FinalizedCheckpoint()
+ logger.Debug("Caplin is sending forkchoice")
+ // Run forkchoice
+ if err := cfg.forkChoice.Engine().ForkChoiceUpdate(
+ cfg.forkChoice.GetEth1Hash(finalizedCheckpoint.BlockRoot()),
+ cfg.forkChoice.GetEth1Hash(headRoot),
+ ); err != nil {
+ logger.Warn("Could not set forkchoice", "err", err)
+ return err
+ }
+ }
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ type canonicalEntry struct {
+ slot uint64
+ root common.Hash
+ }
+
+ currentRoot := headRoot
+ currentSlot := headSlot
+ currentCanonical, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot)
+ if err != nil {
+ return err
+ }
+ reconnectionRoots := make([]canonicalEntry, 0, 1)
+
+ for currentRoot != currentCanonical {
+ var newFoundSlot *uint64
+
+ if currentRoot, err = beacon_indicies.ReadParentBlockRoot(ctx, tx, currentRoot); err != nil {
+ return err
+ }
+ if newFoundSlot, err = beacon_indicies.ReadBlockSlotByBlockRoot(tx, currentRoot); err != nil {
+ return err
+ }
+ if newFoundSlot == nil {
+ break
+ }
+ currentSlot = *newFoundSlot
+ currentCanonical, err = beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot)
+ if err != nil {
+ return err
+ }
+ reconnectionRoots = append(reconnectionRoots, canonicalEntry{currentSlot, currentRoot})
+ }
+ if err := beacon_indicies.TruncateCanonicalChain(ctx, tx, currentSlot); err != nil {
+ return err
+ }
+ for i := len(reconnectionRoots) - 1; i >= 0; i-- {
+ if err := beacon_indicies.MarkRootCanonical(ctx, tx, reconnectionRoots[i].slot, reconnectionRoots[i].root); err != nil {
+ return err
+ }
+ }
+ if err := beacon_indicies.MarkRootCanonical(ctx, tx, headSlot, headRoot); err != nil {
+ return err
+ }
+
+ // Increment validator set
+ headState, err := cfg.forkChoice.GetStateAtBlockRoot(headRoot, false)
+ if err != nil {
+ return err
+ }
+ if err := cfg.syncedData.OnHeadState(headState); err != nil {
+ return err
+ }
+ start := time.Now()
+ // Incement some stuff here
+ preverifiedValidators := cfg.forkChoice.PreverifiedValidator(headState.FinalizedCheckpoint().BlockRoot())
+ preverifiedHistoricalSummary := cfg.forkChoice.PreverifiedHistoricalSummaries(headState.FinalizedCheckpoint().BlockRoot())
+ preverifiedHistoricalRoots := cfg.forkChoice.PreverifiedHistoricalRoots(headState.FinalizedCheckpoint().BlockRoot())
+ if err := state_accessors.IncrementPublicKeyTable(tx, headState, preverifiedValidators); err != nil {
+ return err
+ }
+ if err := state_accessors.IncrementHistoricalSummariesTable(tx, headState, preverifiedHistoricalSummary); err != nil {
+ return err
+ }
+ if err := state_accessors.IncrementHistoricalRootsTable(tx, headState, preverifiedHistoricalRoots); err != nil {
+ return err
+ }
+ log.Debug("Incremented state history", "elapsed", time.Since(start), "preverifiedValidators", preverifiedValidators)
+
+ var m runtime.MemStats
+ dbg.ReadMemStats(&m)
+ logger.Debug("Imported chain segment",
+ "hash", headRoot, "slot", headSlot,
+ "alloc", common.ByteCount(m.Alloc),
+ "sys", common.ByteCount(m.Sys))
+ return tx.Commit()
+ },
+ },
+ ListenForForks: {
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ defer func() {
+ shouldForkChoiceSinceReorg = false
+ }()
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ if shouldForkChoiceSinceReorg {
+ return ForkChoice
+ }
+ return CleanupAndPruning
+
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ slotTime := utils.GetSlotTime(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot, args.targetSlot).Add(
+ time.Duration(cfg.beaconCfg.SecondsPerSlot) * (time.Second / 3),
+ )
+ waitDur := slotTime.Sub(time.Now())
+ ctx, cn := context.WithTimeout(ctx, waitDur)
+ defer cn()
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // try to get the current block
+ blocks, err := gossipSource.GetRange(ctx, tx, args.seenSlot, 1)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil
+ }
+ return err
+ }
+
+ for _, block := range blocks.Data {
+ err := processBlock(tx, block, true, true)
+ if err != nil {
+ // its okay if block processing fails
+ logger.Warn("extra block failed validation", "err", err)
+ return nil
+ }
+ shouldForkChoiceSinceReorg = true
+ logger.Debug("extra block received", "slot", args.seenSlot)
+ }
+ return tx.Commit()
+ },
+ },
+ CleanupAndPruning: {
+ Description: `cleanup and pruning is done here`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ if x := MetaCatchingUp(args); x != "" {
+ return x
+ }
+ return SleepForSlot
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ // clean up some old ranges
+ err = gossipSource.PurgeRange(ctx, tx, 1, args.seenSlot-cfg.beaconCfg.SlotsPerEpoch*16)
+ if err != nil {
+ return err
+ }
+ // TODO(Giulio2002): schedule snapshots retirement if needed.
+ if !cfg.backfilling {
+ if err := cfg.beaconDB.PurgeRange(ctx, tx, 1, cfg.forkChoice.HighestSeen()-100_000); err != nil {
+ return err
+ }
+ if err := beacon_indicies.PruneBlockRoots(ctx, tx, 0, cfg.forkChoice.HighestSeen()-100_000); err != nil {
+ return err
+ }
+ }
+
+ return tx.Commit()
+ },
+ },
+ SleepForSlot: {
+ Description: `sleep until the next slot`,
+ TransitionFunc: func(cfg *Cfg, args Args, err error) string {
+ return WaitForPeers
+ },
+ ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
+ nextSlot := args.seenSlot + 1
+ nextSlotTime := utils.GetSlotTime(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot, nextSlot)
+ nextSlotDur := nextSlotTime.Sub(time.Now())
+ logger.Debug("sleeping until next slot", "slot", nextSlot, "time", nextSlotTime, "dur", nextSlotDur)
+ time.Sleep(nextSlotDur)
+ return nil
+ },
+ },
+ },
+ }
+}
diff --git a/cl/phase1/stages/stage_fork_choice.go b/cl/phase1/stages/stage_fork_choice.go
deleted file mode 100644
index 8f9d2bfd385..00000000000
--- a/cl/phase1/stages/stage_fork_choice.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package stages
-
-import (
- "context"
- "runtime"
- "time"
-
- "github.com/ledgerwatch/erigon/cl/freezer"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
- "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
- network2 "github.com/ledgerwatch/erigon/cl/phase1/network"
-
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/dbg"
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/log/v3"
-
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/eth/stagedsync"
-)
-
-type StageForkChoiceCfg struct {
- db kv.RwDB
- downloader *network2.ForwardBeaconDownloader
- genesisCfg *clparams.GenesisConfig
- beaconCfg *clparams.BeaconChainConfig
- executionClient *execution_client.ExecutionClient
- state *state.CachingBeaconState
- gossipManager *network2.GossipManager
- forkChoice *forkchoice.ForkChoiceStore
- caplinFreezer freezer.Freezer
-}
-
-const minPeersForDownload = 2
-const minPeersForSyncStart = 4
-
-var (
- freezerNameSpacePrefix = ""
- blockObjectName = "singedBeaconBlock"
- stateObjectName = "beaconState"
- gossipAction = "gossip"
-)
-
-func StageForkChoice(db kv.RwDB, downloader *network2.ForwardBeaconDownloader, genesisCfg *clparams.GenesisConfig,
- beaconCfg *clparams.BeaconChainConfig, state *state.CachingBeaconState, executionClient *execution_client.ExecutionClient, gossipManager *network2.GossipManager,
- forkChoice *forkchoice.ForkChoiceStore, caplinFreezer freezer.Freezer) StageForkChoiceCfg {
- return StageForkChoiceCfg{
- db: db,
- downloader: downloader,
- genesisCfg: genesisCfg,
- beaconCfg: beaconCfg,
- state: state,
- executionClient: executionClient,
- gossipManager: gossipManager,
- forkChoice: forkChoice,
- caplinFreezer: caplinFreezer,
- }
-}
-
-// StageForkChoice enables the fork choice state. it is never supposed to exit this stage once it gets in.
-func SpawnStageForkChoice(cfg StageForkChoiceCfg, s *stagedsync.StageState, tx kv.RwTx, ctx context.Context) error {
- /*useExternalTx := tx != nil
- var err error
- if !useExternalTx {
- tx, err = cfg.db.BeginRw(ctx)
- if err != nil {
- return err
- }
- defer tx.Rollback()
- }*/
- // Start download service
- log.Info("Started Ethereum 2.0 Gossip Service")
- // We start gossip management.
- go cfg.gossipManager.Start()
- go onTickService(ctx, cfg)
- go func() {
- logIntervalPeers := time.NewTicker(1 * time.Minute)
- for {
- select {
- case <-logIntervalPeers.C:
- if peerCount, err := cfg.downloader.Peers(); err == nil {
- log.Info("[Caplin] P2P", "peers", peerCount)
-
- }
- case <-ctx.Done():
- return
- }
-
- }
- }()
- startDownloadService(s, cfg)
- /*if !useExternalTx {
- if err = tx.Commit(); err != nil {
- return err
- }
- }*/
- return nil
-}
-
-func startDownloadService(s *stagedsync.StageState, cfg StageForkChoiceCfg) {
- cfg.downloader.SetHighestProcessedRoot(libcommon.Hash{})
- cfg.downloader.SetHighestProcessedSlot(cfg.state.Slot())
- cfg.downloader.SetProcessFunction(func(highestSlotProcessed uint64, _ libcommon.Hash, newBlocks []*cltypes.SignedBeaconBlock) (uint64, libcommon.Hash, error) {
- for _, block := range newBlocks {
- if err := freezer.PutObjectSSZIntoFreezer("signedBeaconBlock", "caplin_core", block.Block.Slot, block, cfg.caplinFreezer); err != nil {
- return highestSlotProcessed, libcommon.Hash{}, err
- }
-
- sendForckchoice :=
- utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot) == block.Block.Slot
- if err := cfg.forkChoice.OnBlock(block, sendForckchoice, true); err != nil {
- log.Warn("Could not download block", "reason", err, "slot", block.Block.Slot)
- return highestSlotProcessed, libcommon.Hash{}, err
- }
- highestSlotProcessed = utils.Max64(block.Block.Slot, highestSlotProcessed)
- if sendForckchoice {
- var m runtime.MemStats
- dbg.ReadMemStats(&m)
- // Import the head
- headRoot, headSlot, err := cfg.forkChoice.GetHead()
-
- log.Debug("New block imported",
- "slot", block.Block.Slot,
- "head", headSlot,
- "headRoot", headRoot,
- "alloc/sys", libcommon.ByteCount(m.Alloc)+"/"+libcommon.ByteCount(m.Sys),
- "numGC", m.NumGC,
- )
- if err != nil {
- log.Debug("Could not fetch head data",
- "slot", block.Block.Slot,
- "err", err)
- continue
- }
-
- // Do forkchoice if possible
- if cfg.forkChoice.Engine() != nil {
- finalizedCheckpoint := cfg.forkChoice.FinalizedCheckpoint()
- log.Info("Caplin is sending forkchoice")
- // Run forkchoice
- if err := cfg.forkChoice.Engine().ForkChoiceUpdate(
- cfg.forkChoice.GetEth1Hash(finalizedCheckpoint.BlockRoot()),
- cfg.forkChoice.GetEth1Hash(headRoot),
- ); err != nil {
- log.Warn("Could not set forkchoice", "err", err)
- }
- }
- }
- }
- // Checks done, update all internals accordingly
- return highestSlotProcessed, libcommon.Hash{}, nil
- })
- maxBlockBehindBeforeDownload := int64(32)
- overtimeMargin := uint64(6) // how much time has passed before trying download the next block in seconds
- ctx := context.TODO()
- isDownloading := false
-MainLoop:
- for {
- targetSlot := utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot)
- overtime := utils.GetCurrentSlotOverTime(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot)
- seenSlot := cfg.forkChoice.HighestSeen()
- if targetSlot == seenSlot || (targetSlot == seenSlot+1 && overtime < overtimeMargin) {
- time.Sleep(time.Second)
- continue
- }
- peersCount, err := cfg.downloader.Peers()
- if err != nil {
- continue
- }
- waitWhenNotEnoughPeers := 5 * time.Second
- if !isDownloading {
- isDownloading = peersCount >= minPeersForSyncStart
- }
- if isDownloading {
- isDownloading = peersCount >= minPeersForDownload
- if !isDownloading {
- log.Debug("[Caplin] Lost too many peers", "have", peersCount, "needed", minPeersForDownload)
- }
- }
- if !isDownloading {
- log.Debug("[Caplin] Waiting For Peers", "have", peersCount, "needed", minPeersForSyncStart, "retryIn", waitWhenNotEnoughPeers)
- time.Sleep(waitWhenNotEnoughPeers)
- continue
- }
- highestSeen := cfg.forkChoice.HighestSeen()
- startDownloadSlot := highestSeen - uint64(maxBlockBehindBeforeDownload)
- // Detect underflow
- if startDownloadSlot > highestSeen {
- startDownloadSlot = 0
- }
-
- cfg.downloader.SetHighestProcessedRoot(libcommon.Hash{})
- cfg.downloader.SetHighestProcessedSlot(
- utils.Max64(startDownloadSlot, cfg.forkChoice.FinalizedSlot()))
-
- // Wait small time
- log.Debug("Caplin may have missed some slots, started downloading chain")
- // Process blocks until we reach our target
- for highestProcessed := cfg.downloader.GetHighestProcessedSlot(); utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot) > highestProcessed; highestProcessed = cfg.downloader.GetHighestProcessedSlot() {
- ctx, cancel := context.WithTimeout(ctx, 12*time.Second)
- cfg.downloader.RequestMore(ctx)
- cancel()
- peersCount, err = cfg.downloader.Peers()
- if err != nil {
- break
- }
- if utils.GetCurrentSlot(cfg.genesisCfg.GenesisTime, cfg.beaconCfg.SecondsPerSlot) == cfg.forkChoice.HighestSeen() {
- break
- }
- if peersCount < minPeersForDownload {
- continue MainLoop
- }
- }
- log.Debug("Finished catching up", "slot", cfg.downloader.GetHighestProcessedSlot())
- }
-}
-
-func onTickService(ctx context.Context, cfg StageForkChoiceCfg) {
- tickInterval := time.NewTicker(50 * time.Millisecond)
- for {
- select {
- case <-tickInterval.C:
- cfg.forkChoice.OnTick(uint64(time.Now().Unix()))
- case <-ctx.Done():
- return
- }
- }
-}
diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go
new file mode 100644
index 00000000000..c65941b727b
--- /dev/null
+++ b/cl/phase1/stages/stage_history_download.go
@@ -0,0 +1,274 @@
+package stages
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/etl"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/cl/antiquary"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/phase1/network"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type StageHistoryReconstructionCfg struct {
+ genesisCfg *clparams.GenesisConfig
+ beaconCfg *clparams.BeaconChainConfig
+ downloader *network.BackwardBeaconDownloader
+ sn *freezeblocks.CaplinSnapshots
+ startingRoot libcommon.Hash
+ backfilling bool
+ waitForAllRoutines bool
+ startingSlot uint64
+ tmpdir string
+ db persistence.BeaconChainDatabase
+ indiciesDB kv.RwDB
+ engine execution_client.ExecutionEngine
+ antiquary *antiquary.Antiquary
+ logger log.Logger
+}
+
+const logIntervalTime = 30 * time.Second
+
+func StageHistoryReconstruction(downloader *network.BackwardBeaconDownloader, antiquary *antiquary.Antiquary, sn *freezeblocks.CaplinSnapshots, db persistence.BeaconChainDatabase, indiciesDB kv.RwDB, engine execution_client.ExecutionEngine, genesisCfg *clparams.GenesisConfig, beaconCfg *clparams.BeaconChainConfig, backfilling, waitForAllRoutines bool, startingRoot libcommon.Hash, startinSlot uint64, tmpdir string, logger log.Logger) StageHistoryReconstructionCfg {
+ return StageHistoryReconstructionCfg{
+ genesisCfg: genesisCfg,
+ beaconCfg: beaconCfg,
+ downloader: downloader,
+ startingRoot: startingRoot,
+ tmpdir: tmpdir,
+ startingSlot: startinSlot,
+ waitForAllRoutines: waitForAllRoutines,
+ logger: logger,
+ backfilling: backfilling,
+ indiciesDB: indiciesDB,
+ antiquary: antiquary,
+ db: db,
+ engine: engine,
+ sn: sn,
+ }
+}
+
+// SpawnStageBeaconsForward spawn the beacon forward stage
+func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Context, logger log.Logger) error {
+ // Wait for execution engine to be ready.
+ blockRoot := cfg.startingRoot
+ currentSlot := cfg.startingSlot
+
+ if !clparams.SupportBackfilling(cfg.beaconCfg.DepositNetworkID) {
+ cfg.backfilling = false // disable backfilling if not on a supported network
+ }
+ executionBlocksCollector := etl.NewCollector("HistoryDownload", cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
+ defer executionBlocksCollector.Close()
+ executionBlocksCollector.LogLvl(log.LvlDebug)
+ // Start the procedure
+ logger.Info("Starting downloading History", "from", currentSlot)
+ // Setup slot and block root
+ cfg.downloader.SetSlotToDownload(currentSlot)
+ cfg.downloader.SetExpectedRoot(blockRoot)
+ foundLatestEth1ValidBlock := &atomic.Bool{}
+ foundLatestEth1ValidBlock.Store(false)
+ if cfg.engine == nil || !cfg.engine.SupportInsertion() {
+ foundLatestEth1ValidBlock.Store(true) // skip this if we are not using an engine supporting direct insertion
+ }
+
+ var currEth1Progress atomic.Int64
+
+ bytesReadInTotal := atomic.Uint64{}
+ // Set up onNewBlock callback
+ cfg.downloader.SetOnNewBlock(func(blk *cltypes.SignedBeaconBlock) (finished bool, err error) {
+ tx, err := cfg.indiciesDB.BeginRw(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer tx.Rollback()
+ if blk.Version() >= clparams.BellatrixVersion {
+ currEth1Progress.Store(int64(blk.Block.Body.ExecutionPayload.BlockNumber))
+ }
+
+ destinationSlot := cfg.sn.SegmentsMax()
+ bytesReadInTotal.Add(uint64(blk.EncodingSizeSSZ()))
+
+ slot := blk.Block.Slot
+ if destinationSlot <= blk.Block.Slot {
+ if err := cfg.db.WriteBlock(ctx, tx, blk, true); err != nil {
+ return false, err
+ }
+ }
+ if !foundLatestEth1ValidBlock.Load() && blk.Version() >= clparams.BellatrixVersion {
+ payload := blk.Block.Body.ExecutionPayload
+ encodedPayload, err := payload.EncodeSSZ(nil)
+ if err != nil {
+ return false, fmt.Errorf("error encoding execution payload during download: %s", err)
+ }
+ // Use snappy compression that the temporary files do not take too much disk.
+ encodedPayload = utils.CompressSnappy(append(encodedPayload, byte(blk.Version())))
+ if err := executionBlocksCollector.Collect(dbutils.BlockBodyKey(payload.BlockNumber, payload.BlockHash), encodedPayload); err != nil {
+ return false, fmt.Errorf("error collecting execution payload during download: %s", err)
+ }
+ if currEth1Progress.Load()%100 == 0 {
+ return false, tx.Commit()
+ }
+
+ bodyChainHeader, err := cfg.engine.GetBodiesByHashes([]libcommon.Hash{payload.BlockHash})
+ if err != nil {
+ return false, fmt.Errorf("error retrieving whether execution payload is present: %s", err)
+ }
+ foundLatestEth1ValidBlock.Store(len(bodyChainHeader) > 0 || cfg.engine.FrozenBlocks() > payload.BlockNumber)
+ }
+ if blk.Version() <= clparams.AltairVersion {
+ foundLatestEth1ValidBlock.Store(true)
+ }
+
+ return foundLatestEth1ValidBlock.Load() && (!cfg.backfilling || slot <= destinationSlot), tx.Commit()
+ })
+ prevProgress := cfg.downloader.Progress()
+
+ finishCh := make(chan struct{})
+ // Start logging thread
+
+ go func() {
+ logInterval := time.NewTicker(logIntervalTime)
+ defer logInterval.Stop()
+ for {
+ select {
+ case <-logInterval.C:
+ logTime := logIntervalTime
+ // if we found the latest valid hash extend ticker to 10 times the normal amout
+ if foundLatestEth1ValidBlock.Load() {
+ logTime = 20 * logIntervalTime
+ logInterval.Reset(logTime)
+ }
+
+ if cfg.engine != nil && cfg.engine.SupportInsertion() {
+ if ready, err := cfg.engine.Ready(); !ready {
+ if err != nil {
+ log.Warn("could not log progress", "err", err)
+ }
+ continue
+ }
+
+ }
+ logArgs := []interface{}{}
+ currProgress := cfg.downloader.Progress()
+ blockProgress := float64(prevProgress - currProgress)
+ ratio := float64(logTime / time.Second)
+ speed := blockProgress / ratio
+ prevProgress = currProgress
+ peerCount, err := cfg.downloader.Peers()
+ if err != nil {
+ return
+ }
+ logArgs = append(logArgs,
+ "slot", currProgress,
+ "blockNumber", currEth1Progress.Load(),
+ "blk/sec", fmt.Sprintf("%.1f", speed),
+ "mbps/sec", fmt.Sprintf("%.4f", float64(bytesReadInTotal.Load())/(1000*1000*ratio)),
+ "peers", peerCount,
+ "snapshots", cfg.sn.SegmentsMax(),
+ "reconnected", foundLatestEth1ValidBlock.Load(),
+ )
+ bytesReadInTotal.Store(0)
+ logger.Info("Downloading History", logArgs...)
+ case <-finishCh:
+ return
+ case <-ctx.Done():
+ }
+ }
+ }()
+
+ go func() {
+ for !cfg.downloader.Finished() {
+ if err := cfg.downloader.RequestMore(ctx); err != nil {
+ log.Debug("closing backfilling routine", "err", err)
+ return
+ }
+ }
+ cfg.antiquary.NotifyBackfilled()
+ log.Info("Backfilling finished")
+
+ close(finishCh)
+ }()
+ // Lets wait for the latestValidHash to be turned on
+ for !foundLatestEth1ValidBlock.Load() || (cfg.waitForAllRoutines && !cfg.downloader.Finished()) {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(5 * time.Second):
+ }
+ }
+ cfg.downloader.SetThrottle(600 * time.Millisecond) // throttle to 0.6 second for backfilling
+ cfg.downloader.SetNeverSkip(false)
+ // If i do not give it a database, erigon lib starts to cry uncontrollably
+ db2 := memdb.New(cfg.tmpdir)
+ defer db2.Close()
+ tx2, err := db2.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx2.Rollback()
+
+ blockBatch := []*types.Block{}
+ blockBatchMaxSize := 1000
+
+ cfg.logger.Info("Ready to insert history, waiting for sync cycle to finish")
+
+ if err := executionBlocksCollector.Load(tx2, kv.Headers, func(k, vComp []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error {
+ if cfg.engine == nil || !cfg.engine.SupportInsertion() {
+ return next(k, nil, nil)
+ }
+ var err error
+ var v []byte
+ if v, err = utils.DecompressSnappy(vComp); err != nil {
+ return fmt.Errorf("error decompressing dump during collection: %s", err)
+ }
+
+ version := clparams.StateVersion(v[len(v)-1])
+ executionPayload := cltypes.NewEth1Block(version, cfg.beaconCfg)
+ if err := executionPayload.DecodeSSZ(v[:len(v)-1], int(version)); err != nil {
+ return fmt.Errorf("error decoding execution payload during collection: %s", err)
+ }
+ body := executionPayload.Body()
+ header, err := executionPayload.RlpHeader()
+ if err != nil {
+ return fmt.Errorf("error parsing rlp header during collection: %s", err)
+ }
+
+ txs, err := types.DecodeTransactions(body.Transactions)
+ if err != nil {
+ return err
+ }
+
+ block := types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals)
+ blockBatch = append(blockBatch, block)
+ if len(blockBatch) >= blockBatchMaxSize {
+ if err := cfg.engine.InsertBlocks(blockBatch); err != nil {
+ return fmt.Errorf("error inserting block during collection: %s", err)
+ }
+ blockBatch = blockBatch[:0]
+ }
+ return next(k, nil, nil)
+ }, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return err
+ }
+ if cfg.engine != nil && cfg.engine.SupportInsertion() {
+ if err := cfg.engine.InsertBlocks(blockBatch); err != nil {
+ return fmt.Errorf("error doing last block insertion during collection: %s", err)
+ }
+ }
+ return nil
+}
diff --git a/cl/phase1/stages/stage_history_reconstruction.go b/cl/phase1/stages/stage_history_reconstruction.go
deleted file mode 100644
index 9797227718c..00000000000
--- a/cl/phase1/stages/stage_history_reconstruction.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package stages
-
-import (
- "context"
- "fmt"
- "time"
-
- rawdb2 "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- execution_client2 "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
- "github.com/ledgerwatch/erigon/cl/phase1/network"
- "github.com/ledgerwatch/erigon/cl/utils"
-
- "github.com/ledgerwatch/erigon-lib/etl"
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/eth/stagedsync"
- "github.com/ledgerwatch/log/v3"
-)
-
-type StageHistoryReconstructionCfg struct {
- db kv.RwDB
- genesisCfg *clparams.GenesisConfig
- beaconCfg *clparams.BeaconChainConfig
- downloader *network.BackwardBeaconDownloader
- state *state.CachingBeaconState
- executionClient *execution_client2.ExecutionClient
- beaconDBCfg *rawdb2.BeaconDataConfig
- tmpdir string
-}
-
-const logIntervalTime = 30 * time.Second
-
-func StageHistoryReconstruction(db kv.RwDB, downloader *network.BackwardBeaconDownloader, genesisCfg *clparams.GenesisConfig, beaconCfg *clparams.BeaconChainConfig, beaconDBCfg *rawdb2.BeaconDataConfig, state *state.CachingBeaconState, tmpdir string, executionClient *execution_client2.ExecutionClient) StageHistoryReconstructionCfg {
- return StageHistoryReconstructionCfg{
- db: db,
- genesisCfg: genesisCfg,
- beaconCfg: beaconCfg,
- downloader: downloader,
- state: state,
- tmpdir: tmpdir,
- executionClient: executionClient,
- beaconDBCfg: beaconDBCfg,
- }
-}
-
-// SpawnStageBeaconsForward spawn the beacon forward stage
-func SpawnStageHistoryReconstruction(cfg StageHistoryReconstructionCfg, s *stagedsync.StageState, tx kv.RwTx, ctx context.Context, logger log.Logger) error {
- // This stage must be done only once.
- progress := s.BlockNumber
- if progress != 0 {
- return nil
- }
-
- useExternalTx := tx != nil
- var err error
- if !useExternalTx {
- tx, err = cfg.db.BeginRw(ctx)
- if err != nil {
- return err
- }
- defer tx.Rollback()
- }
- blockRoot, err := cfg.state.BlockRoot()
- if err != nil {
- return err
- }
- destinationSlot := uint64(0)
- currentSlot := cfg.state.LatestBlockHeader().Slot
- if currentSlot > cfg.beaconDBCfg.BackFillingAmount {
- destinationSlot = currentSlot - cfg.beaconDBCfg.BackFillingAmount
- }
-
- // ETL collectors for attestations + beacon blocks
- beaconBlocksCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
- defer beaconBlocksCollector.Close()
- attestationsCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
- defer attestationsCollector.Close()
- executionPayloadsCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
- defer executionPayloadsCollector.Close()
- // Indexes collector
- rootToSlotCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
- defer rootToSlotCollector.Close()
- // Lastly finalizations markers collector.
- finalizationCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
- defer finalizationCollector.Close()
- // Start the procedure
- logger.Info(fmt.Sprintf("[%s] Reconstructing", s.LogPrefix()), "from", cfg.state.LatestBlockHeader().Slot, "to", destinationSlot)
- // Setup slot and block root
- cfg.downloader.SetSlotToDownload(currentSlot)
- cfg.downloader.SetExpectedRoot(blockRoot)
- foundLatestEth1ValidHash := false
- if cfg.executionClient == nil {
- foundLatestEth1ValidHash = true
- }
- // Set up onNewBlock callback
- cfg.downloader.SetOnNewBlock(func(blk *cltypes.SignedBeaconBlock) (finished bool, err error) {
- slot := blk.Block.Slot
- blockRoot, err := blk.Block.HashSSZ()
- if err != nil {
- return false, err
- }
- key := append(rawdb2.EncodeNumber(slot), blockRoot[:]...)
- // Collect beacon blocks
- encodedBeaconBlock, err := blk.EncodeSSZ(nil)
- if err != nil {
- return false, err
- }
- slotBytes := rawdb2.EncodeNumber(slot)
- if err := beaconBlocksCollector.Collect(key, utils.CompressSnappy(encodedBeaconBlock)); err != nil {
- return false, err
- }
- // Collect hashes
- if err := rootToSlotCollector.Collect(blockRoot[:], slotBytes); err != nil {
- return false, err
- }
- if err := rootToSlotCollector.Collect(blk.Block.StateRoot[:], slotBytes); err != nil {
- return false, err
- }
- // Mark finalization markers.
- if err := finalizationCollector.Collect(slotBytes, blockRoot[:]); err != nil {
- return false, err
- }
- // Collect Execution Payloads
- if blk.Version() >= clparams.BellatrixVersion && !foundLatestEth1ValidHash {
- payload := blk.Block.Body.ExecutionPayload
- if foundLatestEth1ValidHash, err = cfg.executionClient.IsCanonical(payload.BlockHash); err != nil {
- return false, err
- }
- if foundLatestEth1ValidHash {
- return slot <= destinationSlot, nil
- }
- encodedPayload := make([]byte, 0, payload.EncodingSizeSSZ())
- encodedPayload, err = payload.EncodeSSZ(encodedPayload)
- if err != nil {
- return false, err
- }
- if err := executionPayloadsCollector.Collect(rawdb2.EncodeNumber(slot), encodedPayload); err != nil {
- return false, err
- }
- }
- return slot <= destinationSlot && foundLatestEth1ValidHash, nil
- })
- prevProgress := cfg.downloader.Progress()
-
- logInterval := time.NewTicker(logIntervalTime)
- finishCh := make(chan struct{})
- // Start logging thread
- go func() {
- for {
- select {
- case <-logInterval.C:
- logArgs := []interface{}{}
- currProgress := cfg.downloader.Progress()
- speed := float64(prevProgress-currProgress) / float64(logIntervalTime/time.Second)
- prevProgress = currProgress
- peerCount, err := cfg.downloader.Peers()
- if err != nil {
- return
- }
- logArgs = append(logArgs,
- "progress", currProgress,
- "blk/sec", fmt.Sprintf("%.1f", speed),
- "peers", peerCount)
- if currentSlot > destinationSlot {
- logArgs = append(logArgs, "remaining", currProgress-destinationSlot)
- }
- logger.Info(fmt.Sprintf("[%s] Backwards downloading phase", s.LogPrefix()), logArgs...)
- case <-finishCh:
- return
- case <-ctx.Done():
-
- }
- }
- }()
- for !cfg.downloader.Finished() {
- cfg.downloader.RequestMore(ctx)
- }
- close(finishCh)
- if err := attestationsCollector.Load(tx, kv.Attestetations, etl.IdentityLoadFunc, etl.TransformArgs{Quit: context.Background().Done()}); err != nil {
- return err
- }
- if err := beaconBlocksCollector.Load(tx, kv.BeaconBlocks, etl.IdentityLoadFunc, etl.TransformArgs{Quit: context.Background().Done()}); err != nil {
- return err
- }
- if err := rootToSlotCollector.Load(tx, kv.RootSlotIndex, etl.IdentityLoadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
- return err
- }
- if err := finalizationCollector.Load(tx, kv.FinalizedBlockRoots, etl.IdentityLoadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
- return err
- }
- executionPayloadInsertionBatch := execution_client2.NewInsertBatch(cfg.executionClient)
- // Send in ordered manner EL blocks to Execution Layer
- if err := executionPayloadsCollector.Load(tx, kv.BeaconBlocks, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
- payload := &cltypes.Eth1Block{}
- if err := payload.DecodeSSZ(v, int(clparams.BellatrixVersion)); err != nil {
- return err
- }
- if err := executionPayloadInsertionBatch.WriteExecutionPayload(payload); err != nil {
- return err
- }
- return next(k, nil, nil)
- }, etl.TransformArgs{Quit: context.Background().Done()}); err != nil {
- return err
- }
- if err := executionPayloadInsertionBatch.Flush(); err != nil {
- return err
- }
- if err := s.Update(tx, 1); err != nil {
- return err
- }
-
- if !useExternalTx {
- if err = tx.Commit(); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cl/phase1/stages/stages.go b/cl/phase1/stages/stages.go
deleted file mode 100644
index 8829a57aca7..00000000000
--- a/cl/phase1/stages/stages.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package stages
-
-import (
- "context"
-
- "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
- "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
- network2 "github.com/ledgerwatch/erigon/cl/phase1/network"
-
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/eth/stagedsync"
- "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
- "github.com/ledgerwatch/log/v3"
-)
-
-// StateStages are all stages necessary for basic unwind and stage computation, it is primarly used to process side forks and memory execution.
-func ConsensusStages(ctx context.Context, historyReconstruction StageHistoryReconstructionCfg, beaconState StageBeaconStateCfg, forkchoice StageForkChoiceCfg) []*stagedsync.Stage {
- return []*stagedsync.Stage{
- {
- ID: stages.BeaconHistoryReconstruction,
- Description: "Download beacon blocks backwards.",
- Forward: func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
- return SpawnStageHistoryReconstruction(historyReconstruction, s, tx, ctx, logger)
- },
- Unwind: func(firstCycle bool, u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, logger log.Logger) error {
- return nil
- },
- },
- {
- ID: stages.BeaconState,
- Description: "Execute Consensus Layer transition",
- Forward: func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
- return SpawnStageBeaconState(beaconState, tx, ctx)
- },
- Unwind: func(firstCycle bool, u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, logger log.Logger) error {
- return nil
- },
- },
- {
- ID: stages.BeaconBlocks,
- Description: "Download beacon blocks forward.",
- Forward: func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
- return SpawnStageForkChoice(forkchoice, s, tx, ctx)
- },
- Unwind: func(firstCycle bool, u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, logger log.Logger) error {
- return nil
- },
- },
- }
-}
-
-var ConsensusUnwindOrder = stagedsync.UnwindOrder{
- stages.BeaconState,
- stages.BeaconBlocks,
-}
-
-var ConsensusPruneOrder = stagedsync.PruneOrder{
- stages.BeaconState,
- stages.BeaconBlocks,
-}
-
-func NewConsensusStagedSync(ctx context.Context,
- db kv.RwDB,
- forwardDownloader *network2.ForwardBeaconDownloader,
- backwardDownloader *network2.BackwardBeaconDownloader,
- genesisCfg *clparams.GenesisConfig,
- beaconCfg *clparams.BeaconChainConfig,
- state *state.CachingBeaconState,
- tmpdir string,
- executionClient *execution_client.ExecutionClient,
- beaconDBCfg *rawdb.BeaconDataConfig,
- gossipManager *network2.GossipManager,
- forkChoice *forkchoice.ForkChoiceStore,
- logger log.Logger,
-) (*stagedsync.Sync, error) {
- return stagedsync.New(
- ConsensusStages(
- ctx,
- StageHistoryReconstruction(db, backwardDownloader, genesisCfg, beaconCfg, beaconDBCfg, state, tmpdir, executionClient),
- StageBeaconState(db, beaconCfg, state, executionClient),
- StageForkChoice(db, forwardDownloader, genesisCfg, beaconCfg, state, executionClient, gossipManager, forkChoice, nil),
- ),
- ConsensusUnwindOrder,
- ConsensusPruneOrder,
- logger,
- ), nil
-}
diff --git a/cl/phase1/stages/stages_beacon_state.go b/cl/phase1/stages/stages_beacon_state.go
deleted file mode 100644
index 638273a5c29..00000000000
--- a/cl/phase1/stages/stages_beacon_state.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package stages
-
-import (
- "context"
- "github.com/ledgerwatch/erigon/cl/transition"
-
- "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
- state2 "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
-
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
- "github.com/ledgerwatch/log/v3"
-)
-
-type StageBeaconStateCfg struct {
- db kv.RwDB
- beaconCfg *clparams.BeaconChainConfig
- state *state2.CachingBeaconState
- executionClient *execution_client.ExecutionClient
- enabled bool
-}
-
-func StageBeaconState(db kv.RwDB,
- beaconCfg *clparams.BeaconChainConfig, state *state2.CachingBeaconState, executionClient *execution_client.ExecutionClient) StageBeaconStateCfg {
- return StageBeaconStateCfg{
- db: db,
- beaconCfg: beaconCfg,
- state: state,
- executionClient: executionClient,
- enabled: false,
- }
-}
-
-// SpawnStageBeaconState is used to replay historical states
-func SpawnStageBeaconState(cfg StageBeaconStateCfg, tx kv.RwTx, ctx context.Context) error {
- if !cfg.enabled {
- return nil
- }
- // This code need to be fixed.
- useExternalTx := tx != nil
- var err error
- if !useExternalTx {
- tx, err = cfg.db.BeginRw(ctx)
- if err != nil {
- return err
- }
- defer tx.Rollback()
- }
-
- endSlot, err := stages.GetStageProgress(tx, stages.BeaconBlocks)
- if err != nil {
- return err
- }
- latestBlockHeader := cfg.state.LatestBlockHeader()
-
- fromSlot := latestBlockHeader.Slot
- for slot := fromSlot + 1; slot <= endSlot; slot++ {
- finalizedRoot, err := rawdb.ReadFinalizedBlockRoot(tx, slot)
- if err != nil {
- return err
- }
- // Slot had a missing proposal in this case.
- if finalizedRoot == (libcommon.Hash{}) {
- continue
- }
- // TODO(Giulio2002): proper versioning
- block, eth1Number, eth1Hash, err := rawdb.ReadBeaconBlock(tx, finalizedRoot, slot, clparams.Phase0Version)
- if err != nil {
- return err
- }
-
- // Query execution engine only if the payload have an hash.
- if eth1Hash != (libcommon.Hash{}) {
- if block.Block.Body.ExecutionPayload, err = cfg.executionClient.ReadExecutionPayload(eth1Number, eth1Hash); err != nil {
- return err
- }
- }
- // validate fully only in current epoch.
- fullValidate := utils.GetCurrentEpoch(cfg.state.GenesisTime(), cfg.beaconCfg.SecondsPerSlot, cfg.beaconCfg.SlotsPerEpoch) == state2.Epoch(cfg.state.BeaconState)
- if err := transition.TransitionState(cfg.state, block, fullValidate); err != nil {
- log.Info("Found epoch, so stopping now...", "count", slot-(fromSlot+1), "slot", slot)
- return err
- }
- log.Info("Applied state transition", "from", slot, "to", slot+1)
- }
-
- log.Info("[CachingBeaconState] Finished transitioning state", "from", fromSlot, "to", endSlot)
- if !useExternalTx {
- if err = tx.Commit(); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cl/pool/operation_pool.go b/cl/pool/operation_pool.go
new file mode 100644
index 00000000000..44962135584
--- /dev/null
+++ b/cl/pool/operation_pool.go
@@ -0,0 +1,35 @@
+package pool
+
+import (
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
+)
+
+var operationsMultiplier = 20 // Cap the amount of cached element to max_operations_per_block * operations_multiplier
+
+type OperationPool[K comparable, T any] struct {
+ pool *lru.Cache[K, T] // Map the Signature to the underlying object
+}
+
+func NewOperationPool[K comparable, T any](maxOperationsPerBlock int, matricName string) *OperationPool[K, T] {
+ pool, err := lru.New[K, T](matricName, maxOperationsPerBlock*operationsMultiplier)
+ if err != nil {
+ panic(err)
+ }
+ return &OperationPool[K, T]{pool: pool}
+}
+
+func (o *OperationPool[K, T]) Insert(k K, operation T) {
+ o.pool.Add(k, operation)
+}
+
+func (o *OperationPool[K, T]) DeleteIfExist(k K) (removed bool) {
+ return o.pool.Remove(k)
+}
+
+func (o *OperationPool[K, T]) Has(k K) (hash bool) {
+ return o.pool.Contains(k)
+}
+
+func (o *OperationPool[K, T]) Raw() []T {
+ return o.pool.Values()
+}
diff --git a/cl/pool/operations_pool.go b/cl/pool/operations_pool.go
new file mode 100644
index 00000000000..949e6fda237
--- /dev/null
+++ b/cl/pool/operations_pool.go
@@ -0,0 +1,62 @@
+package pool
+
+import (
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/crypto/blake2b"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+)
+
+// DoubleSignatureKey uses blake2b algorithm to merge two signatures together. blake2 is faster than sha3.
+func doubleSignatureKey(one, two libcommon.Bytes96) (out libcommon.Bytes96) {
+ res := blake2b.Sum256(append(one[:], two[:]...))
+ copy(out[:], res[:])
+ return
+}
+
+func ComputeKeyForProposerSlashing(slashing *cltypes.ProposerSlashing) libcommon.Bytes96 {
+ return doubleSignatureKey(slashing.Header1.Signature, slashing.Header2.Signature)
+}
+
+func ComputeKeyForAttesterSlashing(slashing *cltypes.AttesterSlashing) libcommon.Bytes96 {
+ return doubleSignatureKey(slashing.Attestation_1.Signature, slashing.Attestation_2.Signature)
+}
+
+// OperationsPool is the collection of all gossip-collectable operations.
+type OperationsPool struct {
+ AttestationsPool *OperationPool[libcommon.Bytes96, *solid.Attestation]
+ AttesterSlashingsPool *OperationPool[libcommon.Bytes96, *cltypes.AttesterSlashing]
+ ProposerSlashingsPool *OperationPool[libcommon.Bytes96, *cltypes.ProposerSlashing]
+ BLSToExecutionChangesPool *OperationPool[libcommon.Bytes96, *cltypes.SignedBLSToExecutionChange]
+ VoluntaryExistsPool *OperationPool[uint64, *cltypes.SignedVoluntaryExit]
+}
+
+func NewOperationsPool(beaconCfg *clparams.BeaconChainConfig) OperationsPool {
+ return OperationsPool{
+ AttestationsPool: NewOperationPool[libcommon.Bytes96, *solid.Attestation](int(beaconCfg.MaxAttestations), "attestationsPool"),
+ AttesterSlashingsPool: NewOperationPool[libcommon.Bytes96, *cltypes.AttesterSlashing](int(beaconCfg.MaxAttestations), "attesterSlashingsPool"),
+ ProposerSlashingsPool: NewOperationPool[libcommon.Bytes96, *cltypes.ProposerSlashing](int(beaconCfg.MaxAttestations), "proposerSlashingsPool"),
+ BLSToExecutionChangesPool: NewOperationPool[libcommon.Bytes96, *cltypes.SignedBLSToExecutionChange](int(beaconCfg.MaxBlsToExecutionChanges), "blsExecutionChangesPool"),
+ VoluntaryExistsPool: NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](int(beaconCfg.MaxBlsToExecutionChanges), "voluntaryExitsPool"),
+ }
+}
+
+func (o *OperationsPool) NotifyBlock(blk *cltypes.BeaconBlock) {
+ blk.Body.VoluntaryExits.Range(func(_ int, exit *cltypes.SignedVoluntaryExit, _ int) bool {
+ o.VoluntaryExistsPool.DeleteIfExist(exit.VoluntaryExit.ValidatorIndex)
+ return true
+ })
+ blk.Body.AttesterSlashings.Range(func(_ int, att *cltypes.AttesterSlashing, _ int) bool {
+ o.AttesterSlashingsPool.DeleteIfExist(ComputeKeyForAttesterSlashing(att))
+ return true
+ })
+ blk.Body.ProposerSlashings.Range(func(_ int, ps *cltypes.ProposerSlashing, _ int) bool {
+ o.ProposerSlashingsPool.DeleteIfExist(ComputeKeyForProposerSlashing(ps))
+ return true
+ })
+ blk.Body.ExecutionChanges.Range(func(_ int, c *cltypes.SignedBLSToExecutionChange, _ int) bool {
+ o.BLSToExecutionChangesPool.DeleteIfExist(c.Signature)
+ return true
+ })
+}
diff --git a/cl/pool/operations_pool_test.go b/cl/pool/operations_pool_test.go
new file mode 100644
index 00000000000..865df13df65
--- /dev/null
+++ b/cl/pool/operations_pool_test.go
@@ -0,0 +1,71 @@
+package pool
+
+import (
+ "testing"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOperationsPool(t *testing.T) {
+ pools := NewOperationsPool(&clparams.MainnetBeaconConfig)
+
+ // AttestationsPool
+ pools.AttestationsPool.Insert([96]byte{}, &solid.Attestation{})
+ pools.AttestationsPool.Insert([96]byte{1}, &solid.Attestation{})
+ require.Equal(t, 2, len(pools.AttestationsPool.Raw()))
+ require.True(t, pools.AttestationsPool.DeleteIfExist([96]byte{}))
+ require.Equal(t, 1, len(pools.AttestationsPool.Raw()))
+ // ProposerSlashingsPool
+ slashing1 := &cltypes.ProposerSlashing{
+ Header1: &cltypes.SignedBeaconBlockHeader{
+ Signature: [96]byte{1},
+ },
+ Header2: &cltypes.SignedBeaconBlockHeader{
+ Signature: [96]byte{2},
+ },
+ }
+ slashing2 := &cltypes.ProposerSlashing{
+ Header1: &cltypes.SignedBeaconBlockHeader{
+ Signature: [96]byte{3},
+ },
+ Header2: &cltypes.SignedBeaconBlockHeader{
+ Signature: [96]byte{4},
+ },
+ }
+ pools.ProposerSlashingsPool.Insert(ComputeKeyForProposerSlashing(slashing1), slashing1)
+ pools.ProposerSlashingsPool.Insert(ComputeKeyForProposerSlashing(slashing2), slashing2)
+ require.True(t, pools.ProposerSlashingsPool.DeleteIfExist(ComputeKeyForProposerSlashing(slashing2)))
+ // AttesterSlashingsPool
+ attesterSlashing1 := &cltypes.AttesterSlashing{
+ Attestation_1: &cltypes.IndexedAttestation{
+ Signature: [96]byte{1},
+ },
+ Attestation_2: &cltypes.IndexedAttestation{
+ Signature: [96]byte{2},
+ },
+ }
+ attesterSlashing2 := &cltypes.AttesterSlashing{
+ Attestation_1: &cltypes.IndexedAttestation{
+ Signature: [96]byte{3},
+ },
+ Attestation_2: &cltypes.IndexedAttestation{
+ Signature: [96]byte{4},
+ },
+ }
+ pools.AttesterSlashingsPool.Insert(ComputeKeyForAttesterSlashing(attesterSlashing1), attesterSlashing1)
+ pools.AttesterSlashingsPool.Insert(ComputeKeyForAttesterSlashing(attesterSlashing2), attesterSlashing2)
+ require.True(t, pools.AttesterSlashingsPool.DeleteIfExist(ComputeKeyForAttesterSlashing(attesterSlashing2)))
+ require.Equal(t, 1, len(pools.AttesterSlashingsPool.Raw()))
+
+ // BLSToExecutionChangesPool
+ pools.BLSToExecutionChangesPool.Insert([96]byte{}, &cltypes.SignedBLSToExecutionChange{})
+ pools.BLSToExecutionChangesPool.Insert([96]byte{1}, &cltypes.SignedBLSToExecutionChange{})
+ require.Equal(t, 2, len(pools.BLSToExecutionChangesPool.Raw()))
+ require.True(t, pools.BLSToExecutionChangesPool.DeleteIfExist([96]byte{}))
+ require.Equal(t, 1, len(pools.BLSToExecutionChangesPool.Raw()))
+
+ require.Equal(t, 1, len(pools.ProposerSlashingsPool.Raw()))
+}
diff --git a/cl/readme.md b/cl/readme.md
new file mode 100644
index 00000000000..e62ff8ef5c7
--- /dev/null
+++ b/cl/readme.md
@@ -0,0 +1,3 @@
+# cl
+
+all code under this directory and subdirectories falls under apache 2.0 license, seen in ./LICENSE
diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go
index a2ff32f75e0..0ada88e8115 100644
--- a/cl/rpc/rpc.go
+++ b/cl/rpc/rpc.go
@@ -8,6 +8,9 @@ import (
"io"
"time"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
+
"github.com/c2h5oh/datasize"
"github.com/golang/snappy"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -21,9 +24,6 @@ import (
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication/ssz_snappy"
- "github.com/ledgerwatch/erigon/common"
)
const maxMessageLength = 18 * datasize.MB
@@ -55,7 +55,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD
// Prepare output slice.
responsePacket := []*cltypes.SignedBeaconBlock{}
- ctx, cn := context.WithTimeout(ctx, time.Second*time.Duration(5+10*count))
+ ctx, cn := context.WithTimeout(ctx, time.Second*time.Duration(16+30*count))
defer cn()
message, err := b.sentinel.SendRequest(ctx, &sentinel.RequestData{
Data: reqData,
@@ -67,7 +67,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD
if message.Error {
rd := snappy.NewReader(bytes.NewBuffer(message.Data))
errBytes, _ := io.ReadAll(rd)
- log.Debug("received range req error", "err", string(errBytes))
+ log.Trace("received range req error", "err", string(errBytes), "raw", string(message.Data))
return nil, message.Peer.Pid, nil
}
@@ -84,7 +84,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD
// Read varint for length of message.
encodedLn, _, err := ssz_snappy.ReadUvarint(r)
if err != nil {
- return nil, message.Peer.Pid, fmt.Errorf("unable to read varint from message prefix: %v", err)
+ return nil, message.Peer.Pid, fmt.Errorf("unable to read varint from message prefix: %w", err)
}
// Sanity check for message size.
if encodedLn > uint64(maxMessageLength) {
@@ -112,7 +112,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD
if err != nil {
return nil, message.Peer.Pid, err
}
- responseChunk := &cltypes.SignedBeaconBlock{}
+ responseChunk := cltypes.NewSignedBeaconBlock(b.beaconConfig)
if err = responseChunk.DecodeSSZ(raw, int(version)); err != nil {
return nil, message.Peer.Pid, err
@@ -137,13 +137,13 @@ func (b *BeaconRpcP2P) SendBeaconBlocksByRangeReq(ctx context.Context, start, co
return nil, "", err
}
- data := common.CopyBytes(buffer.Bytes())
+ data := libcommon.CopyBytes(buffer.Bytes())
return b.sendBlocksRequest(ctx, communication.BeaconBlocksByRangeProtocolV2, data, count)
}
// SendBeaconBlocksByRootReq retrieves blocks by root from beacon chain.
func (b *BeaconRpcP2P) SendBeaconBlocksByRootReq(ctx context.Context, roots [][32]byte) ([]*cltypes.SignedBeaconBlock, string, error) {
- var req solid.HashListSSZ = solid.NewHashList(69696969)
+ var req solid.HashListSSZ = solid.NewHashList(69696969) // The number is used for hashing, it is innofensive here.
for _, root := range roots {
req.Append(root)
}
@@ -151,7 +151,7 @@ func (b *BeaconRpcP2P) SendBeaconBlocksByRootReq(ctx context.Context, roots [][3
if err := ssz_snappy.EncodeAndWrite(&buffer, req); err != nil {
return nil, "", err
}
- data := common.CopyBytes(buffer.Bytes())
+ data := libcommon.CopyBytes(buffer.Bytes())
return b.sendBlocksRequest(ctx, communication.BeaconBlocksByRootProtocolV2, data, uint64(len(roots)))
}
diff --git a/cmd/sentinel/sentinel/communication/ssz_snappy/encoding.go b/cl/sentinel/communication/ssz_snappy/encoding.go
similarity index 97%
rename from cmd/sentinel/sentinel/communication/ssz_snappy/encoding.go
rename to cl/sentinel/communication/ssz_snappy/encoding.go
index 65c38652e42..6f460b3599d 100644
--- a/cmd/sentinel/sentinel/communication/ssz_snappy/encoding.go
+++ b/cl/sentinel/communication/ssz_snappy/encoding.go
@@ -36,11 +36,18 @@ var writerPool = sync.Pool{
}
func EncodeAndWrite(w io.Writer, val ssz.Marshaler, prefix ...byte) error {
+ enc := make([]byte, 0, val.EncodingSizeSSZ())
+ var err error
+ enc, err = val.EncodeSSZ(enc)
+ if err != nil {
+ return err
+ }
// create prefix for length of packet
lengthBuf := make([]byte, 10)
- vin := binary.PutUvarint(lengthBuf, uint64(val.EncodingSizeSSZ()))
+ vin := binary.PutUvarint(lengthBuf, uint64(len(enc)))
+
// Create writer size
- wr := bufio.NewWriterSize(w, 10+val.EncodingSizeSSZ())
+ wr := bufio.NewWriterSize(w, 10+len(enc))
defer wr.Flush()
// Write length of packet
wr.Write(prefix)
@@ -53,12 +60,6 @@ func EncodeAndWrite(w io.Writer, val ssz.Marshaler, prefix ...byte) error {
writerPool.Put(sw)
}()
// Marshall and snap it
- enc := make([]byte, 0, val.EncodingSizeSSZ())
- var err error
- enc, err = val.EncodeSSZ(enc)
- if err != nil {
- return err
- }
_, err = sw.Write(enc)
return err
}
diff --git a/cmd/sentinel/sentinel/communication/topics.go b/cl/sentinel/communication/topics.go
similarity index 100%
rename from cmd/sentinel/sentinel/communication/topics.go
rename to cl/sentinel/communication/topics.go
diff --git a/cmd/sentinel/sentinel/config.go b/cl/sentinel/config.go
similarity index 98%
rename from cmd/sentinel/sentinel/config.go
rename to cl/sentinel/config.go
index 96af15c853b..c27e8b4e220 100644
--- a/cmd/sentinel/sentinel/config.go
+++ b/cl/sentinel/config.go
@@ -21,8 +21,8 @@ import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/log/v3"
"github.com/libp2p/go-libp2p"
+ mplex "github.com/libp2p/go-libp2p-mplex"
"github.com/libp2p/go-libp2p/core/crypto"
- "github.com/libp2p/go-libp2p/p2p/muxer/mplex"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
"github.com/multiformats/go-multiaddr"
diff --git a/cmd/sentinel/sentinel/config_test.go b/cl/sentinel/config_test.go
similarity index 100%
rename from cmd/sentinel/sentinel/config_test.go
rename to cl/sentinel/config_test.go
diff --git a/cmd/sentinel/sentinel/discovery.go b/cl/sentinel/discovery.go
similarity index 73%
rename from cmd/sentinel/sentinel/discovery.go
rename to cl/sentinel/discovery.go
index 36fe1a6fc61..de596304643 100644
--- a/cmd/sentinel/sentinel/discovery.go
+++ b/cl/sentinel/discovery.go
@@ -1,16 +1,3 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
package sentinel
import (
@@ -20,40 +7,37 @@ import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/fork"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/peers"
"github.com/ledgerwatch/erigon/p2p/enode"
"github.com/ledgerwatch/erigon/p2p/enr"
"github.com/ledgerwatch/log/v3"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
-
"github.com/prysmaticlabs/go-bitfield"
)
-func (s *Sentinel) ConnectWithPeer(ctx context.Context, info peer.AddrInfo, skipHandshake bool) (err error) {
+// ConnectWithPeer is used to attempt to connect and add the peer to our pool
+// it errors when if fail to connect with the peer, for instance, if it fails the handshake
+// if it does not return an error, the peer is attempted to be added to the pool
+func (s *Sentinel) ConnectWithPeer(ctx context.Context, info peer.AddrInfo) (err error) {
if info.ID == s.host.ID() {
return nil
}
- s.peers.WithPeer(info.ID, func(peer *peers.Peer) {
- if peer.IsBad() {
- err = fmt.Errorf("refused to connect to bad peer")
- }
- })
- if err != nil {
- return err
+ if s.peers.BanStatus(info.ID) {
+ return fmt.Errorf("refused to connect to bad peer")
}
ctxWithTimeout, cancel := context.WithTimeout(ctx, clparams.MaxDialTimeout)
defer cancel()
- if err := s.host.Connect(ctxWithTimeout, info); err != nil {
- s.peers.WithPeer(info.ID, func(peer *peers.Peer) {
- peer.Disconnect(err.Error())
- })
+ err = s.host.Connect(ctxWithTimeout, info)
+ if err != nil {
return err
}
return nil
}
+// connectWithAllPeers is a helper function used to connect with a list of addrs.
+// it only returns an error on fail to parse multiaddrs
+// will print connect with peer errors to trace debug level
func (s *Sentinel) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) error {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {
@@ -61,7 +45,7 @@ func (s *Sentinel) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) error {
}
for _, peerInfo := range addrInfos {
go func(peerInfo peer.AddrInfo) {
- if err := s.ConnectWithPeer(s.ctx, peerInfo, true); err != nil {
+ if err := s.ConnectWithPeer(s.ctx, peerInfo); err != nil {
log.Trace("[Sentinel] Could not connect with peer", "err", err)
}
}(peerInfo)
@@ -87,7 +71,6 @@ func (s *Sentinel) listenForPeers() {
multiAddresses := convertToMultiAddr(enodes)
if err := s.connectWithAllPeers(multiAddresses); err != nil {
log.Warn("Could not connect to static peers", "reason", err)
-
}
iterator := s.listener.RandomNodes()
@@ -124,7 +107,7 @@ func (s *Sentinel) listenForPeers() {
}
go func(peerInfo *peer.AddrInfo) {
- if err := s.ConnectWithPeer(s.ctx, *peerInfo, false); err != nil {
+ if err := s.ConnectWithPeer(s.ctx, *peerInfo); err != nil {
log.Trace("[Sentinel] Could not connect with peer", "err", err)
}
}(peerInfo)
@@ -161,12 +144,19 @@ func (s *Sentinel) setupENR(
func (s *Sentinel) onConnection(net network.Network, conn network.Conn) {
go func() {
peerId := conn.RemotePeer()
- invalid := !s.handshaker.ValidatePeer(peerId)
- if invalid {
+ valid, err := s.handshaker.ValidatePeer(peerId)
+ if err != nil {
+ log.Trace("[sentinel] failed to validate peer:", "err", err)
+ }
+ if !valid {
log.Trace("Handshake was unsuccessful")
- s.peers.WithPeer(peerId, func(peer *peers.Peer) {
- peer.Disconnect("invalid peer", "bad handshake")
- })
+ // on handshake fail, we disconnect with said peer, and remove them from our pool
+ s.host.Peerstore().RemovePeer(peerId)
+ s.host.Network().ClosePeer(peerId)
+ s.peers.RemovePeer(peerId)
+ } else {
+ // we were able to succesfully connect, so add this peer to our pool
+ s.peers.AddPeer(peerId)
}
}()
}
diff --git a/cmd/sentinel/sentinel/gater.go b/cl/sentinel/gater.go
similarity index 100%
rename from cmd/sentinel/sentinel/gater.go
rename to cl/sentinel/gater.go
diff --git a/cmd/sentinel/sentinel/pubsub.go b/cl/sentinel/gossip.go
similarity index 65%
rename from cmd/sentinel/sentinel/pubsub.go
rename to cl/sentinel/gossip.go
index 005abb913bb..38af103b890 100644
--- a/cmd/sentinel/sentinel/pubsub.go
+++ b/cl/sentinel/gossip.go
@@ -1,20 +1,19 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2022 Erigon-Caplin contributors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package sentinel
import (
"context"
+ "errors"
"fmt"
"strings"
"sync"
@@ -23,6 +22,7 @@ import (
"github.com/ledgerwatch/erigon/cl/fork"
"github.com/ledgerwatch/log/v3"
pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/peer"
)
var (
@@ -43,6 +43,7 @@ const (
VoluntaryExitTopic TopicName = "voluntary_exit"
ProposerSlashingTopic TopicName = "proposer_slashing"
AttesterSlashingTopic TopicName = "attester_slashing"
+ BlsToExecutionChangeTopic TopicName = "bls_to_execution_change"
BlobSidecarTopic TopicName = "blob_sidecar_%d" // This topic needs an index
)
@@ -55,23 +56,32 @@ var BeaconBlockSsz = GossipTopic{
Name: BeaconBlockTopic,
CodecStr: SSZSnappyCodec,
}
+
var BeaconAggregateAndProofSsz = GossipTopic{
Name: BeaconAggregateAndProofTopic,
CodecStr: SSZSnappyCodec,
}
+
var VoluntaryExitSsz = GossipTopic{
Name: VoluntaryExitTopic,
CodecStr: SSZSnappyCodec,
}
+
var ProposerSlashingSsz = GossipTopic{
Name: ProposerSlashingTopic,
CodecStr: SSZSnappyCodec,
}
+
var AttesterSlashingSsz = GossipTopic{
Name: AttesterSlashingTopic,
CodecStr: SSZSnappyCodec,
}
+var BlsToExecutionChangeSsz = GossipTopic{
+ Name: BlsToExecutionChangeTopic,
+ CodecStr: SSZSnappyCodec,
+}
+
type GossipManager struct {
ch chan *pubsub.Message
subscriptions map[string]*GossipSubscription
@@ -171,21 +181,21 @@ func (s *Sentinel) topicScoreParams(topic string) *pubsub.TopicScoreParams {
case strings.Contains(topic, string(BeaconBlockTopic)):
return s.defaultBlockTopicParams()
/*case strings.Contains(topic, GossipAggregateAndProofMessage):
- return defaultAggregateTopicParams(activeValidators), nil
+ return defaultAggregateTopicParams(activeValidators), nil
case strings.Contains(topic, GossipAttestationMessage):
- return defaultAggregateSubnetTopicParams(activeValidators), nil
+ return defaultAggregateSubnetTopicParams(activeValidators), nil
case strings.Contains(topic, GossipSyncCommitteeMessage):
- return defaultSyncSubnetTopicParams(activeValidators), nil
+ return defaultSyncSubnetTopicParams(activeValidators), nil
case strings.Contains(topic, GossipContributionAndProofMessage):
- return defaultSyncContributionTopicParams(), nil
+ return defaultSyncContributionTopicParams(), nil
case strings.Contains(topic, GossipExitMessage):
- return defaultVoluntaryExitTopicParams(), nil
+ return defaultVoluntaryExitTopicParams(), nil
case strings.Contains(topic, GossipProposerSlashingMessage):
- return defaultProposerSlashingTopicParams(), nil
+ return defaultProposerSlashingTopicParams(), nil
case strings.Contains(topic, GossipAttesterSlashingMessage):
- return defaultAttesterSlashingTopicParams(), nil
+ return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
- return defaultBlsToExecutionChangeTopicParams(), nil*/
+ return defaultBlsToExecutionChangeTopicParams(), nil*/
default:
return nil
}
@@ -224,3 +234,89 @@ func (g *GossipManager) Close() {
}
}
}
+
+// GossipSubscription abstracts a gossip subscription to write decoded structs.
+type GossipSubscription struct {
+ gossip_topic GossipTopic
+ host peer.ID
+ ch chan *pubsub.Message
+ ctx context.Context
+
+ topic *pubsub.Topic
+ sub *pubsub.Subscription
+
+ cf context.CancelFunc
+ rf pubsub.RelayCancelFunc
+
+ setup sync.Once
+ stopCh chan struct{}
+}
+
+func (sub *GossipSubscription) Listen() (err error) {
+ sub.setup.Do(func() {
+ sub.stopCh = make(chan struct{}, 3)
+ sub.sub, err = sub.topic.Subscribe()
+ if err != nil {
+ err = fmt.Errorf("failed to begin topic %s subscription, err=%w", sub.topic.String(), err)
+ return
+ }
+ var sctx context.Context
+ sctx, sub.cf = context.WithCancel(sub.ctx)
+ go sub.run(sctx, sub.sub, sub.sub.Topic())
+ })
+ return nil
+}
+
+// calls the cancel func for the subscriber and closes the topic and sub
+func (s *GossipSubscription) Close() {
+ s.stopCh <- struct{}{}
+ if s.cf != nil {
+ s.cf()
+ }
+ if s.rf != nil {
+ s.rf()
+ }
+ if s.sub != nil {
+ s.sub.Cancel()
+ s.sub = nil
+ }
+ if s.topic != nil {
+ s.topic.Close()
+ s.topic = nil
+ }
+}
+
+// this is a helper to begin running the gossip subscription.
+// function should not be used outside of the constructor for gossip subscription
+func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topic string) {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Error("[Sentinel Gossip] Message Handler Crashed", "err", r)
+ }
+ }()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-s.stopCh:
+ return
+ default:
+ msg, err := sub.Next(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return
+ }
+ log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topic", topic)
+ return
+ }
+ if msg.GetFrom() == s.host {
+ continue
+ }
+ s.ch <- msg
+ }
+ }
+}
+
+func (g *GossipSubscription) Publish(data []byte) error {
+ return g.topic.Publish(g.ctx, data)
+}
diff --git a/cmd/sentinel/sentinel/handlers/blocks.go b/cl/sentinel/handlers/blocks.go
similarity index 80%
rename from cmd/sentinel/sentinel/handlers/blocks.go
rename to cl/sentinel/handlers/blocks.go
index fd2b19b0e61..21f4aca2a33 100644
--- a/cmd/sentinel/sentinel/handlers/blocks.go
+++ b/cl/sentinel/handlers/blocks.go
@@ -14,11 +14,16 @@
package handlers
import (
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication/ssz_snappy"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
"github.com/ledgerwatch/log/v3"
"github.com/libp2p/go-libp2p/core/network"
)
+// func (c *ConsensusHandlers) blocksByRangeHandlerPROTODONOTTOUCH69(stream network.Stream) error {
+// log.Trace("Got block by range handler call")
+// return ssz_snappy.EncodeAndWrite(stream, &emptyString{}, ResourceUnavaiablePrefix)
+// }
+
func (c *ConsensusHandlers) blocksByRangeHandler(stream network.Stream) error {
log.Trace("Got block by range handler call")
return ssz_snappy.EncodeAndWrite(stream, &emptyString{}, ResourceUnavaiablePrefix)
diff --git a/cmd/sentinel/sentinel/handlers/handlers.go b/cl/sentinel/handlers/handlers.go
similarity index 53%
rename from cmd/sentinel/sentinel/handlers/handlers.go
rename to cl/sentinel/handlers/handlers.go
index 00b868be508..051b6d4fdfb 100644
--- a/cmd/sentinel/sentinel/handlers/handlers.go
+++ b/cl/sentinel/handlers/handlers.go
@@ -15,46 +15,72 @@ package handlers
import (
"context"
+ "errors"
"strings"
+ "sync"
+ "time"
+
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+ "github.com/ledgerwatch/erigon/cl/utils"
+ "golang.org/x/time/rate"
- "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/peers"
+ "github.com/ledgerwatch/erigon/cl/persistence"
"github.com/ledgerwatch/log/v3"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
)
+type RateLimits struct {
+ pingLimit int
+ goodbyeLimit int
+ metadataV1Limit int
+ metadataV2Limit int
+ statusLimit int
+}
+
+const punishmentPeriod = time.Minute
+
+var defaultRateLimits = RateLimits{
+ pingLimit: 5000,
+ goodbyeLimit: 5000,
+ metadataV1Limit: 5000,
+ metadataV2Limit: 5000,
+ statusLimit: 5000,
+}
+
type ConsensusHandlers struct {
- handlers map[protocol.ID]network.StreamHandler
- host host.Host
- peers *peers.Manager
- metadata *cltypes.Metadata
- beaconConfig *clparams.BeaconChainConfig
- genesisConfig *clparams.GenesisConfig
- ctx context.Context
-
- db kv.RoDB // Read stuff from database to answer
+ handlers map[protocol.ID]network.StreamHandler
+ host host.Host
+ metadata *cltypes.Metadata
+ beaconConfig *clparams.BeaconChainConfig
+ genesisConfig *clparams.GenesisConfig
+ ctx context.Context
+ beaconDB persistence.RawBeaconBlockChain
+ peerRateLimits sync.Map
+ punishmentEndTimes sync.Map
}
const (
SuccessfulResponsePrefix = 0x00
+ RateLimitedPrefix = 0x02
ResourceUnavaiablePrefix = 0x03
)
-func NewConsensusHandlers(ctx context.Context, db kv.RoDB, host host.Host,
- peers *peers.Manager, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.Metadata) *ConsensusHandlers {
+func NewConsensusHandlers(ctx context.Context, db persistence.RawBeaconBlockChain, host host.Host,
+ peers *peers.Pool, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, metadata *cltypes.Metadata) *ConsensusHandlers {
c := &ConsensusHandlers{
- peers: peers,
- host: host,
- metadata: metadata,
- db: db,
- genesisConfig: genesisConfig,
- beaconConfig: beaconConfig,
- ctx: ctx,
+ host: host,
+ metadata: metadata,
+ beaconDB: db,
+ genesisConfig: genesisConfig,
+ beaconConfig: beaconConfig,
+ ctx: ctx,
+ peerRateLimits: sync.Map{},
+ punishmentEndTimes: sync.Map{},
}
hm := map[string]func(s network.Stream) error{
@@ -74,6 +100,29 @@ func NewConsensusHandlers(ctx context.Context, db kv.RoDB, host host.Host,
return c
}
+func (c *ConsensusHandlers) checkRateLimit(peerId string, method string, limit int) error {
+ keyHash := utils.Sha256([]byte(peerId), []byte(method))
+
+ if punishmentEndTime, ok := c.punishmentEndTimes.Load(keyHash); ok {
+ if time.Now().Before(punishmentEndTime.(time.Time)) {
+ return errors.New("rate limit exceeded, punishment period in effect")
+ } else {
+ c.punishmentEndTimes.Delete(keyHash)
+ }
+ }
+
+ value, _ := c.peerRateLimits.LoadOrStore(keyHash, rate.NewLimiter(rate.Every(time.Minute), limit))
+ limiter := value.(*rate.Limiter)
+
+ if !limiter.Allow() {
+ c.punishmentEndTimes.Store(keyHash, time.Now().Add(punishmentPeriod))
+ c.peerRateLimits.Delete(keyHash)
+ return errors.New("rate limit exceeded")
+ }
+
+ return nil
+}
+
func (c *ConsensusHandlers) Start() {
for id, handler := range c.handlers {
c.host.SetStreamHandler(id, handler)
@@ -103,7 +152,7 @@ func (c *ConsensusHandlers) wrapStreamHandler(name string, fn func(s network.Str
if err != nil {
l["err"] = err
if !(strings.Contains(name, "goodbye") && (strings.Contains(err.Error(), "session shut down") || strings.Contains(err.Error(), "stream reset"))) {
- log.Warn("[pubsubhandler] close stream", l)
+ log.Trace("[pubsubhandler] close stream", l)
}
}
}
diff --git a/cmd/sentinel/sentinel/handlers/heartbeats.go b/cl/sentinel/handlers/heartbeats.go
similarity index 61%
rename from cmd/sentinel/sentinel/handlers/heartbeats.go
rename to cl/sentinel/handlers/heartbeats.go
index f42b345c0c9..4dc04556916 100644
--- a/cmd/sentinel/sentinel/handlers/heartbeats.go
+++ b/cl/sentinel/handlers/heartbeats.go
@@ -16,7 +16,7 @@ package handlers
import (
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication/ssz_snappy"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
"github.com/libp2p/go-libp2p/core/network"
)
@@ -24,18 +24,36 @@ import (
// Since packets are just structs, they can be resent with no issue
func (c *ConsensusHandlers) pingHandler(s network.Stream) error {
+ peerId := s.Conn().RemotePeer().String()
+ if err := c.checkRateLimit(peerId, "ping", defaultRateLimits.pingLimit); err != nil {
+ ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix)
+ defer s.Close()
+ return err
+ }
return ssz_snappy.EncodeAndWrite(s, &cltypes.Ping{
Id: c.metadata.SeqNumber,
}, SuccessfulResponsePrefix)
}
func (c *ConsensusHandlers) goodbyeHandler(s network.Stream) error {
+ peerId := s.Conn().RemotePeer().String()
+ if err := c.checkRateLimit(peerId, "goodbye", defaultRateLimits.goodbyeLimit); err != nil {
+ ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix)
+ defer s.Close()
+ return err
+ }
return ssz_snappy.EncodeAndWrite(s, &cltypes.Ping{
Id: 1,
}, SuccessfulResponsePrefix)
}
func (c *ConsensusHandlers) metadataV1Handler(s network.Stream) error {
+ peerId := s.Conn().RemotePeer().String()
+ if err := c.checkRateLimit(peerId, "metadataV1", defaultRateLimits.metadataV1Limit); err != nil {
+ ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix)
+ defer s.Close()
+ return err
+ }
return ssz_snappy.EncodeAndWrite(s, &cltypes.Metadata{
SeqNumber: c.metadata.SeqNumber,
Attnets: c.metadata.Attnets,
@@ -43,11 +61,23 @@ func (c *ConsensusHandlers) metadataV1Handler(s network.Stream) error {
}
func (c *ConsensusHandlers) metadataV2Handler(s network.Stream) error {
+ peerId := s.Conn().RemotePeer().String()
+ if err := c.checkRateLimit(peerId, "metadataV2", defaultRateLimits.metadataV2Limit); err != nil {
+ ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix)
+ defer s.Close()
+ return err
+ }
return ssz_snappy.EncodeAndWrite(s, c.metadata, SuccessfulResponsePrefix)
}
// TODO: Actually respond with proper status
func (c *ConsensusHandlers) statusHandler(s network.Stream) error {
+ peerId := s.Conn().RemotePeer().String()
+ if err := c.checkRateLimit(peerId, "status", defaultRateLimits.statusLimit); err != nil {
+ ssz_snappy.EncodeAndWrite(s, &emptyString{}, RateLimitedPrefix)
+ defer s.Close()
+ return err
+ }
defer s.Close()
status := &cltypes.Status{}
if err := ssz_snappy.DecodeAndReadNoForkDigest(s, status, clparams.Phase0Version); err != nil {
diff --git a/cmd/sentinel/sentinel/handshake/handshake.go b/cl/sentinel/handshake/handshake.go
similarity index 56%
rename from cmd/sentinel/sentinel/handshake/handshake.go
rename to cl/sentinel/handshake/handshake.go
index 41a8392d1e9..f5b6baadf07 100644
--- a/cmd/sentinel/sentinel/handshake/handshake.go
+++ b/cl/sentinel/handshake/handshake.go
@@ -3,17 +3,19 @@ package handshake
import (
"bytes"
"context"
+ "fmt"
+ "io"
+ "net/http"
"sync"
+ communication2 "github.com/ledgerwatch/erigon/cl/sentinel/communication"
+ "github.com/ledgerwatch/erigon/cl/sentinel/communication/ssz_snappy"
+ "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp"
+
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/fork"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication/ssz_snappy"
- "github.com/ledgerwatch/erigon/common"
- "github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
- "go.uber.org/zap/buffer"
)
// HandShaker is the data type which will handle handshakes and determine if
@@ -23,17 +25,17 @@ type HandShaker struct {
// Status object to send over.
status *cltypes.Status // Contains status object for handshakes
set bool
- host host.Host
+ handler http.Handler
genesisConfig *clparams.GenesisConfig
beaconConfig *clparams.BeaconChainConfig
mu sync.Mutex
}
-func New(ctx context.Context, genesisConfig *clparams.GenesisConfig, beaconConfig *clparams.BeaconChainConfig, host host.Host) *HandShaker {
+func New(ctx context.Context, genesisConfig *clparams.GenesisConfig, beaconConfig *clparams.BeaconChainConfig, handler http.Handler) *HandShaker {
return &HandShaker{
ctx: ctx,
- host: host,
+ handler: handler,
genesisConfig: genesisConfig,
beaconConfig: beaconConfig,
status: &cltypes.Status{},
@@ -62,32 +64,41 @@ func (h *HandShaker) IsSet() bool {
return h.set
}
-func (h *HandShaker) ValidatePeer(id peer.ID) bool {
+func (h *HandShaker) ValidatePeer(id peer.ID) (bool, error) {
// Unprotected if it is not set
if !h.IsSet() {
- return true
+ return true, nil
}
status := h.Status()
// Encode our status
- var buffer buffer.Buffer
- if err := ssz_snappy.EncodeAndWrite(&buffer, status); err != nil {
- return false
+ buf := new(bytes.Buffer)
+ if err := ssz_snappy.EncodeAndWrite(buf, status); err != nil {
+ return false, err
}
-
- data := common.CopyBytes(buffer.Bytes())
- response, errResponse, err := communication.SendRequestRawToPeer(h.ctx, h.host, data, communication.StatusProtocolV1, id)
- if err != nil || errResponse > 0 {
- return false
+ req, err := http.NewRequest("GET", "http://service.internal/", buf)
+ if err != nil {
+ return false, err
+ }
+ req.Header.Set("REQRESP-PEER-ID", id.String())
+ req.Header.Set("REQRESP-TOPIC", communication2.StatusProtocolV1)
+ resp, err := httpreqresp.Do(h.handler, req)
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+ if resp.Header.Get("REQRESP-RESPONSE-CODE") != "0" {
+ a, _ := io.ReadAll(resp.Body)
+ //TODO: proper errors
+ return false, fmt.Errorf("handshake error: %s", string(a))
}
responseStatus := &cltypes.Status{}
- if err := ssz_snappy.DecodeAndReadNoForkDigest(bytes.NewReader(response), responseStatus, clparams.Phase0Version); err != nil {
- return false
+ if err := ssz_snappy.DecodeAndReadNoForkDigest(resp.Body, responseStatus, clparams.Phase0Version); err != nil {
+ return false, nil
}
forkDigest, err := fork.ComputeForkDigest(h.beaconConfig, h.genesisConfig)
if err != nil {
- return false
+ return false, nil
}
-
- return responseStatus.ForkDigest == forkDigest
+ return responseStatus.ForkDigest == forkDigest, nil
}
diff --git a/cl/sentinel/httpreqresp/server.go b/cl/sentinel/httpreqresp/server.go
new file mode 100644
index 00000000000..f9daabe1653
--- /dev/null
+++ b/cl/sentinel/httpreqresp/server.go
@@ -0,0 +1,122 @@
+// package httpreqresp encapsulates eth2 beacon chain resp-resp into http
+package httpreqresp
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+const (
+ ResponseCodeHeader = "Reqresp-Response-Code"
+ PeerIdHeader = "Reqresp-Peer-Id"
+ TopicHeader = "Reqresp-Topic"
+)
+
+// Do performs an http request against the http handler.
+// NOTE: this is actually very similar to the http.RoundTripper interface... maybe we should investigate using that.
+/*
+
+the following headers have meaning when passed in to the request:
+
+ REQRESP-PEER-ID - the peer id to target for the request
+ REQRESP-TOPIC - the topic to request with
+ REQRESP-EXPECTED-CHUNKS - this is an integer, which will be multiplied by 10 to calculate the amount of seconds the peer has to respond with all the data
+*/
+func Do(handler http.Handler, r *http.Request) (*http.Response, error) {
+ // TODO: there potentially extra alloc here (responses are bufferd)
+ // is that a big deal? not sure. maybe can reuse these buffers since they are read once (and known when close) if so
+ ans := make(chan *http.Response)
+ go func() {
+ res := httptest.NewRecorder()
+ handler.ServeHTTP(res, r)
+ // linter does not know we are passing the resposne through channel.
+ // nolint: bodyclose
+ resp := res.Result()
+ ans <- resp
+ }()
+ select {
+ case res := <-ans:
+ return res, nil
+ case <-r.Context().Done():
+ return nil, r.Context().Err()
+ }
+}
+
+// Handles a request
+func NewRequestHandler(host host.Host) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // get the peer parameters
+ peerIdBase58 := r.Header.Get("REQRESP-PEER-ID")
+ topic := r.Header.Get("REQRESP-TOPIC")
+ chunkCount := r.Header.Get("REQRESP-EXPECTED-CHUNKS")
+ chunks, _ := strconv.Atoi(chunkCount)
+ // some sanity checking on chunks
+ if chunks < 1 {
+ chunks = 1
+ }
+ // idk why this would happen, so lets make sure it doesnt. future-proofing from bad input
+ if chunks > 512 {
+ chunks = 512
+ }
+ // read the base58 encoded peer id to know which we are trying to dial
+ peerId, err := peer.Decode(peerIdBase58)
+ if err != nil {
+ http.Error(w, "Invalid Peer Id", http.StatusBadRequest)
+ return
+ }
+ // we can't connect to the peer - so we should disconnect them. send a code 4xx
+ stream, err := host.NewStream(r.Context(), peerId, protocol.ID(topic))
+ if err != nil {
+ http.Error(w, "Can't Connect to Peer: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ defer stream.Close()
+ // this write deadline is not part of the eth p2p spec, but we are implying it.
+ stream.SetWriteDeadline(time.Now().Add(5 * time.Second))
+ if r.Body != nil && r.ContentLength > 0 {
+ _, err := io.Copy(stream, r.Body)
+ if err != nil {
+ http.Error(w, "Processing Stream: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ }
+ err = stream.CloseWrite()
+ if err != nil {
+ http.Error(w, "Close Write Side: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ code := make([]byte, 1)
+ // we have 5 seconds to read the next byte. this is the 5 TTFB_TIMEOUT in the spec
+ stream.SetReadDeadline(time.Now().Add(5 * time.Second))
+ _, err = io.ReadFull(stream, code)
+ if err != nil {
+ http.Error(w, "Read Code: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ // this is not neccesary, but seems like the right thing to do
+ w.Header().Set("CONTENT-TYPE", "application/octet-stream")
+ w.Header().Set("CONTENT-ENCODING", "snappy/stream")
+ // add the response code & headers
+ w.Header().Set("REQRESP-RESPONSE-CODE", strconv.Itoa(int(code[0])))
+ w.Header().Set("REQRESP-PEER-ID", peerIdBase58)
+ w.Header().Set("REQRESP-TOPIC", topic)
+ // the deadline is 10 * expected chunk count, which the user can send. otherwise we will only wait 10 seconds
+ // this is technically incorrect, and more aggressive than the network might like.
+ stream.SetReadDeadline(time.Now().Add(10 * time.Second * time.Duration(chunks)))
+ // copy the data now to the stream
+ // the first write to w will call code 200, so we do not need to
+ _, err = io.Copy(w, stream)
+ if err != nil {
+ http.Error(w, "Reading Stream Response: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ return
+ }
+}
diff --git a/cl/sentinel/libp2p_settings.go b/cl/sentinel/libp2p_settings.go
new file mode 100644
index 00000000000..82ce46f5368
--- /dev/null
+++ b/cl/sentinel/libp2p_settings.go
@@ -0,0 +1,66 @@
+package sentinel
+
+import (
+ "math"
+ "time"
+
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// determines the decay rate from the provided time period till
+// the decayToZero value. Ex: ( 1 -> 0.01)
+func (s *Sentinel) scoreDecay(totalDurationDecay time.Duration) float64 {
+ numOfTimes := totalDurationDecay / s.oneSlotDuration()
+ return math.Pow(decayToZero, 1/float64(numOfTimes))
+}
+
+func (s *Sentinel) pubsubOptions() []pubsub.Option {
+ thresholds := &pubsub.PeerScoreThresholds{
+ GossipThreshold: -4000,
+ PublishThreshold: -8000,
+ GraylistThreshold: -16000,
+ AcceptPXThreshold: 100,
+ OpportunisticGraftThreshold: 5,
+ }
+ scoreParams := &pubsub.PeerScoreParams{
+ Topics: make(map[string]*pubsub.TopicScoreParams),
+ TopicScoreCap: 32.72,
+ AppSpecificScore: func(p peer.ID) float64 {
+ return 0
+ },
+ AppSpecificWeight: 1,
+ IPColocationFactorWeight: -35.11,
+ IPColocationFactorThreshold: 10,
+ IPColocationFactorWhitelist: nil,
+ BehaviourPenaltyWeight: -15.92,
+ BehaviourPenaltyThreshold: 6,
+ BehaviourPenaltyDecay: s.scoreDecay(10 * s.oneEpochDuration()), // 10 epochs
+ DecayInterval: s.oneSlotDuration(),
+ DecayToZero: decayToZero,
+ RetainScore: 100 * s.oneEpochDuration(), // Retain for 100 epochs
+ }
+ pubsubQueueSize := 600
+ psOpts := []pubsub.Option{
+ pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
+ pubsub.WithMessageIdFn(s.msgId),
+ pubsub.WithNoAuthor(),
+ pubsub.WithPeerOutboundQueueSize(pubsubQueueSize),
+ pubsub.WithMaxMessageSize(int(s.cfg.NetworkConfig.GossipMaxSizeBellatrix)),
+ pubsub.WithValidateQueueSize(pubsubQueueSize),
+ pubsub.WithPeerScore(scoreParams, thresholds),
+ pubsub.WithGossipSubParams(pubsubGossipParam()),
+ }
+ return psOpts
+}
+
+// creates a custom gossipsub parameter set.
+func pubsubGossipParam() pubsub.GossipSubParams {
+ gParams := pubsub.DefaultGossipSubParams()
+ gParams.Dlo = gossipSubDlo
+ gParams.D = gossipSubD
+ gParams.HeartbeatInterval = gossipSubHeartbeatInterval
+ gParams.HistoryLength = gossipSubMcacheLen
+ gParams.HistoryGossip = gossipSubMcacheGossip
+ return gParams
+}
diff --git a/cmd/sentinel/sentinel/msg_id.go b/cl/sentinel/msg_id.go
similarity index 97%
rename from cmd/sentinel/sentinel/msg_id.go
rename to cl/sentinel/msg_id.go
index bf584681d1f..572b8ea69dc 100644
--- a/cmd/sentinel/sentinel/msg_id.go
+++ b/cl/sentinel/msg_id.go
@@ -42,7 +42,7 @@ func (s *Sentinel) msgId(pmsg *pubsubpb.Message) string {
combinedData = append(combinedData, topicLenBytes...)
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, pmsg.Data...)
- h := utils.Keccak256(combinedData)
+ h := utils.Sha256(combinedData)
return string(h[:20])
}
totalLength := len(s.cfg.NetworkConfig.MessageDomainValidSnappy) +
@@ -55,6 +55,6 @@ func (s *Sentinel) msgId(pmsg *pubsubpb.Message) string {
combinedData = append(combinedData, topicLenBytes...)
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, decodedData...)
- h := utils.Keccak256(combinedData)
+ h := utils.Sha256(combinedData)
return string(h[:20])
}
diff --git a/cmd/sentinel/sentinel/msg_id_test.go b/cl/sentinel/msg_id_test.go
similarity index 94%
rename from cmd/sentinel/sentinel/msg_id_test.go
rename to cl/sentinel/msg_id_test.go
index d8313aeb8cb..ac7e66ba37f 100644
--- a/cmd/sentinel/sentinel/msg_id_test.go
+++ b/cl/sentinel/msg_id_test.go
@@ -33,7 +33,7 @@ func TestMsgID(t *testing.T) {
combinedObj := append(n.MessageDomainInvalidSnappy[:], topicLenBytes...)
combinedObj = append(combinedObj, tpc...)
combinedObj = append(combinedObj, pMsg.Data...)
- hashedData := utils.Keccak256(combinedObj)
+ hashedData := utils.Sha256(combinedObj)
msgID := string(hashedData[:20])
require.Equal(t, msgID, s.msgId(pMsg), "Got incorrect msg id")
@@ -44,7 +44,7 @@ func TestMsgID(t *testing.T) {
combinedObj = append(n.MessageDomainValidSnappy[:], topicLenBytes...)
combinedObj = append(combinedObj, tpc...)
combinedObj = append(combinedObj, validObj[:]...)
- hashedData = utils.Keccak256(combinedObj)
+ hashedData = utils.Sha256(combinedObj)
msgID = string(hashedData[:20])
require.Equal(t, msgID, s.msgId(nMsg), "Got incorrect msg id")
}
diff --git a/cl/sentinel/peers/manager.go b/cl/sentinel/peers/manager.go
deleted file mode 100644
index 033c99f074f..00000000000
--- a/cl/sentinel/peers/manager.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package peers
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-const (
- maxBadPeers = 50000
- maxPeerRecordSize = 1000
- DefaultMaxPeers = 33
- MaxBadResponses = 50
-)
-
-func newPeer() *Peer {
- return &Peer{
- lastTouched: time.Now(),
- working: make(chan struct{}),
- }
-}
-
-type Manager struct {
- host host.Host
- peers *lru.Cache[peer.ID, *Peer]
- peerTimeout time.Duration
-
- mu sync.Mutex
-}
-
-func NewManager(ctx context.Context, host host.Host) *Manager {
- c, err := lru.NewWithEvict("beacon_peer_manager", 500, func(i peer.ID, p *Peer) {
- p.Disconnect("booted for inactivity")
- })
- if err != nil {
- panic(err)
- }
- m := &Manager{
- peerTimeout: 8 * time.Hour,
- peers: c,
- host: host,
- }
- return m
-}
-
-func (m *Manager) GetPeer(id peer.ID) (peer *Peer) {
- m.mu.Lock()
- p, ok := m.peers.Get(id)
- if !ok {
- p = &Peer{
- pid: id,
- working: make(chan struct{}, 1),
- m: m,
- penalties: 0,
- banned: false,
- }
- m.peers.Add(id, p)
- }
- p.lastTouched = time.Now()
- m.mu.Unlock()
- return p
-}
diff --git a/cl/sentinel/peers/peer.go b/cl/sentinel/peers/peer.go
deleted file mode 100644
index ad616dd4059..00000000000
--- a/cl/sentinel/peers/peer.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package peers
-
-import (
- "strings"
- "sync"
- "time"
-
- "github.com/ledgerwatch/log/v3"
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-const USERAGENT_UNKNOWN = "unknown"
-
-// Record Peer data.
-type Peer struct {
- penalties int
- banned bool
-
- // request info
- lastRequest time.Time
- successCount int
- useCount int
- // gc data
- lastTouched time.Time
-
- mu sync.Mutex
-
- // peer id
- pid peer.ID
-
- // acts as the mutex for making requests. channel used to avoid use of TryLock
- working chan struct{}
- // backref to the manager that owns this peer
- m *Manager
-}
-
-func (p *Peer) do(fn func(p *Peer)) {
- if fn == nil {
- return
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- fn(p)
-}
-
-func (p *Peer) UserAgent() string {
- rawVer, err := p.m.host.Peerstore().Get(p.pid, "AgentVersion")
- if err == nil {
- if str, ok := rawVer.(string); ok {
- return str
- }
- }
- return USERAGENT_UNKNOWN
-}
-
-func (p *Peer) Penalize() {
- log.Debug("[Sentinel Peers] peer penalized", "peer-id", p.pid)
- p.do(func(p *Peer) {
- p.penalties++
- })
-}
-
-func (p *Peer) Forgive() {
- log.Debug("[Sentinel Peers] peer forgiven", "peer-id", p.pid)
- p.do(func(p *Peer) {
- if p.penalties > 0 {
- p.penalties--
- }
- })
-}
-
-func (p *Peer) MarkUsed() {
- p.do(func(p *Peer) {
- p.useCount++
- p.lastRequest = time.Now()
- })
- log.Debug("[Sentinel Peers] peer used", "peer-id", p.pid, "uses", p.useCount)
-}
-
-func (p *Peer) MarkReplied() {
- p.do(func(p *Peer) {
- p.successCount++
- })
- log.Debug("[Sentinel Peers] peer replied", "peer-id", p.pid, "uses", p.useCount, "success", p.successCount)
-}
-
-func (p *Peer) IsAvailable() (available bool) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.banned {
- return false
- }
- if p.penalties > MaxBadResponses {
- return false
- }
- if time.Now().Sub(p.lastRequest) > 0*time.Second {
- return true
- }
- return false
-}
-
-func (p *Peer) IsBad() (bad bool) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.banned {
- bad = true
- return
- }
- bad = p.penalties > MaxBadResponses
- return
-}
-
-var skipReasons = []string{
- "bad handshake",
- "context",
- "security protocol",
- "connect:",
- "dial backoff",
-}
-
-func anySetInString(set []string, in string) bool {
- for _, v := range skipReasons {
- if strings.Contains(in, v) {
- return true
- }
- }
- return false
-}
-
-func (p *Peer) Disconnect(reason ...string) {
- rzn := strings.Join(reason, " ")
- if !anySetInString(skipReasons, rzn) {
- log.Debug("[Sentinel Peers] disconnecting from peer", "peer-id", p.pid, "reason", strings.Join(reason, " "))
- }
- p.m.host.Peerstore().RemovePeer(p.pid)
- p.m.host.Network().ClosePeer(p.pid)
- p.do(func(p *Peer) {
- p.penalties = 0
- })
-}
-
-func (p *Peer) Ban(reason ...string) {
- log.Debug("[Sentinel Peers] bad peers has been banned", "peer-id", p.pid, "reason", strings.Join(reason, " "))
- p.do(func(p *Peer) {
- p.banned = true
- })
- p.Disconnect(reason...)
- return
-}
diff --git a/cl/sentinel/peers/peers.go b/cl/sentinel/peers/peers.go
new file mode 100644
index 00000000000..443aa660980
--- /dev/null
+++ b/cl/sentinel/peers/peers.go
@@ -0,0 +1,13 @@
+package peers
+
+const (
+ maxBadPeers = 50000
+ maxPeerRecordSize = 1000
+ DefaultMaxPeers = 64
+ MaxBadResponses = 50
+)
+
+type PeeredObject[T any] struct {
+ Peer string
+ Data T
+}
diff --git a/cl/sentinel/peers/pool.go b/cl/sentinel/peers/pool.go
new file mode 100644
index 00000000000..e5237b6abdd
--- /dev/null
+++ b/cl/sentinel/peers/pool.go
@@ -0,0 +1,141 @@
+package peers
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ledgerwatch/erigon-lib/common/ring"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Item is an item in the pool
+type Item struct {
+ id peer.ID
+ score atomic.Int64
+ uses int
+}
+
+func (i *Item) Id() peer.ID {
+ return i.id
+}
+
+func (i *Item) String() string {
+ return i.id.String()
+}
+
+func (i *Item) Score() int {
+ return int(i.score.Load())
+}
+
+func (i *Item) Add(n int) int {
+ return int(i.score.Add(int64(n)))
+}
+
+// PeerPool is a pool of peers
+type Pool struct {
+
+ // allowedPeers are the peers that are allowed.
+ // peers not on this list will be silently discarded
+ // when returned, and skipped when requesting
+ peerData map[peer.ID]*Item
+
+ bannedPeers map[peer.ID]struct{}
+ queue *ring.Buffer[*Item]
+
+ mu sync.Mutex
+}
+
+func NewPool() *Pool {
+ return &Pool{
+ peerData: make(map[peer.ID]*Item),
+ bannedPeers: map[peer.ID]struct{}{},
+ queue: ring.NewBuffer[*Item](0, 1024),
+ }
+}
+
+func (p *Pool) BanStatus(pid peer.ID) bool {
+ _, ok := p.bannedPeers[pid]
+ return ok
+}
+
+func (p *Pool) AddPeer(pid peer.ID) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // if peer banned, return immediately
+ if _, ok := p.bannedPeers[pid]; ok {
+ return
+ }
+ // if peer already here, return immediately
+ if _, ok := p.peerData[pid]; ok {
+ return
+ }
+ newItem := &Item{
+ id: pid,
+ }
+ p.peerData[pid] = newItem
+ // add it to our queue as a new item
+ p.queue.PushBack(newItem)
+}
+
+func (p *Pool) SetBanStatus(pid peer.ID, banned bool) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if banned {
+ p.bannedPeers[pid] = struct{}{}
+ delete(p.peerData, pid)
+ } else {
+ delete(p.bannedPeers, pid)
+ }
+}
+
+func (p *Pool) RemovePeer(pid peer.ID) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ delete(p.peerData, pid)
+}
+
+// returnPeer is an internal function to return per to the pool. assume has lock
+func (p *Pool) returnPeer(i *Item) {
+ // if peer not in our map, return and do not return peer
+ if _, ok := p.peerData[i.id]; !ok {
+ return
+ }
+ // append peer to the end of our ring buffer
+ p.queue.PushBack(i)
+}
+
+// nextPeer gets next peer, skipping bad peers. assume has lock
+func (p *Pool) nextPeer() (i *Item, ok bool) {
+ val, ok := p.queue.PopFront()
+ if !ok {
+ return nil, false
+ }
+ // if peer been banned, get next peer
+ if _, ok := p.bannedPeers[val.id]; ok {
+ return p.nextPeer()
+ }
+ // if peer not in set, get next peer
+ if _, ok := p.peerData[val.id]; !ok {
+ return p.nextPeer()
+ }
+ return val, true
+}
+
+// Request a peer from the pool
+// caller MUST call the done function when done with peer IFF err != nil
+func (p *Pool) Request() (pid *Item, done func(), err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ //grab a peer from our ringbuffer
+ val, ok := p.queue.PopFront()
+ if !ok {
+ return nil, nil, fmt.Errorf("no peers? ( :( > ")
+ }
+ return val, func() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ val.uses = val.uses + 1
+ p.returnPeer(val)
+ }, nil
+}
diff --git a/cl/sentinel/peers/readme.md b/cl/sentinel/peers/readme.md
deleted file mode 100644
index 8804be80f7b..00000000000
--- a/cl/sentinel/peers/readme.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## wip
-
-
-this is work in progress
diff --git a/cmd/sentinel/sentinel/sentinel.go b/cl/sentinel/sentinel.go
similarity index 70%
rename from cmd/sentinel/sentinel/sentinel.go
rename to cl/sentinel/sentinel.go
index 7e9e9bf118a..839906fb1bb 100644
--- a/cmd/sentinel/sentinel/sentinel.go
+++ b/cl/sentinel/sentinel.go
@@ -17,15 +17,18 @@ import (
"context"
"crypto/ecdsa"
"fmt"
- "math"
"net"
+ "net/http"
"time"
- "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/go-chi/chi/v5"
+ "github.com/ledgerwatch/erigon/cl/sentinel/handlers"
+ "github.com/ledgerwatch/erigon/cl/sentinel/handshake"
+ "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
+
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/handlers"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/handshake"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/peers"
+ "github.com/ledgerwatch/erigon/cl/persistence"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/p2p/discover"
"github.com/ledgerwatch/erigon/p2p/enode"
@@ -59,16 +62,19 @@ const (
)
type Sentinel struct {
- started bool
- listener *discover.UDPv5 // this is us in the network.
- ctx context.Context
- host host.Host
- cfg *SentinelConfig
- peers *peers.Manager
+ started bool
+ listener *discover.UDPv5 // this is us in the network.
+ ctx context.Context
+ host host.Host
+ cfg *SentinelConfig
+ peers *peers.Pool
+
+ httpApi http.Handler
+
metadataV2 *cltypes.Metadata
handshaker *handshake.HandShaker
- db kv.RoDB
+ db persistence.RawBeaconBlockChain
discoverConfig discover.Config
pubsub *pubsub.PubSub
@@ -84,7 +90,7 @@ func (s *Sentinel) createLocalNode(
udpPort, tcpPort int,
tmpDir string,
) (*enode.LocalNode, error) {
- db, err := enode.OpenDB("", tmpDir)
+ db, err := enode.OpenDB(s.ctx, "", tmpDir)
if err != nil {
return nil, fmt.Errorf("could not open node's peer database: %w", err)
}
@@ -162,75 +168,18 @@ func (s *Sentinel) createListener() (*discover.UDPv5, error) {
// Start stream handlers
handlers.NewConsensusHandlers(s.ctx, s.db, s.host, s.peers, s.cfg.BeaconConfig, s.cfg.GenesisConfig, s.metadataV2).Start()
- net, err := discover.ListenV5(s.ctx, conn, localNode, discCfg)
+ net, err := discover.ListenV5(s.ctx, "any", conn, localNode, discCfg)
if err != nil {
return nil, err
}
return net, err
}
-// creates a custom gossipsub parameter set.
-func pubsubGossipParam() pubsub.GossipSubParams {
- gParams := pubsub.DefaultGossipSubParams()
- gParams.Dlo = gossipSubDlo
- gParams.D = gossipSubD
- gParams.HeartbeatInterval = gossipSubHeartbeatInterval
- gParams.HistoryLength = gossipSubMcacheLen
- gParams.HistoryGossip = gossipSubMcacheGossip
- return gParams
-}
-
-// determines the decay rate from the provided time period till
-// the decayToZero value. Ex: ( 1 -> 0.01)
-func (s *Sentinel) scoreDecay(totalDurationDecay time.Duration) float64 {
- numOfTimes := totalDurationDecay / s.oneSlotDuration()
- return math.Pow(decayToZero, 1/float64(numOfTimes))
-}
-
-func (s *Sentinel) pubsubOptions() []pubsub.Option {
- thresholds := &pubsub.PeerScoreThresholds{
- GossipThreshold: -4000,
- PublishThreshold: -8000,
- GraylistThreshold: -16000,
- AcceptPXThreshold: 100,
- OpportunisticGraftThreshold: 5,
- }
- scoreParams := &pubsub.PeerScoreParams{
- Topics: make(map[string]*pubsub.TopicScoreParams),
- TopicScoreCap: 32.72,
- AppSpecificScore: func(p peer.ID) float64 {
- return 0
- },
- AppSpecificWeight: 1,
- IPColocationFactorWeight: -35.11,
- IPColocationFactorThreshold: 10,
- IPColocationFactorWhitelist: nil,
- BehaviourPenaltyWeight: -15.92,
- BehaviourPenaltyThreshold: 6,
- BehaviourPenaltyDecay: s.scoreDecay(10 * s.oneEpochDuration()), // 10 epochs
- DecayInterval: s.oneSlotDuration(),
- DecayToZero: decayToZero,
- RetainScore: 100 * s.oneEpochDuration(), // Retain for 100 epochs
- }
- pubsubQueueSize := 600
- psOpts := []pubsub.Option{
- pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
- pubsub.WithMessageIdFn(s.msgId),
- pubsub.WithNoAuthor(),
- pubsub.WithPeerOutboundQueueSize(pubsubQueueSize),
- pubsub.WithMaxMessageSize(int(s.cfg.NetworkConfig.GossipMaxSizeBellatrix)),
- pubsub.WithValidateQueueSize(pubsubQueueSize),
- pubsub.WithPeerScore(scoreParams, thresholds),
- pubsub.WithGossipSubParams(pubsubGossipParam()),
- }
- return psOpts
-}
-
// This is just one of the examples from the libp2p repository.
func New(
ctx context.Context,
cfg *SentinelConfig,
- db kv.RoDB,
+ db persistence.RawBeaconBlockChain,
logger log.Logger,
) (*Sentinel, error) {
s := &Sentinel{
@@ -263,19 +212,16 @@ func New(
if err != nil {
return nil, err
}
- if s.metrics {
-
- str, err := rcmgrObs.NewStatsTraceReporter()
- if err != nil {
- return nil, err
- }
+ str, err := rcmgrObs.NewStatsTraceReporter()
+ if err != nil {
+ return nil, err
+ }
- rmgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()), rcmgr.WithTraceReporter(str))
- if err != nil {
- return nil, err
- }
- opts = append(opts, libp2p.ResourceManager(rmgr))
+ rmgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()), rcmgr.WithTraceReporter(str))
+ if err != nil {
+ return nil, err
}
+ opts = append(opts, libp2p.ResourceManager(rmgr))
gater, err := NewGater(cfg)
if err != nil {
@@ -288,11 +234,16 @@ func New(
if err != nil {
return nil, err
}
+ s.host = host
- s.handshaker = handshake.New(ctx, cfg.GenesisConfig, cfg.BeaconConfig, host)
+ s.peers = peers.NewPool()
- s.host = host
- s.peers = peers.NewManager(ctx, s.host)
+ mux := chi.NewRouter()
+ // mux := httpreqresp.NewRequestHandler(host)
+ mux.Get("/", httpreqresp.NewRequestHandler(host))
+ s.httpApi = mux
+
+ s.handshaker = handshake.New(ctx, cfg.GenesisConfig, cfg.BeaconConfig, s.httpApi)
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
s.pubsub, err = pubsub.NewGossipSub(s.ctx, s.host, s.pubsubOptions()...)
@@ -303,6 +254,10 @@ func New(
return s, nil
}
+func (s *Sentinel) ReqRespHandler() http.Handler {
+ return s.httpApi
+}
+
func (s *Sentinel) RecvGossip() <-chan *pubsub.Message {
return s.subManager.Recv()
}
@@ -323,6 +278,10 @@ func (s *Sentinel) Start() error {
// Configuring handshake
s.host.Network().Notify(&network.NotifyBundle{
ConnectedF: s.onConnection,
+ DisconnectedF: func(n network.Network, c network.Conn) {
+ peerId := c.RemotePeer()
+ s.peers.RemovePeer(peerId)
+ },
})
s.subManager = NewGossipManager(s.ctx)
@@ -347,14 +306,20 @@ func (s *Sentinel) HasTooManyPeers() bool {
}
func (s *Sentinel) GetPeersCount() int {
+ // sub := s.subManager.GetMatchingSubscription(string(BeaconBlockTopic))
+
+ // if sub == nil {
return len(s.host.Network().Peers())
+ // }
+
+ // return len(sub.topic.ListPeers())
}
func (s *Sentinel) Host() host.Host {
return s.host
}
-func (s *Sentinel) Peers() *peers.Manager {
+func (s *Sentinel) Peers() *peers.Pool {
return s.peers
}
@@ -366,10 +331,6 @@ func (s *Sentinel) Config() *SentinelConfig {
return s.cfg
}
-func (s *Sentinel) DB() kv.RoDB {
- return s.db
-}
-
func (s *Sentinel) Status() *cltypes.Status {
return s.handshaker.Status()
}
diff --git a/cmd/sentinel/sentinel/service/notifiers.go b/cl/sentinel/service/notifiers.go
similarity index 100%
rename from cmd/sentinel/sentinel/service/notifiers.go
rename to cl/sentinel/service/notifiers.go
diff --git a/cmd/sentinel/sentinel/service/service.go b/cl/sentinel/service/service.go
similarity index 57%
rename from cmd/sentinel/sentinel/service/service.go
rename to cl/sentinel/service/service.go
index 1550730cc8f..1c72d1fe4c6 100644
--- a/cmd/sentinel/sentinel/service/service.go
+++ b/cl/sentinel/service/service.go
@@ -1,21 +1,25 @@
package service
import (
+ "bytes"
"context"
"errors"
"fmt"
+ "io"
+ "net/http"
"strconv"
"strings"
"sync"
"time"
+ "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/erigon/cl/sentinel"
+ "github.com/ledgerwatch/erigon/cl/sentinel/httpreqresp"
+
"github.com/ledgerwatch/erigon-lib/gointerfaces"
sentinelrpc "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/communication"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/peers"
"github.com/ledgerwatch/log/v3"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
@@ -30,6 +34,8 @@ type SentinelServer struct {
mu sync.RWMutex
logger log.Logger
+
+ peerStatistics map[string]*diagnostics.PeerStatistics
}
func NewSentinelServer(ctx context.Context, sentinel *sentinel.Sentinel, logger log.Logger) *SentinelServer {
@@ -38,6 +44,7 @@ func NewSentinelServer(ctx context.Context, sentinel *sentinel.Sentinel, logger
ctx: ctx,
gossipNotifier: newGossipNotifier(),
logger: logger,
+ peerStatistics: make(map[string]*diagnostics.PeerStatistics),
}
}
@@ -56,24 +63,23 @@ func extractBlobSideCarIndex(topic string) int {
//BanPeer(context.Context, *Peer) (*EmptyMessage, error)
func (s *SentinelServer) BanPeer(_ context.Context, p *sentinelrpc.Peer) (*sentinelrpc.EmptyMessage, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
var pid peer.ID
if err := pid.UnmarshalText([]byte(p.Pid)); err != nil {
return nil, err
}
- s.sentinel.Peers().WithPeer(pid, func(peer *peers.Peer) {
- peer.Ban()
- })
+ s.sentinel.Peers().SetBanStatus(pid, true)
+ s.sentinel.Host().Peerstore().RemovePeer(pid)
+ s.sentinel.Host().Network().ClosePeer(pid)
return &sentinelrpc.EmptyMessage{}, nil
}
func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.GossipData) (*sentinelrpc.EmptyMessage, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
manager := s.sentinel.GossipManager()
// Snappify payload before sending it to gossip
compressedData := utils.CompressSnappy(msg.Data)
+
+ s.trackPeerStatistics(msg.GetPeer().Pid, false, msg.Type.String(), "unknown", len(compressedData))
+
var subscription *sentinel.GossipSubscription
switch msg.Type {
@@ -146,77 +152,93 @@ func (s *SentinelServer) withTimeoutCtx(pctx context.Context, dur time.Duration)
return ctx, cn
}
-func (s *SentinelServer) SendRequest(pctx context.Context, req *sentinelrpc.RequestData) (*sentinelrpc.ResponseData, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- retryReqInterval := time.NewTicker(200 * time.Millisecond)
- defer retryReqInterval.Stop()
- ctx, cn := s.withTimeoutCtx(pctx, 0)
- defer cn()
- doneCh := make(chan *sentinelrpc.ResponseData)
- // Try finding the data to our peers
- uniquePeers := map[peer.ID]struct{}{}
- requestPeer := func(peer *peers.Peer) {
- peer.MarkUsed()
- data, isError, err := communication.SendRequestRawToPeer(ctx, s.sentinel.Host(), req.Data, req.Topic, peer.ID())
- if err != nil {
- return
- }
- if isError > 3 {
- peer.Disconnect(fmt.Sprintf("invalid response, starting byte %d", isError))
- peer.Penalize()
- }
- if isError != 0 {
- return
- }
- ans := &sentinelrpc.ResponseData{
- Data: data,
- Error: isError != 0,
- Peer: &sentinelrpc.Peer{
- Pid: peer.ID().String(),
- },
+func (s *SentinelServer) requestPeer(ctx context.Context, pid peer.ID, req *sentinelrpc.RequestData) (*sentinelrpc.ResponseData, error) {
+ // prepare the http request
+ httpReq, err := http.NewRequest("GET", "http://service.internal/", bytes.NewBuffer(req.Data))
+ if err != nil {
+ return nil, err
+ }
+ // set the peer and topic we are requesting
+ httpReq.Header.Set("REQRESP-PEER-ID", pid.String())
+ httpReq.Header.Set("REQRESP-TOPIC", req.Topic)
+ // for now this can't actually error. in the future, it can due to a network error
+ resp, err := httpreqresp.Do(s.sentinel.ReqRespHandler(), httpReq)
+ if err != nil {
+ // we remove, but dont ban the peer if we fail. this is because its probably not their fault, but maybe it is.
+ return nil, err
+ }
+ defer resp.Body.Close()
+ // some standard http error code parsing
+ if resp.StatusCode < 200 || resp.StatusCode > 399 {
+ errBody, _ := io.ReadAll(resp.Body)
+ errorMessage := fmt.Errorf("SentinelHttp: %s", string(errBody))
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ s.sentinel.Peers().RemovePeer(pid)
+ s.sentinel.Host().Peerstore().RemovePeer(pid)
+ s.sentinel.Host().Network().ClosePeer(pid)
}
- select {
- case doneCh <- ans:
- peer.MarkReplied()
- retryReqInterval.Stop()
- return
- case <-ctx.Done():
- return
+ if resp.StatusCode >= 500 && resp.StatusCode < 600 {
+ s.sentinel.Host().Peerstore().RemovePeer(pid)
+ s.sentinel.Host().Network().ClosePeer(pid)
}
+ return nil, errorMessage
}
- go func() {
- for {
- pid, err := s.sentinel.RandomPeer(req.Topic)
- if err != nil {
- continue
- }
- if _, ok := uniquePeers[pid]; !ok {
- go s.sentinel.Peers().WithPeer(pid, requestPeer)
- uniquePeers[pid] = struct{}{}
- }
- select {
- case <-retryReqInterval.C:
- case <-ctx.Done():
- return
- }
- }
- }()
- select {
- case resp := <-doneCh:
- return resp, nil
- case <-ctx.Done():
- return &sentinelrpc.ResponseData{
- Data: []byte("request timeout"),
- Error: true,
- Peer: &sentinelrpc.Peer{Pid: ""},
- }, nil
+ // we should never get an invalid response to this. our responder should always set it on non-error response
+ isError, err := strconv.Atoi(resp.Header.Get("REQRESP-RESPONSE-CODE"))
+ if err != nil {
+ // TODO: think about how to properly handle this. should we? (or should we just assume no response is success?)
+ return nil, err
+ }
+ // known error codes, just remove the peer
+ if isError == 3 || isError == 2 {
+ s.sentinel.Host().Peerstore().RemovePeer(pid)
+ s.sentinel.Host().Network().ClosePeer(pid)
+ return nil, fmt.Errorf("peer error code: %d", isError)
+ }
+ // unknown error codes
+ if isError > 3 {
+ s.logger.Debug("peer returned unknown erro", "id", pid.String())
+ s.sentinel.Host().Peerstore().RemovePeer(pid)
+ s.sentinel.Host().Network().ClosePeer(pid)
+ return nil, fmt.Errorf("peer returned unknown error: %d", isError)
+ }
+ // read the body from the response
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ ans := &sentinelrpc.ResponseData{
+ Data: data,
+ Error: isError != 0,
+ Peer: &sentinelrpc.Peer{
+ Pid: pid.String(),
+ },
}
+ return ans, nil
+
+}
+
+func (s *SentinelServer) SendRequest(ctx context.Context, req *sentinelrpc.RequestData) (*sentinelrpc.ResponseData, error) {
+ // Try finding the data to our peers
+ // this is using return statements instead of continue, since it saves a few lines
+ // but me writing this comment has put them back.. oh no!!! anyways, returning true means we stop.
+ peer, done, err := s.sentinel.Peers().Request()
+ if err != nil {
+ return nil, err
+ }
+ defer done()
+ pid := peer.Id()
+
+ resp, err := s.requestPeer(ctx, pid, req)
+ if err != nil {
+ s.logger.Trace("[sentinel] peer gave us bad data", "peer", pid, "err", err)
+ return nil, err
+ }
+ return resp, nil
+
}
func (s *SentinelServer) SetStatus(_ context.Context, req *sentinelrpc.Status) (*sentinelrpc.EmptyMessage, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
// Send the request and get the data if we get an answer.
s.sentinel.SetStatus(&cltypes.Status{
ForkDigest: utils.Uint32ToBytes4(req.ForkDigest),
@@ -229,8 +251,6 @@ func (s *SentinelServer) SetStatus(_ context.Context, req *sentinelrpc.Status) (
}
func (s *SentinelServer) GetPeers(_ context.Context, _ *sentinelrpc.EmptyMessage) (*sentinelrpc.PeerCount, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
// Send the request and get the data if we get an answer.
return &sentinelrpc.PeerCount{
Amount: uint64(s.sentinel.GetPeersCount()),
@@ -256,6 +276,7 @@ func (s *SentinelServer) ListenToGossip() {
func (s *SentinelServer) handleGossipPacket(pkt *pubsub.Message) error {
var err error
s.logger.Trace("[Sentinel Gossip] Received Packet", "topic", pkt.Topic)
+
data := pkt.GetData()
// If we use snappy codec then decompress it accordingly.
@@ -269,6 +290,10 @@ func (s *SentinelServer) handleGossipPacket(pkt *pubsub.Message) error {
if err != nil {
return err
}
+
+ msgType, msgCap := parseTopic(pkt.GetTopic())
+ s.trackPeerStatistics(string(textPid), true, msgType, msgCap, len(data))
+
// Check to which gossip it belongs to.
if strings.Contains(*pkt.Topic, string(sentinel.BeaconBlockTopic)) {
s.gossipNotifier.notify(sentinelrpc.GossipType_BeaconBlockGossipType, data, string(textPid))
@@ -280,10 +305,61 @@ func (s *SentinelServer) handleGossipPacket(pkt *pubsub.Message) error {
s.gossipNotifier.notify(sentinelrpc.GossipType_ProposerSlashingGossipType, data, string(textPid))
} else if strings.Contains(*pkt.Topic, string(sentinel.AttesterSlashingTopic)) {
s.gossipNotifier.notify(sentinelrpc.GossipType_AttesterSlashingGossipType, data, string(textPid))
+ } else if strings.Contains(*pkt.Topic, string(sentinel.BlsToExecutionChangeTopic)) {
+ s.gossipNotifier.notify(sentinelrpc.GossipType_BlsToExecutionChangeGossipType, data, string(textPid))
} else if strings.Contains(*pkt.Topic, string(sentinel.BlobSidecarTopic)) {
// extract the index
-
s.gossipNotifier.notifyBlob(sentinelrpc.GossipType_BlobSidecarType, data, string(textPid), extractBlobSideCarIndex(*pkt.Topic))
}
return nil
}
+
+func (s *SentinelServer) GetPeersStatistics() map[string]*diagnostics.PeerStatistics {
+ stats := make(map[string]*diagnostics.PeerStatistics)
+ for k, v := range s.peerStatistics {
+ stats[k] = v
+ delete(s.peerStatistics, k)
+ }
+
+ return stats
+}
+
+func (s *SentinelServer) trackPeerStatistics(peerID string, inbound bool, msgType string, msgCap string, bytes int) {
+ if s.peerStatistics == nil {
+ s.peerStatistics = make(map[string]*diagnostics.PeerStatistics)
+ }
+
+ if _, exists := s.peerStatistics[peerID]; !exists {
+ s.peerStatistics[peerID] = &diagnostics.PeerStatistics{
+ CapBytesIn: make(map[string]uint64),
+ CapBytesOut: make(map[string]uint64),
+ TypeBytesIn: make(map[string]uint64),
+ TypeBytesOut: make(map[string]uint64),
+ }
+ }
+
+ stats := s.peerStatistics[peerID]
+
+ if inbound {
+ stats.BytesIn += uint64(bytes)
+ stats.CapBytesIn[msgCap] += uint64(bytes)
+ stats.TypeBytesIn[msgType] += uint64(bytes)
+ } else {
+ stats.BytesOut += uint64(bytes)
+ stats.CapBytesOut[msgCap] += uint64(bytes)
+ stats.TypeBytesOut[msgType] += uint64(bytes)
+ }
+}
+
+func parseTopic(input string) (string, string) {
+ parts := strings.Split(input, "/")
+
+ if len(parts) < 4 {
+ return "unknown", "unknown"
+ }
+
+ capability := parts[1]
+ topick := parts[3]
+
+ return capability, topick
+}
diff --git a/cmd/sentinel/sentinel/service/start.go b/cl/sentinel/service/start.go
similarity index 61%
rename from cmd/sentinel/sentinel/service/start.go
rename to cl/sentinel/service/start.go
index 533eeeef494..efcd95ac77a 100644
--- a/cmd/sentinel/sentinel/service/start.go
+++ b/cl/sentinel/service/start.go
@@ -2,30 +2,25 @@ package service
import (
"context"
- "fmt"
"net"
- "time"
+ "github.com/ledgerwatch/erigon/cl/sentinel"
+
+ "github.com/ledgerwatch/erigon-lib/direct"
sentinelrpc "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
- "github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel"
+ "github.com/ledgerwatch/erigon/cl/persistence"
"github.com/ledgerwatch/log/v3"
- rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs"
- "github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
)
-const maxMessageSize = 437323800
-
type ServerConfig struct {
Network string
Addr string
}
-func createSentinel(cfg *sentinel.SentinelConfig, db kv.RoDB, logger log.Logger) (*sentinel.Sentinel, error) {
+func createSentinel(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, logger log.Logger) (*sentinel.Sentinel, error) {
sent, err := sentinel.New(context.Background(), cfg, db, logger)
if err != nil {
return nil, err
@@ -36,9 +31,10 @@ func createSentinel(cfg *sentinel.SentinelConfig, db kv.RoDB, logger log.Logger)
gossipTopics := []sentinel.GossipTopic{
sentinel.BeaconBlockSsz,
//sentinel.BeaconAggregateAndProofSsz,
- //sentinel.VoluntaryExitSsz,
- //sentinel.ProposerSlashingSsz,
- //sentinel.AttesterSlashingSsz,
+ sentinel.VoluntaryExitSsz,
+ sentinel.ProposerSlashingSsz,
+ sentinel.AttesterSlashingSsz,
+ sentinel.BlsToExecutionChangeSsz,
}
// gossipTopics = append(gossipTopics, sentinel.GossipSidecarTopics(chain.MaxBlobsPerBlock)...)
@@ -61,46 +57,21 @@ func createSentinel(cfg *sentinel.SentinelConfig, db kv.RoDB, logger log.Logger)
return sent, nil
}
-func StartSentinelService(cfg *sentinel.SentinelConfig, db kv.RoDB, srvCfg *ServerConfig, creds credentials.TransportCredentials, initialStatus *cltypes.Status, logger log.Logger) (sentinelrpc.SentinelClient, error) {
+func StartSentinelService(cfg *sentinel.SentinelConfig, db persistence.RawBeaconBlockChain, srvCfg *ServerConfig, creds credentials.TransportCredentials, initialStatus *cltypes.Status, logger log.Logger) (sentinelrpc.SentinelClient, error) {
ctx := context.Background()
sent, err := createSentinel(cfg, db, logger)
if err != nil {
return nil, err
}
- rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer)
+ // rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer)
logger.Info("[Sentinel] Sentinel started", "enr", sent.String())
if initialStatus != nil {
sent.SetStatus(initialStatus)
}
server := NewSentinelServer(ctx, sent, logger)
- if creds == nil {
- creds = insecure.NewCredentials()
- }
-
go StartServe(server, srvCfg, creds)
- timeOutTimer := time.NewTimer(5 * time.Second)
-WaitingLoop:
- for {
- select {
- case <-timeOutTimer.C:
- return nil, fmt.Errorf("[Server] timeout beginning server")
- default:
- if _, err := server.GetPeers(ctx, &sentinelrpc.EmptyMessage{}); err == nil {
- break WaitingLoop
- }
- }
- }
-
- conn, err := grpc.DialContext(ctx,
- srvCfg.Addr,
- grpc.WithTransportCredentials(creds),
- grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMessageSize)),
- )
- if err != nil {
- return nil, err
- }
- return sentinelrpc.NewSentinelClient(conn), nil
+ return direct.NewSentinelClientDirect(server), nil
}
func StartServe(server *SentinelServer, srvCfg *ServerConfig, creds credentials.TransportCredentials) {
diff --git a/cmd/sentinel/sentinel/utils.go b/cl/sentinel/utils.go
similarity index 61%
rename from cmd/sentinel/sentinel/utils.go
rename to cl/sentinel/utils.go
index 7b01e00f9aa..e3fdd9cd61c 100644
--- a/cmd/sentinel/sentinel/utils.go
+++ b/cl/sentinel/utils.go
@@ -1,29 +1,24 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
+// Copyright 2022 Erigon-Lightclient contributors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// package sentinel
package sentinel
import (
"crypto/ecdsa"
- "crypto/rand"
"fmt"
- "math/big"
"net"
- "strings"
"time"
"github.com/btcsuite/btcd/btcec/v2"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/peers"
"github.com/ledgerwatch/erigon/p2p/enode"
"github.com/ledgerwatch/log/v3"
"github.com/libp2p/go-libp2p/core/crypto"
@@ -107,61 +102,6 @@ func convertToMultiAddr(nodes []*enode.Node) []multiaddr.Multiaddr {
var shuffleSource = randutil.NewMathRandomGenerator()
-// will iterate onto randoms nodes until our sentinel connects to one
-func connectToRandomPeer(s *Sentinel, topic string) (peerInfo peer.ID, err error) {
- var sub *GossipSubscription
- for t, currSub := range s.subManager.subscriptions {
- if strings.Contains(t, topic) {
- sub = currSub
- }
- }
-
- if sub == nil {
- return peer.ID(""), fmt.Errorf("no peers")
- }
-
- validPeerList := s.Host().Network().Peers()
- //validPeerList := sub.topic.ListPeers()
- if len(validPeerList) == 0 {
- return peer.ID(""), fmt.Errorf("no peers")
- }
- for i := range validPeerList {
- j := shuffleSource.Intn(i + 1)
- validPeerList[i], validPeerList[j] = validPeerList[j], validPeerList[i]
- }
-
- connectedPeer := false
- maxTries := peers.DefaultMaxPeers
- tries := 0
- for !connectedPeer {
- if tries >= maxTries {
- break
- }
- tries++
- index := int64(0)
- if len(validPeerList) > 1 {
- n, err := rand.Int(rand.Reader, big.NewInt(int64(len(validPeerList)-1)))
- if err != nil {
- panic(err)
- }
- index = n.Int64()
- }
- available := false
- s.peers.TryPeer(validPeerList[index], func(peer *peers.Peer, ok bool) {
- if !ok {
- return
- }
- available = peer.IsAvailable()
- })
- if !available {
- continue
- }
- return validPeerList[index], nil
- }
-
- return peer.ID(""), fmt.Errorf("failed to connect to peer")
-}
-
func (s *Sentinel) oneSlotDuration() time.Duration {
return time.Duration(s.cfg.BeaconConfig.SecondsPerSlot) * time.Second
}
diff --git a/cmd/sentinel/sentinel/utils_test.go b/cl/sentinel/utils_test.go
similarity index 64%
rename from cmd/sentinel/sentinel/utils_test.go
rename to cl/sentinel/utils_test.go
index 10e60588b64..106fdd9a7a7 100644
--- a/cmd/sentinel/sentinel/utils_test.go
+++ b/cl/sentinel/utils_test.go
@@ -4,9 +4,6 @@ import (
"encoding/hex"
"testing"
- "github.com/ledgerwatch/erigon/p2p/enode"
- "github.com/ledgerwatch/erigon/p2p/enr"
- "github.com/ledgerwatch/erigon/rlp"
"github.com/libp2p/go-libp2p/core/peer"
)
@@ -63,32 +60,33 @@ func TestMultiAddressBuilderWithID(t *testing.T) {
}
}
-func TestConvertToMultiAddr(t *testing.T) {
- var r enr.Record
- if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
- t.Fatalf("can't decode: %v", err)
- }
- n, err := enode.New(enode.ValidSchemes, &r)
- if err != nil {
- t.Fatalf("cannot create new node: %v", err)
- }
-
- testCases := []struct {
- nodes []*enode.Node
- expected []string
- }{
- {
- nodes: []*enode.Node{n},
- expected: []string{"/ip4/127.0.0.1/tcp/0/p2p/16Uiu2HAmSH2XVgZqYHWucap5kuPzLnt2TsNQkoppVxB5eJGvaXwm"},
- },
- }
-
- for _, testCase := range testCases {
- multiAddrs := convertToMultiAddr(testCase.nodes)
- for i, multiAddr := range multiAddrs {
- if multiAddr.String() != testCase.expected[i] {
- t.Errorf("for test case: %d, expected: %s, got: %s", i, testCase.expected[i], multiAddr)
- }
- }
- }
-}
+// TODO: reimplement this test with the new erigon-lib rlp decoder at some point
+//func TestConvertToMultiAddr(t *testing.T) {
+// var r enr.Record
+// if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
+// t.Fatalf("can't decode: %v", err)
+// }
+// n, err := enode.New(enode.ValidSchemes, &r)
+// if err != nil {
+// t.Fatalf("cannot create new node: %v", err)
+// }
+//
+// testCases := []struct {
+// nodes []*enode.Node
+// expected []string
+// }{
+// {
+// nodes: []*enode.Node{n},
+// expected: []string{"/ip4/127.0.0.1/tcp/0/p2p/16Uiu2HAmSH2XVgZqYHWucap5kuPzLnt2TsNQkoppVxB5eJGvaXwm"},
+// },
+// }
+//
+// for _, testCase := range testCases {
+// multiAddrs := convertToMultiAddr(testCase.nodes)
+// for i, multiAddr := range multiAddrs {
+// if multiAddr.String() != testCase.expected[i] {
+// t.Errorf("for test case: %d, expected: %s, got: %s", i, testCase.expected[i], multiAddr)
+// }
+// }
+// }
+//}
diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile
index 6d467b542ee..f4f5be19693 100644
--- a/cl/spectest/Makefile
+++ b/cl/spectest/Makefile
@@ -2,18 +2,18 @@
tests:
- git clone https://github.com/ethereum/consensus-spec-tests
- cd consensus-spec-tests && git lfs pull && cd ..
+ GIT_LFS_SKIP_SMUDGE=1 git clone https://github.com/ethereum/consensus-spec-tests
+ cd consensus-spec-tests && git checkout 70dc28b18c71f3ae080c02f51bd3421e0b60609b && git lfs pull --exclude=tests/general,tests/minimal && cd ..
mv consensus-spec-tests/tests .
rm -rf consensus-spec-tests
rm -rf tests/minimal
+ # not needed for now
+ rm -rf tests/mainnet/eip6110
+ # will not implement until i see it on a testnet
+ rm -rf tests/mainnet/deneb
clean:
- rm -rf junit.xml
- rm -rf test_report.html
+ rm -rf tests
mainnet:
- go test -tags=spectest -run=/mainnet
-
-run:
- go test -v
+ CGO_CFLAGS=-D__BLST_PORTABLE__ go test -tags=spectest -run=/mainnet -failfast -v --timeout 30m
diff --git a/cl/spectest/consensus_tests/appendix.go b/cl/spectest/consensus_tests/appendix.go
index da0a11cc30f..788ac359a65 100644
--- a/cl/spectest/consensus_tests/appendix.go
+++ b/cl/spectest/consensus_tests/appendix.go
@@ -85,8 +85,8 @@ func addSszTests() {
With("Attestation", getSSZStaticConsensusTest(&solid.Attestation{})).
With("AttestationData", getSSZStaticConsensusTest(solid.AttestationData{})).
With("AttesterSlashing", getSSZStaticConsensusTest(&cltypes.AttesterSlashing{})).
- With("BeaconBlock", getSSZStaticConsensusTest(&cltypes.BeaconBlock{})).
- With("BeaconBlockBody", getSSZStaticConsensusTest(&cltypes.BeaconBody{})).
+ With("BeaconBlock", getSSZStaticConsensusTest(cltypes.NewBeaconBlock(&clparams.MainnetBeaconConfig))).
+ With("BeaconBlockBody", getSSZStaticConsensusTest(cltypes.NewBeaconBody(&clparams.MainnetBeaconConfig))).
With("BeaconBlockHeader", getSSZStaticConsensusTest(&cltypes.BeaconBlockHeader{})).
With("BeaconState", getSSZStaticConsensusTest(state.New(&clparams.MainnetBeaconConfig))).
//With("BlobIdentifier", getSSZStaticConsensusTest(&cltypes.BlobIdentifier{})).
@@ -99,13 +99,13 @@ func addSszTests() {
// With("DepositMessage", getSSZStaticConsensusTest(&cltypes.DepositMessage{})).
// With("Eth1Block", getSSZStaticConsensusTest(&cltypes.Eth1Block{})).
With("Eth1Data", getSSZStaticConsensusTest(&cltypes.Eth1Data{})).
- With("ExecutionPayload", getSSZStaticConsensusTest(&cltypes.Eth1Block{})).
+ //With("ExecutionPayload", getSSZStaticConsensusTest(&cltypes.NewEth1Block(mainn))).
With("ExecutionPayloadHeader", getSSZStaticConsensusTest(&cltypes.Eth1Header{})).
With("Fork", getSSZStaticConsensusTest(&cltypes.Fork{})).
//With("ForkData", getSSZStaticConsensusTest(&cltypes.ForkData{})).
//With("HistoricalBatch", getSSZStaticConsensusTest(&cltypes.HistoricalBatch{})).
With("HistoricalSummary", getSSZStaticConsensusTest(&cltypes.HistoricalSummary{})).
- // With("IndexedAttestation", getSSZStaticConsensusTest(&cltypes.IndexedAttestation{})).
+ With("IndexedAttestation", getSSZStaticConsensusTest(&cltypes.IndexedAttestation{})).
// With("LightClientBootstrap", getSSZStaticConsensusTest(&cltypes.LightClientBootstrap{})). Unimplemented
// With("LightClientFinalityUpdate", getSSZStaticConsensusTest(&cltypes.LightClientFinalityUpdate{})). Unimplemented
// With("LightClientHeader", getSSZStaticConsensusTest(&cltypes.LightClientHeader{})). Unimplemented
@@ -114,8 +114,8 @@ func addSszTests() {
With("PendingAttestation", getSSZStaticConsensusTest(&solid.PendingAttestation{})).
// With("PowBlock", getSSZStaticConsensusTest(&cltypes.PowBlock{})). Unimplemented
With("ProposerSlashing", getSSZStaticConsensusTest(&cltypes.ProposerSlashing{})).
- // With("SignedAggregateAndProof", getSSZStaticConsensusTest(&cltypes.SignedAggregateAndProof{})).
- With("SignedBeaconBlock", getSSZStaticConsensusTest(&cltypes.SignedBeaconBlock{})).
+ With("SignedAggregateAndProof", getSSZStaticConsensusTest(&cltypes.SignedAggregateAndProof{})).
+ With("SignedBeaconBlock", getSSZStaticConsensusTest(cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig))).
With("SignedBeaconBlockHeader", getSSZStaticConsensusTest(&cltypes.SignedBeaconBlockHeader{})).
//With("SignedBlobSidecar", getSSZStaticConsensusTest(&cltypes.SignedBlobSideCar{})).
With("SignedBLSToExecutionChange", getSSZStaticConsensusTest(&cltypes.SignedBLSToExecutionChange{})).
diff --git a/cl/spectest/consensus_tests/bls.go b/cl/spectest/consensus_tests/bls.go
index 59a8da3fecc..2c1e6247fee 100644
--- a/cl/spectest/consensus_tests/bls.go
+++ b/cl/spectest/consensus_tests/bls.go
@@ -1,10 +1,9 @@
package consensus_tests
import (
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"testing"
-
- "github.com/ledgerwatch/erigon/spectest"
)
type BlsAggregateVerify struct {
diff --git a/cl/spectest/consensus_tests/epoch_processing.go b/cl/spectest/consensus_tests/epoch_processing.go
index 70dd1df2563..390c2ae4289 100644
--- a/cl/spectest/consensus_tests/epoch_processing.go
+++ b/cl/spectest/consensus_tests/epoch_processing.go
@@ -1,14 +1,15 @@
package consensus_tests
import (
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"os"
"testing"
"github.com/ledgerwatch/erigon/cl/abstract"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -65,11 +66,11 @@ var historicalRootsUpdateTest = NewEpochProcessing(func(s abstract.BeaconState)
})
var inactivityUpdateTest = NewEpochProcessing(func(s abstract.BeaconState) error {
- return statechange.ProcessInactivityScores(s)
+ return statechange.ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s))
})
var justificationFinalizationTest = NewEpochProcessing(func(s abstract.BeaconState) error {
- return statechange.ProcessJustificationBitsAndFinality(s)
+ return statechange.ProcessJustificationBitsAndFinality(s, nil)
})
var participationFlagUpdatesTest = NewEpochProcessing(func(s abstract.BeaconState) error {
@@ -90,7 +91,7 @@ var registryUpdatesTest = NewEpochProcessing(func(s abstract.BeaconState) error
})
var rewardsAndPenaltiesTest = NewEpochProcessing(func(s abstract.BeaconState) error {
- return statechange.ProcessRewardsAndPenalties(s)
+ return statechange.ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), statechange.GetUnslashedIndiciesSet(s))
})
var slashingsTest = NewEpochProcessing(func(s abstract.BeaconState) error {
diff --git a/cl/spectest/consensus_tests/finality.go b/cl/spectest/consensus_tests/finality.go
index 1c72fcfb059..9a1de019a58 100644
--- a/cl/spectest/consensus_tests/finality.go
+++ b/cl/spectest/consensus_tests/finality.go
@@ -2,11 +2,12 @@ package consensus_tests
import (
"fmt"
- "github.com/ledgerwatch/erigon/cl/transition/machine"
"io/fs"
"testing"
+ "github.com/ledgerwatch/erigon/cl/transition/machine"
"github.com/ledgerwatch/erigon/spectest"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/cl/spectest/consensus_tests/fork_choice.go b/cl/spectest/consensus_tests/fork_choice.go
index 64986bb83f4..ca39a83c042 100644
--- a/cl/spectest/consensus_tests/fork_choice.go
+++ b/cl/spectest/consensus_tests/fork_choice.go
@@ -1,17 +1,22 @@
package consensus_tests
import (
+ "context"
"fmt"
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"testing"
"github.com/ledgerwatch/erigon/cl/abstract"
+ "github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "github.com/spf13/afero"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -63,7 +68,7 @@ func (f *ForkChoiceStep) GetValid() bool {
if f.Valid == nil {
return true
}
- return false
+ return *f.Valid
}
func (f *ForkChoiceStep) GetAttestation() string {
if f.Attestation == nil {
@@ -150,7 +155,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err
anchorState, err := spectest.ReadBeaconState(root, c.Version(), "anchor_state.ssz_snappy")
require.NoError(t, err)
- forkStore, err := forkchoice.NewForkChoiceStore(anchorState, nil, nil, false)
+ forkStore, err := forkchoice.NewForkChoiceStore(context.Background(), anchorState, nil, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs()))
require.NoError(t, err)
var steps []ForkChoiceStep
@@ -167,27 +172,16 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err
data := &cltypes.AttesterSlashing{}
err := spectest.ReadSsz(root, c.Version(), step.GetAttesterSlashing()+".ssz_snappy", data)
require.NoError(t, err, stepstr)
- err = forkStore.OnAttesterSlashing(data)
+ err = forkStore.OnAttesterSlashing(data, false)
if step.GetValid() {
require.NoError(t, err, stepstr)
} else {
require.Error(t, err, stepstr)
}
case "on_merge_block":
- // on_merge_block is for testing things related to the ethereum "The Merge" event
- // this has already happened, so let's just pass these tests
return nil
- // blk := &cltypes.SignedBeaconBlock{}
- // err := spectest.ReadSsz(root, c.Version(), step.GetPowBlock()+".ssz_snappy", blk)
- // require.NoError(t, err, stepstr)
- // err = forkStore.OnBlock(blk, true, true)
- // if step.GetValid() {
- // require.NoError(t, err, stepstr)
- // } else {
- // require.Error(t, err, stepstr)
- // }
case "on_block":
- blk := &cltypes.SignedBeaconBlock{}
+ blk := cltypes.NewSignedBeaconBlock(anchorState.BeaconConfig())
err := spectest.ReadSsz(root, c.Version(), step.GetBlock()+".ssz_snappy", blk)
require.NoError(t, err, stepstr)
err = forkStore.OnBlock(blk, true, true)
diff --git a/cl/spectest/consensus_tests/forks.go b/cl/spectest/consensus_tests/forks.go
index 796673f5b3f..39d97f98165 100644
--- a/cl/spectest/consensus_tests/forks.go
+++ b/cl/spectest/consensus_tests/forks.go
@@ -2,12 +2,12 @@ package consensus_tests
import (
"fmt"
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"os"
"testing"
"github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/cl/spectest/consensus_tests/operations.go b/cl/spectest/consensus_tests/operations.go
index 1aaca37cc04..f2de0d6a784 100644
--- a/cl/spectest/consensus_tests/operations.go
+++ b/cl/spectest/consensus_tests/operations.go
@@ -2,14 +2,15 @@ package consensus_tests
import (
"fmt"
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"os"
"testing"
+ "github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -124,7 +125,7 @@ func operationBlockHeaderHandler(t *testing.T, root fs.FS, c spectest.TestCase)
if err != nil && !expectedError {
return err
}
- block := &cltypes.BeaconBlock{}
+ block := cltypes.NewBeaconBlock(&clparams.MainnetBeaconConfig)
if err := spectest.ReadSszOld(root, block, c.Version(), blockFileName); err != nil {
return err
}
@@ -244,7 +245,7 @@ func operationWithdrawalHandler(t *testing.T, root fs.FS, c spectest.TestCase) e
if err != nil && !expectedError {
return err
}
- executionPayload := &cltypes.Eth1Block{}
+ executionPayload := cltypes.NewEth1Block(c.Version(), &clparams.MainnetBeaconConfig)
if err := spectest.ReadSszOld(root, executionPayload, c.Version(), executionPayloadFileName); err != nil {
return err
}
diff --git a/cl/spectest/consensus_tests/rewards.go b/cl/spectest/consensus_tests/rewards.go
index f22f02af4dd..15c3460d813 100644
--- a/cl/spectest/consensus_tests/rewards.go
+++ b/cl/spectest/consensus_tests/rewards.go
@@ -1,10 +1,9 @@
package consensus_tests
import (
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"testing"
-
- "github.com/ledgerwatch/erigon/spectest"
)
type RewardsCore struct {
diff --git a/cl/spectest/consensus_tests/sanity.go b/cl/spectest/consensus_tests/sanity.go
index 493f3ddcf4e..85f5d0c3b8b 100644
--- a/cl/spectest/consensus_tests/sanity.go
+++ b/cl/spectest/consensus_tests/sanity.go
@@ -1,13 +1,14 @@
package consensus_tests
import (
- "github.com/ledgerwatch/erigon/cl/transition/machine"
"io/fs"
"os"
"testing"
- "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/transition/machine"
"github.com/ledgerwatch/erigon/spectest"
+
+ "github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/cl/spectest/consensus_tests/shuffling.go b/cl/spectest/consensus_tests/shuffling.go
index c26711d3b8c..b7a0a9338b0 100644
--- a/cl/spectest/consensus_tests/shuffling.go
+++ b/cl/spectest/consensus_tests/shuffling.go
@@ -4,13 +4,14 @@ import (
"io/fs"
"testing"
+ "github.com/ledgerwatch/erigon/spectest"
+
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/shuffling"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -29,7 +30,7 @@ func (b *ShufflingCore) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err
}
s := state.New(&clparams.MainnetBeaconConfig)
- keccakOptimized := utils.OptimizedKeccak256NotThreadSafe()
+ keccakOptimized := utils.OptimizedSha256NotThreadSafe()
preInputs := shuffling.ComputeShuffledIndexPreInputs(s.BeaconConfig(), meta.Seed)
for idx, v := range meta.Mapping {
shuffledIdx, err := shuffling.ComputeShuffledIndex(s.BeaconConfig(), uint64(idx), uint64(meta.Count), meta.Seed, preInputs, keccakOptimized)
diff --git a/cl/spectest/consensus_tests/ssz_static.go b/cl/spectest/consensus_tests/ssz_static.go
index 57dd6d3cdb9..a1512837d44 100644
--- a/cl/spectest/consensus_tests/ssz_static.go
+++ b/cl/spectest/consensus_tests/ssz_static.go
@@ -1,9 +1,15 @@
package consensus_tests
import (
+ "bytes"
"io/fs"
"testing"
+ "github.com/ledgerwatch/erigon/spectest"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -11,7 +17,6 @@ import (
"github.com/ledgerwatch/erigon-lib/types/ssz"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -44,6 +49,7 @@ func getSSZStaticConsensusTest[T unmarshalerMarshalerHashable](ref T) spectest.H
require.NoError(t, err)
encoded, err := utils.DecompressSnappy(snappyEncoded)
require.NoError(t, err)
+
if err := object.DecodeSSZ(encoded, int(c.Version())); err != nil && !isBeaconState {
return err
}
@@ -51,13 +57,31 @@ func getSSZStaticConsensusTest[T unmarshalerMarshalerHashable](ref T) spectest.H
require.NoError(t, err)
require.EqualValues(t, expectedRoot, haveRoot)
// Cannot test it without a config.
- // TODO: parse and use config
if isBeaconState {
return nil
}
haveEncoded, err := object.EncodeSSZ(nil)
require.NoError(t, err)
require.EqualValues(t, haveEncoded, encoded)
+ // Now let it do the encoding in snapshot format
+ if blk, ok := object.(*cltypes.SignedBeaconBlock); ok {
+ var b bytes.Buffer
+ _, err := snapshot_format.WriteBlockForSnapshot(&b, blk, nil)
+ require.NoError(t, err)
+ var br snapshot_format.MockBlockReader
+ if blk.Version() >= clparams.BellatrixVersion {
+ br = snapshot_format.MockBlockReader{Block: blk.Block.Body.ExecutionPayload}
+
+ }
+
+ blk2, err := snapshot_format.ReadBlockFromSnapshot(&b, &br, &clparams.MainnetBeaconConfig)
+ require.NoError(t, err)
+
+ haveRoot, err := blk2.HashSSZ()
+ require.NoError(t, err)
+ require.EqualValues(t, expectedRoot, haveRoot)
+ }
+
return nil
})
}
diff --git a/cl/spectest/consensus_tests/transition.go b/cl/spectest/consensus_tests/transition.go
index 105ab477fb9..8a6ac6cac78 100644
--- a/cl/spectest/consensus_tests/transition.go
+++ b/cl/spectest/consensus_tests/transition.go
@@ -3,12 +3,12 @@ package consensus_tests
import (
"fmt"
"github.com/ledgerwatch/erigon/cl/transition/machine"
+ "github.com/ledgerwatch/erigon/spectest"
"io/fs"
"testing"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/spectest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/cl/spectest/tests_test.go b/cl/spectest/tests_test.go
index db4fe28c987..a556ae7b03e 100644
--- a/cl/spectest/tests_test.go
+++ b/cl/spectest/tests_test.go
@@ -4,11 +4,11 @@ import (
"os"
"testing"
+ "github.com/ledgerwatch/erigon/spectest"
+
"github.com/ledgerwatch/erigon/cl/transition"
"github.com/ledgerwatch/erigon/cl/spectest/consensus_tests"
-
- "github.com/ledgerwatch/erigon/spectest"
)
func Test(t *testing.T) {
diff --git a/cl/spectest/wmake.ps1 b/cl/spectest/wmake.ps1
new file mode 100644
index 00000000000..ecc6269a7bf
--- /dev/null
+++ b/cl/spectest/wmake.ps1
@@ -0,0 +1,37 @@
+# Clean Function
+function Clean {
+ Remove-Item -Recurse -Force -Path .\tests
+}
+
+# Tests Function
+function Tests {
+ $env:GIT_LFS_SKIP_SMUDGE = "1"
+ $gitCloneCmd = "git clone https://github.com/ethereum/consensus-spec-tests"
+ $gitCheckoutCmd = "cd consensus-spec-tests; git checkout 70dc28b18c71f3ae080c02f51bd3421e0b60609b; git lfs pull --exclude=tests/general,tests/minimal; cd .."
+
+ Invoke-Expression $gitCloneCmd
+ Invoke-Expression $gitCheckoutCmd
+
+ Move-Item -Path ".\consensus-spec-tests\tests" -Destination ".\" -Force
+ Remove-Item -Path ".\consensus-spec-tests" -Recurse -Force
+ Remove-Item -Path ".\tests\minimal" -Recurse -Force
+ Remove-Item -Path ".\tests\mainnet\eip6110" -Recurse -Force
+ Remove-Item -Path ".\tests\mainnet\deneb" -Recurse -Force
+}
+
+# Mainnet Function
+function Mainnet {
+ $env:CGO_CFLAGS = "-D__BLST_PORTABLE__"
+ go test -tags=spectest -run="/mainnet" -failfast -v
+}
+
+# Main Targets
+if ($MyInvocation.BoundParameters["clean"]) {
+ Clean
+}
+elseif ($MyInvocation.BoundParameters["tests"]) {
+ Tests
+} else {
+ Mainnet
+}
+
diff --git a/cl/ssz/decode.go b/cl/ssz/decode.go
index 1abaf33ede8..1308081adf7 100644
--- a/cl/ssz/decode.go
+++ b/cl/ssz/decode.go
@@ -37,12 +37,11 @@ types such as uint64, []byte, and objects that implement the SizedObjectSSZ inte
It handles both static (fixed size) and dynamic (variable size) objects based on their respective decoding methods and offsets.
*/
func UnmarshalSSZ(buf []byte, version int, schema ...interface{}) (err error) {
- defer func() {
- if err2 := recover(); err2 != nil {
- err = fmt.Errorf("panic while decoding: %v", err2)
- }
- }()
-
+ // defer func() {
+ // if err2 := recover(); err2 != nil {
+ // err = fmt.Errorf("panic while decoding: %v", err2)
+ // }
+ // }()
position := 0
offsets := []int{}
dynamicObjs := []SizedObjectSSZ{}
diff --git a/cl/ssz/encode.go b/cl/ssz/encode.go
index 2f851f1d208..38b57a8a11c 100644
--- a/cl/ssz/encode.go
+++ b/cl/ssz/encode.go
@@ -57,6 +57,7 @@ func MarshalSSZ(buf []byte, schema ...any) (dst []byte, err error) {
err = fmt.Errorf("panic while encoding: %v", err2)
}
}()
+
dst = buf
currentOffset := 0
dynamicComponents := []SizedObjectSSZ{}
diff --git a/cl/transition/impl/eth2/block_processing_test.go b/cl/transition/impl/eth2/block_processing_test.go
index bfae09874c9..7838ce55ae2 100644
--- a/cl/transition/impl/eth2/block_processing_test.go
+++ b/cl/transition/impl/eth2/block_processing_test.go
@@ -22,7 +22,7 @@ var capellaState []byte
func TestBlockProcessing(t *testing.T) {
s := state.New(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(s, capellaState, int(clparams.CapellaVersion)))
- block := &cltypes.SignedBeaconBlock{}
+ block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
require.NoError(t, utils.DecodeSSZSnappy(block, capellaBlock, int(clparams.CapellaVersion)))
require.NoError(t, transition.TransitionState(s, block, true)) // All checks already made in transition state
}
diff --git a/cl/transition/impl/eth2/operations.go b/cl/transition/impl/eth2/operations.go
index 673590a22ae..13335129d4b 100644
--- a/cl/transition/impl/eth2/operations.go
+++ b/cl/transition/impl/eth2/operations.go
@@ -4,12 +4,13 @@ import (
"bytes"
"errors"
"fmt"
- "github.com/ledgerwatch/erigon/cl/abstract"
"reflect"
"time"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/cl/abstract"
+
"github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange"
- "github.com/ledgerwatch/erigon/metrics/methelp"
"golang.org/x/exp/slices"
"github.com/ledgerwatch/erigon-lib/common"
@@ -38,16 +39,8 @@ func (I *impl) ProcessProposerSlashing(s abstract.BeaconState, propSlashing *clt
return fmt.Errorf("non-matching proposer indices proposer slashing: %d != %d", h1.ProposerIndex, h2.ProposerIndex)
}
- h1Root, err := h1.HashSSZ()
- if err != nil {
- return fmt.Errorf("unable to hash header1: %v", err)
- }
- h2Root, err := h2.HashSSZ()
- if err != nil {
- return fmt.Errorf("unable to hash header2: %v", err)
- }
- if h1Root == h2Root {
- return fmt.Errorf("propose slashing headers are the same: %v == %v", h1Root, h2Root)
+ if *h1 == *h2 {
+ return fmt.Errorf("proposee slashing headers are the same")
}
proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex))
@@ -172,7 +165,7 @@ func (I *impl) ProcessDeposit(s abstract.BeaconState, deposit *cltypes.Deposit)
if err != nil {
return err
}
- signedRoot := utils.Keccak256(depositMessageRoot[:], domain)
+ signedRoot := utils.Sha256(depositMessageRoot[:], domain)
// Perform BLS verification and if successful noice.
valid, err := bls.Verify(deposit.Data.Signature[:], signedRoot[:], publicKey[:])
// Literally you can input it trash.
@@ -197,7 +190,7 @@ func (I *impl) ProcessDeposit(s abstract.BeaconState, deposit *cltypes.Deposit)
// ProcessVoluntaryExit takes a voluntary exit and applies state transition.
func (I *impl) ProcessVoluntaryExit(s abstract.BeaconState, signedVoluntaryExit *cltypes.SignedVoluntaryExit) error {
// Sanity checks so that we know it is good.
- voluntaryExit := signedVoluntaryExit.VolunaryExit
+ voluntaryExit := signedVoluntaryExit.VoluntaryExit
currentEpoch := state.Epoch(s)
validator, err := s.ValidatorForValidatorIndex(int(voluntaryExit.ValidatorIndex))
if err != nil {
@@ -241,7 +234,7 @@ func (I *impl) ProcessVoluntaryExit(s abstract.BeaconState, signedVoluntaryExit
// ProcessWithdrawals processes withdrawals by decreasing the balance of each validator
// and updating the next withdrawal index and validator index.
-func (I *impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*types.Withdrawal]) error {
+func (I *impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*cltypes.Withdrawal]) error {
// Get the list of withdrawals, the expected withdrawals (if performing full validation),
// and the beacon configuration.
beaconConfig := s.BeaconConfig()
@@ -253,8 +246,8 @@ func (I *impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.Lis
if len(expectedWithdrawals) != withdrawals.Len() {
return fmt.Errorf("ProcessWithdrawals: expected %d withdrawals, but got %d", len(expectedWithdrawals), withdrawals.Len())
}
- if err := solid.RangeErr[*types.Withdrawal](withdrawals, func(i int, w *types.Withdrawal, _ int) error {
- if !expectedWithdrawals[i].Equal(w) {
+ if err := solid.RangeErr[*cltypes.Withdrawal](withdrawals, func(i int, w *cltypes.Withdrawal, _ int) error {
+ if *expectedWithdrawals[i] != *w {
return fmt.Errorf("ProcessWithdrawals: withdrawal %d does not match expected withdrawal", i)
}
return nil
@@ -263,7 +256,7 @@ func (I *impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.Lis
}
}
- if err := solid.RangeErr[*types.Withdrawal](withdrawals, func(_ int, w *types.Withdrawal, _ int) error {
+ if err := solid.RangeErr[*cltypes.Withdrawal](withdrawals, func(_ int, w *cltypes.Withdrawal, _ int) error {
if err := state.DecreaseBalance(s, w.Validator, w.Amount); err != nil {
return err
}
@@ -327,7 +320,7 @@ func (I *impl) ProcessSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAg
if err != nil {
return err
}
- msg := utils.Keccak256(blockRoot[:], domain)
+ msg := utils.Sha256(blockRoot[:], domain)
isValid, err := bls.VerifyAggregate(sync.SyncCommiteeSignature[:], msg[:], votedKeys)
if err != nil {
return err
@@ -372,7 +365,7 @@ func processSyncAggregate(s abstract.BeaconState, sync *cltypes.SyncAggregate) (
vIdx, exists := s.ValidatorIndexByPubkey(committeeKeys[currPubKeyIndex])
// Impossible scenario.
if !exists {
- return nil, errors.New("validator public key does not exist in state")
+ return nil, fmt.Errorf("validator public key does not exist in state: %x", committeeKeys[currPubKeyIndex])
}
if syncAggregateBits[i]&byte(bit) > 0 {
votedKeys = append(votedKeys, committeeKeys[currPubKeyIndex][:])
@@ -411,7 +404,7 @@ func (I *impl) ProcessBlsToExecutionChange(s abstract.BeaconState, signedChange
}
// Check the validator's withdrawal credentials against the provided message.
- hashedFrom := utils.Keccak256(change.From[:])
+ hashedFrom := utils.Sha256(change.From[:])
if !bytes.Equal(hashedFrom[1:], wc[1:]) {
return fmt.Errorf("invalid withdrawal credentials")
}
@@ -479,7 +472,7 @@ func (I *impl) VerifyKzgCommitmentsAgainstTransactions(transactions *solid.Trans
func (I *impl) ProcessAttestations(s abstract.BeaconState, attestations *solid.ListSSZ[*solid.Attestation]) error {
attestingIndiciesSet := make([][]uint64, attestations.Len())
- h := methelp.NewHistTimer("beacon_process_attestations")
+ h := metrics.NewHistTimer("beacon_process_attestations")
baseRewardPerIncrement := s.BaseRewardPerIncrement()
c := h.Tag("attestation_step", "process")
@@ -518,10 +511,10 @@ func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Att
stateSlot := s.Slot()
beaconConfig := s.BeaconConfig()
- h := methelp.NewHistTimer("beacon_process_attestation_post_altair")
+ h := metrics.NewHistTimer("beacon_process_attestation_post_altair")
c := h.Tag("step", "get_participation_flag")
- participationFlagsIndicies, err := s.GetAttestationParticipationFlagIndicies(attestation.AttestantionData(), stateSlot-data.Slot())
+ participationFlagsIndicies, err := s.GetAttestationParticipationFlagIndicies(data, stateSlot-data.Slot(), false)
if err != nil {
return nil, err
}
@@ -529,7 +522,7 @@ func processAttestationPostAltair(s abstract.BeaconState, attestation *solid.Att
c = h.Tag("step", "get_attesting_indices")
- attestingIndicies, err := s.GetAttestingIndicies(attestation.AttestantionData(), attestation.AggregationBits(), true)
+ attestingIndicies, err := s.GetAttestingIndicies(data, attestation.AggregationBits(), true)
if err != nil {
return nil, err
}
@@ -707,8 +700,12 @@ func processAttestation(s abstract.BeaconState, attestation *solid.Attestation,
func verifyAttestations(s abstract.BeaconState, attestations *solid.ListSSZ[*solid.Attestation], attestingIndicies [][]uint64) (bool, error) {
indexedAttestations := make([]*cltypes.IndexedAttestation, 0, attestations.Len())
+ commonBuffer := make([]byte, 8*2048)
attestations.Range(func(idx int, a *solid.Attestation, _ int) bool {
- indexedAttestations = append(indexedAttestations, state.GetIndexedAttestation(a, attestingIndicies[idx]))
+ idxAttestations := state.GetIndexedAttestation(a, attestingIndicies[idx])
+ idxAttestations.AttestingIndices.SetReusableHashBuffer(commonBuffer)
+ idxAttestations.HashSSZ()
+ indexedAttestations = append(indexedAttestations, idxAttestations)
return true
})
@@ -817,7 +814,7 @@ func (I *impl) ProcessRandao(s abstract.BeaconState, randao [96]byte, proposerIn
}
randaoMixes := s.GetRandaoMixes(epoch)
- randaoHash := utils.Keccak256(randao[:])
+ randaoHash := utils.Sha256(randao[:])
mix := [32]byte{}
for i := range mix {
mix[i] = randaoMixes[i] ^ randaoHash[i]
@@ -849,7 +846,7 @@ func (I *impl) ProcessSlots(s abstract.BeaconState, slot uint64) error {
beaconConfig := s.BeaconConfig()
sSlot := s.Slot()
if slot <= sSlot {
- return fmt.Errorf("new slot: %d not greater than s slot: %d", slot, sSlot)
+ return fmt.Errorf("new slot: %d not greater than current slot: %d", slot, sSlot)
}
// Process each slot.
for i := sSlot; i < slot; i++ {
@@ -857,15 +854,15 @@ func (I *impl) ProcessSlots(s abstract.BeaconState, slot uint64) error {
if err != nil {
return fmt.Errorf("unable to process slot transition: %v", err)
}
- // TODO(Someone): Add epoch transition.
+
if (sSlot+1)%beaconConfig.SlotsPerEpoch == 0 {
start := time.Now()
if err := statechange.ProcessEpoch(s); err != nil {
return err
}
- log.Debug("Processed new epoch successfully", "epoch", state.Epoch(s), "process_epoch_elpsed", time.Since(start))
+ log.Trace("Processed new epoch successfully", "epoch", state.Epoch(s), "process_epoch_elpsed", time.Since(start))
}
- // TODO: add logic to process epoch updates.
+
sSlot += 1
s.SetSlot(sSlot)
if sSlot%beaconConfig.SlotsPerEpoch != 0 {
diff --git a/cl/transition/impl/eth2/statechange/finalization_and_justification.go b/cl/transition/impl/eth2/statechange/finalization_and_justification.go
index 97844b569cf..b590ebd69e0 100644
--- a/cl/transition/impl/eth2/statechange/finalization_and_justification.go
+++ b/cl/transition/impl/eth2/statechange/finalization_and_justification.go
@@ -59,16 +59,15 @@ func weighJustificationAndFinalization(s abstract.BeaconState, previousEpochTarg
return nil
}
-func ProcessJustificationBitsAndFinality(s abstract.BeaconState) error {
+func ProcessJustificationBitsAndFinality(s abstract.BeaconState, unslashedParticipatingIndicies [][]bool) error {
currentEpoch := state.Epoch(s)
- previousEpoch := state.PreviousEpoch(s)
beaconConfig := s.BeaconConfig()
// Skip for first 2 epochs
if currentEpoch <= beaconConfig.GenesisEpoch+1 {
return nil
}
var previousTargetBalance, currentTargetBalance uint64
-
+ previousEpoch := state.PreviousEpoch(s)
if s.Version() == clparams.Phase0Version {
var err error
s.ForEachValidator(func(validator solid.Validator, idx, total int) bool {
@@ -99,18 +98,24 @@ func ProcessJustificationBitsAndFinality(s abstract.BeaconState) error {
}
} else {
// Use bitlists to determine finality.
- previousParticipation, currentParticipation := s.EpochParticipation(false), s.EpochParticipation(true)
+ currentParticipation, previousParticipation := s.EpochParticipation(true), s.EpochParticipation(false)
s.ForEachValidator(func(validator solid.Validator, i, total int) bool {
if validator.Slashed() {
return true
}
- if validator.Active(previousEpoch) &&
+ effectiveBalance := validator.EffectiveBalance()
+ if unslashedParticipatingIndicies != nil {
+ if unslashedParticipatingIndicies[beaconConfig.TimelyTargetFlagIndex][i] {
+ previousTargetBalance += effectiveBalance
+ }
+ } else if validator.Active(previousEpoch) &&
cltypes.ParticipationFlags(previousParticipation.Get(i)).HasFlag(int(beaconConfig.TimelyTargetFlagIndex)) {
- previousTargetBalance += validator.EffectiveBalance()
+ previousTargetBalance += effectiveBalance
}
+
if validator.Active(currentEpoch) &&
cltypes.ParticipationFlags(currentParticipation.Get(i)).HasFlag(int(beaconConfig.TimelyTargetFlagIndex)) {
- currentTargetBalance += validator.EffectiveBalance()
+ currentTargetBalance += effectiveBalance
}
return true
})
diff --git a/cl/transition/impl/eth2/statechange/process_epoch.go b/cl/transition/impl/eth2/statechange/process_epoch.go
index ffae063d98e..304a218ebc7 100644
--- a/cl/transition/impl/eth2/statechange/process_epoch.go
+++ b/cl/transition/impl/eth2/statechange/process_epoch.go
@@ -3,45 +3,83 @@ package statechange
import (
"github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
)
+func GetUnslashedIndiciesSet(s abstract.BeaconState) [][]bool {
+ if s.Version() == clparams.Phase0Version {
+ return nil
+ }
+ weights := s.BeaconConfig().ParticipationWeights()
+ flagsUnslashedIndiciesSet := make([][]bool, len(weights))
+ for i := range weights {
+ flagsUnslashedIndiciesSet[i] = make([]bool, s.ValidatorLength())
+ }
+ previousEpoch := state.PreviousEpoch(s)
+
+ s.ForEachValidator(func(validator solid.Validator, validatorIndex, total int) bool {
+ for i := range weights {
+ flagsUnslashedIndiciesSet[i][validatorIndex] = state.IsUnslashedParticipatingIndex(s, previousEpoch, uint64(validatorIndex), i)
+ }
+ return true
+ })
+ return flagsUnslashedIndiciesSet
+}
+
// ProcessEpoch process epoch transition.
-func ProcessEpoch(state abstract.BeaconState) error {
- if err := ProcessJustificationBitsAndFinality(state); err != nil {
+func ProcessEpoch(s abstract.BeaconState) error {
+ eligibleValidators := state.EligibleValidatorsIndicies(s)
+ // start := time.Now()
+
+ unslashedIndiciesSet := GetUnslashedIndiciesSet(s)
+ if err := ProcessJustificationBitsAndFinality(s, unslashedIndiciesSet); err != nil {
return err
}
- if state.Version() >= clparams.AltairVersion {
- if err := ProcessInactivityScores(state); err != nil {
+ // fmt.Println("ProcessJustificationBitsAndFinality", time.Since(start))
+ // start = time.Now()
+
+ if s.Version() >= clparams.AltairVersion {
+ if err := ProcessInactivityScores(s, eligibleValidators, unslashedIndiciesSet); err != nil {
return err
}
}
- if err := ProcessRewardsAndPenalties(state); err != nil {
+ // fmt.Println("ProcessInactivityScores", time.Since(start))
+ // start = time.Now()
+ if err := ProcessRewardsAndPenalties(s, eligibleValidators, unslashedIndiciesSet); err != nil {
return err
}
- if err := ProcessRegistryUpdates(state); err != nil {
+ // fmt.Println("ProcessRewardsAndPenalties", time.Since(start))
+ // start = time.Now()
+ if err := ProcessRegistryUpdates(s); err != nil {
return err
}
- if err := ProcessSlashings(state); err != nil {
+ // fmt.Println("ProcessRegistryUpdates", time.Since(start))
+ // start = time.Now()
+ if err := ProcessSlashings(s); err != nil {
return err
}
- ProcessEth1DataReset(state)
- if err := ProcessEffectiveBalanceUpdates(state); err != nil {
+ // fmt.Println("ProcessSlashings", time.Since(start))
+ ProcessEth1DataReset(s)
+ // start = time.Now()
+ if err := ProcessEffectiveBalanceUpdates(s); err != nil {
return err
}
- ProcessSlashingsReset(state)
- ProcessRandaoMixesReset(state)
- if err := ProcessHistoricalRootsUpdate(state); err != nil {
+ // fmt.Println("ProcessEffectiveBalanceUpdates", time.Since(start))
+ ProcessSlashingsReset(s)
+ ProcessRandaoMixesReset(s)
+ if err := ProcessHistoricalRootsUpdate(s); err != nil {
return err
}
- if state.Version() == clparams.Phase0Version {
- if err := ProcessParticipationRecordUpdates(state); err != nil {
+ if s.Version() == clparams.Phase0Version {
+ if err := ProcessParticipationRecordUpdates(s); err != nil {
return err
}
}
- if state.Version() >= clparams.AltairVersion {
- ProcessParticipationFlagUpdates(state)
- if err := ProcessSyncCommitteeUpdate(state); err != nil {
+ if s.Version() >= clparams.AltairVersion {
+ ProcessParticipationFlagUpdates(s)
+ if err := ProcessSyncCommitteeUpdate(s); err != nil {
return err
}
}
diff --git a/cl/transition/impl/eth2/statechange/process_epoch_test.go b/cl/transition/impl/eth2/statechange/process_epoch_test.go
index dba790e89b7..18c7377afff 100644
--- a/cl/transition/impl/eth2/statechange/process_epoch_test.go
+++ b/cl/transition/impl/eth2/statechange/process_epoch_test.go
@@ -2,9 +2,10 @@ package statechange
import (
_ "embed"
- "github.com/ledgerwatch/erigon/cl/abstract"
"testing"
+ "github.com/ledgerwatch/erigon/cl/abstract"
+
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/utils"
@@ -90,7 +91,7 @@ var startingSlashingsResetState []byte
func TestProcessRewardsAndPenalties(t *testing.T) {
runEpochTransitionConsensusTest(t, startingRewardsPenaltyState, expectedRewardsPenaltyState, func(s abstract.BeaconState) error {
- return ProcessRewardsAndPenalties(s)
+ return ProcessRewardsAndPenalties(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s))
})
}
@@ -127,7 +128,7 @@ func TestProcessSlashings(t *testing.T) {
func TestProcessJustificationAndFinality(t *testing.T) {
runEpochTransitionConsensusTest(t, startingJustificationAndFinalityState, expectedJustificationAndFinalityState, func(s abstract.BeaconState) error {
- return ProcessJustificationBitsAndFinality(s)
+ return ProcessJustificationBitsAndFinality(s, nil)
})
}
@@ -160,6 +161,6 @@ var startingInactivityScoresState []byte
func TestInactivityScores(t *testing.T) {
runEpochTransitionConsensusTest(t, startingInactivityScoresState, expectedInactivityScoresState, func(s abstract.BeaconState) error {
- return ProcessInactivityScores(s)
+ return ProcessInactivityScores(s, state.EligibleValidatorsIndicies(s), GetUnslashedIndiciesSet(s))
})
}
diff --git a/cl/transition/impl/eth2/statechange/process_historical_roots_update.go b/cl/transition/impl/eth2/statechange/process_historical_roots_update.go
index c6aa216db85..0f48fe6bee0 100644
--- a/cl/transition/impl/eth2/statechange/process_historical_roots_update.go
+++ b/cl/transition/impl/eth2/statechange/process_historical_roots_update.go
@@ -37,7 +37,7 @@ func ProcessHistoricalRootsUpdate(s abstract.BeaconState) error {
StateSummaryRoot: stateRootsLeaf,
})
} else {
- historicalRoot := utils.Keccak256(blockRootsLeaf[:], stateRootsLeaf[:])
+ historicalRoot := utils.Sha256(blockRootsLeaf[:], stateRootsLeaf[:])
s.AddHistoricalRoot(historicalRoot)
}
diff --git a/cl/transition/impl/eth2/statechange/process_inactivity_scores.go b/cl/transition/impl/eth2/statechange/process_inactivity_scores.go
index d55250b6b31..bc81c5de789 100644
--- a/cl/transition/impl/eth2/statechange/process_inactivity_scores.go
+++ b/cl/transition/impl/eth2/statechange/process_inactivity_scores.go
@@ -7,18 +7,18 @@ import (
)
// ProcessInactivityScores will updates the inactivity registry of each validator.
-func ProcessInactivityScores(s abstract.BeaconState) error {
+func ProcessInactivityScores(s abstract.BeaconState, eligibleValidatorsIndicies []uint64, unslashedIndicies [][]bool) error {
if state.Epoch(s) == s.BeaconConfig().GenesisEpoch {
return nil
}
- previousEpoch := state.PreviousEpoch(s)
- for _, validatorIndex := range state.EligibleValidatorsIndicies(s) {
+
+ for _, validatorIndex := range eligibleValidatorsIndicies {
// retrieve validator inactivity score index.
score, err := s.ValidatorInactivityScore(int(validatorIndex))
if err != nil {
return err
}
- if state.IsUnslashedParticipatingIndex(s, previousEpoch, validatorIndex, int(s.BeaconConfig().TimelyTargetFlagIndex)) {
+ if unslashedIndicies[s.BeaconConfig().TimelyTargetFlagIndex][validatorIndex] {
score -= utils.Min64(1, score)
} else {
score += s.BeaconConfig().InactivityScoreBias
diff --git a/cl/transition/impl/eth2/statechange/process_registry_updates.go b/cl/transition/impl/eth2/statechange/process_registry_updates.go
index 64915c1d88b..c22c442266b 100644
--- a/cl/transition/impl/eth2/statechange/process_registry_updates.go
+++ b/cl/transition/impl/eth2/statechange/process_registry_updates.go
@@ -1,9 +1,10 @@
package statechange
import (
- "github.com/ledgerwatch/erigon/cl/abstract"
"sort"
+ "github.com/ledgerwatch/erigon/cl/abstract"
+
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
@@ -15,26 +16,38 @@ func computeActivationExitEpoch(beaconConfig *clparams.BeaconChainConfig, epoch
return epoch + 1 + beaconConfig.MaxSeedLookahead
}
+type minimizeQueuedValidator struct {
+ validatorIndex uint64
+ activationEligibilityEpoch uint64
+}
+
// ProcessRegistyUpdates updates every epoch the activation status of validators. Specs at: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#registry-updates.
func ProcessRegistryUpdates(s abstract.BeaconState) error {
beaconConfig := s.BeaconConfig()
currentEpoch := state.Epoch(s)
// start also initializing the activation queue.
- activationQueue := make([]uint64, 0)
+ activationQueue := make([]minimizeQueuedValidator, 0)
// Process activation eligibility and ejections.
var err error
s.ForEachValidator(func(validator solid.Validator, validatorIndex, total int) bool {
- if state.IsValidatorEligibleForActivationQueue(s, validator) {
+ activationEligibilityEpoch := validator.ActivationEligibilityEpoch()
+ effectivaBalance := validator.EffectiveBalance()
+ if activationEligibilityEpoch == s.BeaconConfig().FarFutureEpoch &&
+ validator.EffectiveBalance() == s.BeaconConfig().MaxEffectiveBalance {
s.SetActivationEligibilityEpochForValidatorAtIndex(validatorIndex, currentEpoch+1)
}
- if validator.Active(currentEpoch) && validator.EffectiveBalance() <= beaconConfig.EjectionBalance {
+ if validator.Active(currentEpoch) && effectivaBalance <= beaconConfig.EjectionBalance {
if err = s.InitiateValidatorExit(uint64(validatorIndex)); err != nil {
return false
}
}
// Insert in the activation queue in case.
- if state.IsValidatorEligibleForActivation(s, validator) {
- activationQueue = append(activationQueue, uint64(validatorIndex))
+ if activationEligibilityEpoch <= s.FinalizedCheckpoint().Epoch() &&
+ validator.ActivationEpoch() == s.BeaconConfig().FarFutureEpoch {
+ activationQueue = append(activationQueue, minimizeQueuedValidator{
+ validatorIndex: uint64(validatorIndex),
+ activationEligibilityEpoch: activationEligibilityEpoch,
+ })
}
return true
})
@@ -44,20 +57,18 @@ func ProcessRegistryUpdates(s abstract.BeaconState) error {
// order the queue accordingly.
sort.Slice(activationQueue, func(i, j int) bool {
// Order by the sequence of activation_eligibility_epoch setting and then index.
- validatori, _ := s.ValidatorForValidatorIndex(int(activationQueue[i]))
- validatorj, _ := s.ValidatorForValidatorIndex(int(activationQueue[j]))
- if validatori.ActivationEligibilityEpoch() != validatorj.ActivationEligibilityEpoch() {
- return validatori.ActivationEligibilityEpoch() < validatorj.ActivationEligibilityEpoch()
+ if activationQueue[i].activationEligibilityEpoch != activationQueue[j].activationEligibilityEpoch {
+ return activationQueue[i].activationEligibilityEpoch < activationQueue[j].activationEligibilityEpoch
}
- return activationQueue[i] < activationQueue[j]
+ return activationQueue[i].validatorIndex < activationQueue[j].validatorIndex
})
activationQueueLength := s.GetValidatorChurnLimit()
if len(activationQueue) > int(activationQueueLength) {
activationQueue = activationQueue[:activationQueueLength]
}
// Only process up to epoch limit.
- for _, validatorIndex := range activationQueue {
- s.SetActivationEpochForValidatorAtIndex(int(validatorIndex), computeActivationExitEpoch(beaconConfig, currentEpoch))
+ for _, entry := range activationQueue {
+ s.SetActivationEpochForValidatorAtIndex(int(entry.validatorIndex), computeActivationExitEpoch(beaconConfig, currentEpoch))
}
return nil
}
diff --git a/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go b/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go
index df6c5cd51bc..63788a2ec8b 100644
--- a/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go
+++ b/cl/transition/impl/eth2/statechange/process_rewards_and_penalties.go
@@ -7,21 +7,19 @@ import (
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
)
-func processRewardsAndPenaltiesPostAltair(s abstract.BeaconState) (err error) {
+func processRewardsAndPenaltiesPostAltair(s abstract.BeaconState, eligibleValidators []uint64, flagsUnslashedIndiciesSet [][]bool) (err error) {
beaconConfig := s.BeaconConfig()
weights := beaconConfig.ParticipationWeights()
- eligibleValidators := state.EligibleValidatorsIndicies(s)
+
// Initialize variables
totalActiveBalance := s.GetTotalActiveBalance()
- previousEpoch := state.PreviousEpoch(s)
// Inactivity penalties denominator.
inactivityPenaltyDenominator := beaconConfig.InactivityScoreBias * beaconConfig.GetPenaltyQuotient(s.Version())
// Make buffer for flag indexes total balances.
flagsTotalBalances := make([]uint64, len(weights))
- // Compute all total balances for each enable unslashed validator indicies with all flags on.
s.ForEachValidator(func(validator solid.Validator, validatorIndex, total int) bool {
for i := range weights {
- if state.IsUnslashedParticipatingIndex(s, previousEpoch, uint64(validatorIndex), i) {
+ if flagsUnslashedIndiciesSet[i][validatorIndex] {
flagsTotalBalances[i] += validator.EffectiveBalance()
}
}
@@ -34,27 +32,24 @@ func processRewardsAndPenaltiesPostAltair(s abstract.BeaconState) (err error) {
}
rewardDenominator := (totalActiveBalance / beaconConfig.EffectiveBalanceIncrement) * beaconConfig.WeightDenominator
var baseReward uint64
+ inactivityLeaking := state.InactivityLeaking(s)
// Now process deltas and whats nots.
for _, index := range eligibleValidators {
baseReward, err = s.BaseReward(index)
if err != nil {
return
}
+ delta := int64(0)
for flagIdx := range weights {
- if state.IsUnslashedParticipatingIndex(s, previousEpoch, index, flagIdx) {
- if !state.InactivityLeaking(s) {
- rewardNumerator := baseReward * rewardMultipliers[flagIdx]
- if err := state.IncreaseBalance(s, index, rewardNumerator/rewardDenominator); err != nil {
- return err
- }
+ if flagsUnslashedIndiciesSet[flagIdx][index] {
+ if !inactivityLeaking {
+ delta += int64((baseReward * rewardMultipliers[flagIdx]) / rewardDenominator)
}
} else if flagIdx != int(beaconConfig.TimelyHeadFlagIndex) {
- if err := state.DecreaseBalance(s, index, baseReward*weights[flagIdx]/beaconConfig.WeightDenominator); err != nil {
- return err
- }
+ delta -= int64(baseReward * weights[flagIdx] / beaconConfig.WeightDenominator)
}
}
- if !state.IsUnslashedParticipatingIndex(s, previousEpoch, index, int(beaconConfig.TimelyTargetFlagIndex)) {
+ if !flagsUnslashedIndiciesSet[beaconConfig.TimelyTargetFlagIndex][index] {
inactivityScore, err := s.ValidatorInactivityScore(int(index))
if err != nil {
return err
@@ -64,19 +59,25 @@ func processRewardsAndPenaltiesPostAltair(s abstract.BeaconState) (err error) {
if err != nil {
return err
}
- state.DecreaseBalance(s, index, (effectiveBalance*inactivityScore)/inactivityPenaltyDenominator)
+ delta -= int64((effectiveBalance * inactivityScore) / inactivityPenaltyDenominator)
+ }
+ if delta > 0 {
+ if err := state.IncreaseBalance(s, index, uint64(delta)); err != nil {
+ return err
+ }
+ } else if err := state.DecreaseBalance(s, index, uint64(-delta)); err != nil {
+ return err
}
}
return
}
// processRewardsAndPenaltiesPhase0 process rewards and penalties for phase0 state.
-func processRewardsAndPenaltiesPhase0(s abstract.BeaconState) (err error) {
+func processRewardsAndPenaltiesPhase0(s abstract.BeaconState, eligibleValidators []uint64) (err error) {
beaconConfig := s.BeaconConfig()
if state.Epoch(s) == beaconConfig.GenesisEpoch {
return nil
}
- eligibleValidators := state.EligibleValidatorsIndicies(s)
// Initialize variables
rewardDenominator := s.GetTotalActiveBalance() / beaconConfig.EffectiveBalanceIncrement
// Make buffer for flag indexes totTargetal balances.
@@ -150,49 +151,34 @@ func processRewardsAndPenaltiesPhase0(s abstract.BeaconState) (err error) {
}
missed = 3 - attested
}
-
+ currentBalance, err := s.ValidatorBalance(int(index))
+ if err != nil {
+ return err
+ }
// If we attested then we reward the validator.
if state.InactivityLeaking(s) {
- if err := state.IncreaseBalance(s, index, baseReward*attested); err != nil {
- return err
- }
+ currentBalance += baseReward * attested
} else {
if !currentValidator.Slashed() && previousMatchingSourceAttester {
- rewardNumerator := baseReward * unslashedMatchingSourceBalanceIncrements
- if err := state.IncreaseBalance(s, index, rewardNumerator/rewardDenominator); err != nil {
- return err
- }
+ currentBalance += (baseReward * unslashedMatchingSourceBalanceIncrements) / rewardDenominator
}
if !currentValidator.Slashed() && previousMatchingTargetAttester {
- rewardNumerator := baseReward * unslashedMatchingTargetBalanceIncrements
- if err := state.IncreaseBalance(s, index, rewardNumerator/rewardDenominator); err != nil {
- return err
- }
+ currentBalance += (baseReward * unslashedMatchingTargetBalanceIncrements) / rewardDenominator
}
if !currentValidator.Slashed() && previousMatchingHeadAttester {
- rewardNumerator := baseReward * unslashedMatchingHeadBalanceIncrements
- if err := state.IncreaseBalance(s, index, rewardNumerator/rewardDenominator); err != nil {
- return err
- }
+ currentBalance += (baseReward * unslashedMatchingHeadBalanceIncrements) / rewardDenominator
}
}
// Process inactivity of the network as a whole finalities.
if state.InactivityLeaking(s) {
proposerReward := baseReward / beaconConfig.ProposerRewardQuotient
- // Neutralize rewards.
- if state.DecreaseBalance(s, index, beaconConfig.BaseRewardsPerEpoch*baseReward-proposerReward); err != nil {
- return err
- }
+ currentBalance -= beaconConfig.BaseRewardsPerEpoch*baseReward - proposerReward
if currentValidator.Slashed() || !previousMatchingTargetAttester {
- // Increase penalities linearly if network is leaking.
- if state.DecreaseBalance(s, index, currentValidator.EffectiveBalance()*state.FinalityDelay(s)/beaconConfig.InactivityPenaltyQuotient); err != nil {
- return err
- }
+ currentBalance -= currentValidator.EffectiveBalance() * state.FinalityDelay(s) / beaconConfig.InactivityPenaltyQuotient
}
}
-
- // For each missed duty we penalize the validator.
- if state.DecreaseBalance(s, index, baseReward*missed); err != nil {
+ currentBalance -= baseReward * missed
+ if err = s.SetValidatorBalance(int(index), currentBalance); err != nil {
return err
}
@@ -234,12 +220,12 @@ func processRewardsAndPenaltiesPhase0(s abstract.BeaconState) (err error) {
}
// ProcessRewardsAndPenalties applies rewards/penalties accumulated during previous epoch.
-func ProcessRewardsAndPenalties(s abstract.BeaconState) error {
+func ProcessRewardsAndPenalties(s abstract.BeaconState, eligibleValidators []uint64, unslashedIndicies [][]bool) error {
if state.Epoch(s) == s.BeaconConfig().GenesisEpoch {
return nil
}
if s.Version() == clparams.Phase0Version {
- return processRewardsAndPenaltiesPhase0(s)
+ return processRewardsAndPenaltiesPhase0(s, eligibleValidators)
}
- return processRewardsAndPenaltiesPostAltair(s)
+ return processRewardsAndPenaltiesPostAltair(s, eligibleValidators, unslashedIndicies)
}
diff --git a/cl/transition/impl/eth2/statechange/process_slashings.go b/cl/transition/impl/eth2/statechange/process_slashings.go
index 0d33f6a1bf9..745994ceada 100644
--- a/cl/transition/impl/eth2/statechange/process_slashings.go
+++ b/cl/transition/impl/eth2/statechange/process_slashings.go
@@ -44,43 +44,6 @@ func processSlashings(s abstract.BeaconState, slashingMultiplier uint64) error {
return nil
}
-func processSlashings2(s abstract.BeaconState, slashingMultiplier uint64) error {
- // Get the current epoch
- epoch := state.Epoch(s)
- // Get the total active balance
- totalBalance := s.GetTotalActiveBalance()
- // Calculate the total slashing amount
- // by summing all slashings and multiplying by the provided multiplier
- slashing := state.GetTotalSlashingAmount(s) * slashingMultiplier
- // Adjust the total slashing amount to be no greater than the total active balance
- if totalBalance < slashing {
- slashing = totalBalance
- }
- beaconConfig := s.BeaconConfig()
- // Apply penalties to validators who have been slashed and reached the withdrawable epoch
- var err error
- s.ForEachValidator(func(validator solid.Validator, i, total int) bool {
- if !validator.Slashed() || epoch+beaconConfig.EpochsPerSlashingsVector/2 != validator.WithdrawableEpoch() {
- return true
- }
- // Get the effective balance increment
- increment := beaconConfig.EffectiveBalanceIncrement
- // Calculate the penalty numerator by multiplying the validator's effective balance by the total slashing amount
- penaltyNumerator := validator.EffectiveBalance() / increment * slashing
- // Calculate the penalty by dividing the penalty numerator by the total balance and multiplying by the increment
- penalty := penaltyNumerator / totalBalance * increment
- // Decrease the validator's balance by the calculated penalty
- if err = state.DecreaseBalance(s, uint64(i), penalty); err != nil {
- return false
- }
- return true
- })
- if err != nil {
- return err
- }
- return nil
-}
-
func ProcessSlashings(state abstract.BeaconState) error {
// Depending on the version of the state, use different multipliers
switch state.Version() {
diff --git a/cl/transition/impl/eth2/statechange/process_sync_committee_update_test.go b/cl/transition/impl/eth2/statechange/process_sync_committee_update_test.go
index a7f4b62fe5f..79f646e1c19 100644
--- a/cl/transition/impl/eth2/statechange/process_sync_committee_update_test.go
+++ b/cl/transition/impl/eth2/statechange/process_sync_committee_update_test.go
@@ -2,13 +2,13 @@ package statechange_test
import (
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cl/transition/impl/eth2/statechange"
"testing"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
diff --git a/cl/transition/impl/eth2/utils.go b/cl/transition/impl/eth2/utils.go
index a23addfb70c..13078d7d076 100644
--- a/cl/transition/impl/eth2/utils.go
+++ b/cl/transition/impl/eth2/utils.go
@@ -15,7 +15,7 @@ const VERSIONED_HASH_VERSION_KZG byte = byte(1)
func kzgCommitmentToVersionedHash(kzgCommitment *cltypes.KZGCommitment) (libcommon.Hash, error) {
versionedHash := [32]byte{}
- kzgCommitmentHash := utils.Keccak256(kzgCommitment[:])
+ kzgCommitmentHash := utils.Sha256(kzgCommitment[:])
buf := append([]byte{}, VERSIONED_HASH_VERSION_KZG)
buf = append(buf, kzgCommitmentHash[1:]...)
@@ -61,7 +61,7 @@ func txPeekBlobVersionedHashes(txBytes []byte) []libcommon.Hash {
func computeSigningRootEpoch(epoch uint64, domain []byte) (libcommon.Hash, error) {
b := make([]byte, 32)
binary.LittleEndian.PutUint64(b, epoch)
- return utils.Keccak256(b, domain), nil
+ return utils.Sha256(b, domain), nil
}
// transitionSlot is called each time there is a new slot to process
diff --git a/cl/transition/impl/eth2/validation.go b/cl/transition/impl/eth2/validation.go
index 8ffd58e43f1..8930134b8f2 100644
--- a/cl/transition/impl/eth2/validation.go
+++ b/cl/transition/impl/eth2/validation.go
@@ -2,6 +2,7 @@ package eth2
import (
"fmt"
+
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/cltypes"
diff --git a/cl/transition/impl/funcmap/impl.go b/cl/transition/impl/funcmap/impl.go
index ad9de7cd65c..073ef35fbce 100644
--- a/cl/transition/impl/funcmap/impl.go
+++ b/cl/transition/impl/funcmap/impl.go
@@ -5,7 +5,6 @@ import (
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
"github.com/ledgerwatch/erigon/cl/transition/machine"
- "github.com/ledgerwatch/erigon/core/types"
)
var _ machine.Interface = (*Impl)(nil)
@@ -15,7 +14,7 @@ type Impl struct {
FnVerifyTransition func(s abstract.BeaconState, block *cltypes.BeaconBlock) error
FnProcessSlots func(s abstract.BeaconState, slot uint64) error
FnProcessBlockHeader func(s abstract.BeaconState, block *cltypes.BeaconBlock) error
- FnProcessWithdrawals func(s abstract.BeaconState, withdrawals *solid.ListSSZ[*types.Withdrawal]) error
+ FnProcessWithdrawals func(s abstract.BeaconState, withdrawals *solid.ListSSZ[*cltypes.Withdrawal]) error
FnProcessExecutionPayload func(s abstract.BeaconState, payload *cltypes.Eth1Block) error
FnProcessRandao func(s abstract.BeaconState, randao [96]byte, proposerIndex uint64) error
FnProcessEth1Data func(state abstract.BeaconState, eth1Data *cltypes.Eth1Data) error
@@ -41,7 +40,7 @@ func (i Impl) ProcessBlockHeader(s abstract.BeaconState, block *cltypes.BeaconBl
return i.FnProcessBlockHeader(s, block)
}
-func (i Impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*types.Withdrawal]) error {
+func (i Impl) ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*cltypes.Withdrawal]) error {
return i.FnProcessWithdrawals(s, withdrawals)
}
diff --git a/cl/transition/machine/block.go b/cl/transition/machine/block.go
index 78a573af6aa..1bfaa452a52 100644
--- a/cl/transition/machine/block.go
+++ b/cl/transition/machine/block.go
@@ -3,12 +3,13 @@ package machine
import (
"errors"
"fmt"
+
+ "github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/metrics/methelp"
)
// ProcessBlock processes a block with the block processor
@@ -19,7 +20,7 @@ func ProcessBlock(impl BlockProcessor, s abstract.BeaconState, signedBlock *clty
if signedBlock.Version() != version {
return fmt.Errorf("processBlock: wrong state version for block at slot %d", block.Slot)
}
- h := methelp.NewHistTimer("beacon_process_block")
+ h := metrics.NewHistTimer("beacon_process_block")
// Process the block header.
if err := impl.ProcessBlockHeader(s, block); err != nil {
return fmt.Errorf("processBlock: failed to process block header: %v", err)
diff --git a/cl/transition/machine/machine.go b/cl/transition/machine/machine.go
index 7fa122c2c96..408b62bdcb6 100644
--- a/cl/transition/machine/machine.go
+++ b/cl/transition/machine/machine.go
@@ -5,7 +5,6 @@ import (
"github.com/ledgerwatch/erigon/cl/abstract"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/core/types"
)
type Interface interface {
@@ -30,7 +29,7 @@ type SlotProcessor interface {
type BlockHeaderProcessor interface {
ProcessBlockHeader(s abstract.BeaconState, block *cltypes.BeaconBlock) error
- ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*types.Withdrawal]) error
+ ProcessWithdrawals(s abstract.BeaconState, withdrawals *solid.ListSSZ[*cltypes.Withdrawal]) error
ProcessExecutionPayload(s abstract.BeaconState, payload *cltypes.Eth1Block) error
ProcessRandao(s abstract.BeaconState, randao [96]byte, proposerIndex uint64) error
ProcessEth1Data(state abstract.BeaconState, eth1Data *cltypes.Eth1Data) error
diff --git a/cl/utils/bytes.go b/cl/utils/bytes.go
index 417fe4b583c..387bad6f5de 100644
--- a/cl/utils/bytes.go
+++ b/cl/utils/bytes.go
@@ -15,6 +15,7 @@ package utils
import (
"encoding/binary"
+ "io"
"math/bits"
"github.com/ledgerwatch/erigon-lib/types/ssz"
@@ -103,3 +104,17 @@ func GetBitlistLength(b []byte) int {
// bit. Subtract this value by 1 to determine the length of the bitlist.
return 8*(len(b)-1) + msb - 1
}
+
+func ReadZSTD(r io.Reader, out []byte) (int, error) {
+ n := 0
+ var err error
+ for n != len(out) {
+ var m int
+ m, err = r.Read(out[n:])
+ n += m
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
diff --git a/cl/utils/bytes_test.go b/cl/utils/bytes_test.go
index d9f15a76124..9b61947b7f7 100644
--- a/cl/utils/bytes_test.go
+++ b/cl/utils/bytes_test.go
@@ -1,11 +1,11 @@
package utils_test
import (
+ "github.com/ledgerwatch/erigon-lib/common"
"testing"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/utils"
- "github.com/ledgerwatch/erigon/common"
"github.com/stretchr/testify/require"
)
diff --git a/cl/utils/crypto.go b/cl/utils/crypto.go
index 619246dfed6..e8481d0c3ee 100644
--- a/cl/utils/crypto.go
+++ b/cl/utils/crypto.go
@@ -27,8 +27,8 @@ var hasherPool = sync.Pool{
},
}
-// General purpose Keccak256
-func Keccak256(data []byte, extras ...[]byte) [32]byte {
+// General purpose Sha256
+func Sha256(data []byte, extras ...[]byte) [32]byte {
h, ok := hasherPool.Get().(hash.Hash)
if !ok {
h = sha256.New()
@@ -46,24 +46,9 @@ func Keccak256(data []byte, extras ...[]byte) [32]byte {
return b
}
-// Optimized Keccak256, avoid pool.put/pool.get, meant for intensive operations.
-func OptimizedKeccak256() HashFunc {
- h := sha256.New()
- return func(data []byte, extras ...[]byte) [32]byte {
- h.Reset()
- var b [32]byte
- h.Write(data)
- for _, extra := range extras {
- h.Write(extra)
- }
- h.Sum(b[:0])
- return b
- }
-}
-
-// Optimized Keccak256, avoid pool.put/pool.get, meant for intensive operations.
+// Optimized Sha256, avoid pool.put/pool.get, meant for intensive operations.
// this version is not thread safe
-func OptimizedKeccak256NotThreadSafe() HashFunc {
+func OptimizedSha256NotThreadSafe() HashFunc {
h := sha256.New()
var b [32]byte
return func(data []byte, extras ...[]byte) [32]byte {
diff --git a/cl/utils/crypto_test.go b/cl/utils/crypto_test.go
index 6d7a9f2c0c5..ec9b80681a3 100644
--- a/cl/utils/crypto_test.go
+++ b/cl/utils/crypto_test.go
@@ -13,12 +13,12 @@ func TestKeccak256(t *testing.T) {
[]byte("extra2"),
}
- expectedHash := utils.Keccak256(data, extras...)
- hashFunc := utils.OptimizedKeccak256()
+ expectedHash := utils.Sha256(data, extras...)
+ hashFunc := utils.OptimizedSha256NotThreadSafe()
expectedOptimizedHash := hashFunc(data, extras...)
// Test Keccak256 function
- hash := utils.Keccak256(data, extras...)
+ hash := utils.Sha256(data, extras...)
if hash != expectedHash {
t.Errorf("Keccak256 returned an incorrect hash. Expected: %x, Got: %x", expectedHash, hash)
}
@@ -37,12 +37,12 @@ func TestOptimizedKeccak256NotThreadSafe(t *testing.T) {
[]byte("extra2"),
}
- expectedHash := utils.Keccak256(data, extras...)
- hashFunc := utils.OptimizedKeccak256NotThreadSafe()
+ expectedHash := utils.Sha256(data, extras...)
+ hashFunc := utils.OptimizedSha256NotThreadSafe()
expectedOptimizedHash := hashFunc(data, extras...)
// Test OptimizedKeccak256NotThreadSafe function
- hash := utils.Keccak256(data, extras...)
+ hash := utils.Sha256(data, extras...)
if hash != expectedHash {
t.Errorf("Keccak256 returned an incorrect hash. Expected: %x, Got: %x", expectedHash, hash)
}
diff --git a/cl/utils/merkle.go b/cl/utils/merkle.go
index 2e63a59b095..a1d667975d9 100644
--- a/cl/utils/merkle.go
+++ b/cl/utils/merkle.go
@@ -9,9 +9,9 @@ func IsValidMerkleBranch(leaf libcommon.Hash, branch []libcommon.Hash, depth uin
value := leaf
for i := uint64(0); i < depth; i++ {
if (index / PowerOf2(i) % 2) == 1 {
- value = Keccak256(append(branch[i][:], value[:]...))
+ value = Sha256(append(branch[i][:], value[:]...))
} else {
- value = Keccak256(append(value[:], branch[i][:]...))
+ value = Sha256(append(value[:], branch[i][:]...))
}
}
return value == root
diff --git a/cl/utils/time.go b/cl/utils/time.go
index 1d129568fd4..b9ed288f794 100644
--- a/cl/utils/time.go
+++ b/cl/utils/time.go
@@ -15,6 +15,12 @@ package utils
import "time"
+// compute time of slot.
+func GetSlotTime(genesisTime uint64, secondsPerSlot uint64, slot uint64) time.Time {
+ slotTime := genesisTime + secondsPerSlot*slot
+ return time.Unix(int64(slotTime), 0)
+}
+
// compute current slot.
func GetCurrentSlot(genesisTime uint64, secondsPerSlot uint64) uint64 {
now := uint64(time.Now().Unix())
diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go
index 6d361b48388..35d23bd1d15 100644
--- a/cmd/abigen/main.go
+++ b/cmd/abigen/main.go
@@ -25,6 +25,8 @@ import (
"regexp"
"strings"
+ "github.com/ledgerwatch/erigon-lib/common"
+
"github.com/ledgerwatch/log/v3"
"github.com/urfave/cli/v2"
@@ -180,7 +182,7 @@ func abigen(c *cli.Context) error {
} else {
// Generate the list of types to exclude from binding
exclude := make(map[string]bool)
- for _, kind := range utils.SplitAndTrim(c.String(excFlag.Name)) {
+ for _, kind := range common.CliString2Array(c.String(excFlag.Name)) {
exclude[strings.ToLower(kind)] = true
}
var err error
diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go
index ee1bb44e62c..eedde266ad4 100644
--- a/cmd/bootnode/main.go
+++ b/cmd/bootnode/main.go
@@ -117,7 +117,10 @@ func main() {
printNotice(&nodeKey.PublicKey, *realaddr)
- db, err := enode.OpenDB("" /* path */, "" /* tmpDir */)
+ ctx, cancel := common.RootContext()
+ defer cancel()
+
+ db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */)
if err != nil {
panic(err)
}
@@ -127,15 +130,12 @@ func main() {
NetRestrict: restrictList,
}
- ctx, cancel := common.RootContext()
- defer cancel()
-
if *runv5 {
- if _, err := discover.ListenV5(ctx, conn, ln, cfg); err != nil {
+ if _, err := discover.ListenV5(ctx, "any", conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
} else {
- if _, err := discover.ListenUDP(ctx, conn, ln, cfg); err != nil {
+ if _, err := discover.ListenUDP(ctx, "any", conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
}
diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go
index eca589f2fa0..d6f239d2ca1 100644
--- a/cmd/capcli/cli.go
+++ b/cmd/capcli/cli.go
@@ -1,26 +1,81 @@
package main
import (
+ "context"
"fmt"
+ "math"
"os"
"strings"
+ "time"
+
+ "github.com/ledgerwatch/erigon/eth/ethconfig/estimate"
+ "github.com/ledgerwatch/erigon/turbo/debug"
+
+ lg "github.com/anacrolix/log"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/direct"
+ downloader3 "github.com/ledgerwatch/erigon-lib/downloader"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ state2 "github.com/ledgerwatch/erigon-lib/state"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/chain/snapcfg"
+ "github.com/ledgerwatch/erigon-lib/downloader"
- "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
"github.com/ledgerwatch/erigon/cl/abstract"
+ "github.com/ledgerwatch/erigon/cl/antiquary"
"github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/clparams/initial_state"
"github.com/ledgerwatch/erigon/cl/cltypes"
+ persistence2 "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplin1"
+ "github.com/ledgerwatch/erigon/eth/ethconfig"
+ "github.com/ledgerwatch/erigon/params"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/persistence/db_config"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format/getters"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/persistence/state/historical_states_reader"
+ "github.com/ledgerwatch/erigon/cl/phase1/core"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/network"
+ "github.com/ledgerwatch/erigon/cl/phase1/stages"
"github.com/ledgerwatch/erigon/cl/rpc"
+ "github.com/ledgerwatch/erigon/cl/sentinel/peers"
"github.com/ledgerwatch/erigon/cl/transition/impl/eth2"
"github.com/ledgerwatch/erigon/cl/transition/machine"
+ "github.com/ledgerwatch/erigon/cl/utils"
+
+ "github.com/jedib0t/go-pretty/v6/progress"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
+ "github.com/ledgerwatch/log/v3"
"github.com/spf13/afero"
+ "golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
var CLI struct {
Migrate Migrate `cmd:"" help:"migrate from one state to another"`
- Blocks Blocks `cmd:"" help:"download blocks from gossip network"`
+ Blocks Blocks `cmd:"" help:"download blocks from reqresp network"`
+ Epochs Epochs `cmd:"" help:"download epochs from reqresp network"`
+
+ Chain Chain `cmd:"" help:"download the entire chain from reqresp network"`
+ DumpSnapshots DumpSnapshots `cmd:"" help:"generate caplin snapshots"`
+ CheckSnapshots CheckSnapshots `cmd:"" help:"check snapshot folder against content of chain data"`
+ DownloadSnapshots DownloadSnapshots `cmd:"" help:"download snapshots from webseed"`
+ LoopSnapshots LoopSnapshots `cmd:"" help:"loop over snapshots"`
+ RetrieveHistoricalState RetrieveHistoricalState `cmd:"" help:"retrieve historical state from db"`
}
type chainCfg struct {
@@ -33,23 +88,34 @@ func (c *chainCfg) configs() (beaconConfig *clparams.BeaconChainConfig, genesisC
}
type outputFolder struct {
- Output string `help:"where to output to, defaults to tmp directory" default:"/tmp" short:"o"`
+ Datadir string `help:"datadir" default:"~/.local/share/erigon" type:"existingdir"`
}
type withSentinel struct {
Sentinel string `help:"sentinel url" default:"localhost:7777"`
}
+type withPPROF struct {
+ Pprof bool `help:"enable pprof" default:"false"`
+}
+
+func (w *withPPROF) withProfile() {
+ if w.Pprof {
+ debug.StartPProf("localhost:6060", metrics.Setup("localhost:6060", log.Root()))
+ }
+}
+
func (w *withSentinel) connectSentinel() (sentinel.SentinelClient, error) {
- gconn, err := grpc.Dial(w.Sentinel, grpc.WithInsecure())
+ // YOLO message size
+ gconn, err := grpc.Dial(w.Sentinel, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt)))
if err != nil {
return nil, err
}
return sentinel.NewSentinelClient(gconn), nil
}
-func openFs(fsName string) (afero.Fs, error) {
- return afero.NewBasePathFs(afero.NewOsFs(), fsName), nil
+func openFs(fsName string, path string) (afero.Fs, error) {
+ return afero.NewBasePathFs(afero.NewBasePathFs(afero.NewOsFs(), fsName), path), nil
}
type Blocks struct {
@@ -72,7 +138,6 @@ func (b *Blocks) Run(ctx *Context) error {
}
beacon := rpc.NewBeaconRpcP2P(ctx, s, beaconConfig, genesisConfig)
-
err = beacon.SetStatus(
genesisConfig.GenesisValidatorRoot,
beaconConfig.GenesisEpoch,
@@ -81,43 +146,34 @@ func (b *Blocks) Run(ctx *Context) error {
if err != nil {
return err
}
+
+ if b.ToBlock < 0 {
+ b.ToBlock = int(utils.GetCurrentSlot(genesisConfig.GenesisTime, beaconConfig.SecondsPerSlot))
+ }
+
resp, _, err := beacon.SendBeaconBlocksByRangeReq(ctx, uint64(b.FromBlock), uint64(b.ToBlock))
if err != nil {
return fmt.Errorf("error get beacon blocks: %w", err)
}
- d, err := openFs(b.Output)
+ aferoFS, err := openFs(b.Datadir, "caplin/beacon")
if err != nil {
return err
}
+
+ db := mdbx.MustOpen("caplin/db")
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ beaconDB := persistence2.NewBeaconChainDatabaseFilesystem(persistence2.NewAferoRawBlockSaver(aferoFS, beaconConfig), nil, beaconConfig)
for _, vv := range resp {
- v := vv
- err := func() error {
- fname := fmt.Sprintf("b%08d.ssz", v.Block.Slot)
- info, err := d.Stat(fname)
- if err == nil {
- if info.Size() > 0 {
- fmt.Fprintf(os.Stderr, "skipping %s since non 0 file\n", fname)
- }
- }
- bts, err := v.EncodeSSZ(nil)
- if err != nil {
- return err
- }
- fp, err := d.Create(fname)
- if err != nil {
- return err
- }
- defer fp.Close()
- err = fp.Truncate(0)
- if err != nil {
- return err
- }
- _, err = fp.Write(bts)
- if err != nil {
- return err
- }
- return nil
- }()
+ err := beaconDB.WriteBlock(ctx, tx, vv, true)
if err != nil {
return err
}
@@ -125,11 +181,133 @@ func (b *Blocks) Run(ctx *Context) error {
return nil
}
+type Epochs struct {
+ chainCfg
+ outputFolder
+ withSentinel
+
+ Concurrency int `help:"number of epochs to ask concurrently for" name:"concurrency" short:"c" default:"4"`
+
+ FromEpoch int `arg:"" name:"from" default:"0"`
+ ToEpoch int `arg:"" name:"to" default:"-1"`
+}
+
+func (b *Epochs) Run(cctx *Context) error {
+ ctx := cctx.Context
+ s, err := b.withSentinel.connectSentinel()
+ if err != nil {
+ return err
+ }
+ beaconConfig, genesisConfig, err := b.configs()
+ if err != nil {
+ return err
+ }
+
+ aferoFS, err := openFs(b.Datadir, "caplin/beacon")
+ if err != nil {
+ return err
+ }
+
+ beaconDB := persistence.NewBeaconChainDatabaseFilesystem(persistence.NewAferoRawBlockSaver(aferoFS, beaconConfig), nil, beaconConfig)
+
+ beacon := rpc.NewBeaconRpcP2P(ctx, s, beaconConfig, genesisConfig)
+ rpcSource := persistence2.NewBeaconRpcSource(beacon)
+
+ err = beacon.SetStatus(
+ genesisConfig.GenesisValidatorRoot,
+ beaconConfig.GenesisEpoch,
+ genesisConfig.GenesisValidatorRoot,
+ beaconConfig.GenesisSlot)
+ if err != nil {
+ return err
+ }
+
+ if b.ToEpoch < 0 {
+ b.ToEpoch = int(utils.GetCurrentEpoch(genesisConfig.GenesisTime, beaconConfig.SecondsPerSlot, beaconConfig.SlotsPerEpoch))
+ }
+
+ ctx, cn := context.WithCancel(ctx)
+ defer cn()
+ egg, ctx := errgroup.WithContext(ctx)
+
+ totalEpochs := (b.ToEpoch - b.FromEpoch + 1)
+ pw := progress.NewWriter()
+ pw.SetTrackerLength(50)
+ pw.SetMessageWidth(24)
+ pw.SetStyle(progress.StyleDefault)
+ pw.SetUpdateFrequency(time.Millisecond * 100)
+ pw.SetTrackerPosition(progress.PositionRight)
+ pw.Style().Visibility.Percentage = true
+ pw.Style().Visibility.Speed = true
+ pw.Style().Visibility.Value = true
+ pw.Style().Visibility.ETA = true
+ pw.Style().Visibility.ETAOverall = false
+ pw.Style().Visibility.Tracker = true
+ pw.Style().Visibility.TrackerOverall = false
+ pw.Style().Visibility.SpeedOverall = true
+ pw.Style().Options.Separator = ""
+
+ go pw.Render()
+
+ total := int64(uint64(totalEpochs) * beaconConfig.SlotsPerEpoch)
+ tk := &progress.Tracker{
+ Message: fmt.Sprintf("downloading %d blocks", total),
+ Total: total,
+ Units: progress.UnitsDefault,
+ }
+ pw.AppendTracker(tk)
+ tk.UpdateTotal(total)
+
+ egg.SetLimit(b.Concurrency)
+
+ db := mdbx.MustOpen("caplin/db")
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ defer cn()
+ for i := b.FromEpoch; i <= b.ToEpoch; i = i + 1 {
+ ii := i
+ egg.Go(func() error {
+ var blocks *peers.PeeredObject[[]*cltypes.SignedBeaconBlock]
+ for {
+ blocks, err = rpcSource.GetRange(ctx, tx, uint64(ii)*beaconConfig.SlotsPerEpoch, beaconConfig.SlotsPerEpoch)
+ if err != nil {
+ log.Error("dl error", "err", err, "epoch", ii)
+ } else {
+ break
+ }
+ }
+ for _, v := range blocks.Data {
+ tk.Increment(1)
+ _, _ = beaconDB, v
+ err := beaconDB.WriteBlock(ctx, tx, v, true)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ err = egg.Wait()
+ if err != nil {
+ return err
+ }
+ tk.MarkAsDone()
+ return tx.Commit()
+}
+
type Migrate struct {
outputFolder
chainCfg
- State string `arg:"" help:"state to start from (can be url to checkpoint or a file)"`
+ State string `arg:"" help:"state to start from (file or later url to checkpoint)"`
Blocks []string `arg:"" name:"blocks" help:"blocks to migrate, in order" type:"path"`
}
@@ -163,7 +341,11 @@ func (m *Migrate) getBlock(ctx *Context, block string) (*cltypes.SignedBeaconBlo
if err != nil {
return nil, err
}
- blk := &cltypes.SignedBeaconBlock{}
+ b, _, err := m.chainCfg.configs()
+ if err != nil {
+ return nil, err
+ }
+ blk := cltypes.NewSignedBeaconBlock(b)
err = blk.DecodeSSZ(bts, 0)
if err != nil {
return nil, err
@@ -193,3 +375,379 @@ func (m *Migrate) Run(ctx *Context) error {
}
return nil
}
+
+type Chain struct {
+ chainCfg
+ withSentinel
+ outputFolder
+}
+
+func (c *Chain) Run(ctx *Context) error {
+ s, err := c.withSentinel.connectSentinel()
+ if err != nil {
+ return err
+ }
+
+ genesisConfig, _, beaconConfig, networkType, err := clparams.GetConfigsByNetworkName(c.Chain)
+ if err != nil {
+ return err
+ }
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+ log.Info("Started chain download", "chain", c.Chain)
+
+ dirs := datadir.New(c.Datadir)
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ beacon := rpc.NewBeaconRpcP2P(ctx, s, beaconConfig, genesisConfig)
+
+ bs, err := core.RetrieveBeaconState(ctx, beaconConfig, genesisConfig, clparams.GetCheckpointSyncEndpoint(networkType))
+ if err != nil {
+ return err
+ }
+
+ bRoot, err := bs.BlockRoot()
+ if err != nil {
+ return err
+ }
+
+ if err := db.Update(ctx, func(tx kv.RwTx) error {
+ return beacon_indicies.WriteHighestFinalized(tx, bs.Slot())
+ }); err != nil {
+ return err
+ }
+
+ err = beacon.SetStatus(
+ genesisConfig.GenesisValidatorRoot,
+ beaconConfig.GenesisEpoch,
+ genesisConfig.GenesisValidatorRoot,
+ beaconConfig.GenesisSlot)
+ if err != nil {
+ return err
+ }
+
+ downloader := network.NewBackwardBeaconDownloader(ctx, beacon, db)
+ cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, nil), csn, beaconDB, db, nil, genesisConfig, beaconConfig, true, true, bRoot, bs.Slot(), "/tmp", log.Root())
+ return stages.SpawnStageHistoryDownload(cfg, ctx, log.Root())
+}
+
+type DumpSnapshots struct {
+ chainCfg
+ outputFolder
+}
+
+func (c *DumpSnapshots) Run(ctx *Context) error {
+ _, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain)
+ if err != nil {
+ return err
+ }
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+ log.Info("Started chain download", "chain", c.Chain)
+
+ dirs := datadir.New(c.Datadir)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ var to uint64
+ db.View(ctx, func(tx kv.Tx) (err error) {
+ to, err = beacon_indicies.ReadHighestFinalized(tx)
+ return
+ })
+
+ return freezeblocks.DumpBeaconBlocks(ctx, db, beaconDB, 0, to, snaptype.Erigon2RecentMergeLimit, dirs.Tmp, dirs.Snap, estimate.CompressSnapshot.Workers(), log.LvlInfo, log.Root())
+}
+
+type CheckSnapshots struct {
+ chainCfg
+ outputFolder
+ withPPROF
+}
+
+func (c *CheckSnapshots) Run(ctx *Context) error {
+ _, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain)
+ if err != nil {
+ return err
+ }
+ c.withProfile()
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+ log.Info("Started the checking process", "chain", c.Chain)
+ dirs := datadir.New(c.Datadir)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ _, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ var to uint64
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ to, err = beacon_indicies.ReadHighestFinalized(tx)
+ if err != nil {
+ return err
+ }
+
+ to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit
+
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+ if err := csn.ReopenFolder(); err != nil {
+ return err
+ }
+
+ genesisHeader, _, _, err := csn.ReadHeader(0)
+ if err != nil {
+ return err
+ }
+ previousBlockRoot, err := genesisHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ previousBlockSlot := genesisHeader.Header.Slot
+ for i := uint64(1); i < to; i++ {
+ if utils.Min64(0, i-320) > previousBlockSlot {
+ return fmt.Errorf("snapshot %d has invalid slot", i)
+ }
+ // Checking of snapshots is a chain contiguity problem
+ currentHeader, _, _, err := csn.ReadHeader(i)
+ if err != nil {
+ return err
+ }
+ if currentHeader == nil {
+ continue
+ }
+ if currentHeader.Header.ParentRoot != previousBlockRoot {
+ return fmt.Errorf("snapshot %d has invalid parent root", i)
+ }
+ previousBlockRoot, err = currentHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ previousBlockSlot = currentHeader.Header.Slot
+ if i%2000 == 0 {
+ log.Info("Successfully checked", "slot", i)
+ }
+ }
+ return nil
+}
+
+type LoopSnapshots struct {
+ chainCfg
+ outputFolder
+ withPPROF
+
+ Slot uint64 `name:"slot" help:"slot to check"`
+}
+
+func (c *LoopSnapshots) Run(ctx *Context) error {
+ c.withProfile()
+
+ _, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain)
+ if err != nil {
+ return err
+ }
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+ log.Info("Started the checking process", "chain", c.Chain)
+
+ dirs := datadir.New(c.Datadir)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ var to uint64
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ to, err = beacon_indicies.ReadHighestFinalized(tx)
+ if err != nil {
+ return err
+ }
+
+ to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit
+
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+ if err := csn.ReopenFolder(); err != nil {
+ return err
+ }
+
+ br := &snapshot_format.MockBlockReader{}
+ snReader := freezeblocks.NewBeaconSnapshotReader(csn, br, beaconDB, beaconConfig)
+ start := time.Now()
+ for i := c.Slot; i < to; i++ {
+ snReader.ReadBlockBySlot(ctx, tx, i)
+ }
+ log.Info("Successfully checked", "slot", c.Slot, "time", time.Since(start))
+ return nil
+}
+
+type DownloadSnapshots struct {
+ chainCfg
+ outputFolder
+}
+
+func (d *DownloadSnapshots) Run(ctx *Context) error {
+ webSeeds := snapcfg.KnownWebseeds[d.Chain]
+ dirs := datadir.New(d.Datadir)
+
+ _, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(d.Chain)
+ if err != nil {
+ return err
+ }
+
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+
+ _, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ downloadRate, err := datasize.ParseString("16mb")
+ if err != nil {
+ return err
+ }
+
+ uploadRate, err := datasize.ParseString("0mb")
+ if err != nil {
+ return err
+ }
+ version := "erigon: " + params.VersionWithCommit(params.GitCommit)
+
+ downloaderCfg, err := downloadercfg.New(dirs, version, lg.Info, downloadRate, uploadRate, 42069, 10, 3, nil, webSeeds, d.Chain)
+ if err != nil {
+ return err
+ }
+ downloaderCfg.DownloadTorrentFilesFromWebseed = true
+ downlo, err := downloader.New(ctx, downloaderCfg, dirs, log.Root(), log.LvlInfo, true)
+ if err != nil {
+ return err
+ }
+ s, err := state2.NewAggregatorV3(ctx, dirs.Tmp, dirs.Tmp, 200000, db, log.Root())
+ if err != nil {
+ return err
+ }
+ downlo.MainLoopInBackground(false)
+ bittorrentServer, err := downloader3.NewGrpcServer(downlo)
+ if err != nil {
+ return fmt.Errorf("new server: %w", err)
+ }
+ return snapshotsync.WaitForDownloader("CapCliDownloader", ctx, false, snapshotsync.OnlyCaplin, s, tx, freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root()), freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, log.Root())), params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer))
+}
+
+type RetrieveHistoricalState struct {
+ chainCfg
+ outputFolder
+ CompareFile string `help:"compare file" default:""`
+ CompareSlot uint64 `help:"compare slot" default:"0"`
+}
+
+func (r *RetrieveHistoricalState) Run(ctx *Context) error {
+ vt := state_accessors.NewStaticValidatorTable()
+ _, _, beaconConfig, t, err := clparams.GetConfigsByNetworkName(r.Chain)
+ if err != nil {
+ return err
+ }
+ dirs := datadir.New(r.Datadir)
+ rawDB, fs := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ beaconDB, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+ if err := allSnapshots.ReopenFolder(); err != nil {
+ return err
+ }
+ if err := state_accessors.ReadValidatorsTable(tx, vt); err != nil {
+ return err
+ }
+ fmt.Println(allSnapshots.BlocksAvailable(), allSnapshots.Dir())
+
+ var bor *freezeblocks.BorRoSnapshots
+ blockReader := freezeblocks.NewBlockReader(allSnapshots, bor)
+ eth1Getter := getters.NewExecutionSnapshotReader(ctx, blockReader, db)
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+ if err := csn.ReopenFolder(); err != nil {
+ return err
+ }
+ snr := freezeblocks.NewBeaconSnapshotReader(csn, eth1Getter, beaconDB, beaconConfig)
+ gSpot, err := initial_state.GetGenesisState(t)
+ if err != nil {
+ return err
+ }
+
+ hr := historical_states_reader.NewHistoricalStatesReader(beaconConfig, snr, vt, fs, gSpot)
+ start := time.Now()
+ haveState, err := hr.ReadHistoricalState(ctx, tx, r.CompareSlot)
+ if err != nil {
+ return err
+ }
+
+ v := haveState.Version()
+ // encode and decode the state
+ enc, err := haveState.EncodeSSZ(nil)
+ if err != nil {
+ return err
+ }
+ haveState = state.New(beaconConfig)
+ if err := haveState.DecodeSSZ(enc, int(v)); err != nil {
+ return err
+ }
+ endTime := time.Since(start)
+ hRoot, err := haveState.HashSSZ()
+ if err != nil {
+ return err
+ }
+ log.Info("Got state", "slot", haveState.Slot(), "root", libcommon.Hash(hRoot), "elapsed", endTime)
+ if r.CompareFile == "" {
+ return nil
+ }
+ // Read the content of CompareFile in a []byte without afero
+ rawBytes, err := os.ReadFile(r.CompareFile)
+ if err != nil {
+ return err
+ }
+ // Decode the []byte into a state
+ wantState := state.New(beaconConfig)
+ if err := wantState.DecodeSSZ(rawBytes, int(haveState.Version())); err != nil {
+ return err
+ }
+ wRoot, err := wantState.HashSSZ()
+ if err != nil {
+ return err
+ }
+ if hRoot != wRoot {
+ return fmt.Errorf("state mismatch: got %s, want %s", libcommon.Hash(hRoot), libcommon.Hash(wRoot))
+ }
+ return nil
+}
diff --git a/cmd/caplin-phase1/caplin1/run.go b/cmd/caplin-phase1/caplin1/run.go
deleted file mode 100644
index b867c25b2d7..00000000000
--- a/cmd/caplin-phase1/caplin1/run.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package caplin1
-
-import (
- "context"
-
- "github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/cl/freezer"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
- "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
- network2 "github.com/ledgerwatch/erigon/cl/phase1/network"
- "github.com/ledgerwatch/erigon/cl/phase1/stages"
-
- "github.com/Giulio2002/bls"
- "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cl/rpc"
- "github.com/ledgerwatch/log/v3"
-
- "github.com/ledgerwatch/erigon/eth/stagedsync"
-)
-
-func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig,
- engine execution_client.ExecutionEngine, state *state.CachingBeaconState, caplinFreezer freezer.Freezer) error {
- beaconRpc := rpc.NewBeaconRpcP2P(ctx, sentinel, beaconConfig, genesisConfig)
- downloader := network2.NewForwardBeaconDownloader(ctx, beaconRpc)
-
- if caplinFreezer != nil {
- if err := freezer.PutObjectSSZIntoFreezer("beaconState", "caplin_core", 0, state, caplinFreezer); err != nil {
- return err
- }
- }
- forkChoice, err := forkchoice.NewForkChoiceStore(state, engine, caplinFreezer, true)
- if err != nil {
- log.Error("Could not create forkchoice", "err", err)
- return err
- }
- bls.SetEnabledCaching(true)
- state.ForEachValidator(func(v solid.Validator, idx, total int) bool {
- pk := v.PublicKey()
- if err := bls.LoadPublicKeyIntoCache(pk[:], false); err != nil {
- panic(err)
- }
- return true
- })
- gossipManager := network2.NewGossipReceiver(ctx, sentinel, forkChoice, beaconConfig, genesisConfig, caplinFreezer)
- return stages.SpawnStageForkChoice(stages.StageForkChoice(nil, downloader, genesisConfig, beaconConfig, state, nil, gossipManager, forkChoice, caplinFreezer), &stagedsync.StageState{ID: "Caplin"}, nil, ctx)
-}
diff --git a/cmd/caplin-regression/main.go b/cmd/caplin-regression/main.go
index 7a3abb9c9ca..c9a6d7a9343 100644
--- a/cmd/caplin-regression/main.go
+++ b/cmd/caplin-regression/main.go
@@ -2,11 +2,12 @@ package main
import (
"flag"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/turbo/debug"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
"github.com/ledgerwatch/erigon/cmd/caplin-regression/regression"
"github.com/ledgerwatch/log/v3"
"golang.org/x/exp/slices"
@@ -42,7 +43,7 @@ func main() {
)
if *pprof {
// Server for pprof
- debug.StartPProf("localhost:6060", true)
+ debug.StartPProf("localhost:6060", metrics.Setup("localhost:6060", log.Root()))
}
if err != nil {
diff --git a/cmd/caplin-regression/regression/reader.go b/cmd/caplin-regression/regression/reader.go
index 723a82aea38..c21f72a2adf 100644
--- a/cmd/caplin-regression/regression/reader.go
+++ b/cmd/caplin-regression/regression/reader.go
@@ -2,24 +2,24 @@ package regression
import (
"io/fs"
- "io/ioutil"
+ "os"
"path"
"path/filepath"
"sort"
- "github.com/ledgerwatch/erigon/cl/clparams"
+ clparams2 "github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
"github.com/ledgerwatch/erigon/cl/utils"
)
func (r *RegressionTester) readStartingState() (*state.CachingBeaconState, error) {
- stateFile, err := ioutil.ReadFile(path.Join(r.testDirectory, regressionPath, startingStatePath))
+ stateFile, err := os.ReadFile(path.Join(r.testDirectory, regressionPath, startingStatePath))
if err != nil {
return nil, err
}
- s := state.New(&clparams.MainnetBeaconConfig)
- if err := utils.DecodeSSZSnappy(s, stateFile, int(clparams.CapellaVersion)); err != nil {
+ s := state.New(&clparams2.MainnetBeaconConfig)
+ if err := utils.DecodeSSZSnappy(s, stateFile, int(clparams2.CapellaVersion)); err != nil {
return nil, err
}
return s, nil
@@ -34,12 +34,12 @@ func (r *RegressionTester) initBlocks() error {
if info == nil || info.IsDir() || info.Name() != "data.bin" {
return nil
}
- f, err := ioutil.ReadFile(path)
+ f, err := os.ReadFile(path)
if err != nil {
return err
}
b := new(cltypes.SignedBeaconBlock)
- if err := utils.DecodeSSZSnappy(b, f, int(clparams.CapellaVersion)); err != nil {
+ if err := utils.DecodeSSZSnappy(b, f, int(clparams2.CapellaVersion)); err != nil {
return err
}
r.blockList = append(r.blockList, b)
diff --git a/cmd/caplin-regression/regression/tester.go b/cmd/caplin-regression/regression/tester.go
index 4003e04dd40..badaebe5413 100644
--- a/cmd/caplin-regression/regression/tester.go
+++ b/cmd/caplin-regression/regression/tester.go
@@ -1,16 +1,22 @@
package regression
import (
+ "context"
"runtime"
"time"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ solid2 "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/pool"
+
"github.com/Giulio2002/bls"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/dbg"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/cltypes/solid"
- "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
"github.com/ledgerwatch/log/v3"
+ "github.com/spf13/afero"
)
const (
@@ -38,13 +44,13 @@ func (r *RegressionTester) Run(name string, fn func(*forkchoice.ForkChoiceStore,
if err != nil {
return err
}
- store, err := forkchoice.NewForkChoiceStore(state, nil, nil, true)
+ store, err := forkchoice.NewForkChoiceStore(context.Background(), state, nil, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig), fork_graph.NewForkGraphDisk(state, afero.NewMemMapFs()))
if err != nil {
return err
}
log.Info("Loading public keys into memory")
bls.SetEnabledCaching(true)
- state.ForEachValidator(func(v solid.Validator, idx, total int) bool {
+ state.ForEachValidator(func(v solid2.Validator, idx, total int) bool {
pk := v.PublicKey()
if err := bls.LoadPublicKeyIntoCache(pk[:], false); err != nil {
panic(err)
@@ -81,7 +87,7 @@ func TestRegressionWithValidation(store *forkchoice.ForkChoiceStore, block *clty
if err := store.OnBlock(block, false, true); err != nil {
return err
}
- block.Block.Body.Attestations.Range(func(index int, value *solid.Attestation, length int) bool {
+ block.Block.Body.Attestations.Range(func(index int, value *solid2.Attestation, length int) bool {
store.OnAttestation(value, true)
return true
})
diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go
new file mode 100644
index 00000000000..0aba101d066
--- /dev/null
+++ b/cmd/caplin/caplin1/run.go
@@ -0,0 +1,239 @@
+package caplin1
+
+import (
+ "context"
+ "os"
+ "path"
+ "time"
+
+ proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
+ "github.com/ledgerwatch/erigon/cl/antiquary"
+ "github.com/ledgerwatch/erigon/cl/beacon"
+ "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration"
+ "github.com/ledgerwatch/erigon/cl/beacon/handler"
+ "github.com/ledgerwatch/erigon/cl/beacon/synced_data"
+ "github.com/ledgerwatch/erigon/cl/beacon/validatorapi"
+ "github.com/ledgerwatch/erigon/cl/clparams/initial_state"
+ "github.com/ledgerwatch/erigon/cl/cltypes/solid"
+ "github.com/ledgerwatch/erigon/cl/freezer"
+ freezer2 "github.com/ledgerwatch/erigon/cl/freezer"
+ "github.com/ledgerwatch/erigon/eth/ethconfig"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ persistence2 "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/persistence/db_config"
+ "github.com/ledgerwatch/erigon/cl/persistence/format/snapshot_format"
+ state_accessors "github.com/ledgerwatch/erigon/cl/persistence/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice"
+ "github.com/ledgerwatch/erigon/cl/phase1/forkchoice/fork_graph"
+ "github.com/ledgerwatch/erigon/cl/phase1/network"
+ "github.com/ledgerwatch/erigon/cl/phase1/stages"
+ "github.com/ledgerwatch/erigon/cl/pool"
+ "github.com/ledgerwatch/erigon/cl/rpc"
+ "github.com/spf13/afero"
+
+ "github.com/Giulio2002/bls"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func OpenCaplinDatabase(ctx context.Context,
+ databaseConfig db_config.DatabaseConfiguration,
+ beaconConfig *clparams.BeaconChainConfig,
+ rawBeaconChain persistence2.RawBeaconBlockChain,
+ dbPath string,
+ engine execution_client.ExecutionEngine,
+ wipeout bool,
+) (persistence.BeaconChainDatabase, kv.RwDB, error) {
+ dataDirIndexer := path.Join(dbPath, "beacon_indicies")
+ if wipeout {
+ os.RemoveAll(dataDirIndexer)
+ }
+
+ os.MkdirAll(dbPath, 0700)
+
+ db := mdbx.MustOpen(dataDirIndexer)
+
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer tx.Rollback()
+
+ if err := db_config.WriteConfigurationIfNotExist(ctx, tx, databaseConfig); err != nil {
+ return nil, nil, err
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, nil, err
+ }
+ { // start ticking forkChoice
+ go func() {
+ <-ctx.Done()
+ db.Close() // close sql database here
+ }()
+ }
+ return persistence2.NewBeaconChainDatabaseFilesystem(rawBeaconChain, engine, beaconConfig), db, nil
+}
+
+func RunCaplinPhase1(ctx context.Context, sentinel sentinel.SentinelClient, engine execution_client.ExecutionEngine,
+ beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, state *state.CachingBeaconState,
+ caplinFreezer freezer.Freezer, dirs datadir.Dirs, cfg beacon_router_configuration.RouterConfiguration, eth1Getter snapshot_format.ExecutionBlockReaderByNumber,
+ snDownloader proto_downloader.DownloaderClient, backfilling bool, states bool) error {
+ rawDB, af := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ beaconDB, db, err := OpenCaplinDatabase(ctx, db_config.DefaultDatabaseConfiguration, beaconConfig, rawDB, dirs.CaplinIndexing, engine, false)
+ if err != nil {
+ return err
+ }
+ ctx, cn := context.WithCancel(ctx)
+ defer cn()
+
+ beaconRpc := rpc.NewBeaconRpcP2P(ctx, sentinel, beaconConfig, genesisConfig)
+
+ logger := log.New("app", "caplin")
+
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, logger)
+ rcsn := freezeblocks.NewBeaconSnapshotReader(csn, eth1Getter, beaconDB, beaconConfig)
+
+ if caplinFreezer != nil {
+ if err := freezer2.PutObjectSSZIntoFreezer("beaconState", "caplin_core", 0, state, caplinFreezer); err != nil {
+ return err
+ }
+ }
+
+ pool := pool.NewOperationsPool(beaconConfig)
+
+ caplinFcuPath := path.Join(dirs.Tmp, "caplin-forkchoice")
+ os.RemoveAll(caplinFcuPath)
+ err = os.MkdirAll(caplinFcuPath, 0o755)
+ if err != nil {
+ return err
+ }
+ fcuFs := afero.NewBasePathFs(afero.NewOsFs(), caplinFcuPath)
+
+ forkChoice, err := forkchoice.NewForkChoiceStore(ctx, state, engine, caplinFreezer, pool, fork_graph.NewForkGraphDisk(state, fcuFs))
+ if err != nil {
+ logger.Error("Could not create forkchoice", "err", err)
+ return err
+ }
+ bls.SetEnabledCaching(true)
+ state.ForEachValidator(func(v solid.Validator, idx, total int) bool {
+ pk := v.PublicKey()
+ if err := bls.LoadPublicKeyIntoCache(pk[:], false); err != nil {
+ panic(err)
+ }
+ return true
+ })
+ gossipManager := network.NewGossipReceiver(sentinel, forkChoice, beaconConfig, genesisConfig, caplinFreezer)
+ { // start ticking forkChoice
+ go func() {
+ tickInterval := time.NewTicker(50 * time.Millisecond)
+ for {
+ select {
+ case <-tickInterval.C:
+ forkChoice.OnTick(uint64(time.Now().Unix()))
+ case <-ctx.Done():
+ return
+ }
+
+ }
+ }()
+ }
+
+ syncedDataManager := synced_data.NewSyncedDataManager(cfg.Active, beaconConfig)
+ if cfg.Active {
+ apiHandler := handler.NewApiHandler(genesisConfig, beaconConfig, rawDB, db, forkChoice, pool, rcsn, syncedDataManager)
+ headApiHandler := &validatorapi.ValidatorApiHandler{
+ FC: forkChoice,
+ BeaconChainCfg: beaconConfig,
+ GenesisCfg: genesisConfig,
+ }
+ go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{
+ ValidatorApi: headApiHandler,
+ ArchiveApi: apiHandler,
+ }, cfg)
+ log.Info("Beacon API started", "addr", cfg.Address)
+ }
+
+ { // start the gossip manager
+ go gossipManager.Start(ctx)
+ logger.Info("Started Ethereum 2.0 Gossip Service")
+ }
+
+ { // start logging peers
+ go func() {
+ logIntervalPeers := time.NewTicker(1 * time.Minute)
+ for {
+ select {
+ case <-logIntervalPeers.C:
+ if peerCount, err := beaconRpc.Peers(); err == nil {
+ logger.Info("P2P", "peers", peerCount)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+ }
+
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ dbConfig, err := db_config.ReadConfiguration(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ if err := state_accessors.InitializeStaticTables(tx, state); err != nil {
+ return err
+ }
+ if err := beacon_indicies.WriteHighestFinalized(tx, 0); err != nil {
+ return err
+ }
+
+ vTables := state_accessors.NewStaticValidatorTable()
+ // Read the the current table
+ if states {
+ if err := state_accessors.ReadValidatorsTable(tx, vTables); err != nil {
+ return err
+ }
+ }
+ // get the initial state
+ genesisState, err := initial_state.GetGenesisState(clparams.NetworkType(beaconConfig.DepositNetworkID))
+ if err != nil {
+ return err
+ }
+ antiq := antiquary.NewAntiquary(ctx, genesisState, vTables, beaconConfig, dirs, snDownloader, db, csn, rcsn, beaconDB, logger, states, af)
+ // Create the antiquary
+ go func() {
+ if err := antiq.Loop(); err != nil {
+ logger.Error("Antiquary failed", "err", err)
+ }
+ }()
+
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ stageCfg := stages.ClStagesCfg(beaconRpc, antiq, genesisConfig, beaconConfig, state, engine, gossipManager, forkChoice, beaconDB, db, csn, dirs.Tmp, dbConfig, backfilling, syncedDataManager)
+ sync := stages.ConsensusClStages(ctx, stageCfg)
+
+ logger.Info("[Caplin] starting clstages loop")
+ err = sync.StartWithStage(ctx, "WaitForPeers", logger, stageCfg)
+ logger.Info("[Caplin] exiting clstages loop")
+ if err != nil {
+ return err
+ }
+ return err
+}
diff --git a/cmd/caplin/caplincli/config.go b/cmd/caplin/caplincli/config.go
new file mode 100644
index 00000000000..46238b7c3dc
--- /dev/null
+++ b/cmd/caplin/caplincli/config.go
@@ -0,0 +1,124 @@
+package caplincli
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cl/phase1/core/state"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplinflags"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelcli"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelflags"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon/common"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/urfave/cli/v2"
+)
+
+type CaplinCliCfg struct {
+ *sentinelcli.SentinelCliCfg
+
+ CheckpointUri string `json:"checkpoint_uri"`
+ Chaindata string `json:"chaindata"`
+ ErigonPrivateApi string `json:"erigon_private_api"`
+ TransitionChain bool `json:"transition_chain"`
+ InitialSync bool `json:"initial_sync"`
+ NoBeaconApi bool `json:"no_beacon_api"`
+ BeaconApiReadTimeout time.Duration `json:"beacon_api_read_timeout"`
+ BeaconApiWriteTimeout time.Duration `json:"beacon_api_write_timeout"`
+ BeaconAddr string `json:"beacon_addr"`
+ BeaconProtocol string `json:"beacon_protocol"`
+ RecordMode bool `json:"record_mode"`
+ RecordDir string `json:"record_dir"`
+ DataDir string `json:"data_dir"`
+ RunEngineAPI bool `json:"run_engine_api"`
+ EngineAPIAddr string `json:"engine_api_addr"`
+ EngineAPIPort int `json:"engine_api_port"`
+ JwtSecret []byte
+
+ InitalState *state.CachingBeaconState
+ Dirs datadir.Dirs
+}
+
+func SetupCaplinCli(ctx *cli.Context) (cfg *CaplinCliCfg, err error) {
+ cfg = &CaplinCliCfg{}
+ cfg.SentinelCliCfg, err = sentinelcli.SetupSentinelCli(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg.ErigonPrivateApi = ctx.String(caplinflags.ErigonPrivateApiFlag.Name)
+
+ if ctx.String(sentinelflags.BeaconConfigFlag.Name) != "" {
+ var stateByte []byte
+ // Now parse genesis time and genesis fork
+ if *cfg.GenesisCfg, stateByte, err = clparams.ParseGenesisSSZToGenesisConfig(
+ ctx.String(sentinelflags.GenesisSSZFlag.Name),
+ cfg.BeaconCfg.GetCurrentStateVersion(0)); err != nil {
+ return nil, err
+ }
+
+ cfg.InitalState = state.New(cfg.BeaconCfg)
+ if cfg.InitalState.DecodeSSZ(stateByte, int(cfg.BeaconCfg.GetCurrentStateVersion(0))); err != nil {
+ return nil, err
+ }
+ }
+
+ cfg.NoBeaconApi = ctx.Bool(caplinflags.NoBeaconApi.Name)
+ cfg.BeaconApiReadTimeout = time.Duration(ctx.Uint64(caplinflags.BeaconApiReadTimeout.Name)) * time.Second
+ cfg.BeaconApiWriteTimeout = time.Duration(ctx.Uint(caplinflags.BeaconApiWriteTimeout.Name)) * time.Second
+ cfg.BeaconAddr = fmt.Sprintf("%s:%d", ctx.String(caplinflags.BeaconApiAddr.Name), ctx.Int(caplinflags.BeaconApiPort.Name))
+ cfg.BeaconProtocol = "tcp"
+ cfg.RecordMode = ctx.Bool(caplinflags.RecordModeFlag.Name)
+ cfg.RecordDir = ctx.String(caplinflags.RecordModeDir.Name)
+ cfg.DataDir = ctx.String(utils.DataDirFlag.Name)
+ cfg.Dirs = datadir.New(cfg.DataDir)
+
+ cfg.RunEngineAPI = ctx.Bool(caplinflags.RunEngineAPI.Name)
+ cfg.EngineAPIAddr = ctx.String(caplinflags.EngineApiHostFlag.Name)
+ cfg.EngineAPIPort = ctx.Int(caplinflags.EngineApiPortFlag.Name)
+ if cfg.RunEngineAPI {
+ secret, err := ObtainJwtSecret(ctx)
+ if err != nil {
+ log.Error("Failed to obtain jwt secret", "err", err)
+ cfg.RunEngineAPI = false
+ } else {
+ cfg.JwtSecret = secret
+ }
+ }
+
+ if ctx.String(caplinflags.CheckpointSyncUrlFlag.Name) != "" {
+ cfg.CheckpointUri = ctx.String(caplinflags.CheckpointSyncUrlFlag.Name)
+ } else {
+ cfg.CheckpointUri = clparams.GetCheckpointSyncEndpoint(cfg.NetworkType)
+ }
+
+ cfg.Chaindata = ctx.String(caplinflags.ChaindataFlag.Name)
+
+ cfg.TransitionChain = ctx.Bool(caplinflags.TransitionChainFlag.Name)
+ cfg.InitialSync = ctx.Bool(caplinflags.InitSyncFlag.Name)
+
+ return cfg, err
+}
+
+func ObtainJwtSecret(ctx *cli.Context) ([]byte, error) {
+ path := ctx.String(caplinflags.JwtSecret.Name)
+ if len(strings.TrimSpace(path)) == 0 {
+ return nil, errors.New("Missing jwt secret path")
+ }
+
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
+ if len(jwtSecret) == 32 {
+ return jwtSecret, nil
+ }
+
+ return nil, fmt.Errorf("Invalid JWT secret at %s, invalid size", path)
+}
diff --git a/cmd/sentinel/cli/flags/flags.go b/cmd/caplin/caplinflags/flags.go
similarity index 58%
rename from cmd/sentinel/cli/flags/flags.go
rename to cmd/caplin/caplinflags/flags.go
index cb2284a2d3c..2d3d9361f78 100644
--- a/cmd/sentinel/cli/flags/flags.go
+++ b/cmd/caplin/caplinflags/flags.go
@@ -1,34 +1,35 @@
-package flags
+package caplinflags
import (
+ "github.com/ledgerwatch/erigon/cmd/utils"
"github.com/urfave/cli/v2"
)
+var CliFlags = []cli.Flag{
+ &NoBeaconApi,
+ &BeaconApiReadTimeout,
+ &BeaconApiWriteTimeout,
+ &BeaconApiPort,
+ &BeaconApiAddr,
+ &ChaindataFlag,
+ &BeaconDBModeFlag,
+ &CheckpointSyncUrlFlag,
+ &TransitionChainFlag,
+ &InitSyncFlag,
+ &RecordModeDir,
+ &RecordModeFlag,
+ &RunEngineAPI,
+ &EngineApiHostFlag,
+ &EngineApiPortFlag,
+ &JwtSecret,
+ &utils.DataDirFlag,
+}
+
var (
- SentinelDiscoveryPort = cli.IntFlag{
- Name: "discovery.port",
- Usage: "sets the lightclient port",
- Value: 4000,
- }
- SentinelDiscoveryAddr = cli.StringFlag{
- Name: "discovery.addr",
- Usage: "sets the lightclient discovery addr",
- Value: "127.0.0.1",
- }
- SentinelTcpPort = cli.UintFlag{
- Name: "sentinel.tcp.port",
- Usage: "sets lightclient tcp port",
- Value: 4001,
- }
- SentinelServerPort = cli.IntFlag{
- Name: "sentinel.port",
- Usage: "sets the lightclient server port",
- Value: 7777,
- }
- SentinelServerAddr = cli.StringFlag{
- Name: "sentinel.addr",
- Usage: "sets the lightclient server host addr",
- Value: "localhost",
+ ChaindataFlag = cli.StringFlag{
+ Name: "chaindata",
+ Usage: "chaindata of database",
+ Value: "",
}
NoBeaconApi = cli.BoolFlag{
Name: "no-beacon-api",
@@ -38,12 +39,12 @@ var (
BeaconApiReadTimeout = cli.Uint64Flag{
Name: "beacon.api.read.timeout",
Usage: "Sets the seconds for a read time out in the beacon api",
- Value: 5,
+ Value: 60,
}
BeaconApiWriteTimeout = cli.Uint64Flag{
Name: "beacon.api.write.timeout",
Usage: "Sets the seconds for a write time out in the beacon api",
- Value: 5,
+ Value: 60,
}
BeaconApiAddr = cli.StringFlag{
Name: "beacon.api.addr",
@@ -55,41 +56,7 @@ var (
Usage: "sets the port to listen for beacon api requests",
Value: 5555,
}
- BootnodesFlag = cli.StringFlag{
- Name: "sentinel.bootnodes",
- Usage: "Comma separated enode URLs for P2P discovery bootstrap",
- Value: "",
- }
- BeaconConfigFlag = cli.StringFlag{
- Name: "beacon-config",
- Usage: "Path to beacon config",
- Value: "",
- }
- GenesisSSZFlag = cli.StringFlag{
- Name: "genesis-ssz",
- Usage: "Path to genesis ssz",
- Value: "",
- }
- Chain = cli.StringFlag{
- Name: "chain",
- Usage: "sets the chain specs for the lightclient",
- Value: "mainnet",
- }
- NoDiscovery = cli.BoolFlag{
- Name: "no-discovery",
- Usage: "turn off or on the lightclient finding peers",
- Value: false,
- }
- LocalDiscovery = cli.BoolFlag{
- Name: "local-discovery",
- Usage: "enable to also attempt to find peers over private ips. turning this on may cause issues with hosts such as hetzner",
- Value: false,
- }
- ChaindataFlag = cli.StringFlag{
- Name: "chaindata",
- Usage: "chaindata of database",
- Value: "",
- }
+
BeaconDBModeFlag = cli.StringFlag{
Name: "beacon-db-mode",
Usage: "level of storing on beacon chain, minimal(only 500k blocks stored), full (all blocks stored), light (no blocks stored)",
@@ -100,6 +67,25 @@ var (
Usage: "checkpoint sync endpoint",
Value: "",
}
+ TransitionChainFlag = cli.BoolFlag{
+ Name: "transition-chain",
+ Usage: "enable chain transition",
+ }
+ InitSyncFlag = cli.BoolFlag{
+ Value: false,
+ Name: "initial-sync",
+ Usage: "use initial-sync",
+ }
+ RecordModeFlag = cli.BoolFlag{
+ Value: false,
+ Name: "record-mode",
+ Usage: "enable/disable record mode",
+ }
+ RecordModeDir = cli.StringFlag{
+ Value: "caplin-recordings",
+ Name: "record-dir",
+ Usage: "directory for states and block recordings",
+ }
ErigonPrivateApiFlag = cli.StringFlag{
Name: "private.api.addr",
Usage: "connect to existing erigon instance",
@@ -125,28 +111,4 @@ var (
Usage: "Path to the token that ensures safe connection between CL and EL",
Value: "",
}
- SentinelStaticPeersFlag = cli.StringFlag{
- Name: "sentinel.staticpeers",
- Usage: "connect to comma-separated Consensus static peers",
- Value: "",
- }
- TransitionChainFlag = cli.BoolFlag{
- Name: "transition-chain",
- Usage: "enable chain transition",
- }
- InitSyncFlag = cli.BoolFlag{
- Value: false,
- Name: "initial-sync",
- Usage: "use initial-sync",
- }
- RecordModeFlag = cli.BoolFlag{
- Value: false,
- Name: "record-mode",
- Usage: "enable/disable record mode",
- }
- RecordModeDir = cli.StringFlag{
- Value: "caplin-recordings",
- Name: "record-dir",
- Usage: "directory for states and block recordings",
- }
)
diff --git a/cmd/caplin-phase1/main.go b/cmd/caplin/main.go
similarity index 51%
rename from cmd/caplin-phase1/main.go
rename to cmd/caplin/main.go
index 562de8c625b..91b3c0b35e4 100644
--- a/cmd/caplin-phase1/main.go
+++ b/cmd/caplin/main.go
@@ -1,15 +1,13 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2022 Erigon-Lightclient contributors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package main
@@ -18,29 +16,30 @@ import (
"fmt"
"os"
- "github.com/ledgerwatch/erigon/cl/beacon"
- "github.com/ledgerwatch/erigon/cl/beacon/handler"
- "github.com/ledgerwatch/erigon/cl/freezer"
+ "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration"
+ "github.com/ledgerwatch/erigon/cl/cltypes"
+ "github.com/ledgerwatch/erigon/cl/fork"
+ freezer2 "github.com/ledgerwatch/erigon/cl/freezer"
"github.com/ledgerwatch/erigon/cl/phase1/core"
"github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ execution_client2 "github.com/ledgerwatch/erigon/cl/phase1/execution_client"
+ "github.com/ledgerwatch/erigon/cl/sentinel"
+ "github.com/ledgerwatch/erigon/cl/sentinel/service"
"github.com/ledgerwatch/log/v3"
"github.com/urfave/cli/v2"
- "github.com/ledgerwatch/erigon/cl/cltypes"
- "github.com/ledgerwatch/erigon/cl/fork"
- "github.com/ledgerwatch/erigon/cmd/caplin-phase1/caplin1"
- lcCli "github.com/ledgerwatch/erigon/cmd/sentinel/cli"
- "github.com/ledgerwatch/erigon/cmd/sentinel/cli/flags"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/service"
- lightclientapp "github.com/ledgerwatch/erigon/turbo/app"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplin1"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplincli"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplinflags"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelflags"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon/turbo/app"
"github.com/ledgerwatch/erigon/turbo/debug"
)
func main() {
- app := lightclientapp.MakeApp("caplin-phase1", runCaplinNode, flags.CLDefaultFlags)
+ app := app.MakeApp("caplin", runCaplinNode, append(caplinflags.CliFlags, sentinelflags.CliFlags...))
if err := app.Run(os.Args); err != nil {
_, printErr := fmt.Fprintln(os.Stderr, err)
if printErr != nil {
@@ -51,18 +50,20 @@ func main() {
}
func runCaplinNode(cliCtx *cli.Context) error {
- ctx := context.Background()
- cfg, err := lcCli.SetupConsensusClientCfg(cliCtx)
+ cfg, err := caplincli.SetupCaplinCli(cliCtx)
if err != nil {
log.Error("[Phase1] Could not initialize caplin", "err", err)
+ return err
}
- if _, err := debug.Setup(cliCtx, true /* root logger */); err != nil {
+ if _, _, err := debug.Setup(cliCtx, true /* root logger */); err != nil {
return err
}
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(cfg.LogLvl), log.StderrHandler))
- log.Info("[Phase1]", "chain", cliCtx.String(flags.Chain.Name))
+ log.Info("[Phase1]", "chain", cliCtx.String(utils.ChainFlag.Name))
log.Info("[Phase1] Running Caplin")
// Either start from genesis or a checkpoint
+ ctx, cn := context.WithCancel(context.Background())
+ defer cn()
var state *state.CachingBeaconState
if cfg.InitialSync {
state = cfg.InitalState
@@ -103,10 +104,9 @@ func runCaplinNode(cliCtx *cli.Context) error {
log.Error("[Checkpoint Sync] Failed", "reason", err)
return err
}
- var executionEngine execution_client.ExecutionEngine
+ var executionEngine execution_client2.ExecutionEngine
if cfg.RunEngineAPI {
- fmt.Println(cfg.EngineAPIAddr)
- cc, err := execution_client.NewExecutionClientRPC(ctx, cfg.JwtSecret, cfg.EngineAPIAddr, cfg.EngineAPIPort)
+ cc, err := execution_client2.NewExecutionClientRPC(ctx, cfg.JwtSecret, cfg.EngineAPIAddr, cfg.EngineAPIPort)
if err != nil {
log.Error("could not start engine api", "err", err)
}
@@ -114,24 +114,19 @@ func runCaplinNode(cliCtx *cli.Context) error {
executionEngine = cc
}
- if !cfg.NoBeaconApi {
- apiHandler := handler.NewApiHandler(cfg.GenesisCfg, cfg.BeaconCfg)
- go beacon.ListenAndServe(apiHandler, &beacon.RouterConfiguration{
- Protocol: cfg.BeaconProtocol,
- Address: cfg.BeaconAddr,
- ReadTimeTimeout: cfg.BeaconApiReadTimeout,
- WriteTimeout: cfg.BeaconApiWriteTimeout,
- IdleTimeout: cfg.BeaconApiWriteTimeout,
- })
- log.Info("Beacon API started", "addr", cfg.BeaconAddr)
- }
-
- var caplinFreezer freezer.Freezer
+ var caplinFreezer freezer2.Freezer
if cfg.RecordMode {
- caplinFreezer = &freezer.RootPathOsFs{
+ caplinFreezer = &freezer2.RootPathOsFs{
Root: cfg.RecordDir,
}
}
- return caplin1.RunCaplinPhase1(ctx, sentinel, cfg.BeaconCfg, cfg.GenesisCfg, executionEngine, state, caplinFreezer)
+ return caplin1.RunCaplinPhase1(ctx, sentinel, executionEngine, cfg.BeaconCfg, cfg.GenesisCfg, state, caplinFreezer, cfg.Dirs, beacon_router_configuration.RouterConfiguration{
+ Protocol: cfg.BeaconProtocol,
+ Address: cfg.BeaconAddr,
+ ReadTimeTimeout: cfg.BeaconApiReadTimeout,
+ WriteTimeout: cfg.BeaconApiWriteTimeout,
+ IdleTimeout: cfg.BeaconApiWriteTimeout,
+ Active: !cfg.NoBeaconApi,
+ }, nil, nil, false, false)
}
diff --git a/cmd/devnet/README.md b/cmd/devnet/README.md
index 74ddc735e4a..a364d567ea0 100644
--- a/cmd/devnet/README.md
+++ b/cmd/devnet/README.md
@@ -9,7 +9,7 @@ The devnet code performs 3 main functions:
* It allows for the specification of a series of scenarios which will be run against the nodes on that internal network
* It can optionally run a `support` connection which allows the nodes on the network to be connected to the Erigon diagnostic system
-The specification of both nodes and scenarios for the devenet is done by specifying configuraion objects. These objects are currently build in code using go `structs` but are cabable of being read as configuration.
+The specification of both nodes and scenarios for the devenet is done by specifying configuration objects. These objects are currently build in code using go `structs` but are capable of being read as configuration.
## Devnet runtime start-up
@@ -23,12 +23,12 @@ The devnet runs as a single `go` process which can be started with the following
| metrics | N | false | Enable metrics collection and reporting from devnet nodes |
| metrics.node | N | 0 | At the moment only one node on the network can produce metrics. This value specifies index of the node in the cluster to attach to |
| metrics.port | N | 6060 | The network port of the node to connect to for gather ing metrics |
-| diagnostics.url | N | | URL of the diagnostics system provided by the support team, include unique session PIN, if this is specified the devnet will start a `support` tunnel and connect to the diagnostics platform to provide metrics from the specified node on the devnet |
-| insecure | N | false | Used if `diagnostics.url` is set to allow communication with diagnostics system using self-signed TLS certificates |
+| diagnostics.addr | N | | Address of the diagnostics system provided by the support team, include unique session PIN, if this is specified the devnet will start a `support` tunnel and connect to the diagnostics platform to provide metrics from the specified node on the devnet |
+| insecure | N | false | Used if `diagnostics.addr` is set to allow communication with diagnostics system
## Network Configuration
-Networks configurations are currently specified in code in `main.go` in the `selectNetwork` function. This contains a series of `structs` with the following structue, for eample:
+Networks configurations are currently specified in code in `main.go` in the `selectNetwork` function. This contains a series of `structs` with the following structure, for example:
```go
return &devnet.Network{
@@ -55,11 +55,11 @@ Networks configurations are currently specified in code in `main.go` in the `sel
}, nil
```
-Base IP's and addresses are iterated for each node in the network - to ensure that when the network starts there are no port clashes as the entire nework operates in a single process, hence shares a common host. Individual nodes will be configured with a default set of command line arguments dependent on type. To see the default arguments per node look at the `args\node.go` file where these are specified as tags on the struct members.
+Base IP's and addresses are iterated for each node in the network - to ensure that when the network starts there are no port clashes as the entire network operates in a single process, hence shares a common host. Individual nodes will be configured with a default set of command line arguments dependent on type. To see the default arguments per node look at the `args\node.go` file where these are specified as tags on the struct members.
## Scenario Configuration
-Scenarios are similarly specified in code in `main.go` in the `action` function. This is the initial configration:
+Scenarios are similarly specified in code in `main.go` in the `action` function. This is the initial configuration:
```go
scenarios.Scenario{
@@ -74,9 +74,9 @@ Scenarios are similarly specified in code in `main.go` in the `action` function.
})
```
-Scenarios are created a groups of steps which are created by regestering a `step` handler too see an example of this take a look at the `commands\ping.go` file which adds a ping rpc method (see `PingErigonRpc` above).
+Scenarios are created a groups of steps which are created by registering a `step` handler too see an example of this take a look at the `commands\ping.go` file which adds a ping rpc method (see `PingErigonRpc` above).
-This illustrates the registratio process. The `init` function in the file registers the method with the `scenarios` package - which uses the function name as the default step name. Others can be added with additional string arguments fo the `StepHandler` call where they will treated as regular expressions to be matched when processing scenario steps.
+This illustrates the registration process. The `init` function in the file registers the method with the `scenarios` package - which uses the function name as the default step name. Others can be added with additional string arguments fo the `StepHandler` call where they will treated as regular expressions to be matched when processing scenario steps.
```go
func init() {
diff --git a/cmd/devnet/accounts/accounts.go b/cmd/devnet/accounts/accounts.go
index 032cebdc616..097ca74daf5 100644
--- a/cmd/devnet/accounts/accounts.go
+++ b/cmd/devnet/accounts/accounts.go
@@ -20,6 +20,14 @@ func init() {
core.DevnetSignKey = func(addr libcommon.Address) *ecdsa.PrivateKey {
return SigKey(addr)
}
+
+ devnetEtherbaseAccount := &Account{
+ "DevnetEtherbase",
+ core.DevnetEtherbase,
+ core.DevnetSignPrivateKey,
+ }
+ accountsByAddress[core.DevnetEtherbase] = devnetEtherbaseAccount
+ accountsByName[devnetEtherbaseAccount.Name] = devnetEtherbaseAccount
}
var accountsByAddress = map[libcommon.Address]*Account{}
diff --git a/cmd/devnet/accounts/steps/steps.go b/cmd/devnet/accounts/steps/steps.go
index 5a3d65741a4..3547c2363c8 100644
--- a/cmd/devnet/accounts/steps/steps.go
+++ b/cmd/devnet/accounts/steps/steps.go
@@ -15,6 +15,7 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
"github.com/ledgerwatch/erigon/cmd/devnet/services"
"github.com/ledgerwatch/erigon/cmd/devnet/transactions"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
)
@@ -119,7 +120,7 @@ func SendFunds(ctx context.Context, chainName string, name string, ethAmount flo
logger.Info("Faucet account details", "address", faucet.Address(), "account", accountResult)
- accountCode, err := node.GetCode(faucet.Address(), requests.BlockNumber(traceResult.BlockHash.Hex()))
+ accountCode, err := node.GetCode(faucet.Address(), rpc.AsBlockReference(traceResult.BlockHash))
if err != nil {
return 0, fmt.Errorf("Send transaction failure: get account code failed: %w", err)
@@ -127,7 +128,7 @@ func SendFunds(ctx context.Context, chainName string, name string, ethAmount flo
logger.Info("Faucet account code", "address", faucet.Address(), "code", accountCode)
- callResults, err := node.TraceCall(fmt.Sprintf("0x%x", blockNum), ethapi.CallArgs{
+ callResults, err := node.TraceCall(rpc.AsBlockReference(blockNum), ethapi.CallArgs{
From: &traceResult.Action.From,
To: &traceResult.Action.To,
Data: &traceResult.Action.Input,
@@ -152,7 +153,7 @@ func SendFunds(ctx context.Context, chainName string, name string, ethAmount flo
return 0, fmt.Errorf("Unexpected post transfer faucet balance got: %s:, expected: %s", balance, (&big.Int{}).Sub(facuetStartingBalance, sent))
}
- balance, err = node.GetBalance(account.Address, requests.BlockNumbers.Latest)
+ balance, err = node.GetBalance(account.Address, rpc.LatestBlock)
if err != nil {
return 0, fmt.Errorf("Failed to get post transfer balance: %w", err)
@@ -165,7 +166,7 @@ func SendFunds(ctx context.Context, chainName string, name string, ethAmount flo
return balance.Uint64(), nil
}
-func GetBalance(ctx context.Context, accountName string, blockNum requests.BlockNumber) (uint64, error) {
+func GetBalance(ctx context.Context, accountName string, blockNum rpc.BlockNumber) (uint64, error) {
logger := devnet.Logger(ctx)
node := devnet.CurrentNode(ctx)
@@ -184,7 +185,7 @@ func GetBalance(ctx context.Context, accountName string, blockNum requests.Block
logger.Info("Getting balance", "address", account.Address)
- bal, err := node.GetBalance(account.Address, blockNum)
+ bal, err := node.GetBalance(account.Address, rpc.AsBlockReference(blockNum))
if err != nil {
logger.Error("FAILURE", "error", err)
@@ -203,7 +204,7 @@ func GetNonce(ctx context.Context, address libcommon.Address) (uint64, error) {
node = devnet.SelectBlockProducer(ctx)
}
- res, err := node.GetTransactionCount(address, requests.BlockNumbers.Latest)
+ res, err := node.GetTransactionCount(address, rpc.LatestBlock)
if err != nil {
return 0, fmt.Errorf("failed to get transaction count for address 0x%x: %v", address, err)
diff --git a/cmd/devnet/args/node.go b/cmd/devnet/args/node_args.go
similarity index 71%
rename from cmd/devnet/args/node.go
rename to cmd/devnet/args/node_args.go
index a3bc7fa0c7e..50c73c0e96f 100644
--- a/cmd/devnet/args/node.go
+++ b/cmd/devnet/args/node_args.go
@@ -1,18 +1,25 @@
package args
import (
+ "crypto/ecdsa"
+ "encoding/hex"
"fmt"
"math/big"
"net"
"path/filepath"
"strconv"
+ "github.com/ledgerwatch/erigon/core"
+ "github.com/ledgerwatch/erigon/crypto"
+ "github.com/ledgerwatch/erigon/p2p/enode"
+ "github.com/ledgerwatch/erigon/params"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
"github.com/ledgerwatch/erigon/cmd/devnet/accounts"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
- "github.com/ledgerwatch/erigon/params/networkname"
)
-type Node struct {
+type NodeArgs struct {
requests.RequestGenerator `arg:"-"`
Name string `arg:"-"`
BuildDir string `arg:"positional" default:"./build/bin/devnet" json:"builddir"`
@@ -43,12 +50,15 @@ type Node struct {
MetricsAddr string `arg:"--metrics.addr" json:"metrics.addr,omitempty"`
StaticPeers string `arg:"--staticpeers" json:"staticpeers,omitempty"`
WithoutHeimdall bool `arg:"--bor.withoutheimdall" flag:"" default:"false" json:"bor.withoutheimdall,omitempty"`
- HeimdallGRpc string `arg:"--bor.heimdallgRPC" json:"bor.heimdallgRPC,omitempty"`
+ HeimdallGrpcAddr string `arg:"--bor.heimdallgRPC" json:"bor.heimdallgRPC,omitempty"`
+ WithHeimdallMilestones bool `arg:"--bor.milestone" json:"bor.milestone"`
VMDebug bool `arg:"--vmdebug" flag:"" default:"false" json:"dmdebug"`
-}
-func (node *Node) configure(base Node, nodeNumber int) error {
+ NodeKey *ecdsa.PrivateKey `arg:"-"`
+ NodeKeyHex string `arg:"--nodekeyhex" json:"nodekeyhex,omitempty"`
+}
+func (node *NodeArgs) Configure(base NodeArgs, nodeNumber int) error {
if len(node.Name) == 0 {
node.Name = fmt.Sprintf("%s-%d", base.Chain, nodeNumber)
}
@@ -62,14 +72,19 @@ func (node *Node) configure(base Node, nodeNumber int) error {
node.StaticPeers = base.StaticPeers
+ var err error
+ node.NodeKey, err = crypto.GenerateKey()
+ if err != nil {
+ return err
+ }
+ node.NodeKeyHex = hex.EncodeToString(crypto.FromECDSA(node.NodeKey))
+
node.Metrics = base.Metrics
node.MetricsPort = base.MetricsPort
node.MetricsAddr = base.MetricsAddr
node.Snapshots = base.Snapshots
- var err error
-
node.PrivateApiAddr, _, err = portFromBase(base.PrivateApiAddr, nodeNumber, 1)
if err != nil {
@@ -86,15 +101,39 @@ func (node *Node) configure(base Node, nodeNumber int) error {
node.Port = base.Port + nodeNumber
+ node.WithHeimdallMilestones = base.WithHeimdallMilestones
+
return nil
}
-func (node Node) ChainID() *big.Int {
- return &big.Int{}
+func (node *NodeArgs) GetName() string {
+ return node.Name
+}
+
+func (node *NodeArgs) ChainID() *big.Int {
+ config := params.ChainConfigByChainName(node.Chain)
+ if config == nil {
+ return nil
+ }
+ return config.ChainID
+}
+
+func (node *NodeArgs) GetHttpPort() int {
+ return node.HttpPort
+}
+
+func (node *NodeArgs) GetEnodeURL() string {
+ port := node.Port
+ return enode.NewV4(&node.NodeKey.PublicKey, net.ParseIP("127.0.0.1"), port, port).URLv4()
+}
+
+func (node *NodeArgs) EnableMetrics(port int) {
+ node.Metrics = true
+ node.MetricsPort = port
}
type BlockProducer struct {
- Node
+ NodeArgs
Mine bool `arg:"--mine" flag:"true"`
Etherbase string `arg:"--miner.etherbase"`
DevPeriod int `arg:"--dev.period"`
@@ -105,11 +144,10 @@ type BlockProducer struct {
account *accounts.Account
}
-func (m BlockProducer) Configure(baseNode Node, nodeNumber int) (int, interface{}, error) {
- err := m.configure(baseNode, nodeNumber)
-
+func (m *BlockProducer) Configure(baseNode NodeArgs, nodeNumber int) error {
+ err := m.NodeArgs.Configure(baseNode, nodeNumber)
if err != nil {
- return -1, nil, err
+ return err
}
switch m.Chain {
@@ -117,57 +155,45 @@ func (m BlockProducer) Configure(baseNode Node, nodeNumber int) (int, interface{
if m.DevPeriod == 0 {
m.DevPeriod = 30
}
- m.account = accounts.NewAccount(m.Name() + "-etherbase")
+ m.account = accounts.NewAccount(m.GetName() + "-etherbase")
+ core.DevnetEtherbase = m.account.Address
+ core.DevnetSignPrivateKey = m.account.SigKey()
case networkname.BorDevnetChainName:
- m.account = accounts.NewAccount(m.Name() + "-etherbase")
+ m.account = accounts.NewAccount(m.GetName() + "-etherbase")
+
+ if len(m.HttpApi) == 0 {
+ m.HttpApi = "admin,eth,erigon,web3,net,debug,trace,txpool,parity,ots,bor"
+ }
}
if m.account != nil {
m.Etherbase = m.account.Address.Hex()
}
- return m.HttpPort, m, nil
-}
-
-func (n BlockProducer) Name() string {
- return n.Node.Name
+ return nil
}
-func (n BlockProducer) Account() *accounts.Account {
+func (n *BlockProducer) Account() *accounts.Account {
return n.account
}
-func (n BlockProducer) IsBlockProducer() bool {
+func (n *BlockProducer) IsBlockProducer() bool {
return true
}
type NonBlockProducer struct {
- Node
+ NodeArgs
HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool" json:"http.api"`
TorrentPort string `arg:"--torrent.port" default:"42070" json:"torrent.port"`
NoDiscover string `arg:"--nodiscover" flag:"" default:"true" json:"nodiscover"`
}
-func (n NonBlockProducer) Configure(baseNode Node, nodeNumber int) (int, interface{}, error) {
- err := n.configure(baseNode, nodeNumber)
-
- if err != nil {
- return -1, nil, err
- }
-
- return n.HttpPort, n, nil
-}
-
-func (n NonBlockProducer) Name() string {
- return n.Node.Name
-}
-
-func (n NonBlockProducer) IsBlockProducer() bool {
+func (n *NonBlockProducer) IsBlockProducer() bool {
return false
}
-func (n NonBlockProducer) Account() *accounts.Account {
+func (n *NonBlockProducer) Account() *accounts.Account {
return nil
}
diff --git a/cmd/devnet/args/node_test.go b/cmd/devnet/args/node_args_test.go
similarity index 89%
rename from cmd/devnet/args/node_test.go
rename to cmd/devnet/args/node_args_test.go
index ddd7de4c8c9..a67370b19ea 100644
--- a/cmd/devnet/args/node_test.go
+++ b/cmd/devnet/args/node_args_test.go
@@ -13,7 +13,7 @@ func TestNodeArgs(t *testing.T) {
asMap := map[string]struct{}{}
nodeArgs, _ := args.AsArgs(args.BlockProducer{
- Node: args.Node{
+ NodeArgs: args.NodeArgs{
DataDir: filepath.Join("data", fmt.Sprintf("%d", 1)),
PrivateApiAddr: "localhost:9092",
},
@@ -37,7 +37,7 @@ func TestNodeArgs(t *testing.T) {
}
nodeArgs, _ = args.AsArgs(args.NonBlockProducer{
- Node: args.Node{
+ NodeArgs: args.NodeArgs{
DataDir: filepath.Join("data", fmt.Sprintf("%d", 2)),
StaticPeers: "enode",
PrivateApiAddr: "localhost:9091",
@@ -162,8 +162,26 @@ func producingNodeArgs(dataDir string, nodeNumber int) []string {
authrpcPortArg, _ := parameterFromArgument("--authrpc.port", "8551")
natArg, _ := parameterFromArgument("--nat", "none")
accountSlotsArg, _ := parameterFromArgument("--txpool.accountslots", "16")
-
- return []string{buildDirArg, dataDirArg, chainType, privateApiAddr, httpPortArg, authrpcPortArg, mine, httpApi, ws, natArg, devPeriod, consoleVerbosity, p2pProtocol, downloaderArg, accountSlotsArg}
+ withHeimdallMilestonesArg, _ := parameterFromArgument("--bor.milestone", "false")
+
+ return []string{
+ buildDirArg,
+ dataDirArg,
+ chainType,
+ privateApiAddr,
+ httpPortArg,
+ authrpcPortArg,
+ mine,
+ httpApi,
+ ws,
+ natArg,
+ devPeriod,
+ consoleVerbosity,
+ p2pProtocol,
+ downloaderArg,
+ accountSlotsArg,
+ withHeimdallMilestonesArg,
+ }
}
// nonMiningNodeArgs returns custom args for starting a non-mining node
@@ -182,6 +200,24 @@ func nonProducingNodeArgs(dataDir string, nodeNumber int, enode string) []string
authrpcPortArg, _ := parameterFromArgument("--authrpc.port", "8551")
natArg, _ := parameterFromArgument("--nat", "none")
ws := wsArg
-
- return []string{buildDirArg, dataDirArg, chainType, privateApiAddr, httpPortArg, authrpcPortArg, httpApi, ws, natArg, staticPeers, noDiscover, consoleVerbosity, torrentPort, p2pProtocol, downloaderArg}
+ withHeimdallMilestonesArg, _ := parameterFromArgument("--bor.milestone", "false")
+
+ return []string{
+ buildDirArg,
+ dataDirArg,
+ chainType,
+ privateApiAddr,
+ httpPortArg,
+ authrpcPortArg,
+ httpApi,
+ ws,
+ natArg,
+ staticPeers,
+ noDiscover,
+ consoleVerbosity,
+ torrentPort,
+ p2pProtocol,
+ downloaderArg,
+ withHeimdallMilestonesArg,
+ }
}
diff --git a/cmd/devnet/blocks/checks.go b/cmd/devnet/blocks/checks.go
index 1891e19729a..3f143a69f03 100644
--- a/cmd/devnet/blocks/checks.go
+++ b/cmd/devnet/blocks/checks.go
@@ -4,22 +4,21 @@ import (
"context"
"fmt"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
)
var CompletionChecker = BlockHandlerFunc(
- func(ctx context.Context, node devnet.Node, block *requests.BlockResult, transaction *requests.Transaction) error {
- transactionHash := libcommon.HexToHash(transaction.Hash)
- traceResults, err := node.TraceTransaction(transactionHash)
+ func(ctx context.Context, node devnet.Node, block *requests.Block, transaction *jsonrpc.RPCTransaction) error {
+ traceResults, err := node.TraceTransaction(transaction.Hash)
if err != nil {
return fmt.Errorf("Failed to trace transaction: %s: %w", transaction.Hash, err)
}
for _, traceResult := range traceResults {
- if traceResult.TransactionHash == transactionHash {
+ if traceResult.TransactionHash == transaction.Hash {
if len(traceResult.Error) != 0 {
return fmt.Errorf("Transaction error: %s", traceResult.Error)
}
diff --git a/cmd/devnet/blocks/fees.go b/cmd/devnet/blocks/fees.go
index 94170ab1649..a4820849a73 100644
--- a/cmd/devnet/blocks/fees.go
+++ b/cmd/devnet/blocks/fees.go
@@ -5,21 +5,15 @@ import (
"fmt"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
- "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
+ "github.com/ledgerwatch/erigon/rpc"
)
func BaseFeeFromBlock(ctx context.Context) (uint64, error) {
- var val uint64
- res, err := devnet.SelectNode(ctx).GetBlockDetailsByNumber("latest", false)
+ res, err := devnet.SelectNode(ctx).GetBlockByNumber(ctx, rpc.LatestBlockNumber, false)
+
if err != nil {
return 0, fmt.Errorf("failed to get base fee from block: %v\n", err)
}
- if v, ok := res["baseFeePerGas"]; !ok {
- return val, fmt.Errorf("baseFeePerGas field missing from response")
- } else {
- val = devnetutils.HexToInt(v.(string))
- }
-
- return val, err
+ return res.BaseFee.Uint64(), err
}
diff --git a/cmd/devnet/blocks/generator.go b/cmd/devnet/blocks/generator.go
new file mode 100644
index 00000000000..341f248d699
--- /dev/null
+++ b/cmd/devnet/blocks/generator.go
@@ -0,0 +1,65 @@
+package blocks
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+ "testing"
+
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind/backends"
+ "github.com/ledgerwatch/erigon/core"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/crypto"
+ "github.com/ledgerwatch/erigon/turbo/stages/mock"
+)
+
+type TxFn func(_ *core.BlockGen, backend bind.ContractBackend) (types.Transaction, bool)
+
+type TxGen struct {
+ Fn TxFn
+ Key *ecdsa.PrivateKey
+}
+
+func GenerateBlocks(t *testing.T, gspec *types.Genesis, blocks int, txs map[int]TxGen, txPerBlock func(int) int) (*mock.MockSentry, *core.ChainPack, error) {
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ m := mock.MockWithGenesis(t, gspec, key, false)
+
+ contractBackend := backends.NewTestSimulatedBackendWithConfig(t, gspec.Alloc, gspec.Config, gspec.GasLimit)
+
+ chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, blocks, func(blockNum int, block *core.BlockGen) {
+ var tx types.Transaction
+ var isContractCall bool
+ signer := types.LatestSignerForChainID(nil)
+
+ txCount := txPerBlock(blockNum)
+
+ for i := 0; i < txCount; i++ {
+ if txToSend, ok := txs[i%len(txs)]; ok {
+ tx, isContractCall = txToSend.Fn(block, contractBackend)
+ var err error
+ tx, err = types.SignTx(tx, *signer, txToSend.Key)
+ if err != nil {
+ return
+ }
+ }
+
+ if tx != nil {
+ if !isContractCall {
+ err := contractBackend.SendTransaction(context.Background(), tx)
+ if err != nil {
+ return
+ }
+ }
+
+ block.AddTx(tx)
+ }
+ }
+
+ contractBackend.Commit()
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("generate chain: %w", err)
+ }
+ return m, chain, err
+}
diff --git a/cmd/devnet/blocks/waiter.go b/cmd/devnet/blocks/waiter.go
index c9e85124a3a..05b4280ba1a 100644
--- a/cmd/devnet/blocks/waiter.go
+++ b/cmd/devnet/blocks/waiter.go
@@ -8,20 +8,22 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
"github.com/ledgerwatch/log/v3"
)
type BlockHandler interface {
- Handle(ctx context.Context, node devnet.Node, block *requests.BlockResult, transaction *requests.Transaction) error
+ Handle(ctx context.Context, node devnet.Node, block *requests.Block, transaction *jsonrpc.RPCTransaction) error
}
-type BlockHandlerFunc func(ctx context.Context, node devnet.Node, block *requests.BlockResult, transaction *requests.Transaction) error
+type BlockHandlerFunc func(ctx context.Context, node devnet.Node, block *requests.Block, transaction *jsonrpc.RPCTransaction) error
-func (f BlockHandlerFunc) Handle(ctx context.Context, node devnet.Node, block *requests.BlockResult, transaction *requests.Transaction) error {
+func (f BlockHandlerFunc) Handle(ctx context.Context, node devnet.Node, block *requests.Block, transaction *jsonrpc.RPCTransaction) error {
return f(ctx, node, block, transaction)
}
-type BlockMap map[libcommon.Hash]*requests.BlockResult
+type BlockMap map[libcommon.Hash]*requests.Block
type waitResult struct {
err error
@@ -38,7 +40,7 @@ type blockWaiter struct {
}
type Waiter interface {
- Await(libcommon.Hash) (*requests.BlockResult, error)
+ Await(libcommon.Hash) (*requests.Block, error)
AwaitMany(...libcommon.Hash) (BlockMap, error)
}
@@ -46,7 +48,7 @@ type waitError struct {
err error
}
-func (w waitError) Await(libcommon.Hash) (*requests.BlockResult, error) {
+func (w waitError) Await(libcommon.Hash) (*requests.Block, error) {
return nil, w.err
}
@@ -58,7 +60,7 @@ type wait struct {
waiter *blockWaiter
}
-func (w wait) Await(hash libcommon.Hash) (*requests.BlockResult, error) {
+func (w wait) Await(hash libcommon.Hash) (*requests.Block, error) {
w.waiter.hashes <- map[libcommon.Hash]struct{}{hash: {}}
res := <-w.waiter.result
@@ -116,11 +118,12 @@ func BlockWaiter(ctx context.Context, handler BlockHandler) (Waiter, context.Can
}
func (c *blockWaiter) receive(ctx context.Context, node devnet.Node, headers chan types.Header) {
- blockMap := map[libcommon.Hash]*requests.BlockResult{}
+ blockMap := map[libcommon.Hash]*requests.Block{}
defer close(c.result)
for header := range headers {
+
select {
case <-ctx.Done():
c.headersSub.Unsubscribe()
@@ -129,9 +132,7 @@ func (c *blockWaiter) receive(ctx context.Context, node devnet.Node, headers cha
default:
}
- blockNum := header.Number
-
- block, err := node.GetBlockByNumber(blockNum.Uint64(), true)
+ block, err := node.GetBlockByNumber(ctx, rpc.AsBlockNumber(header.Number), true)
if err != nil {
c.logger.Error("Block waiter failed to get block", "err", err)
@@ -143,14 +144,12 @@ func (c *blockWaiter) receive(ctx context.Context, node devnet.Node, headers cha
}
for i := range block.Transactions {
- tx := &block.Transactions[i] // avoid implicit memory aliasing
-
- txHash := libcommon.HexToHash(tx.Hash)
+ tx := block.Transactions[i] // avoid implicit memory aliasing
- if _, ok := c.waitHashes[txHash]; ok {
- c.logger.Info("Tx included into block", "txHash", txHash, "blockNum", block.BlockNumber)
- blockMap[txHash] = block
- delete(c.waitHashes, txHash)
+ if _, ok := c.waitHashes[tx.Hash]; ok {
+ c.logger.Info("Tx included into block", "txHash", tx.Hash, "blockNum", block.Number)
+ blockMap[tx.Hash] = block
+ delete(c.waitHashes, tx.Hash)
if len(c.waitHashes) == 0 {
c.headersSub.Unsubscribe()
diff --git a/cmd/devnet/contracts/backend.go b/cmd/devnet/contracts/backend.go
index d321ec9ef80..950e6db078d 100644
--- a/cmd/devnet/contracts/backend.go
+++ b/cmd/devnet/contracts/backend.go
@@ -7,10 +7,13 @@ import (
ethereum "github.com/ledgerwatch/erigon"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon/accounts/abi/bind"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
- "github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
)
func NewBackend(node devnet.Node) bind.ContractBackend {
@@ -22,19 +25,44 @@ type contractBackend struct {
}
func (cb contractBackend) CodeAt(ctx context.Context, contract libcommon.Address, blockNumber *big.Int) ([]byte, error) {
- return cb.node.GetCode(contract, requests.AsBlockNumber(blockNumber))
+ return cb.node.GetCode(contract, rpc.AsBlockReference(blockNumber))
}
func (cb contractBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
- return nil, fmt.Errorf("TODO")
+ var gasPrice *hexutil.Big
+ var value *hexutil.Big
+
+ if call.Value != nil {
+ value = (*hexutil.Big)(call.Value.ToBig())
+ }
+
+ if call.GasPrice != nil {
+ gasPrice = (*hexutil.Big)(call.GasPrice.ToBig())
+ }
+
+ var blockRef rpc.BlockReference
+ if blockNumber != nil {
+ blockRef = rpc.AsBlockReference(blockNumber)
+ } else {
+ blockRef = rpc.LatestBlock
+ }
+
+ return cb.node.Call(ethapi.CallArgs{
+ From: &call.From,
+ To: call.To,
+ Gas: (*hexutil.Uint64)(&call.Gas),
+ GasPrice: gasPrice,
+ Value: value,
+ Data: (*hexutility.Bytes)(&call.Data),
+ }, blockRef, nil)
}
func (cb contractBackend) PendingCodeAt(ctx context.Context, account libcommon.Address) ([]byte, error) {
- return cb.node.GetCode(account, requests.BlockNumbers.Pending)
+ return cb.node.GetCode(account, rpc.PendingBlock)
}
func (cb contractBackend) PendingNonceAt(ctx context.Context, account libcommon.Address) (uint64, error) {
- res, err := cb.node.GetTransactionCount(account, requests.BlockNumbers.Pending)
+ res, err := cb.node.GetTransactionCount(account, rpc.PendingBlock)
if err != nil {
return 0, fmt.Errorf("failed to get transaction count for address 0x%x: %v", account, err)
diff --git a/cmd/devnet/contracts/build/ChainIdMixin.abi b/cmd/devnet/contracts/build/ChainIdMixin.abi
new file mode 100644
index 00000000000..0679dc7f982
--- /dev/null
+++ b/cmd/devnet/contracts/build/ChainIdMixin.abi
@@ -0,0 +1 @@
+[{"inputs":[],"name":"CHAINID","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"networkId","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ChainIdMixin.bin b/cmd/devnet/contracts/build/ChainIdMixin.bin
new file mode 100644
index 00000000000..a42f094958c
--- /dev/null
+++ b/cmd/devnet/contracts/build/ChainIdMixin.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b50610102806100206000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80639025e64c146037578063cc79f97b14606b575b600080fd5b605760405180604001604052806002815260200161053960f01b81525081565b604051606291906080565b60405180910390f35b607361053981565b6040519081526020016062565b600060208083528351808285015260005b8181101560ab578581018301518582016040015282016091565b506000604082860101526040601f19601f830116850101925050509291505056fea2646970667358221220e6870cdfde407f0cde56918e0f6a3b3176e22f8f29210f65969323fb68f9a05b64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ChildSender.abi b/cmd/devnet/contracts/build/ChildSender.abi
new file mode 100644
index 00000000000..c0fb931d95a
--- /dev/null
+++ b/cmd/devnet/contracts/build/ChildSender.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"address","name":"childStateReceiver_","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"message","type":"bytes"}],"name":"MessageSent","type":"event"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"sendToRoot","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"sent","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ChildSender.bin b/cmd/devnet/contracts/build/ChildSender.bin
new file mode 100644
index 00000000000..d0b73572077
--- /dev/null
+++ b/cmd/devnet/contracts/build/ChildSender.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b506040516102b33803806102b383398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b610220806100936000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80637bf786f81461003b5780638152e5021461006d575b600080fd5b61005b61004936600461012c565b60016020526000908152604090205481565b60405190815260200160405180910390f35b61008061007b36600461015c565b610082565b005b3360009081526001602052604090205461009c8282610175565b33600081815260016020908152604080832094909455905483516001600160a01b039091169181019190915291820152606081018390526100ee906080016040516020818303038152906040526100f2565b5050565b7f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b03681604051610121919061019c565b60405180910390a150565b60006020828403121561013e57600080fd5b81356001600160a01b038116811461015557600080fd5b9392505050565b60006020828403121561016e57600080fd5b5035919050565b8082018082111561019657634e487b7160e01b600052601160045260246000fd5b92915050565b600060208083528351808285015260005b818110156101c9578581018301518582016040015282016101ad565b506000604082860101526040601f19601f830116850101925050509291505056fea26469706673582212202b5e4ad44349bb7aa70272a65afd939d928b9e646835ef4b7e65acff3d07b21364736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ExitPayloadReader.abi b/cmd/devnet/contracts/build/ExitPayloadReader.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/ExitPayloadReader.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ExitPayloadReader.bin b/cmd/devnet/contracts/build/ExitPayloadReader.bin
new file mode 100644
index 00000000000..0e7370cd4ad
--- /dev/null
+++ b/cmd/devnet/contracts/build/ExitPayloadReader.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea2646970667358221220bd0c6a06e3532455fce52c3e139cac58317944b0cd3296a2f68dc6791cc2b16c64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/Governable.abi b/cmd/devnet/contracts/build/Governable.abi
new file mode 100644
index 00000000000..5be7d7d5b1b
--- /dev/null
+++ b/cmd/devnet/contracts/build/Governable.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"address","name":"_governance","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"governance","outputs":[{"internalType":"contract IGovernance","name":"","type":"address"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/Governable.bin b/cmd/devnet/contracts/build/Governable.bin
new file mode 100644
index 00000000000..a57aa5e0966
--- /dev/null
+++ b/cmd/devnet/contracts/build/Governable.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b5060405161012338038061012383398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b6091806100926000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80635aa6e67514602d575b600080fd5b600054603f906001600160a01b031681565b6040516001600160a01b03909116815260200160405180910390f3fea26469706673582212205573b7ff38baa0f309eb23dd31fd1b16c4f5bf2da9f9ffe920ee2553aab47bf664736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ICheckpointManager.abi b/cmd/devnet/contracts/build/ICheckpointManager.abi
new file mode 100644
index 00000000000..67ba5980f82
--- /dev/null
+++ b/cmd/devnet/contracts/build/ICheckpointManager.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"headerBlocks","outputs":[{"internalType":"bytes32","name":"root","type":"bytes32"},{"internalType":"uint256","name":"start","type":"uint256"},{"internalType":"uint256","name":"end","type":"uint256"},{"internalType":"uint256","name":"createdAt","type":"uint256"},{"internalType":"address","name":"proposer","type":"address"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ICheckpointManager.bin b/cmd/devnet/contracts/build/ICheckpointManager.bin
new file mode 100644
index 00000000000..8597a1990b2
--- /dev/null
+++ b/cmd/devnet/contracts/build/ICheckpointManager.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b5060f38061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806341539d4a14602d575b600080fd5b6070603836600460a5565b60006020819052908152604090208054600182015460028301546003840154600490940154929391929091906001600160a01b031685565b6040805195865260208601949094529284019190915260608301526001600160a01b0316608082015260a00160405180910390f35b60006020828403121560b657600080fd5b503591905056fea26469706673582212206d025d9e83266d3f4dc870d2f3be47196b093117ab5e4367f14e44e42c9b146564736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/IGovernance.abi b/cmd/devnet/contracts/build/IGovernance.abi
new file mode 100644
index 00000000000..8221cabdf28
--- /dev/null
+++ b/cmd/devnet/contracts/build/IGovernance.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"update","outputs":[],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/IGovernance.bin b/cmd/devnet/contracts/build/IGovernance.bin
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cmd/devnet/contracts/build/IRootChain.abi b/cmd/devnet/contracts/build/IRootChain.abi
new file mode 100644
index 00000000000..e100705743c
--- /dev/null
+++ b/cmd/devnet/contracts/build/IRootChain.abi
@@ -0,0 +1 @@
+[{"inputs":[],"name":"currentHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLastChildBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"slash","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256[3][]","name":"sigs","type":"uint256[3][]"}],"name":"submitCheckpoint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"bytes","name":"sigs","type":"bytes"}],"name":"submitHeaderBlock","outputs":[],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/IRootChain.bin b/cmd/devnet/contracts/build/IRootChain.bin
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cmd/devnet/contracts/build/Merkle.abi b/cmd/devnet/contracts/build/Merkle.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/Merkle.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/Merkle.bin b/cmd/devnet/contracts/build/Merkle.bin
new file mode 100644
index 00000000000..df2eab89501
--- /dev/null
+++ b/cmd/devnet/contracts/build/Merkle.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea26469706673582212207739c5fda7060eb97027fb86aa71b29b91315b4cad140f6db0f65d635eb1338764736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/MerklePatriciaProof.abi b/cmd/devnet/contracts/build/MerklePatriciaProof.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/MerklePatriciaProof.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/MerklePatriciaProof.bin b/cmd/devnet/contracts/build/MerklePatriciaProof.bin
new file mode 100644
index 00000000000..613cb8ffd67
--- /dev/null
+++ b/cmd/devnet/contracts/build/MerklePatriciaProof.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea2646970667358221220412fe39cabba782d636d2bc17109d343bfc3f003512a1188914f2742f22e22b364736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ProxyStorage.abi b/cmd/devnet/contracts/build/ProxyStorage.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/ProxyStorage.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/ProxyStorage.bin b/cmd/devnet/contracts/build/ProxyStorage.bin
new file mode 100644
index 00000000000..108862e1f4d
--- /dev/null
+++ b/cmd/devnet/contracts/build/ProxyStorage.bin
@@ -0,0 +1 @@
+6080604052348015600f57600080fd5b50603f80601d6000396000f3fe6080604052600080fdfea2646970667358221220f19fe3ff1b547e638b245a1dfab869004f680ce7af6744181151cfa632254b1564736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RLPReader.abi b/cmd/devnet/contracts/build/RLPReader.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/RLPReader.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RLPReader.bin b/cmd/devnet/contracts/build/RLPReader.bin
new file mode 100644
index 00000000000..fe1e4b7272d
--- /dev/null
+++ b/cmd/devnet/contracts/build/RLPReader.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea264697066735822122052e9a349bc8a4fd9c5d36d064e612b59e39ba032ed6620df6cc57822b5d7171164736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/Registry.abi b/cmd/devnet/contracts/build/Registry.abi
new file mode 100644
index 00000000000..f44f3952dab
--- /dev/null
+++ b/cmd/devnet/contracts/build/Registry.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"address","name":"_governance","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"key","type":"bytes32"},{"indexed":true,"internalType":"address","name":"previousContract","type":"address"},{"indexed":true,"internalType":"address","name":"newContract","type":"address"}],"name":"ContractMapUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"predicate","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"}],"name":"PredicateAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"predicate","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"}],"name":"PredicateRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"validator","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"}],"name":"ProofValidatorAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"validator","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"}],"name":"ProofValidatorRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"rootToken","type":"address"},{"indexed":true,"internalType":"address","name":"childToken","type":"address"}],"name":"TokenMapped","type":"event"},{"inputs":[{"internalType":"address","name":"predicate","type":"address"}],"name":"addErc20Predicate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"predicate","type":"address"}],"name":"addErc721Predicate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"predicate","type":"address"},{"internalType":"enum Registry.Type","name":"_type","type":"uint8"}],"name":"addPredicate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"childToRootToken","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"contractMap","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"erc20Predicate","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"erc721Predicate","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getChildChainAndStateSender","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getDepositManagerAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getSlashingManagerAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getStakeManagerAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getValidatorShareAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getWethTokenAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getWithdrawManagerAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"governance","outputs":[{"internalType":"contract IGovernance","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"childToken","type":"address"}],"name":"isChildTokenErc721","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"isERC721","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_token","type":"address"}],"name":"isTokenMapped","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_token","type":"address"}],"name":"isTokenMappedAndGetPredicate","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_token","type":"address"}],"name":"isTokenMappedAndIsErc721","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_rootToken","type":"address"},{"internalType":"address","name":"_childToken","type":"address"},{"internalType":"bool","name":"_isERC721","type":"bool"}],"name":"mapToken","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"predicates","outputs":[{"internalType":"enum Registry.Type","name":"_type","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"proofValidatorContracts","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"predicate","type":"address"}],"name":"removePredicate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"rootToChildToken","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"_key","type":"bytes32"},{"internalType":"address","name":"_address","type":"address"}],"name":"updateContractMap","outputs":[],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/Registry.bin b/cmd/devnet/contracts/build/Registry.bin
new file mode 100644
index 00000000000..a051310fa6e
--- /dev/null
+++ b/cmd/devnet/contracts/build/Registry.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b50604051610e17380380610e1783398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b610d84806100936000396000f3fe608060405234801561001057600080fd5b506004361061018e5760003560e01c80636e86b770116100de578063c881560f11610097578063d580b41811610071578063d580b418146105b0578063daa09e5414610609578063e117694b1461062c578063ea60c7c41461063f57600080fd5b8063c881560f14610561578063cac39a0514610574578063ce2611861461059d57600080fd5b80636e86b7701461043d5780638b9c948914610466578063930df82e146104bf578063b686497614610518578063bbfe7cd31461052b578063c4b875d31461053e57600080fd5b80632d4e1dc71161014b5780635aa6e675116101255780635aa6e67514610358578063627942da1461036b5780636416c1831461037e578063648b8178146103ac57600080fd5b80632d4e1dc71461030257806337b1d585146103155780633af395e51461034557600080fd5b806301f07db51461019357806305f20595146101bb5780630c9effd0146101d05780631c9486ef1461023d5780632026cfdc14610296578063287be3e4146102a9575b600080fd5b6101a66101a1366004610c22565b610668565b60405190151581526020015b60405180910390f35b6101ce6101c9366004610c22565b6106e8565b005b7f396a39c7e290685f408e5373e677285002a403b06145527a7a84a38a30d9ef1060005260036020527fd600d169c07fd47997cb07cc95ab0ac285b9f541f65f50c3956e76fb037128e4546001600160a01b03165b6040516001600160a01b0390911681526020016101b2565b7ff32233bced9bbd82f0754425f51b5ffaf897dacec3c8ac3384a66e38ea701ec860005260036020527f2c73d66689d1be6372940b8899dbabec50bc3330f12d75d9fc6019218a930993546001600160a01b0316610225565b6101ce6102a4366004610c44565b6107b5565b7f56e86af72b94d3aa725a2e35243d6acbf3dc1ada7212033defd5140c5fcb6a9d60005260036020527f239c91ef4f470b7eb660973e3444cb6cdcc8c384fbcc19cf4b3d1698f5c0fa6e546001600160a01b0316610225565b6101ce610310366004610c22565b610832565b610338610323366004610c22565b60086020526000908152604090205460ff1681565b6040516101b29190610c86565b6101ce610353366004610cae565b6108c7565b600054610225906001600160a01b031681565b610225610379366004610c22565b6109bd565b6101a661038c366004610c22565b6001600160a01b0390811660009081526004602052604090205416151590565b600360209081527f2dff30286899e5fc9aa64cfe1341ad29e9a16800a4191daec50e82fc1b6875ca547fa6604f6f9e958c3372fa784685d6216654aef3be0a2255a92dfbab50f7d0b8546000527f0672ac9c59252897b175d7a0a887cab7e9f75ad2b91a0c45d23da560c8a3c9a054604080516001600160a01b0393841681529290911692820192909252016101b2565b61022561044b366004610c22565b6005602052600090815260409020546001600160a01b031681565b7f9fa53c3a84542bc4f793667c49cfa1cbb5e8df2ae0612ada001973a5f448154b60005260036020527f89ad1c8eaa942d5b27028437c407c5982b47bd810a15834238f23ac6ed250edd546001600160a01b0316610225565b7f261885af88107524c32b47256ca1d87cafbd893a7e8cc972ae41fdfb0270335e60005260036020527f2f04f48dbb401768947a64fe05ee6ccaac2d5a350d2beacfdf4d30893026edcb546001600160a01b0316610225565b600154610225906001600160a01b031681565b6101a6610539366004610c22565b6109ef565b6101a661054c366004610c22565b60066020526000908152604090205460ff1681565b600254610225906001600160a01b031681565b610225610582366004610ce9565b6003602052600090815260409020546001600160a01b031681565b6101ce6105ab366004610c22565b610a7a565b7f1ca32e38cf142cb762bc7468b9a3eac49626b43585fcbd6d3b807227216286c260005260036020527f5f5d97228f36044d803d42ad8e4b63042a170d1d6f8a046f7c944b93cc6dbd81546001600160a01b0316610225565b6101a6610617366004610c22565b60076020526000908152604090205460ff1681565b6101ce61063a366004610d02565b610aa8565b61022561064d366004610c22565b6004602052600090815260409020546001600160a01b031681565b6001600160a01b038082166000908152600460205260408120549091166106c95760405162461bcd60e51b815260206004820152601060248201526f1513d2d15397d393d517d3505414115160821b60448201526064015b60405180910390fd5b506001600160a01b031660009081526007602052604090205460ff1690565b6106f0610b9b565b6001600160a01b03811660009081526008602052604081205460ff16600381111561071d5761071d610c70565b0361076a5760405162461bcd60e51b815260206004820152601860248201527f50726564696361746520646f6573206e6f74206578697374000000000000000060448201526064016106c0565b6001600160a01b038116600081815260086020526040808220805460ff19169055513392917fd8b3c0235cefc5e19393dedb56c1ece6b41447ef932d7c6b34eb150a4b5d5f4991a350565b6107bd610b9b565b6000828152600360205260408082205490516001600160a01b038085169392169185917fffb8cfd9cecbede837eec100fb8e17560ea22bf018e065366ee5e2ff5e0bd10c9190a460009182526003602052604090912080546001600160a01b0319166001600160a01b03909216919091179055565b61083a610b9b565b6001600160a01b03811661089e5760405162461bcd60e51b815260206004820152602560248201527f43616e206e6f7420616464206e756c6c20616464726573732061732070726564604482015264696361746560d81b60648201526084016106c0565b600180546001600160a01b0319166001600160a01b0383161781556108c49082906108c7565b50565b6108cf610b9b565b6001600160a01b03821660009081526008602052604081205460ff1660038111156108fc576108fc610c70565b146109495760405162461bcd60e51b815260206004820152601760248201527f50726564696361746520616c726561647920616464656400000000000000000060448201526064016106c0565b6001600160a01b0382166000908152600860205260409020805482919060ff1916600183600381111561097e5761097e610c70565b021790555060405133906001600160a01b038416907f0ea727f9bef04eb9a0e0da4d8fbb5b5319ddac03834baded53f84e0dcdddfedf90600090a35050565b60006109c882610668565b156109de5750506002546001600160a01b031690565b50506001546001600160a01b031690565b6001600160a01b0380821660009081526005602052604081205490911680610a595760405162461bcd60e51b815260206004820152601960248201527f4368696c6420746f6b656e206973206e6f74206d61707065640000000000000060448201526064016106c0565b6001600160a01b031660009081526007602052604090205460ff1692915050565b610a82610b9b565b600280546001600160a01b0319166001600160a01b0383161781556108c49082906108c7565b610ab0610b9b565b6001600160a01b03831615801590610ad057506001600160a01b03821615155b610b145760405162461bcd60e51b8152602060048201526015602482015274494e56414c49445f544f4b454e5f4144445245535360581b60448201526064016106c0565b6001600160a01b03838116600081815260046020908152604080832080546001600160a01b0319908116968916968717909155858452600583528184208054909116851790558383526007909152808220805460ff1916861515179055517f85920d35e6c72f6b2affffa04298b0cecfeba86e4a9f407df661f1cb8ab5e6179190a3505050565b6000546001600160a01b03163314610c045760405162461bcd60e51b815260206004820152602660248201527f4f6e6c7920676f7665726e616e636520636f6e747261637420697320617574686044820152651bdc9a5e995960d21b60648201526084016106c0565b565b80356001600160a01b0381168114610c1d57600080fd5b919050565b600060208284031215610c3457600080fd5b610c3d82610c06565b9392505050565b60008060408385031215610c5757600080fd5b82359150610c6760208401610c06565b90509250929050565b634e487b7160e01b600052602160045260246000fd5b6020810160048310610ca857634e487b7160e01b600052602160045260246000fd5b91905290565b60008060408385031215610cc157600080fd5b610cca83610c06565b9150602083013560048110610cde57600080fd5b809150509250929050565b600060208284031215610cfb57600080fd5b5035919050565b600080600060608486031215610d1757600080fd5b610d2084610c06565b9250610d2e60208501610c06565b915060408401358015158114610d4357600080fd5b80915050925092509256fea26469706673582212209592b53634fe553b451696a4b71664cb9e1d3952c10f1c50ab3bb728dac3c4a364736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChain.abi b/cmd/devnet/contracts/build/RootChain.abi
new file mode 100644
index 00000000000..8a0765be67a
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChain.abi
@@ -0,0 +1 @@
+[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"reward","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"start","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"end","type":"uint256"},{"indexed":false,"internalType":"bytes32","name":"root","type":"bytes32"}],"name":"NewHeaderBlock","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"}],"name":"ResetHeaderBlock","type":"event"},{"inputs":[],"name":"CHAINID","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"_nextHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLastChildBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"headerBlocks","outputs":[{"internalType":"bytes32","name":"root","type":"bytes32"},{"internalType":"uint256","name":"start","type":"uint256"},{"internalType":"uint256","name":"end","type":"uint256"},{"internalType":"uint256","name":"createdAt","type":"uint256"},{"internalType":"address","name":"proposer","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"heimdallId","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"networkId","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"string","name":"_heimdallId","type":"string"}],"name":"setHeimdallId","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"setNextHeaderBlock","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"slash","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256[3][]","name":"","type":"uint256[3][]"}],"name":"submitCheckpoint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"","type":"bytes"},{"internalType":"bytes","name":"","type":"bytes"}],"name":"submitHeaderBlock","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"numDeposits","type":"uint256"}],"name":"updateDepositId","outputs":[{"internalType":"uint256","name":"depositId","type":"uint256"}],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChain.bin b/cmd/devnet/contracts/build/RootChain.bin
new file mode 100644
index 00000000000..e9039019449
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChain.bin
@@ -0,0 +1 @@
+6080604052612710600255600160035534801561001b57600080fd5b50610aa48061002b6000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063b87e1b661161008c578063d5b844eb11610066578063d5b844eb14610207578063ea0688b314610221578063ec7e485514610234578063fbc3dd361461023c57600080fd5b8063b87e1b66146101e3578063cc79f97b146101eb578063cf24a0ea146101f457600080fd5b80635391f483116100c85780635391f483146101815780636a791f11146101a25780638d978d88146101b05780639025e64c146101b957600080fd5b80632da25de3146100ef57806341539d4a146100f15780634e43e4951461016e575b600080fd5b005b6101386100ff3660046106e0565b6004602081905260009182526040909120805460018201546002830154600384015493909401549193909290916001600160a01b031685565b6040805195865260208601949094529284019190915260608301526001600160a01b0316608082015260a0015b60405180910390f35b6100ef61017c366004610742565b610245565b61019461018f3660046106e0565b610375565b604051908152602001610165565b6100ef6100ea3660046107dc565b61019460025481565b6101d6604051806040016040528060028152602001600081525081565b604051610165919061086c565b6101946104c0565b6101946104d281565b6100ef6102023660046106e0565b6104e5565b61020f600281565b60405160ff9091168152602001610165565b6100ef61022f3660046108b5565b6105c0565b6101946105ef565b61019460015481565b600080808080610257888a018a61097e565b9550509450945094509450806104d2146102af5760405162461bcd60e51b8152602060048201526014602482015273125b9d985b1a5908189bdc8818da185a5b881a5960621b60448201526064015b60405180910390fd5b6102bb85858585610607565b6102ff5760405162461bcd60e51b8152602060048201526015602482015274494e434f52524543545f4845414445525f4441544160581b60448201526064016102a6565b6002546040805186815260208101869052908101849052600091906001600160a01b038816907fba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb5279060600160405180910390a460025461036290612710906109e0565b6002555050600160035550505050505050565b6005546040805162c9effd60e41b815290516000926001600160a01b031691630c9effd09160048083019260209291908290030181865afa1580156103be573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103e291906109f9565b6001600160a01b0316336001600160a01b03161461044c5760405162461bcd60e51b815260206004820152602160248201527f554e415554484f52495a45445f4445504f5349545f4d414e414745525f4f4e4c6044820152605960f81b60648201526084016102a6565b6003546104576105ef565b61046191906109e0565b90508160035461047191906109e0565b600381905561271010156104bb5760405162461bcd60e51b8152602060048201526011602482015270544f4f5f4d414e595f4445504f5349545360781b60448201526064016102a6565b919050565b6000600460006104ce6105ef565b815260200190815260200160002060020154905090565b6104f161271082610a1d565b1561052e5760405162461bcd60e51b815260206004820152600d60248201526c496e76616c69642076616c756560981b60448201526064016102a6565b805b6002548110156105855760008181526004602081905260408220828155600181018390556002810183905560038101929092550180546001600160a01b031916905561057e612710826109e0565b9050610530565b5060028190556001600355604051819033907fca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a48811720590600090a350565b806040516020016105d19190610a3f565b60408051601f19818403018152919052805160209091012060015550565b6002546000906106029061271090610a5b565b905090565b60008061271061ffff16600254111561064757600460006106266105ef565b815260200190815260200160002060020154600161064491906109e0565b90505b8481146106585760009150506106d8565b6040805160a081018252848152602080820193845281830187815242606084019081526001600160a01b038b811660808601908152600280546000908152600496879052979097209551865596516001808701919091559251958501959095555160038401559351910180546001600160a01b0319169190921617905590505b949350505050565b6000602082840312156106f257600080fd5b5035919050565b60008083601f84011261070b57600080fd5b50813567ffffffffffffffff81111561072357600080fd5b60208301915083602082850101111561073b57600080fd5b9250929050565b6000806000806040858703121561075857600080fd5b843567ffffffffffffffff8082111561077057600080fd5b61077c888389016106f9565b9096509450602087013591508082111561079557600080fd5b818701915087601f8301126107a957600080fd5b8135818111156107b857600080fd5b8860206060830285010111156107cd57600080fd5b95989497505060200194505050565b600080600080604085870312156107f257600080fd5b843567ffffffffffffffff8082111561080a57600080fd5b610816888389016106f9565b9096509450602087013591508082111561082f57600080fd5b5061083c878288016106f9565b95989497509550505050565b60005b8381101561086357818101518382015260200161084b565b50506000910152565b602081526000825180602084015261088b816040850160208701610848565b601f01601f19169190910160400192915050565b634e487b7160e01b600052604160045260246000fd5b6000602082840312156108c757600080fd5b813567ffffffffffffffff808211156108df57600080fd5b818401915084601f8301126108f357600080fd5b8135818111156109055761090561089f565b604051601f8201601f19908116603f0116810190838211818310171561092d5761092d61089f565b8160405282815287602084870101111561094657600080fd5b826020860160208301376000928101602001929092525095945050505050565b6001600160a01b038116811461097b57600080fd5b50565b60008060008060008060c0878903121561099757600080fd5b86356109a281610966565b9860208801359850604088013597606081013597506080810135965060a00135945092505050565b634e487b7160e01b600052601160045260246000fd5b808201808211156109f3576109f36109ca565b92915050565b600060208284031215610a0b57600080fd5b8151610a1681610966565b9392505050565b600082610a3a57634e487b7160e01b600052601260045260246000fd5b500690565b60008251610a51818460208701610848565b9190910192915050565b818103818111156109f3576109f36109ca56fea2646970667358221220b0082d800e411bf71e97a7dba6c22d98a3bd14f4c8522096b1dfdc5b76803ccf64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChainHeader.abi b/cmd/devnet/contracts/build/RootChainHeader.abi
new file mode 100644
index 00000000000..bc0b2ec7754
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChainHeader.abi
@@ -0,0 +1 @@
+[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"reward","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"start","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"end","type":"uint256"},{"indexed":false,"internalType":"bytes32","name":"root","type":"bytes32"}],"name":"NewHeaderBlock","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"}],"name":"ResetHeaderBlock","type":"event"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChainHeader.bin b/cmd/devnet/contracts/build/RootChainHeader.bin
new file mode 100644
index 00000000000..cbf7ad2624b
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChainHeader.bin
@@ -0,0 +1 @@
+6080604052348015600f57600080fd5b50603f80601d6000396000f3fe6080604052600080fdfea26469706673582212200c8ac7f24c4ac2062b97f586926948ab59c95f6377be277888fca7551590093a64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChainStorage.abi b/cmd/devnet/contracts/build/RootChainStorage.abi
new file mode 100644
index 00000000000..f74f62e19d8
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChainStorage.abi
@@ -0,0 +1 @@
+[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"reward","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"start","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"end","type":"uint256"},{"indexed":false,"internalType":"bytes32","name":"root","type":"bytes32"}],"name":"NewHeaderBlock","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"}],"name":"ResetHeaderBlock","type":"event"},{"inputs":[],"name":"CHAINID","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"_nextHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"headerBlocks","outputs":[{"internalType":"bytes32","name":"root","type":"bytes32"},{"internalType":"uint256","name":"start","type":"uint256"},{"internalType":"uint256","name":"end","type":"uint256"},{"internalType":"uint256","name":"createdAt","type":"uint256"},{"internalType":"address","name":"proposer","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"heimdallId","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"networkId","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootChainStorage.bin b/cmd/devnet/contracts/build/RootChainStorage.bin
new file mode 100644
index 00000000000..4eb00bc7919
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootChainStorage.bin
@@ -0,0 +1 @@
+6080604052612710600255600160035534801561001b57600080fd5b506101f28061002b6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c806341539d4a146100675780638d978d88146100e45780639025e64c146100fb578063cc79f97b14610129578063d5b844eb14610132578063fbc3dd361461014c575b600080fd5b6100ae610075366004610155565b6004602081905260009182526040909120805460018201546002830154600384015493909401549193909290916001600160a01b031685565b6040805195865260208601949094529284019190915260608301526001600160a01b0316608082015260a0015b60405180910390f35b6100ed60025481565b6040519081526020016100db565b61011c60405180604001604052806002815260200161053960f01b81525081565b6040516100db919061016e565b6100ed61053981565b61013a600281565b60405160ff90911681526020016100db565b6100ed60015481565b60006020828403121561016757600080fd5b5035919050565b600060208083528351808285015260005b8181101561019b5785810183015185820160400152820161017f565b506000604082860101526040601f19601f830116850101925050509291505056fea264697066735822122071950929b53ae66c9034d5ed38e7212ee33978b3a0467a495ec9c37f901c391064736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootReceiver.abi b/cmd/devnet/contracts/build/RootReceiver.abi
new file mode 100644
index 00000000000..ed62067d186
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootReceiver.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"address","name":"_checkpointManager","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"_source","type":"address"},{"indexed":false,"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"received","type":"event"},{"inputs":[],"name":"SEND_MESSAGE_EVENT_SIG","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"checkpointManager","outputs":[{"internalType":"contract ICheckpointManager","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"processedExits","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes","name":"inputData","type":"bytes"}],"name":"receiveMessage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"senders","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/RootReceiver.bin b/cmd/devnet/contracts/build/RootReceiver.bin
new file mode 100644
index 00000000000..fc8e458bbb1
--- /dev/null
+++ b/cmd/devnet/contracts/build/RootReceiver.bin
@@ -0,0 +1 @@
+608060405234801561001057600080fd5b50604051611ed1380380611ed183398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b611e3e806100936000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80630e387de61461005c578063607f2d4214610096578063982fb9d8146100c9578063c0857ba0146100e9578063f953cec714610114575b600080fd5b6100837f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b03681565b6040519081526020015b60405180910390f35b6100b96100a436600461196a565b60016020526000908152604090205460ff1681565b604051901515815260200161008d565b6100836100d736600461199b565b60026020526000908152604090205481565b6000546100fc906001600160a01b031681565b6040516001600160a01b03909116815260200161008d565b610127610122366004611a25565b610129565b005b60008061013583610148565b9150915061014382826103cf565b505050565b600060606000610157846104bb565b905060006101648261051a565b9050600061017183610549565b905060008161017f84610572565b6101888661072e565b60405160200161019a93929190611ac8565b60408051601f1981840301815291815281516020928301206000818152600190935291205490915060ff16156102235760405162461bcd60e51b8152602060048201526024808201527f4678526f6f7454756e6e656c3a20455849545f414c52454144595f50524f434560448201526314d4d15160e21b60648201526084015b60405180910390fd5b60008181526001602081905260408220805460ff191690911790556102478561074a565b9050600061025482610893565b9050600061026187610923565b9050610281610271846020015190565b8761027b8a61093f565b8461095b565b6102d95760405162461bcd60e51b815260206004820152602360248201527f4678526f6f7454756e6e656c3a20494e56414c49445f524543454950545f505260448201526227a7a360e91b606482015260840161021a565b610307856102e689610c28565b6102ef8a610c44565b846102f98c610c60565b6103028d610c7c565b610c98565b600061031283610db2565b90507f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036610348610343836000610dee565b610e26565b146103955760405162461bcd60e51b815260206004820152601f60248201527f4678526f6f7454756e6e656c3a20494e56414c49445f5349474e415455524500604482015260640161021a565b60006103a084610ea1565b8060200190518101906103b39190611af5565b90506103be84610ebd565b9c909b509950505050505050505050565b6000806000838060200190518101906103e89190611b6b565b919450925090506001600160a01b038316301461043a5760405162461bcd60e51b815260206004820152601060248201526f24b73b30b634b2103932b1b2b4bb32b960811b604482015260640161021a565b6001600160a01b03821660009081526002602052604090205461045d8282611bc4565b6001600160a01b0384166000818152600260209081526040918290209390935580519182529181018490527ff11e547d796cc64acdf758e7cee90439494fd886a19159454aa61e473fdbafef910160405180910390a1505050505050565b60408051602081019091526060815260006105056105008460408051808201825260008082526020918201528151808301909252825182529182019181019190915290565b610ee6565b60408051602081019091529081529392505050565b6060610543826000015160088151811061053657610536611bd7565b6020026020010151610ffb565b92915050565b6000610543826000015160028151811061056557610565611bd7565b6020026020010151610e26565b604080516020810190915260008152815160609190156105435760008061059a600086611097565b60f81c905060018114806105b157508060ff166003145b15610658576001855160026105c69190611bed565b6105d09190611c04565b6001600160401b038111156105e7576105e76119b8565b6040519080825280601f01601f191660200182016040528015610611576020820181803683370190505b5092506000610621600187611097565b9050808460008151811061063757610637611bd7565b60200101906001600160f81b031916908160001a90535060019250506106bb565b6002855160026106689190611bed565b6106729190611c04565b6001600160401b03811115610689576106896119b8565b6040519080825280601f01601f1916602001820160405280156106b3576020820181803683370190505b509250600091505b60ff82165b8351811015610725576106ea6106d960ff851683611c04565b6106e4906002611bc4565b87611097565b8482815181106106fc576106fc611bd7565b60200101906001600160f81b031916908160001a9053508061071d81611c17565b9150506106c0565b50505092915050565b6000610543826000015160098151811061056557610565611bd7565b61076e60405180606001604052806060815260200160608152602001600081525090565b610788826000015160068151811061053657610536611bd7565b6020828101829052604080518082018252600080825290830152805180820190915282518152918101908201526107be81611118565b156107d3576107cc81610ee6565b825261087f565b602082015180516000906107e990600190611c04565b6001600160401b03811115610800576108006119b8565b6040519080825280601f01601f19166020018201604052801561082a576020820181803683370190505b50905060008083602101915082602001905061084882828551611153565b60408051808201825260008082526020918201528151808301909252845182528085019082015261087890610ee6565b8652505050505b6108888361072e565b604083015250919050565b6040805160808101825260009181018281526060808301939093528152602081019190915260006108e183600001516003815181106108d4576108d4611bd7565b6020026020010151610ee6565b8360400151815181106108f6576108f6611bd7565b60200260200101519050604051806040016040528082815260200161091a83610ee6565b90529392505050565b6000610543826000015160058151811061056557610565611bd7565b6060610543826000015160078151811061053657610536611bd7565b60008061098f8460408051808201825260008082526020918201528151808301909252825182529182019181019190915290565b9050600061099c826111de565b9050606080856000806109ae8b610572565b905080516000036109c9576000975050505050505050610c20565b60005b8651811015610c175781518311156109ef57600098505050505050505050610c20565b610a11878281518110610a0457610a04611bd7565b60200260200101516112e8565b955085805190602001208414610a3257600098505050505050505050610c20565b610a54878281518110610a4757610a47611bd7565b60200260200101516111de565b94508451601103610b335781518303610ac0578c80519060200120610a9286601081518110610a8557610a85611bd7565b6020026020010151611366565b8051906020012003610aaf57600198505050505050505050610c20565b600098505050505050505050610c20565b6000828481518110610ad457610ad4611bd7565b016020015160f81c90506010811115610af95760009950505050505050505050610c20565b610b1e868260ff1681518110610b1157610b11611bd7565b6020026020010151611402565b9450610b2b600185611bc4565b935050610c05565b8451600203610aaf576000610b5e610b5787600081518110610a8557610a85611bd7565b8486611430565b8351909150610b6d8286611bc4565b03610bc0578d80519060200120610b9087600181518110610a8557610a85611bd7565b8051906020012003610bae5760019950505050505050505050610c20565b60009950505050505050505050610c20565b80600003610bda5760009950505050505050505050610c20565b610be48185611bc4565b9350610bfc86600181518110610b1157610b11611bd7565b9450610c059050565b80610c0f81611c17565b9150506109cc565b50505050505050505b949350505050565b6000610543826000015160038151811061056557610565611bd7565b6000610543826000015160048151811061056557610565611bd7565b6000610543826000015160008151811061056557610565611bd7565b6060610543826000015160018151811061053657610536611bd7565b600080546040516320a9cea560e11b81526004810185905282916001600160a01b0316906341539d4a9060240160a060405180830381865afa158015610ce2573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d069190611c30565b50505091509150610d5c8189610d1c9190611c04565b6040805160208082018d90528183018c9052606082018b905260808083018b90528351808403909101815260a09092019092528051910120908486611537565b610da85760405162461bcd60e51b815260206004820152601c60248201527f4678526f6f7454756e6e656c3a20494e56414c49445f48454144455200000000604482015260640161021a565b5050505050505050565b6040805160208101909152606081526040518060200160405280610de684602001516001815181106108d4576108d4611bd7565b905292915050565b60408051808201909152600080825260208201528251805183908110610e1657610e16611bd7565b6020026020010151905092915050565b805160009015801590610e3b57508151602110155b610e4457600080fd5b6000610e53836020015161169f565b90506000818460000151610e679190611c04565b9050600080838660200151610e7c9190611bc4565b9050805191506020831015610e9857826020036101000a820491505b50949350505050565b6060610543826020015160028151811061053657610536611bd7565b60006105438260200151600081518110610ed957610ed9611bd7565b6020026020010151611721565b6060610ef182611118565b610efa57600080fd5b6000610f058361173b565b90506000816001600160401b03811115610f2157610f216119b8565b604051908082528060200260200182016040528015610f6657816020015b6040805180820190915260008082526020820152815260200190600190039081610f3f5790505b5090506000610f78856020015161169f565b8560200151610f879190611bc4565b90506000805b84811015610ff057610f9e836117c0565b9150604051806040016040528083815260200184815250848281518110610fc757610fc7611bd7565b6020908102919091010152610fdc8284611bc4565b925080610fe881611c17565b915050610f8d565b509195945050505050565b805160609061100957600080fd5b6000611018836020015161169f565b9050600081846000015161102c9190611c04565b90506000816001600160401b03811115611048576110486119b8565b6040519080825280601f01601f191660200182016040528015611072576020820181803683370190505b5090506000816020019050610e988487602001516110909190611bc4565b8285611864565b60006110a4600284611c93565b156110de576010826110b7600286611ca7565b815181106110c7576110c7611bd7565b01602001516110d9919060f81c611cbb565b61110e565b6010826110ec600286611ca7565b815181106110fc576110fc611bd7565b016020015161110e919060f81c611cdd565b60f81b9392505050565b8051600090810361112b57506000919050565b6020820151805160001a9060c0821015611149575060009392505050565b5060019392505050565b8060000361116057505050565b602081106111985782518252611177602084611bc4565b9250611184602083611bc4565b9150611191602082611c04565b9050611160565b806000036111a557505050565b600060016111b4836020611c04565b6111c090610100611de3565b6111ca9190611c04565b935183518516941916939093179091525050565b60606111e982611118565b6111f257600080fd5b60006111fd836118a9565b90506000816001600160401b03811115611219576112196119b8565b60405190808252806020026020018201604052801561125e57816020015b60408051808201909152600080825260208201528152602001906001900390816112375790505b5090506000611270856020015161169f565b856020015161127f9190611bc4565b90506000805b84811015610ff057611296836117c0565b91506040518060400160405280838152602001848152508482815181106112bf576112bf611bd7565b60209081029190910101526112d48284611bc4565b9250806112e081611c17565b915050611285565b6060600082600001516001600160401b03811115611308576113086119b8565b6040519080825280601f01601f191660200182016040528015611332576020820181803683370190505b50905080516000036113445792915050565b600081602001905061135f8460200151828660000151611925565b5092915050565b805160609061137457600080fd5b6000611383836020015161169f565b905060008184600001516113979190611c04565b90506000816001600160401b038111156113b3576113b36119b8565b6040519080825280601f01601f1916602001820160405280156113dd576020820181803683370190505b5090506000816020019050610e988487602001516113fb9190611bc4565b8285611925565b805160009060211461141357600080fd5b600080836020015160016114279190611bc4565b51949350505050565b6000808061143d86610572565b9050600081516001600160401b0381111561145a5761145a6119b8565b6040519080825280601f01601f191660200182016040528015611484576020820181803683370190505b509050845b82516114959087611bc4565b8110156115085760008782815181106114b0576114b0611bd7565b01602001516001600160f81b031916905080836114cd8985611c04565b815181106114dd576114dd611bd7565b60200101906001600160f81b031916908160001a90535050808061150090611c17565b915050611489565b508080519060200120828051906020012003611527578151925061152c565b600092505b509095945050505050565b6000602082516115479190611c93565b1561158b5760405162461bcd60e51b8152602060048201526014602482015273092dcecc2d8d2c840e0e4dedecc40d8cadccee8d60631b604482015260640161021a565b60006020835161159b9190611ca7565b90506115a8816002611de3565b85106115ee5760405162461bcd60e51b81526020600482015260156024820152744c65616620696e64657820697320746f6f2062696760581b604482015260640161021a565b60008660205b855181116116915785810151925061160d600289611c93565b600003611645576040805160208101849052908101849052606001604051602081830303815290604052805190602001209150611672565b60408051602081018590529081018390526060016040516020818303038152906040528051906020012091505b61167d600289611ca7565b975061168a602082611bc4565b90506115f4565b509094149695505050505050565b8051600090811a60808110156116b85750600092915050565b60b88110806116d3575060c081108015906116d3575060f881105b156116e15750600192915050565b60c0811015611715576116f6600160b8611def565b6117039060ff1682611c04565b61170e906001611bc4565b9392505050565b6116f6600160f8611def565b805160009060151461173257600080fd5b61054382610e26565b8051600090810361174e57506000919050565b60008061175e846020015161169f565b846020015161176d9190611bc4565b90506000846000015185602001516117859190611bc4565b90505b808210156117b757611799826117c0565b6117a39083611bc4565b9150826117af81611c17565b935050611788565b50909392505050565b80516000908190811a60808110156117db576001915061135f565b60b8811015611801576117ef608082611c04565b6117fa906001611bc4565b915061135f565b60c081101561182e5760b78103600185019450806020036101000a8551046001820181019350505061135f565b60f8811015611842576117ef60c082611c04565b60019390930151602084900360f7016101000a900490920160f5190192915050565b8060000361187157505050565b602081106111985782518252611888602084611bc4565b9250611895602083611bc4565b91506118a2602082611c04565b9050611871565b805160009081036118bc57506000919050565b6000806118cc846020015161169f565b84602001516118db9190611bc4565b90506000846000015185602001516118f39190611bc4565b90505b808210156117b757611907826117c0565b6119119083611bc4565b91508261191d81611c17565b9350506118f6565b8060000361193257505050565b602081106111985782518252611949602084611bc4565b9250611956602083611bc4565b9150611963602082611c04565b9050611932565b60006020828403121561197c57600080fd5b5035919050565b6001600160a01b038116811461199857600080fd5b50565b6000602082840312156119ad57600080fd5b813561170e81611983565b634e487b7160e01b600052604160045260246000fd5b604051601f8201601f191681016001600160401b03811182821017156119f6576119f66119b8565b604052919050565b60006001600160401b03821115611a1757611a176119b8565b50601f01601f191660200190565b600060208284031215611a3757600080fd5b81356001600160401b03811115611a4d57600080fd5b8201601f81018413611a5e57600080fd5b8035611a71611a6c826119fe565b6119ce565b818152856020838501011115611a8657600080fd5b81602084016020830137600091810160200191909152949350505050565b60005b83811015611abf578181015183820152602001611aa7565b50506000910152565b83815260008351611ae0816020850160208801611aa4565b60209201918201929092526040019392505050565b600060208284031215611b0757600080fd5b81516001600160401b03811115611b1d57600080fd5b8201601f81018413611b2e57600080fd5b8051611b3c611a6c826119fe565b818152856020838501011115611b5157600080fd5b611b62826020830160208601611aa4565b95945050505050565b600080600060608486031215611b8057600080fd5b8351611b8b81611983565b6020850151909350611b9c81611983565b80925050604084015190509250925092565b634e487b7160e01b600052601160045260246000fd5b8082018082111561054357610543611bae565b634e487b7160e01b600052603260045260246000fd5b808202811582820484141761054357610543611bae565b8181038181111561054357610543611bae565b600060018201611c2957611c29611bae565b5060010190565b600080600080600060a08688031215611c4857600080fd5b855194506020860151935060408601519250606086015191506080860151611c6f81611983565b809150509295509295909350565b634e487b7160e01b600052601260045260246000fd5b600082611ca257611ca2611c7d565b500690565b600082611cb657611cb6611c7d565b500490565b600060ff831680611cce57611cce611c7d565b8060ff84160691505092915050565b600060ff831680611cf057611cf0611c7d565b8060ff84160491505092915050565b600181815b80851115611d3a578160001904821115611d2057611d20611bae565b80851615611d2d57918102915b93841c9390800290611d04565b509250929050565b600082611d5157506001610543565b81611d5e57506000610543565b8160018114611d745760028114611d7e57611d9a565b6001915050610543565b60ff841115611d8f57611d8f611bae565b50506001821b610543565b5060208310610133831016604e8410600b8410161715611dbd575081810a610543565b611dc78383611cff565b8060001904821115611ddb57611ddb611bae565b029392505050565b600061170e8383611d42565b60ff828116828216039081111561054357610543611bae56fea2646970667358221220a924e520bf4f9d5629bc95702236e2702455bf9b57c4e9e4e344c7c7d7576a2b64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/SafeMath.abi b/cmd/devnet/contracts/build/SafeMath.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/SafeMath.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/SafeMath.bin b/cmd/devnet/contracts/build/SafeMath.bin
new file mode 100644
index 00000000000..2e4b4c031c1
--- /dev/null
+++ b/cmd/devnet/contracts/build/SafeMath.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea26469706673582212201a043f7e2f0c8bbcbf3cc5dab09f7bd56ae68a8e71ec23dc15074186793c7ead64736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/TestRootChain.abi b/cmd/devnet/contracts/build/TestRootChain.abi
new file mode 100644
index 00000000000..8a0765be67a
--- /dev/null
+++ b/cmd/devnet/contracts/build/TestRootChain.abi
@@ -0,0 +1 @@
+[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"},{"indexed":true,"internalType":"uint256","name":"reward","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"start","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"end","type":"uint256"},{"indexed":false,"internalType":"bytes32","name":"root","type":"bytes32"}],"name":"NewHeaderBlock","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"proposer","type":"address"},{"indexed":true,"internalType":"uint256","name":"headerBlockId","type":"uint256"}],"name":"ResetHeaderBlock","type":"event"},{"inputs":[],"name":"CHAINID","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"VOTE_TYPE","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"_nextHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentHeaderBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLastChildBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"headerBlocks","outputs":[{"internalType":"bytes32","name":"root","type":"bytes32"},{"internalType":"uint256","name":"start","type":"uint256"},{"internalType":"uint256","name":"end","type":"uint256"},{"internalType":"uint256","name":"createdAt","type":"uint256"},{"internalType":"address","name":"proposer","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"heimdallId","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"networkId","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"string","name":"_heimdallId","type":"string"}],"name":"setHeimdallId","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"setNextHeaderBlock","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"slash","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256[3][]","name":"","type":"uint256[3][]"}],"name":"submitCheckpoint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"","type":"bytes"},{"internalType":"bytes","name":"","type":"bytes"}],"name":"submitHeaderBlock","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"numDeposits","type":"uint256"}],"name":"updateDepositId","outputs":[{"internalType":"uint256","name":"depositId","type":"uint256"}],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/TestRootChain.bin b/cmd/devnet/contracts/build/TestRootChain.bin
new file mode 100644
index 00000000000..c9eb0e144aa
--- /dev/null
+++ b/cmd/devnet/contracts/build/TestRootChain.bin
@@ -0,0 +1 @@
+6080604052612710600255600160035534801561001b57600080fd5b50610af88061002b6000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063b87e1b661161008c578063d5b844eb11610066578063d5b844eb1461020b578063ea0688b314610225578063ec7e485514610238578063fbc3dd361461024057600080fd5b8063b87e1b66146101e7578063cc79f97b146101ef578063cf24a0ea146101f857600080fd5b80635391f483116100c85780635391f483146101815780636a791f11146101a25780638d978d88146101b05780639025e64c146101b957600080fd5b80632da25de3146100ef57806341539d4a146100f15780634e43e4951461016e575b600080fd5b005b6101386100ff36600461072b565b6004602081905260009182526040909120805460018201546002830154600384015493909401549193909290916001600160a01b031685565b6040805195865260208601949094529284019190915260608301526001600160a01b0316608082015260a0015b60405180910390f35b6100ef61017c36600461078d565b610249565b61019461018f36600461072b565b61037b565b604051908152602001610165565b6100ef6100ea366004610827565b61019460025481565b6101da60405180604001604052806002815260200161053960f01b81525081565b60405161016591906108b7565b6101946104c5565b61019461053981565b6100ef61020636600461072b565b6104ea565b610213600281565b60405160ff9091168152602001610165565b6100ef610233366004610900565b6105c5565b6101946105f4565b61019460015481565b6000808080808061025c898b018b6109c9565b95509550955095509550955080610539146102b55760405162461bcd60e51b8152602060048201526014602482015273125b9d985b1a5908189bdc8818da185a5b881a5960621b60448201526064015b60405180910390fd5b6102c18686868661060b565b6103055760405162461bcd60e51b8152602060048201526015602482015274494e434f52524543545f4845414445525f4441544160581b60448201526064016102ac565b6002546040805187815260208101879052908101859052600091906001600160a01b038916907fba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb5279060600160405180910390a4600254610367906127106106e4565b600255505060016003555050505050505050565b6005546040805162c9effd60e41b815290516000926001600160a01b031691630c9effd09160048083019260209291908290030181865afa1580156103c4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103e89190610a15565b6001600160a01b0316336001600160a01b0316146104525760405162461bcd60e51b815260206004820152602160248201527f554e415554484f52495a45445f4445504f5349545f4d414e414745525f4f4e4c6044820152605960f81b60648201526084016102ac565b6104666003546104606105f4565b906106e4565b60035490915061047690836106e4565b600381905561271010156104c05760405162461bcd60e51b8152602060048201526011602482015270544f4f5f4d414e595f4445504f5349545360781b60448201526064016102ac565b919050565b6000600460006104d36105f4565b815260200190815260200160002060020154905090565b6104f661271082610a32565b156105335760405162461bcd60e51b815260206004820152600d60248201526c496e76616c69642076616c756560981b60448201526064016102ac565b805b60025481101561058a5760008181526004602081905260408220828155600181018390556002810183905560038101929092550180546001600160a01b031916905561058361271082610a6a565b9050610535565b5060028190556001600355604051819033907fca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a48811720590600090a350565b806040516020016105d69190610a7d565b60408051601f19818403018152919052805160209091012060015550565b60025460009061060690612710610708565b905090565b60008061271061ffff16600254111561064b576004600061062a6105f4565b81526020019081526020016000206002015460016106489190610a6a565b90505b84811461065c5760009150506106dc565b6040805160a081018252848152602080820193845281830187815242606084019081526001600160a01b038b811660808601908152600280546000908152600496879052979097209551865596516001808701919091559251958501959095555160038401559351910180546001600160a01b0319169190921617905590505b949350505050565b60006106f08284610a6a565b90508281101561070257610702610a99565b92915050565b60008282111561071a5761071a610a99565b6107248284610aaf565b9392505050565b60006020828403121561073d57600080fd5b5035919050565b60008083601f84011261075657600080fd5b50813567ffffffffffffffff81111561076e57600080fd5b60208301915083602082850101111561078657600080fd5b9250929050565b600080600080604085870312156107a357600080fd5b843567ffffffffffffffff808211156107bb57600080fd5b6107c788838901610744565b909650945060208701359150808211156107e057600080fd5b818701915087601f8301126107f457600080fd5b81358181111561080357600080fd5b88602060608302850101111561081857600080fd5b95989497505060200194505050565b6000806000806040858703121561083d57600080fd5b843567ffffffffffffffff8082111561085557600080fd5b61086188838901610744565b9096509450602087013591508082111561087a57600080fd5b5061088787828801610744565b95989497509550505050565b60005b838110156108ae578181015183820152602001610896565b50506000910152565b60208152600082518060208401526108d6816040850160208701610893565b601f01601f19169190910160400192915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561091257600080fd5b813567ffffffffffffffff8082111561092a57600080fd5b818401915084601f83011261093e57600080fd5b813581811115610950576109506108ea565b604051601f8201601f19908116603f01168101908382118183101715610978576109786108ea565b8160405282815287602084870101111561099157600080fd5b826020860160208301376000928101602001929092525095945050505050565b6001600160a01b03811681146109c657600080fd5b50565b60008060008060008060c087890312156109e257600080fd5b86356109ed816109b1565b9860208801359850604088013597606081013597506080810135965060a00135945092505050565b600060208284031215610a2757600080fd5b8151610724816109b1565b600082610a4f57634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052601160045260246000fd5b8082018082111561070257610702610a54565b60008251610a8f818460208701610893565b9190910192915050565b634e487b7160e01b600052600160045260246000fd5b8181038181111561070257610702610a5456fea2646970667358221220e8aee67b63507e8745850c7b73e998c6ef6b5d41b72b45f8f1316e80e79a1ec964736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.abi b/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.abi
new file mode 100644
index 00000000000..0637a088a01
--- /dev/null
+++ b/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.abi
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.bin b/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.bin
new file mode 100644
index 00000000000..fe1e4b7272d
--- /dev/null
+++ b/cmd/devnet/contracts/build/lib_RLPReader_sol_RLPReader.bin
@@ -0,0 +1 @@
+60566037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea264697066735822122052e9a349bc8a4fd9c5d36d064e612b59e39ba032ed6620df6cc57822b5d7171164736f6c63430008140033
\ No newline at end of file
diff --git a/cmd/devnet/contracts/childsender.sol b/cmd/devnet/contracts/childsender.sol
new file mode 100644
index 00000000000..67492baa730
--- /dev/null
+++ b/cmd/devnet/contracts/childsender.sol
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: LGPL-3.0
+
+pragma solidity ^0.8.6;
+
+contract ChildSender {
+ address rootStateReceiver;
+ mapping(address => uint) public sent;
+
+ // MessageTunnel on L1 will get data from this event
+ event MessageSent(bytes message);
+
+ constructor(address childStateReceiver_) {
+ rootStateReceiver = childStateReceiver_;
+ }
+
+ function _sendMessageToRoot(bytes memory message) internal {
+ emit MessageSent(message);
+ }
+
+ function sendToRoot(uint amount) external {
+ uint total = sent[msg.sender];
+ sent[msg.sender] = total + amount;
+
+ _sendMessageToRoot(
+ abi.encode(rootStateReceiver, msg.sender, amount)
+ );
+ }
+}
diff --git a/cmd/devnet/contracts/gen.go b/cmd/devnet/contracts/gen.go
index 4c485c77ba8..b971b6da56b 100644
--- a/cmd/devnet/contracts/gen.go
+++ b/cmd/devnet/contracts/gen.go
@@ -4,14 +4,26 @@ package contracts
//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build rootsender.sol
//go:generate abigen -abi build/RootSender.abi -bin build/RootSender.bin -pkg contracts -type RootSender -out ./gen_rootsender.go
+// childsender.sol
+//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build childsender.sol
+//go:generate abigen -abi build/ChildSender.abi -bin build/ChildSender.bin -pkg contracts -type ChildSender -out ./gen_childsender.go
+
// teststatesender.sol
//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build teststatesender.sol
//go:generate abigen -abi build/TestStateSender.abi -bin build/TestStateSender.bin -pkg contracts -type TestStateSender -out ./gen_teststatesender.go
+// rootreceiver.sol
+//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build rootreceiver.sol
+//go:generate abigen -abi build/RootReceiver.abi -bin build/RootReceiver.bin -pkg contracts -type RootReceiver -out ./gen_rootreceiver.go
+
// childreceiver.sol
//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build childreceiver.sol
//go:generate abigen -abi build/ChildReceiver.abi -bin build/ChildReceiver.bin -pkg contracts -type ChildReceiver -out ./gen_childreceiver.go
+// testrootchain.sol
+//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build testrootchain.sol
+//go:generate abigen -abi build/TestRootChain.abi -bin build/TestRootChain.bin -pkg contracts -type TestRootChain -out ./gen_testrootchain.go
+
// faucet.sol
//go:generate solc --evm-version paris --allow-paths ., --abi --bin --overwrite --optimize -o build faucet.sol
//go:generate abigen -abi build/Faucet.abi -bin build/Faucet.bin -pkg contracts -type Faucet -out ./gen_faucet.go
diff --git a/cmd/devnet/contracts/gen_childsender.go b/cmd/devnet/contracts/gen_childsender.go
new file mode 100644
index 00000000000..4928cf39f51
--- /dev/null
+++ b/cmd/devnet/contracts/gen_childsender.go
@@ -0,0 +1,414 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package contracts
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ ethereum "github.com/ledgerwatch/erigon"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+ _ = big.NewInt
+ _ = strings.NewReader
+ _ = ethereum.NotFound
+ _ = bind.Bind
+ _ = libcommon.Big1
+ _ = types.BloomLookup
+ _ = event.NewSubscription
+)
+
+// ChildSenderABI is the input ABI used to generate the binding from.
+const ChildSenderABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"childStateReceiver_\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"MessageSent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"sendToRoot\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"sent\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]"
+
+// ChildSenderBin is the compiled bytecode used for deploying new contracts.
+var ChildSenderBin = "0x608060405234801561001057600080fd5b506040516102b33803806102b383398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b610220806100936000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80637bf786f81461003b5780638152e5021461006d575b600080fd5b61005b61004936600461012c565b60016020526000908152604090205481565b60405190815260200160405180910390f35b61008061007b36600461015c565b610082565b005b3360009081526001602052604090205461009c8282610175565b33600081815260016020908152604080832094909455905483516001600160a01b039091169181019190915291820152606081018390526100ee906080016040516020818303038152906040526100f2565b5050565b7f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b03681604051610121919061019c565b60405180910390a150565b60006020828403121561013e57600080fd5b81356001600160a01b038116811461015557600080fd5b9392505050565b60006020828403121561016e57600080fd5b5035919050565b8082018082111561019657634e487b7160e01b600052601160045260246000fd5b92915050565b600060208083528351808285015260005b818110156101c9578581018301518582016040015282016101ad565b506000604082860101526040601f19601f830116850101925050509291505056fea26469706673582212202b5e4ad44349bb7aa70272a65afd939d928b9e646835ef4b7e65acff3d07b21364736f6c63430008140033"
+
+// DeployChildSender deploys a new Ethereum contract, binding an instance of ChildSender to it.
+func DeployChildSender(auth *bind.TransactOpts, backend bind.ContractBackend, childStateReceiver_ libcommon.Address) (libcommon.Address, types.Transaction, *ChildSender, error) {
+ parsed, err := abi.JSON(strings.NewReader(ChildSenderABI))
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+
+ address, tx, contract, err := bind.DeployContract(auth, parsed, libcommon.FromHex(ChildSenderBin), backend, childStateReceiver_)
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+ return address, tx, &ChildSender{ChildSenderCaller: ChildSenderCaller{contract: contract}, ChildSenderTransactor: ChildSenderTransactor{contract: contract}, ChildSenderFilterer: ChildSenderFilterer{contract: contract}}, nil
+}
+
+// ChildSender is an auto generated Go binding around an Ethereum contract.
+type ChildSender struct {
+ ChildSenderCaller // Read-only binding to the contract
+ ChildSenderTransactor // Write-only binding to the contract
+ ChildSenderFilterer // Log filterer for contract events
+}
+
+// ChildSenderCaller is an auto generated read-only Go binding around an Ethereum contract.
+type ChildSenderCaller struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChildSenderTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type ChildSenderTransactor struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChildSenderFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type ChildSenderFilterer struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// ChildSenderSession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type ChildSenderSession struct {
+ Contract *ChildSender // Generic contract binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// ChildSenderCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type ChildSenderCallerSession struct {
+ Contract *ChildSenderCaller // Generic contract caller binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+}
+
+// ChildSenderTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type ChildSenderTransactorSession struct {
+ Contract *ChildSenderTransactor // Generic contract transactor binding to set the session for
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// ChildSenderRaw is an auto generated low-level Go binding around an Ethereum contract.
+type ChildSenderRaw struct {
+ Contract *ChildSender // Generic contract binding to access the raw methods on
+}
+
+// ChildSenderCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type ChildSenderCallerRaw struct {
+ Contract *ChildSenderCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// ChildSenderTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type ChildSenderTransactorRaw struct {
+ Contract *ChildSenderTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NewChildSender creates a new instance of ChildSender, bound to a specific deployed contract.
+func NewChildSender(address libcommon.Address, backend bind.ContractBackend) (*ChildSender, error) {
+ contract, err := bindChildSender(address, backend, backend, backend)
+ if err != nil {
+ return nil, err
+ }
+ return &ChildSender{ChildSenderCaller: ChildSenderCaller{contract: contract}, ChildSenderTransactor: ChildSenderTransactor{contract: contract}, ChildSenderFilterer: ChildSenderFilterer{contract: contract}}, nil
+}
+
+// NewChildSenderCaller creates a new read-only instance of ChildSender, bound to a specific deployed contract.
+func NewChildSenderCaller(address libcommon.Address, caller bind.ContractCaller) (*ChildSenderCaller, error) {
+ contract, err := bindChildSender(address, caller, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &ChildSenderCaller{contract: contract}, nil
+}
+
+// NewChildSenderTransactor creates a new write-only instance of ChildSender, bound to a specific deployed contract.
+func NewChildSenderTransactor(address libcommon.Address, transactor bind.ContractTransactor) (*ChildSenderTransactor, error) {
+ contract, err := bindChildSender(address, nil, transactor, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &ChildSenderTransactor{contract: contract}, nil
+}
+
+// NewChildSenderFilterer creates a new log filterer instance of ChildSender, bound to a specific deployed contract.
+func NewChildSenderFilterer(address libcommon.Address, filterer bind.ContractFilterer) (*ChildSenderFilterer, error) {
+ contract, err := bindChildSender(address, nil, nil, filterer)
+ if err != nil {
+ return nil, err
+ }
+ return &ChildSenderFilterer{contract: contract}, nil
+}
+
+// bindChildSender binds a generic wrapper to an already deployed contract.
+func bindChildSender(address libcommon.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+ parsed, err := abi.JSON(strings.NewReader(ChildSenderABI))
+ if err != nil {
+ return nil, err
+ }
+ return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_ChildSender *ChildSenderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _ChildSender.Contract.ChildSenderCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_ChildSender *ChildSenderRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _ChildSender.Contract.ChildSenderTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_ChildSender *ChildSenderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _ChildSender.Contract.ChildSenderTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_ChildSender *ChildSenderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _ChildSender.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_ChildSender *ChildSenderTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _ChildSender.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_ChildSender *ChildSenderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _ChildSender.Contract.contract.Transact(opts, method, params...)
+}
+
+// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
+//
+// Solidity: function sent(address ) view returns(uint256)
+func (_ChildSender *ChildSenderCaller) Sent(opts *bind.CallOpts, arg0 libcommon.Address) (*big.Int, error) {
+ var out []interface{}
+ err := _ChildSender.contract.Call(opts, &out, "sent", arg0)
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
+//
+// Solidity: function sent(address ) view returns(uint256)
+func (_ChildSender *ChildSenderSession) Sent(arg0 libcommon.Address) (*big.Int, error) {
+ return _ChildSender.Contract.Sent(&_ChildSender.CallOpts, arg0)
+}
+
+// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
+//
+// Solidity: function sent(address ) view returns(uint256)
+func (_ChildSender *ChildSenderCallerSession) Sent(arg0 libcommon.Address) (*big.Int, error) {
+ return _ChildSender.Contract.Sent(&_ChildSender.CallOpts, arg0)
+}
+
+// SendToRoot is a paid mutator transaction binding the contract method 0x8152e502.
+//
+// Solidity: function sendToRoot(uint256 amount) returns()
+func (_ChildSender *ChildSenderTransactor) SendToRoot(opts *bind.TransactOpts, amount *big.Int) (types.Transaction, error) {
+ return _ChildSender.contract.Transact(opts, "sendToRoot", amount)
+}
+
+// SendToRoot is a paid mutator transaction binding the contract method 0x8152e502.
+//
+// Solidity: function sendToRoot(uint256 amount) returns()
+func (_ChildSender *ChildSenderSession) SendToRoot(amount *big.Int) (types.Transaction, error) {
+ return _ChildSender.Contract.SendToRoot(&_ChildSender.TransactOpts, amount)
+}
+
+// SendToRoot is a paid mutator transaction binding the contract method 0x8152e502.
+//
+// Solidity: function sendToRoot(uint256 amount) returns()
+func (_ChildSender *ChildSenderTransactorSession) SendToRoot(amount *big.Int) (types.Transaction, error) {
+ return _ChildSender.Contract.SendToRoot(&_ChildSender.TransactOpts, amount)
+}
+
+// SendToRootParams is an auto generated read-only Go binding of transcaction calldata params
+type SendToRootParams struct {
+ Param_amount *big.Int
+}
+
+// Parse SendToRoot method from calldata of a transaction
+//
+// Solidity: function sendToRoot(uint256 amount) returns()
+func ParseSendToRoot(calldata []byte) (*SendToRootParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(ChildSenderABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["sendToRoot"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack sendToRoot params data: %w", err)
+ }
+
+ var paramsResult = new(SendToRootParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return &SendToRootParams{
+ Param_amount: out0,
+ }, nil
+}
+
+// ChildSenderMessageSentIterator is returned from FilterMessageSent and is used to iterate over the raw logs and unpacked data for MessageSent events raised by the ChildSender contract.
+type ChildSenderMessageSentIterator struct {
+ Event *ChildSenderMessageSent // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *ChildSenderMessageSentIterator) Next() bool {
+ // If the iterator failed, stop iterating
+ if it.fail != nil {
+ return false
+ }
+ // If the iterator completed, deliver directly whatever's available
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(ChildSenderMessageSent)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+ // Iterator still in progress, wait for either a data or an error event
+ select {
+ case log := <-it.logs:
+ it.Event = new(ChildSenderMessageSent)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *ChildSenderMessageSentIterator) Error() error {
+ return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *ChildSenderMessageSentIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+// ChildSenderMessageSent represents a MessageSent event raised by the ChildSender contract.
+type ChildSenderMessageSent struct {
+ Message []byte
+ Raw types.Log // Blockchain specific contextual infos
+}
+
+// FilterMessageSent is a free log retrieval operation binding the contract event 0x8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036.
+//
+// Solidity: event MessageSent(bytes message)
+func (_ChildSender *ChildSenderFilterer) FilterMessageSent(opts *bind.FilterOpts) (*ChildSenderMessageSentIterator, error) {
+
+ logs, sub, err := _ChildSender.contract.FilterLogs(opts, "MessageSent")
+ if err != nil {
+ return nil, err
+ }
+ return &ChildSenderMessageSentIterator{contract: _ChildSender.contract, event: "MessageSent", logs: logs, sub: sub}, nil
+}
+
+// WatchMessageSent is a free log subscription operation binding the contract event 0x8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036.
+//
+// Solidity: event MessageSent(bytes message)
+func (_ChildSender *ChildSenderFilterer) WatchMessageSent(opts *bind.WatchOpts, sink chan<- *ChildSenderMessageSent) (event.Subscription, error) {
+
+ logs, sub, err := _ChildSender.contract.WatchLogs(opts, "MessageSent")
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new(ChildSenderMessageSent)
+ if err := _ChildSender.contract.UnpackLog(event, "MessageSent", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+// ParseMessageSent is a log parse operation binding the contract event 0x8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036.
+//
+// Solidity: event MessageSent(bytes message)
+func (_ChildSender *ChildSenderFilterer) ParseMessageSent(log types.Log) (*ChildSenderMessageSent, error) {
+ event := new(ChildSenderMessageSent)
+ if err := _ChildSender.contract.UnpackLog(event, "MessageSent", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
diff --git a/cmd/devnet/contracts/gen_rootreceiver.go b/cmd/devnet/contracts/gen_rootreceiver.go
new file mode 100644
index 00000000000..5f9f39c9aac
--- /dev/null
+++ b/cmd/devnet/contracts/gen_rootreceiver.go
@@ -0,0 +1,508 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package contracts
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ ethereum "github.com/ledgerwatch/erigon"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+ _ = big.NewInt
+ _ = strings.NewReader
+ _ = ethereum.NotFound
+ _ = bind.Bind
+ _ = libcommon.Big1
+ _ = types.BloomLookup
+ _ = event.NewSubscription
+)
+
+// RootReceiverABI is the input ABI used to generate the binding from.
+const RootReceiverABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_checkpointManager\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_source\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"received\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"SEND_MESSAGE_EVENT_SIG\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkpointManager\",\"outputs\":[{\"internalType\":\"contractICheckpointManager\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"processedExits\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"inputData\",\"type\":\"bytes\"}],\"name\":\"receiveMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"senders\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]"
+
+// RootReceiverBin is the compiled bytecode used for deploying new contracts.
+var RootReceiverBin = "0x608060405234801561001057600080fd5b50604051611ed1380380611ed183398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b611e3e806100936000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80630e387de61461005c578063607f2d4214610096578063982fb9d8146100c9578063c0857ba0146100e9578063f953cec714610114575b600080fd5b6100837f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b03681565b6040519081526020015b60405180910390f35b6100b96100a436600461196a565b60016020526000908152604090205460ff1681565b604051901515815260200161008d565b6100836100d736600461199b565b60026020526000908152604090205481565b6000546100fc906001600160a01b031681565b6040516001600160a01b03909116815260200161008d565b610127610122366004611a25565b610129565b005b60008061013583610148565b9150915061014382826103cf565b505050565b600060606000610157846104bb565b905060006101648261051a565b9050600061017183610549565b905060008161017f84610572565b6101888661072e565b60405160200161019a93929190611ac8565b60408051601f1981840301815291815281516020928301206000818152600190935291205490915060ff16156102235760405162461bcd60e51b8152602060048201526024808201527f4678526f6f7454756e6e656c3a20455849545f414c52454144595f50524f434560448201526314d4d15160e21b60648201526084015b60405180910390fd5b60008181526001602081905260408220805460ff191690911790556102478561074a565b9050600061025482610893565b9050600061026187610923565b9050610281610271846020015190565b8761027b8a61093f565b8461095b565b6102d95760405162461bcd60e51b815260206004820152602360248201527f4678526f6f7454756e6e656c3a20494e56414c49445f524543454950545f505260448201526227a7a360e91b606482015260840161021a565b610307856102e689610c28565b6102ef8a610c44565b846102f98c610c60565b6103028d610c7c565b610c98565b600061031283610db2565b90507f8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036610348610343836000610dee565b610e26565b146103955760405162461bcd60e51b815260206004820152601f60248201527f4678526f6f7454756e6e656c3a20494e56414c49445f5349474e415455524500604482015260640161021a565b60006103a084610ea1565b8060200190518101906103b39190611af5565b90506103be84610ebd565b9c909b509950505050505050505050565b6000806000838060200190518101906103e89190611b6b565b919450925090506001600160a01b038316301461043a5760405162461bcd60e51b815260206004820152601060248201526f24b73b30b634b2103932b1b2b4bb32b960811b604482015260640161021a565b6001600160a01b03821660009081526002602052604090205461045d8282611bc4565b6001600160a01b0384166000818152600260209081526040918290209390935580519182529181018490527ff11e547d796cc64acdf758e7cee90439494fd886a19159454aa61e473fdbafef910160405180910390a1505050505050565b60408051602081019091526060815260006105056105008460408051808201825260008082526020918201528151808301909252825182529182019181019190915290565b610ee6565b60408051602081019091529081529392505050565b6060610543826000015160088151811061053657610536611bd7565b6020026020010151610ffb565b92915050565b6000610543826000015160028151811061056557610565611bd7565b6020026020010151610e26565b604080516020810190915260008152815160609190156105435760008061059a600086611097565b60f81c905060018114806105b157508060ff166003145b15610658576001855160026105c69190611bed565b6105d09190611c04565b6001600160401b038111156105e7576105e76119b8565b6040519080825280601f01601f191660200182016040528015610611576020820181803683370190505b5092506000610621600187611097565b9050808460008151811061063757610637611bd7565b60200101906001600160f81b031916908160001a90535060019250506106bb565b6002855160026106689190611bed565b6106729190611c04565b6001600160401b03811115610689576106896119b8565b6040519080825280601f01601f1916602001820160405280156106b3576020820181803683370190505b509250600091505b60ff82165b8351811015610725576106ea6106d960ff851683611c04565b6106e4906002611bc4565b87611097565b8482815181106106fc576106fc611bd7565b60200101906001600160f81b031916908160001a9053508061071d81611c17565b9150506106c0565b50505092915050565b6000610543826000015160098151811061056557610565611bd7565b61076e60405180606001604052806060815260200160608152602001600081525090565b610788826000015160068151811061053657610536611bd7565b6020828101829052604080518082018252600080825290830152805180820190915282518152918101908201526107be81611118565b156107d3576107cc81610ee6565b825261087f565b602082015180516000906107e990600190611c04565b6001600160401b03811115610800576108006119b8565b6040519080825280601f01601f19166020018201604052801561082a576020820181803683370190505b50905060008083602101915082602001905061084882828551611153565b60408051808201825260008082526020918201528151808301909252845182528085019082015261087890610ee6565b8652505050505b6108888361072e565b604083015250919050565b6040805160808101825260009181018281526060808301939093528152602081019190915260006108e183600001516003815181106108d4576108d4611bd7565b6020026020010151610ee6565b8360400151815181106108f6576108f6611bd7565b60200260200101519050604051806040016040528082815260200161091a83610ee6565b90529392505050565b6000610543826000015160058151811061056557610565611bd7565b6060610543826000015160078151811061053657610536611bd7565b60008061098f8460408051808201825260008082526020918201528151808301909252825182529182019181019190915290565b9050600061099c826111de565b9050606080856000806109ae8b610572565b905080516000036109c9576000975050505050505050610c20565b60005b8651811015610c175781518311156109ef57600098505050505050505050610c20565b610a11878281518110610a0457610a04611bd7565b60200260200101516112e8565b955085805190602001208414610a3257600098505050505050505050610c20565b610a54878281518110610a4757610a47611bd7565b60200260200101516111de565b94508451601103610b335781518303610ac0578c80519060200120610a9286601081518110610a8557610a85611bd7565b6020026020010151611366565b8051906020012003610aaf57600198505050505050505050610c20565b600098505050505050505050610c20565b6000828481518110610ad457610ad4611bd7565b016020015160f81c90506010811115610af95760009950505050505050505050610c20565b610b1e868260ff1681518110610b1157610b11611bd7565b6020026020010151611402565b9450610b2b600185611bc4565b935050610c05565b8451600203610aaf576000610b5e610b5787600081518110610a8557610a85611bd7565b8486611430565b8351909150610b6d8286611bc4565b03610bc0578d80519060200120610b9087600181518110610a8557610a85611bd7565b8051906020012003610bae5760019950505050505050505050610c20565b60009950505050505050505050610c20565b80600003610bda5760009950505050505050505050610c20565b610be48185611bc4565b9350610bfc86600181518110610b1157610b11611bd7565b9450610c059050565b80610c0f81611c17565b9150506109cc565b50505050505050505b949350505050565b6000610543826000015160038151811061056557610565611bd7565b6000610543826000015160048151811061056557610565611bd7565b6000610543826000015160008151811061056557610565611bd7565b6060610543826000015160018151811061053657610536611bd7565b600080546040516320a9cea560e11b81526004810185905282916001600160a01b0316906341539d4a9060240160a060405180830381865afa158015610ce2573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d069190611c30565b50505091509150610d5c8189610d1c9190611c04565b6040805160208082018d90528183018c9052606082018b905260808083018b90528351808403909101815260a09092019092528051910120908486611537565b610da85760405162461bcd60e51b815260206004820152601c60248201527f4678526f6f7454756e6e656c3a20494e56414c49445f48454144455200000000604482015260640161021a565b5050505050505050565b6040805160208101909152606081526040518060200160405280610de684602001516001815181106108d4576108d4611bd7565b905292915050565b60408051808201909152600080825260208201528251805183908110610e1657610e16611bd7565b6020026020010151905092915050565b805160009015801590610e3b57508151602110155b610e4457600080fd5b6000610e53836020015161169f565b90506000818460000151610e679190611c04565b9050600080838660200151610e7c9190611bc4565b9050805191506020831015610e9857826020036101000a820491505b50949350505050565b6060610543826020015160028151811061053657610536611bd7565b60006105438260200151600081518110610ed957610ed9611bd7565b6020026020010151611721565b6060610ef182611118565b610efa57600080fd5b6000610f058361173b565b90506000816001600160401b03811115610f2157610f216119b8565b604051908082528060200260200182016040528015610f6657816020015b6040805180820190915260008082526020820152815260200190600190039081610f3f5790505b5090506000610f78856020015161169f565b8560200151610f879190611bc4565b90506000805b84811015610ff057610f9e836117c0565b9150604051806040016040528083815260200184815250848281518110610fc757610fc7611bd7565b6020908102919091010152610fdc8284611bc4565b925080610fe881611c17565b915050610f8d565b509195945050505050565b805160609061100957600080fd5b6000611018836020015161169f565b9050600081846000015161102c9190611c04565b90506000816001600160401b03811115611048576110486119b8565b6040519080825280601f01601f191660200182016040528015611072576020820181803683370190505b5090506000816020019050610e988487602001516110909190611bc4565b8285611864565b60006110a4600284611c93565b156110de576010826110b7600286611ca7565b815181106110c7576110c7611bd7565b01602001516110d9919060f81c611cbb565b61110e565b6010826110ec600286611ca7565b815181106110fc576110fc611bd7565b016020015161110e919060f81c611cdd565b60f81b9392505050565b8051600090810361112b57506000919050565b6020820151805160001a9060c0821015611149575060009392505050565b5060019392505050565b8060000361116057505050565b602081106111985782518252611177602084611bc4565b9250611184602083611bc4565b9150611191602082611c04565b9050611160565b806000036111a557505050565b600060016111b4836020611c04565b6111c090610100611de3565b6111ca9190611c04565b935183518516941916939093179091525050565b60606111e982611118565b6111f257600080fd5b60006111fd836118a9565b90506000816001600160401b03811115611219576112196119b8565b60405190808252806020026020018201604052801561125e57816020015b60408051808201909152600080825260208201528152602001906001900390816112375790505b5090506000611270856020015161169f565b856020015161127f9190611bc4565b90506000805b84811015610ff057611296836117c0565b91506040518060400160405280838152602001848152508482815181106112bf576112bf611bd7565b60209081029190910101526112d48284611bc4565b9250806112e081611c17565b915050611285565b6060600082600001516001600160401b03811115611308576113086119b8565b6040519080825280601f01601f191660200182016040528015611332576020820181803683370190505b50905080516000036113445792915050565b600081602001905061135f8460200151828660000151611925565b5092915050565b805160609061137457600080fd5b6000611383836020015161169f565b905060008184600001516113979190611c04565b90506000816001600160401b038111156113b3576113b36119b8565b6040519080825280601f01601f1916602001820160405280156113dd576020820181803683370190505b5090506000816020019050610e988487602001516113fb9190611bc4565b8285611925565b805160009060211461141357600080fd5b600080836020015160016114279190611bc4565b51949350505050565b6000808061143d86610572565b9050600081516001600160401b0381111561145a5761145a6119b8565b6040519080825280601f01601f191660200182016040528015611484576020820181803683370190505b509050845b82516114959087611bc4565b8110156115085760008782815181106114b0576114b0611bd7565b01602001516001600160f81b031916905080836114cd8985611c04565b815181106114dd576114dd611bd7565b60200101906001600160f81b031916908160001a90535050808061150090611c17565b915050611489565b508080519060200120828051906020012003611527578151925061152c565b600092505b509095945050505050565b6000602082516115479190611c93565b1561158b5760405162461bcd60e51b8152602060048201526014602482015273092dcecc2d8d2c840e0e4dedecc40d8cadccee8d60631b604482015260640161021a565b60006020835161159b9190611ca7565b90506115a8816002611de3565b85106115ee5760405162461bcd60e51b81526020600482015260156024820152744c65616620696e64657820697320746f6f2062696760581b604482015260640161021a565b60008660205b855181116116915785810151925061160d600289611c93565b600003611645576040805160208101849052908101849052606001604051602081830303815290604052805190602001209150611672565b60408051602081018590529081018390526060016040516020818303038152906040528051906020012091505b61167d600289611ca7565b975061168a602082611bc4565b90506115f4565b509094149695505050505050565b8051600090811a60808110156116b85750600092915050565b60b88110806116d3575060c081108015906116d3575060f881105b156116e15750600192915050565b60c0811015611715576116f6600160b8611def565b6117039060ff1682611c04565b61170e906001611bc4565b9392505050565b6116f6600160f8611def565b805160009060151461173257600080fd5b61054382610e26565b8051600090810361174e57506000919050565b60008061175e846020015161169f565b846020015161176d9190611bc4565b90506000846000015185602001516117859190611bc4565b90505b808210156117b757611799826117c0565b6117a39083611bc4565b9150826117af81611c17565b935050611788565b50909392505050565b80516000908190811a60808110156117db576001915061135f565b60b8811015611801576117ef608082611c04565b6117fa906001611bc4565b915061135f565b60c081101561182e5760b78103600185019450806020036101000a8551046001820181019350505061135f565b60f8811015611842576117ef60c082611c04565b60019390930151602084900360f7016101000a900490920160f5190192915050565b8060000361187157505050565b602081106111985782518252611888602084611bc4565b9250611895602083611bc4565b91506118a2602082611c04565b9050611871565b805160009081036118bc57506000919050565b6000806118cc846020015161169f565b84602001516118db9190611bc4565b90506000846000015185602001516118f39190611bc4565b90505b808210156117b757611907826117c0565b6119119083611bc4565b91508261191d81611c17565b9350506118f6565b8060000361193257505050565b602081106111985782518252611949602084611bc4565b9250611956602083611bc4565b9150611963602082611c04565b9050611932565b60006020828403121561197c57600080fd5b5035919050565b6001600160a01b038116811461199857600080fd5b50565b6000602082840312156119ad57600080fd5b813561170e81611983565b634e487b7160e01b600052604160045260246000fd5b604051601f8201601f191681016001600160401b03811182821017156119f6576119f66119b8565b604052919050565b60006001600160401b03821115611a1757611a176119b8565b50601f01601f191660200190565b600060208284031215611a3757600080fd5b81356001600160401b03811115611a4d57600080fd5b8201601f81018413611a5e57600080fd5b8035611a71611a6c826119fe565b6119ce565b818152856020838501011115611a8657600080fd5b81602084016020830137600091810160200191909152949350505050565b60005b83811015611abf578181015183820152602001611aa7565b50506000910152565b83815260008351611ae0816020850160208801611aa4565b60209201918201929092526040019392505050565b600060208284031215611b0757600080fd5b81516001600160401b03811115611b1d57600080fd5b8201601f81018413611b2e57600080fd5b8051611b3c611a6c826119fe565b818152856020838501011115611b5157600080fd5b611b62826020830160208601611aa4565b95945050505050565b600080600060608486031215611b8057600080fd5b8351611b8b81611983565b6020850151909350611b9c81611983565b80925050604084015190509250925092565b634e487b7160e01b600052601160045260246000fd5b8082018082111561054357610543611bae565b634e487b7160e01b600052603260045260246000fd5b808202811582820484141761054357610543611bae565b8181038181111561054357610543611bae565b600060018201611c2957611c29611bae565b5060010190565b600080600080600060a08688031215611c4857600080fd5b855194506020860151935060408601519250606086015191506080860151611c6f81611983565b809150509295509295909350565b634e487b7160e01b600052601260045260246000fd5b600082611ca257611ca2611c7d565b500690565b600082611cb657611cb6611c7d565b500490565b600060ff831680611cce57611cce611c7d565b8060ff84160691505092915050565b600060ff831680611cf057611cf0611c7d565b8060ff84160491505092915050565b600181815b80851115611d3a578160001904821115611d2057611d20611bae565b80851615611d2d57918102915b93841c9390800290611d04565b509250929050565b600082611d5157506001610543565b81611d5e57506000610543565b8160018114611d745760028114611d7e57611d9a565b6001915050610543565b60ff841115611d8f57611d8f611bae565b50506001821b610543565b5060208310610133831016604e8410600b8410161715611dbd575081810a610543565b611dc78383611cff565b8060001904821115611ddb57611ddb611bae565b029392505050565b600061170e8383611d42565b60ff828116828216039081111561054357610543611bae56fea2646970667358221220a924e520bf4f9d5629bc95702236e2702455bf9b57c4e9e4e344c7c7d7576a2b64736f6c63430008140033"
+
+// DeployRootReceiver deploys a new Ethereum contract, binding an instance of RootReceiver to it.
+func DeployRootReceiver(auth *bind.TransactOpts, backend bind.ContractBackend, _checkpointManager libcommon.Address) (libcommon.Address, types.Transaction, *RootReceiver, error) {
+ parsed, err := abi.JSON(strings.NewReader(RootReceiverABI))
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+
+ address, tx, contract, err := bind.DeployContract(auth, parsed, libcommon.FromHex(RootReceiverBin), backend, _checkpointManager)
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+ return address, tx, &RootReceiver{RootReceiverCaller: RootReceiverCaller{contract: contract}, RootReceiverTransactor: RootReceiverTransactor{contract: contract}, RootReceiverFilterer: RootReceiverFilterer{contract: contract}}, nil
+}
+
+// RootReceiver is an auto generated Go binding around an Ethereum contract.
+type RootReceiver struct {
+ RootReceiverCaller // Read-only binding to the contract
+ RootReceiverTransactor // Write-only binding to the contract
+ RootReceiverFilterer // Log filterer for contract events
+}
+
+// RootReceiverCaller is an auto generated read-only Go binding around an Ethereum contract.
+type RootReceiverCaller struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// RootReceiverTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type RootReceiverTransactor struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// RootReceiverFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type RootReceiverFilterer struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// RootReceiverSession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type RootReceiverSession struct {
+ Contract *RootReceiver // Generic contract binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// RootReceiverCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type RootReceiverCallerSession struct {
+ Contract *RootReceiverCaller // Generic contract caller binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+}
+
+// RootReceiverTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type RootReceiverTransactorSession struct {
+ Contract *RootReceiverTransactor // Generic contract transactor binding to set the session for
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// RootReceiverRaw is an auto generated low-level Go binding around an Ethereum contract.
+type RootReceiverRaw struct {
+ Contract *RootReceiver // Generic contract binding to access the raw methods on
+}
+
+// RootReceiverCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type RootReceiverCallerRaw struct {
+ Contract *RootReceiverCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// RootReceiverTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type RootReceiverTransactorRaw struct {
+ Contract *RootReceiverTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NewRootReceiver creates a new instance of RootReceiver, bound to a specific deployed contract.
+func NewRootReceiver(address libcommon.Address, backend bind.ContractBackend) (*RootReceiver, error) {
+ contract, err := bindRootReceiver(address, backend, backend, backend)
+ if err != nil {
+ return nil, err
+ }
+ return &RootReceiver{RootReceiverCaller: RootReceiverCaller{contract: contract}, RootReceiverTransactor: RootReceiverTransactor{contract: contract}, RootReceiverFilterer: RootReceiverFilterer{contract: contract}}, nil
+}
+
+// NewRootReceiverCaller creates a new read-only instance of RootReceiver, bound to a specific deployed contract.
+func NewRootReceiverCaller(address libcommon.Address, caller bind.ContractCaller) (*RootReceiverCaller, error) {
+ contract, err := bindRootReceiver(address, caller, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &RootReceiverCaller{contract: contract}, nil
+}
+
+// NewRootReceiverTransactor creates a new write-only instance of RootReceiver, bound to a specific deployed contract.
+func NewRootReceiverTransactor(address libcommon.Address, transactor bind.ContractTransactor) (*RootReceiverTransactor, error) {
+ contract, err := bindRootReceiver(address, nil, transactor, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &RootReceiverTransactor{contract: contract}, nil
+}
+
+// NewRootReceiverFilterer creates a new log filterer instance of RootReceiver, bound to a specific deployed contract.
+func NewRootReceiverFilterer(address libcommon.Address, filterer bind.ContractFilterer) (*RootReceiverFilterer, error) {
+ contract, err := bindRootReceiver(address, nil, nil, filterer)
+ if err != nil {
+ return nil, err
+ }
+ return &RootReceiverFilterer{contract: contract}, nil
+}
+
+// bindRootReceiver binds a generic wrapper to an already deployed contract.
+func bindRootReceiver(address libcommon.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+ parsed, err := abi.JSON(strings.NewReader(RootReceiverABI))
+ if err != nil {
+ return nil, err
+ }
+ return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_RootReceiver *RootReceiverRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _RootReceiver.Contract.RootReceiverCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_RootReceiver *RootReceiverRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _RootReceiver.Contract.RootReceiverTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_RootReceiver *RootReceiverRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _RootReceiver.Contract.RootReceiverTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_RootReceiver *RootReceiverCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _RootReceiver.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_RootReceiver *RootReceiverTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _RootReceiver.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_RootReceiver *RootReceiverTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _RootReceiver.Contract.contract.Transact(opts, method, params...)
+}
+
+// SENDMESSAGEEVENTSIG is a free data retrieval call binding the contract method 0x0e387de6.
+//
+// Solidity: function SEND_MESSAGE_EVENT_SIG() view returns(bytes32)
+func (_RootReceiver *RootReceiverCaller) SENDMESSAGEEVENTSIG(opts *bind.CallOpts) ([32]byte, error) {
+ var out []interface{}
+ err := _RootReceiver.contract.Call(opts, &out, "SEND_MESSAGE_EVENT_SIG")
+
+ if err != nil {
+ return *new([32]byte), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
+
+ return out0, err
+
+}
+
+// SENDMESSAGEEVENTSIG is a free data retrieval call binding the contract method 0x0e387de6.
+//
+// Solidity: function SEND_MESSAGE_EVENT_SIG() view returns(bytes32)
+func (_RootReceiver *RootReceiverSession) SENDMESSAGEEVENTSIG() ([32]byte, error) {
+ return _RootReceiver.Contract.SENDMESSAGEEVENTSIG(&_RootReceiver.CallOpts)
+}
+
+// SENDMESSAGEEVENTSIG is a free data retrieval call binding the contract method 0x0e387de6.
+//
+// Solidity: function SEND_MESSAGE_EVENT_SIG() view returns(bytes32)
+func (_RootReceiver *RootReceiverCallerSession) SENDMESSAGEEVENTSIG() ([32]byte, error) {
+ return _RootReceiver.Contract.SENDMESSAGEEVENTSIG(&_RootReceiver.CallOpts)
+}
+
+// CheckpointManager is a free data retrieval call binding the contract method 0xc0857ba0.
+//
+// Solidity: function checkpointManager() view returns(address)
+func (_RootReceiver *RootReceiverCaller) CheckpointManager(opts *bind.CallOpts) (libcommon.Address, error) {
+ var out []interface{}
+ err := _RootReceiver.contract.Call(opts, &out, "checkpointManager")
+
+ if err != nil {
+ return *new(libcommon.Address), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(libcommon.Address)).(*libcommon.Address)
+
+ return out0, err
+
+}
+
+// CheckpointManager is a free data retrieval call binding the contract method 0xc0857ba0.
+//
+// Solidity: function checkpointManager() view returns(address)
+func (_RootReceiver *RootReceiverSession) CheckpointManager() (libcommon.Address, error) {
+ return _RootReceiver.Contract.CheckpointManager(&_RootReceiver.CallOpts)
+}
+
+// CheckpointManager is a free data retrieval call binding the contract method 0xc0857ba0.
+//
+// Solidity: function checkpointManager() view returns(address)
+func (_RootReceiver *RootReceiverCallerSession) CheckpointManager() (libcommon.Address, error) {
+ return _RootReceiver.Contract.CheckpointManager(&_RootReceiver.CallOpts)
+}
+
+// ProcessedExits is a free data retrieval call binding the contract method 0x607f2d42.
+//
+// Solidity: function processedExits(bytes32 ) view returns(bool)
+func (_RootReceiver *RootReceiverCaller) ProcessedExits(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {
+ var out []interface{}
+ err := _RootReceiver.contract.Call(opts, &out, "processedExits", arg0)
+
+ if err != nil {
+ return *new(bool), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(bool)).(*bool)
+
+ return out0, err
+
+}
+
+// ProcessedExits is a free data retrieval call binding the contract method 0x607f2d42.
+//
+// Solidity: function processedExits(bytes32 ) view returns(bool)
+func (_RootReceiver *RootReceiverSession) ProcessedExits(arg0 [32]byte) (bool, error) {
+ return _RootReceiver.Contract.ProcessedExits(&_RootReceiver.CallOpts, arg0)
+}
+
+// ProcessedExits is a free data retrieval call binding the contract method 0x607f2d42.
+//
+// Solidity: function processedExits(bytes32 ) view returns(bool)
+func (_RootReceiver *RootReceiverCallerSession) ProcessedExits(arg0 [32]byte) (bool, error) {
+ return _RootReceiver.Contract.ProcessedExits(&_RootReceiver.CallOpts, arg0)
+}
+
+// Senders is a free data retrieval call binding the contract method 0x982fb9d8.
+//
+// Solidity: function senders(address ) view returns(uint256)
+func (_RootReceiver *RootReceiverCaller) Senders(opts *bind.CallOpts, arg0 libcommon.Address) (*big.Int, error) {
+ var out []interface{}
+ err := _RootReceiver.contract.Call(opts, &out, "senders", arg0)
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// Senders is a free data retrieval call binding the contract method 0x982fb9d8.
+//
+// Solidity: function senders(address ) view returns(uint256)
+func (_RootReceiver *RootReceiverSession) Senders(arg0 libcommon.Address) (*big.Int, error) {
+ return _RootReceiver.Contract.Senders(&_RootReceiver.CallOpts, arg0)
+}
+
+// Senders is a free data retrieval call binding the contract method 0x982fb9d8.
+//
+// Solidity: function senders(address ) view returns(uint256)
+func (_RootReceiver *RootReceiverCallerSession) Senders(arg0 libcommon.Address) (*big.Int, error) {
+ return _RootReceiver.Contract.Senders(&_RootReceiver.CallOpts, arg0)
+}
+
+// ReceiveMessage is a paid mutator transaction binding the contract method 0xf953cec7.
+//
+// Solidity: function receiveMessage(bytes inputData) returns()
+func (_RootReceiver *RootReceiverTransactor) ReceiveMessage(opts *bind.TransactOpts, inputData []byte) (types.Transaction, error) {
+ return _RootReceiver.contract.Transact(opts, "receiveMessage", inputData)
+}
+
+// ReceiveMessage is a paid mutator transaction binding the contract method 0xf953cec7.
+//
+// Solidity: function receiveMessage(bytes inputData) returns()
+func (_RootReceiver *RootReceiverSession) ReceiveMessage(inputData []byte) (types.Transaction, error) {
+ return _RootReceiver.Contract.ReceiveMessage(&_RootReceiver.TransactOpts, inputData)
+}
+
+// ReceiveMessage is a paid mutator transaction binding the contract method 0xf953cec7.
+//
+// Solidity: function receiveMessage(bytes inputData) returns()
+func (_RootReceiver *RootReceiverTransactorSession) ReceiveMessage(inputData []byte) (types.Transaction, error) {
+ return _RootReceiver.Contract.ReceiveMessage(&_RootReceiver.TransactOpts, inputData)
+}
+
+// ReceiveMessageParams is an auto generated read-only Go binding of transcaction calldata params
+type ReceiveMessageParams struct {
+ Param_inputData []byte
+}
+
+// Parse ReceiveMessage method from calldata of a transaction
+//
+// Solidity: function receiveMessage(bytes inputData) returns()
+func ParseReceiveMessage(calldata []byte) (*ReceiveMessageParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(RootReceiverABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["receiveMessage"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack receiveMessage params data: %w", err)
+ }
+
+ var paramsResult = new(ReceiveMessageParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte)
+
+ return &ReceiveMessageParams{
+ Param_inputData: out0,
+ }, nil
+}
+
+// RootReceiverReceivedIterator is returned from FilterReceived and is used to iterate over the raw logs and unpacked data for Received events raised by the RootReceiver contract.
+type RootReceiverReceivedIterator struct {
+ Event *RootReceiverReceived // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *RootReceiverReceivedIterator) Next() bool {
+ // If the iterator failed, stop iterating
+ if it.fail != nil {
+ return false
+ }
+ // If the iterator completed, deliver directly whatever's available
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(RootReceiverReceived)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+ // Iterator still in progress, wait for either a data or an error event
+ select {
+ case log := <-it.logs:
+ it.Event = new(RootReceiverReceived)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *RootReceiverReceivedIterator) Error() error {
+ return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *RootReceiverReceivedIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+// RootReceiverReceived represents a Received event raised by the RootReceiver contract.
+type RootReceiverReceived struct {
+ Source libcommon.Address
+ Amount *big.Int
+ Raw types.Log // Blockchain specific contextual infos
+}
+
+// FilterReceived is a free log retrieval operation binding the contract event 0xf11e547d796cc64acdf758e7cee90439494fd886a19159454aa61e473fdbafef.
+//
+// Solidity: event received(address _source, uint256 _amount)
+func (_RootReceiver *RootReceiverFilterer) FilterReceived(opts *bind.FilterOpts) (*RootReceiverReceivedIterator, error) {
+
+ logs, sub, err := _RootReceiver.contract.FilterLogs(opts, "received")
+ if err != nil {
+ return nil, err
+ }
+ return &RootReceiverReceivedIterator{contract: _RootReceiver.contract, event: "received", logs: logs, sub: sub}, nil
+}
+
+// WatchReceived is a free log subscription operation binding the contract event 0xf11e547d796cc64acdf758e7cee90439494fd886a19159454aa61e473fdbafef.
+//
+// Solidity: event received(address _source, uint256 _amount)
+func (_RootReceiver *RootReceiverFilterer) WatchReceived(opts *bind.WatchOpts, sink chan<- *RootReceiverReceived) (event.Subscription, error) {
+
+ logs, sub, err := _RootReceiver.contract.WatchLogs(opts, "received")
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new(RootReceiverReceived)
+ if err := _RootReceiver.contract.UnpackLog(event, "received", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+// ParseReceived is a log parse operation binding the contract event 0xf11e547d796cc64acdf758e7cee90439494fd886a19159454aa61e473fdbafef.
+//
+// Solidity: event received(address _source, uint256 _amount)
+func (_RootReceiver *RootReceiverFilterer) ParseReceived(log types.Log) (*RootReceiverReceived, error) {
+ event := new(RootReceiverReceived)
+ if err := _RootReceiver.contract.UnpackLog(event, "received", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
diff --git a/cmd/devnet/contracts/gen_testrootchain.go b/cmd/devnet/contracts/gen_testrootchain.go
new file mode 100644
index 00000000000..24d82755a52
--- /dev/null
+++ b/cmd/devnet/contracts/gen_testrootchain.go
@@ -0,0 +1,1070 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package contracts
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ ethereum "github.com/ledgerwatch/erigon"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+ _ = big.NewInt
+ _ = strings.NewReader
+ _ = ethereum.NotFound
+ _ = bind.Bind
+ _ = libcommon.Big1
+ _ = types.BloomLookup
+ _ = event.NewSubscription
+)
+
+// TestRootChainABI is the input ABI used to generate the binding from.
+const TestRootChainABI = "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"proposer\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"headerBlockId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"reward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"start\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"end\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"root\",\"type\":\"bytes32\"}],\"name\":\"NewHeaderBlock\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"proposer\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"headerBlockId\",\"type\":\"uint256\"}],\"name\":\"ResetHeaderBlock\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"CHAINID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"VOTE_TYPE\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"_nextHeaderBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"currentHeaderBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLastChildBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"headerBlocks\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"root\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"start\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"end\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"createdAt\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposer\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"heimdallId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"networkId\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_heimdallId\",\"type\":\"string\"}],\"name\":\"setHeimdallId\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"setNextHeaderBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"slash\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint256[3][]\",\"name\":\"\",\"type\":\"uint256[3][]\"}],\"name\":\"submitCheckpoint\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"submitHeaderBlock\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numDeposits\",\"type\":\"uint256\"}],\"name\":\"updateDepositId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"depositId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
+
+// TestRootChainBin is the compiled bytecode used for deploying new contracts.
+var TestRootChainBin = "0x6080604052612710600255600160035534801561001b57600080fd5b50610af88061002b6000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063b87e1b661161008c578063d5b844eb11610066578063d5b844eb1461020b578063ea0688b314610225578063ec7e485514610238578063fbc3dd361461024057600080fd5b8063b87e1b66146101e7578063cc79f97b146101ef578063cf24a0ea146101f857600080fd5b80635391f483116100c85780635391f483146101815780636a791f11146101a25780638d978d88146101b05780639025e64c146101b957600080fd5b80632da25de3146100ef57806341539d4a146100f15780634e43e4951461016e575b600080fd5b005b6101386100ff36600461072b565b6004602081905260009182526040909120805460018201546002830154600384015493909401549193909290916001600160a01b031685565b6040805195865260208601949094529284019190915260608301526001600160a01b0316608082015260a0015b60405180910390f35b6100ef61017c36600461078d565b610249565b61019461018f36600461072b565b61037b565b604051908152602001610165565b6100ef6100ea366004610827565b61019460025481565b6101da60405180604001604052806002815260200161053960f01b81525081565b60405161016591906108b7565b6101946104c5565b61019461053981565b6100ef61020636600461072b565b6104ea565b610213600281565b60405160ff9091168152602001610165565b6100ef610233366004610900565b6105c5565b6101946105f4565b61019460015481565b6000808080808061025c898b018b6109c9565b95509550955095509550955080610539146102b55760405162461bcd60e51b8152602060048201526014602482015273125b9d985b1a5908189bdc8818da185a5b881a5960621b60448201526064015b60405180910390fd5b6102c18686868661060b565b6103055760405162461bcd60e51b8152602060048201526015602482015274494e434f52524543545f4845414445525f4441544160581b60448201526064016102ac565b6002546040805187815260208101879052908101859052600091906001600160a01b038916907fba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb5279060600160405180910390a4600254610367906127106106e4565b600255505060016003555050505050505050565b6005546040805162c9effd60e41b815290516000926001600160a01b031691630c9effd09160048083019260209291908290030181865afa1580156103c4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103e89190610a15565b6001600160a01b0316336001600160a01b0316146104525760405162461bcd60e51b815260206004820152602160248201527f554e415554484f52495a45445f4445504f5349545f4d414e414745525f4f4e4c6044820152605960f81b60648201526084016102ac565b6104666003546104606105f4565b906106e4565b60035490915061047690836106e4565b600381905561271010156104c05760405162461bcd60e51b8152602060048201526011602482015270544f4f5f4d414e595f4445504f5349545360781b60448201526064016102ac565b919050565b6000600460006104d36105f4565b815260200190815260200160002060020154905090565b6104f661271082610a32565b156105335760405162461bcd60e51b815260206004820152600d60248201526c496e76616c69642076616c756560981b60448201526064016102ac565b805b60025481101561058a5760008181526004602081905260408220828155600181018390556002810183905560038101929092550180546001600160a01b031916905561058361271082610a6a565b9050610535565b5060028190556001600355604051819033907fca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a48811720590600090a350565b806040516020016105d69190610a7d565b60408051601f19818403018152919052805160209091012060015550565b60025460009061060690612710610708565b905090565b60008061271061ffff16600254111561064b576004600061062a6105f4565b81526020019081526020016000206002015460016106489190610a6a565b90505b84811461065c5760009150506106dc565b6040805160a081018252848152602080820193845281830187815242606084019081526001600160a01b038b811660808601908152600280546000908152600496879052979097209551865596516001808701919091559251958501959095555160038401559351910180546001600160a01b0319169190921617905590505b949350505050565b60006106f08284610a6a565b90508281101561070257610702610a99565b92915050565b60008282111561071a5761071a610a99565b6107248284610aaf565b9392505050565b60006020828403121561073d57600080fd5b5035919050565b60008083601f84011261075657600080fd5b50813567ffffffffffffffff81111561076e57600080fd5b60208301915083602082850101111561078657600080fd5b9250929050565b600080600080604085870312156107a357600080fd5b843567ffffffffffffffff808211156107bb57600080fd5b6107c788838901610744565b909650945060208701359150808211156107e057600080fd5b818701915087601f8301126107f457600080fd5b81358181111561080357600080fd5b88602060608302850101111561081857600080fd5b95989497505060200194505050565b6000806000806040858703121561083d57600080fd5b843567ffffffffffffffff8082111561085557600080fd5b61086188838901610744565b9096509450602087013591508082111561087a57600080fd5b5061088787828801610744565b95989497509550505050565b60005b838110156108ae578181015183820152602001610896565b50506000910152565b60208152600082518060208401526108d6816040850160208701610893565b601f01601f19169190910160400192915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561091257600080fd5b813567ffffffffffffffff8082111561092a57600080fd5b818401915084601f83011261093e57600080fd5b813581811115610950576109506108ea565b604051601f8201601f19908116603f01168101908382118183101715610978576109786108ea565b8160405282815287602084870101111561099157600080fd5b826020860160208301376000928101602001929092525095945050505050565b6001600160a01b03811681146109c657600080fd5b50565b60008060008060008060c087890312156109e257600080fd5b86356109ed816109b1565b9860208801359850604088013597606081013597506080810135965060a00135945092505050565b600060208284031215610a2757600080fd5b8151610724816109b1565b600082610a4f57634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052601160045260246000fd5b8082018082111561070257610702610a54565b60008251610a8f818460208701610893565b9190910192915050565b634e487b7160e01b600052600160045260246000fd5b8181038181111561070257610702610a5456fea2646970667358221220e8aee67b63507e8745850c7b73e998c6ef6b5d41b72b45f8f1316e80e79a1ec964736f6c63430008140033"
+
+// DeployTestRootChain deploys a new Ethereum contract, binding an instance of TestRootChain to it.
+func DeployTestRootChain(auth *bind.TransactOpts, backend bind.ContractBackend) (libcommon.Address, types.Transaction, *TestRootChain, error) {
+ parsed, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+
+ address, tx, contract, err := bind.DeployContract(auth, parsed, libcommon.FromHex(TestRootChainBin), backend)
+ if err != nil {
+ return libcommon.Address{}, nil, nil, err
+ }
+ return address, tx, &TestRootChain{TestRootChainCaller: TestRootChainCaller{contract: contract}, TestRootChainTransactor: TestRootChainTransactor{contract: contract}, TestRootChainFilterer: TestRootChainFilterer{contract: contract}}, nil
+}
+
+// TestRootChain is an auto generated Go binding around an Ethereum contract.
+type TestRootChain struct {
+ TestRootChainCaller // Read-only binding to the contract
+ TestRootChainTransactor // Write-only binding to the contract
+ TestRootChainFilterer // Log filterer for contract events
+}
+
+// TestRootChainCaller is an auto generated read-only Go binding around an Ethereum contract.
+type TestRootChainCaller struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TestRootChainTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type TestRootChainTransactor struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TestRootChainFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type TestRootChainFilterer struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TestRootChainSession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type TestRootChainSession struct {
+ Contract *TestRootChain // Generic contract binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// TestRootChainCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type TestRootChainCallerSession struct {
+ Contract *TestRootChainCaller // Generic contract caller binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+}
+
+// TestRootChainTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type TestRootChainTransactorSession struct {
+ Contract *TestRootChainTransactor // Generic contract transactor binding to set the session for
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// TestRootChainRaw is an auto generated low-level Go binding around an Ethereum contract.
+type TestRootChainRaw struct {
+ Contract *TestRootChain // Generic contract binding to access the raw methods on
+}
+
+// TestRootChainCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type TestRootChainCallerRaw struct {
+ Contract *TestRootChainCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// TestRootChainTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type TestRootChainTransactorRaw struct {
+ Contract *TestRootChainTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NewTestRootChain creates a new instance of TestRootChain, bound to a specific deployed contract.
+func NewTestRootChain(address libcommon.Address, backend bind.ContractBackend) (*TestRootChain, error) {
+ contract, err := bindTestRootChain(address, backend, backend, backend)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChain{TestRootChainCaller: TestRootChainCaller{contract: contract}, TestRootChainTransactor: TestRootChainTransactor{contract: contract}, TestRootChainFilterer: TestRootChainFilterer{contract: contract}}, nil
+}
+
+// NewTestRootChainCaller creates a new read-only instance of TestRootChain, bound to a specific deployed contract.
+func NewTestRootChainCaller(address libcommon.Address, caller bind.ContractCaller) (*TestRootChainCaller, error) {
+ contract, err := bindTestRootChain(address, caller, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChainCaller{contract: contract}, nil
+}
+
+// NewTestRootChainTransactor creates a new write-only instance of TestRootChain, bound to a specific deployed contract.
+func NewTestRootChainTransactor(address libcommon.Address, transactor bind.ContractTransactor) (*TestRootChainTransactor, error) {
+ contract, err := bindTestRootChain(address, nil, transactor, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChainTransactor{contract: contract}, nil
+}
+
+// NewTestRootChainFilterer creates a new log filterer instance of TestRootChain, bound to a specific deployed contract.
+func NewTestRootChainFilterer(address libcommon.Address, filterer bind.ContractFilterer) (*TestRootChainFilterer, error) {
+ contract, err := bindTestRootChain(address, nil, nil, filterer)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChainFilterer{contract: contract}, nil
+}
+
+// bindTestRootChain binds a generic wrapper to an already deployed contract.
+func bindTestRootChain(address libcommon.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+ parsed, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return nil, err
+ }
+ return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_TestRootChain *TestRootChainRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _TestRootChain.Contract.TestRootChainCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_TestRootChain *TestRootChainRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _TestRootChain.Contract.TestRootChainTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_TestRootChain *TestRootChainRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _TestRootChain.Contract.TestRootChainTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_TestRootChain *TestRootChainCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _TestRootChain.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_TestRootChain *TestRootChainTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _TestRootChain.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_TestRootChain *TestRootChainTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _TestRootChain.Contract.contract.Transact(opts, method, params...)
+}
+
+// CHAINID is a free data retrieval call binding the contract method 0xcc79f97b.
+//
+// Solidity: function CHAINID() view returns(uint256)
+func (_TestRootChain *TestRootChainCaller) CHAINID(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "CHAINID")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// CHAINID is a free data retrieval call binding the contract method 0xcc79f97b.
+//
+// Solidity: function CHAINID() view returns(uint256)
+func (_TestRootChain *TestRootChainSession) CHAINID() (*big.Int, error) {
+ return _TestRootChain.Contract.CHAINID(&_TestRootChain.CallOpts)
+}
+
+// CHAINID is a free data retrieval call binding the contract method 0xcc79f97b.
+//
+// Solidity: function CHAINID() view returns(uint256)
+func (_TestRootChain *TestRootChainCallerSession) CHAINID() (*big.Int, error) {
+ return _TestRootChain.Contract.CHAINID(&_TestRootChain.CallOpts)
+}
+
+// VOTETYPE is a free data retrieval call binding the contract method 0xd5b844eb.
+//
+// Solidity: function VOTE_TYPE() view returns(uint8)
+func (_TestRootChain *TestRootChainCaller) VOTETYPE(opts *bind.CallOpts) (uint8, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "VOTE_TYPE")
+
+ if err != nil {
+ return *new(uint8), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8)
+
+ return out0, err
+
+}
+
+// VOTETYPE is a free data retrieval call binding the contract method 0xd5b844eb.
+//
+// Solidity: function VOTE_TYPE() view returns(uint8)
+func (_TestRootChain *TestRootChainSession) VOTETYPE() (uint8, error) {
+ return _TestRootChain.Contract.VOTETYPE(&_TestRootChain.CallOpts)
+}
+
+// VOTETYPE is a free data retrieval call binding the contract method 0xd5b844eb.
+//
+// Solidity: function VOTE_TYPE() view returns(uint8)
+func (_TestRootChain *TestRootChainCallerSession) VOTETYPE() (uint8, error) {
+ return _TestRootChain.Contract.VOTETYPE(&_TestRootChain.CallOpts)
+}
+
+// NextHeaderBlock is a free data retrieval call binding the contract method 0x8d978d88.
+//
+// Solidity: function _nextHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCaller) NextHeaderBlock(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "_nextHeaderBlock")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// NextHeaderBlock is a free data retrieval call binding the contract method 0x8d978d88.
+//
+// Solidity: function _nextHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainSession) NextHeaderBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.NextHeaderBlock(&_TestRootChain.CallOpts)
+}
+
+// NextHeaderBlock is a free data retrieval call binding the contract method 0x8d978d88.
+//
+// Solidity: function _nextHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCallerSession) NextHeaderBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.NextHeaderBlock(&_TestRootChain.CallOpts)
+}
+
+// CurrentHeaderBlock is a free data retrieval call binding the contract method 0xec7e4855.
+//
+// Solidity: function currentHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCaller) CurrentHeaderBlock(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "currentHeaderBlock")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// CurrentHeaderBlock is a free data retrieval call binding the contract method 0xec7e4855.
+//
+// Solidity: function currentHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainSession) CurrentHeaderBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.CurrentHeaderBlock(&_TestRootChain.CallOpts)
+}
+
+// CurrentHeaderBlock is a free data retrieval call binding the contract method 0xec7e4855.
+//
+// Solidity: function currentHeaderBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCallerSession) CurrentHeaderBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.CurrentHeaderBlock(&_TestRootChain.CallOpts)
+}
+
+// GetLastChildBlock is a free data retrieval call binding the contract method 0xb87e1b66.
+//
+// Solidity: function getLastChildBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCaller) GetLastChildBlock(opts *bind.CallOpts) (*big.Int, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "getLastChildBlock")
+
+ if err != nil {
+ return *new(*big.Int), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return out0, err
+
+}
+
+// GetLastChildBlock is a free data retrieval call binding the contract method 0xb87e1b66.
+//
+// Solidity: function getLastChildBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainSession) GetLastChildBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.GetLastChildBlock(&_TestRootChain.CallOpts)
+}
+
+// GetLastChildBlock is a free data retrieval call binding the contract method 0xb87e1b66.
+//
+// Solidity: function getLastChildBlock() view returns(uint256)
+func (_TestRootChain *TestRootChainCallerSession) GetLastChildBlock() (*big.Int, error) {
+ return _TestRootChain.Contract.GetLastChildBlock(&_TestRootChain.CallOpts)
+}
+
+// HeaderBlocks is a free data retrieval call binding the contract method 0x41539d4a.
+//
+// Solidity: function headerBlocks(uint256 ) view returns(bytes32 root, uint256 start, uint256 end, uint256 createdAt, address proposer)
+func (_TestRootChain *TestRootChainCaller) HeaderBlocks(opts *bind.CallOpts, arg0 *big.Int) (struct {
+ Root [32]byte
+ Start *big.Int
+ End *big.Int
+ CreatedAt *big.Int
+ Proposer libcommon.Address
+}, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "headerBlocks", arg0)
+
+ outstruct := new(struct {
+ Root [32]byte
+ Start *big.Int
+ End *big.Int
+ CreatedAt *big.Int
+ Proposer libcommon.Address
+ })
+ if err != nil {
+ return *outstruct, err
+ }
+
+ outstruct.Root = *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
+ outstruct.Start = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
+ outstruct.End = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)
+ outstruct.CreatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
+ outstruct.Proposer = *abi.ConvertType(out[4], new(libcommon.Address)).(*libcommon.Address)
+
+ return *outstruct, err
+
+}
+
+// HeaderBlocks is a free data retrieval call binding the contract method 0x41539d4a.
+//
+// Solidity: function headerBlocks(uint256 ) view returns(bytes32 root, uint256 start, uint256 end, uint256 createdAt, address proposer)
+func (_TestRootChain *TestRootChainSession) HeaderBlocks(arg0 *big.Int) (struct {
+ Root [32]byte
+ Start *big.Int
+ End *big.Int
+ CreatedAt *big.Int
+ Proposer libcommon.Address
+}, error) {
+ return _TestRootChain.Contract.HeaderBlocks(&_TestRootChain.CallOpts, arg0)
+}
+
+// HeaderBlocks is a free data retrieval call binding the contract method 0x41539d4a.
+//
+// Solidity: function headerBlocks(uint256 ) view returns(bytes32 root, uint256 start, uint256 end, uint256 createdAt, address proposer)
+func (_TestRootChain *TestRootChainCallerSession) HeaderBlocks(arg0 *big.Int) (struct {
+ Root [32]byte
+ Start *big.Int
+ End *big.Int
+ CreatedAt *big.Int
+ Proposer libcommon.Address
+}, error) {
+ return _TestRootChain.Contract.HeaderBlocks(&_TestRootChain.CallOpts, arg0)
+}
+
+// HeimdallId is a free data retrieval call binding the contract method 0xfbc3dd36.
+//
+// Solidity: function heimdallId() view returns(bytes32)
+func (_TestRootChain *TestRootChainCaller) HeimdallId(opts *bind.CallOpts) ([32]byte, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "heimdallId")
+
+ if err != nil {
+ return *new([32]byte), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
+
+ return out0, err
+
+}
+
+// HeimdallId is a free data retrieval call binding the contract method 0xfbc3dd36.
+//
+// Solidity: function heimdallId() view returns(bytes32)
+func (_TestRootChain *TestRootChainSession) HeimdallId() ([32]byte, error) {
+ return _TestRootChain.Contract.HeimdallId(&_TestRootChain.CallOpts)
+}
+
+// HeimdallId is a free data retrieval call binding the contract method 0xfbc3dd36.
+//
+// Solidity: function heimdallId() view returns(bytes32)
+func (_TestRootChain *TestRootChainCallerSession) HeimdallId() ([32]byte, error) {
+ return _TestRootChain.Contract.HeimdallId(&_TestRootChain.CallOpts)
+}
+
+// NetworkId is a free data retrieval call binding the contract method 0x9025e64c.
+//
+// Solidity: function networkId() view returns(bytes)
+func (_TestRootChain *TestRootChainCaller) NetworkId(opts *bind.CallOpts) ([]byte, error) {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "networkId")
+
+ if err != nil {
+ return *new([]byte), err
+ }
+
+ out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte)
+
+ return out0, err
+
+}
+
+// NetworkId is a free data retrieval call binding the contract method 0x9025e64c.
+//
+// Solidity: function networkId() view returns(bytes)
+func (_TestRootChain *TestRootChainSession) NetworkId() ([]byte, error) {
+ return _TestRootChain.Contract.NetworkId(&_TestRootChain.CallOpts)
+}
+
+// NetworkId is a free data retrieval call binding the contract method 0x9025e64c.
+//
+// Solidity: function networkId() view returns(bytes)
+func (_TestRootChain *TestRootChainCallerSession) NetworkId() ([]byte, error) {
+ return _TestRootChain.Contract.NetworkId(&_TestRootChain.CallOpts)
+}
+
+// SubmitHeaderBlock is a free data retrieval call binding the contract method 0x6a791f11.
+//
+// Solidity: function submitHeaderBlock(bytes , bytes ) pure returns()
+func (_TestRootChain *TestRootChainCaller) SubmitHeaderBlock(opts *bind.CallOpts, arg0 []byte, arg1 []byte) error {
+ var out []interface{}
+ err := _TestRootChain.contract.Call(opts, &out, "submitHeaderBlock", arg0, arg1)
+
+ if err != nil {
+ return err
+ }
+
+ return err
+
+}
+
+// SubmitHeaderBlock is a free data retrieval call binding the contract method 0x6a791f11.
+//
+// Solidity: function submitHeaderBlock(bytes , bytes ) pure returns()
+func (_TestRootChain *TestRootChainSession) SubmitHeaderBlock(arg0 []byte, arg1 []byte) error {
+ return _TestRootChain.Contract.SubmitHeaderBlock(&_TestRootChain.CallOpts, arg0, arg1)
+}
+
+// SubmitHeaderBlock is a free data retrieval call binding the contract method 0x6a791f11.
+//
+// Solidity: function submitHeaderBlock(bytes , bytes ) pure returns()
+func (_TestRootChain *TestRootChainCallerSession) SubmitHeaderBlock(arg0 []byte, arg1 []byte) error {
+ return _TestRootChain.Contract.SubmitHeaderBlock(&_TestRootChain.CallOpts, arg0, arg1)
+}
+
+// SetHeimdallId is a paid mutator transaction binding the contract method 0xea0688b3.
+//
+// Solidity: function setHeimdallId(string _heimdallId) returns()
+func (_TestRootChain *TestRootChainTransactor) SetHeimdallId(opts *bind.TransactOpts, _heimdallId string) (types.Transaction, error) {
+ return _TestRootChain.contract.Transact(opts, "setHeimdallId", _heimdallId)
+}
+
+// SetHeimdallId is a paid mutator transaction binding the contract method 0xea0688b3.
+//
+// Solidity: function setHeimdallId(string _heimdallId) returns()
+func (_TestRootChain *TestRootChainSession) SetHeimdallId(_heimdallId string) (types.Transaction, error) {
+ return _TestRootChain.Contract.SetHeimdallId(&_TestRootChain.TransactOpts, _heimdallId)
+}
+
+// SetHeimdallId is a paid mutator transaction binding the contract method 0xea0688b3.
+//
+// Solidity: function setHeimdallId(string _heimdallId) returns()
+func (_TestRootChain *TestRootChainTransactorSession) SetHeimdallId(_heimdallId string) (types.Transaction, error) {
+ return _TestRootChain.Contract.SetHeimdallId(&_TestRootChain.TransactOpts, _heimdallId)
+}
+
+// SetNextHeaderBlock is a paid mutator transaction binding the contract method 0xcf24a0ea.
+//
+// Solidity: function setNextHeaderBlock(uint256 _value) returns()
+func (_TestRootChain *TestRootChainTransactor) SetNextHeaderBlock(opts *bind.TransactOpts, _value *big.Int) (types.Transaction, error) {
+ return _TestRootChain.contract.Transact(opts, "setNextHeaderBlock", _value)
+}
+
+// SetNextHeaderBlock is a paid mutator transaction binding the contract method 0xcf24a0ea.
+//
+// Solidity: function setNextHeaderBlock(uint256 _value) returns()
+func (_TestRootChain *TestRootChainSession) SetNextHeaderBlock(_value *big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.SetNextHeaderBlock(&_TestRootChain.TransactOpts, _value)
+}
+
+// SetNextHeaderBlock is a paid mutator transaction binding the contract method 0xcf24a0ea.
+//
+// Solidity: function setNextHeaderBlock(uint256 _value) returns()
+func (_TestRootChain *TestRootChainTransactorSession) SetNextHeaderBlock(_value *big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.SetNextHeaderBlock(&_TestRootChain.TransactOpts, _value)
+}
+
+// Slash is a paid mutator transaction binding the contract method 0x2da25de3.
+//
+// Solidity: function slash() returns()
+func (_TestRootChain *TestRootChainTransactor) Slash(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _TestRootChain.contract.Transact(opts, "slash")
+}
+
+// Slash is a paid mutator transaction binding the contract method 0x2da25de3.
+//
+// Solidity: function slash() returns()
+func (_TestRootChain *TestRootChainSession) Slash() (types.Transaction, error) {
+ return _TestRootChain.Contract.Slash(&_TestRootChain.TransactOpts)
+}
+
+// Slash is a paid mutator transaction binding the contract method 0x2da25de3.
+//
+// Solidity: function slash() returns()
+func (_TestRootChain *TestRootChainTransactorSession) Slash() (types.Transaction, error) {
+ return _TestRootChain.Contract.Slash(&_TestRootChain.TransactOpts)
+}
+
+// SubmitCheckpoint is a paid mutator transaction binding the contract method 0x4e43e495.
+//
+// Solidity: function submitCheckpoint(bytes data, uint256[3][] ) returns()
+func (_TestRootChain *TestRootChainTransactor) SubmitCheckpoint(opts *bind.TransactOpts, data []byte, arg1 [][3]*big.Int) (types.Transaction, error) {
+ return _TestRootChain.contract.Transact(opts, "submitCheckpoint", data, arg1)
+}
+
+// SubmitCheckpoint is a paid mutator transaction binding the contract method 0x4e43e495.
+//
+// Solidity: function submitCheckpoint(bytes data, uint256[3][] ) returns()
+func (_TestRootChain *TestRootChainSession) SubmitCheckpoint(data []byte, arg1 [][3]*big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.SubmitCheckpoint(&_TestRootChain.TransactOpts, data, arg1)
+}
+
+// SubmitCheckpoint is a paid mutator transaction binding the contract method 0x4e43e495.
+//
+// Solidity: function submitCheckpoint(bytes data, uint256[3][] ) returns()
+func (_TestRootChain *TestRootChainTransactorSession) SubmitCheckpoint(data []byte, arg1 [][3]*big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.SubmitCheckpoint(&_TestRootChain.TransactOpts, data, arg1)
+}
+
+// UpdateDepositId is a paid mutator transaction binding the contract method 0x5391f483.
+//
+// Solidity: function updateDepositId(uint256 numDeposits) returns(uint256 depositId)
+func (_TestRootChain *TestRootChainTransactor) UpdateDepositId(opts *bind.TransactOpts, numDeposits *big.Int) (types.Transaction, error) {
+ return _TestRootChain.contract.Transact(opts, "updateDepositId", numDeposits)
+}
+
+// UpdateDepositId is a paid mutator transaction binding the contract method 0x5391f483.
+//
+// Solidity: function updateDepositId(uint256 numDeposits) returns(uint256 depositId)
+func (_TestRootChain *TestRootChainSession) UpdateDepositId(numDeposits *big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.UpdateDepositId(&_TestRootChain.TransactOpts, numDeposits)
+}
+
+// UpdateDepositId is a paid mutator transaction binding the contract method 0x5391f483.
+//
+// Solidity: function updateDepositId(uint256 numDeposits) returns(uint256 depositId)
+func (_TestRootChain *TestRootChainTransactorSession) UpdateDepositId(numDeposits *big.Int) (types.Transaction, error) {
+ return _TestRootChain.Contract.UpdateDepositId(&_TestRootChain.TransactOpts, numDeposits)
+}
+
+// SetHeimdallIdParams is an auto generated read-only Go binding of transcaction calldata params
+type SetHeimdallIdParams struct {
+ Param__heimdallId string
+}
+
+// Parse SetHeimdallId method from calldata of a transaction
+//
+// Solidity: function setHeimdallId(string _heimdallId) returns()
+func ParseSetHeimdallId(calldata []byte) (*SetHeimdallIdParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["setHeimdallId"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack setHeimdallId params data: %w", err)
+ }
+
+ var paramsResult = new(SetHeimdallIdParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new(string)).(*string)
+
+ return &SetHeimdallIdParams{
+ Param__heimdallId: out0,
+ }, nil
+}
+
+// SetNextHeaderBlockParams is an auto generated read-only Go binding of transcaction calldata params
+type SetNextHeaderBlockParams struct {
+ Param__value *big.Int
+}
+
+// Parse SetNextHeaderBlock method from calldata of a transaction
+//
+// Solidity: function setNextHeaderBlock(uint256 _value) returns()
+func ParseSetNextHeaderBlock(calldata []byte) (*SetNextHeaderBlockParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["setNextHeaderBlock"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack setNextHeaderBlock params data: %w", err)
+ }
+
+ var paramsResult = new(SetNextHeaderBlockParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return &SetNextHeaderBlockParams{
+ Param__value: out0,
+ }, nil
+}
+
+// SubmitCheckpointParams is an auto generated read-only Go binding of transcaction calldata params
+type SubmitCheckpointParams struct {
+ Param_data []byte
+ Param_arg1 [][3]*big.Int
+}
+
+// Parse SubmitCheckpoint method from calldata of a transaction
+//
+// Solidity: function submitCheckpoint(bytes data, uint256[3][] ) returns()
+func ParseSubmitCheckpoint(calldata []byte) (*SubmitCheckpointParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["submitCheckpoint"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack submitCheckpoint params data: %w", err)
+ }
+
+ var paramsResult = new(SubmitCheckpointParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte)
+ out1 := *abi.ConvertType(out[1], new([][3]*big.Int)).(*[][3]*big.Int)
+
+ return &SubmitCheckpointParams{
+ Param_data: out0, Param_arg1: out1,
+ }, nil
+}
+
+// UpdateDepositIdParams is an auto generated read-only Go binding of transcaction calldata params
+type UpdateDepositIdParams struct {
+ Param_numDeposits *big.Int
+}
+
+// Parse UpdateDepositId method from calldata of a transaction
+//
+// Solidity: function updateDepositId(uint256 numDeposits) returns(uint256 depositId)
+func ParseUpdateDepositId(calldata []byte) (*UpdateDepositIdParams, error) {
+ if len(calldata) <= 4 {
+ return nil, fmt.Errorf("invalid calldata input")
+ }
+
+ _abi, err := abi.JSON(strings.NewReader(TestRootChainABI))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get abi of registry metadata: %w", err)
+ }
+
+ out, err := _abi.Methods["updateDepositId"].Inputs.Unpack(calldata[4:])
+ if err != nil {
+ return nil, fmt.Errorf("failed to unpack updateDepositId params data: %w", err)
+ }
+
+ var paramsResult = new(UpdateDepositIdParams)
+ value := reflect.ValueOf(paramsResult).Elem()
+
+ if value.NumField() != len(out) {
+ return nil, fmt.Errorf("failed to match calldata with param field number")
+ }
+
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+ return &UpdateDepositIdParams{
+ Param_numDeposits: out0,
+ }, nil
+}
+
+// TestRootChainNewHeaderBlockIterator is returned from FilterNewHeaderBlock and is used to iterate over the raw logs and unpacked data for NewHeaderBlock events raised by the TestRootChain contract.
+type TestRootChainNewHeaderBlockIterator struct {
+ Event *TestRootChainNewHeaderBlock // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *TestRootChainNewHeaderBlockIterator) Next() bool {
+ // If the iterator failed, stop iterating
+ if it.fail != nil {
+ return false
+ }
+ // If the iterator completed, deliver directly whatever's available
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(TestRootChainNewHeaderBlock)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+ // Iterator still in progress, wait for either a data or an error event
+ select {
+ case log := <-it.logs:
+ it.Event = new(TestRootChainNewHeaderBlock)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *TestRootChainNewHeaderBlockIterator) Error() error {
+ return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *TestRootChainNewHeaderBlockIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+// TestRootChainNewHeaderBlock represents a NewHeaderBlock event raised by the TestRootChain contract.
+type TestRootChainNewHeaderBlock struct {
+ Proposer libcommon.Address
+ HeaderBlockId *big.Int
+ Reward *big.Int
+ Start *big.Int
+ End *big.Int
+ Root [32]byte
+ Raw types.Log // Blockchain specific contextual infos
+}
+
+// FilterNewHeaderBlock is a free log retrieval operation binding the contract event 0xba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb527.
+//
+// Solidity: event NewHeaderBlock(address indexed proposer, uint256 indexed headerBlockId, uint256 indexed reward, uint256 start, uint256 end, bytes32 root)
+func (_TestRootChain *TestRootChainFilterer) FilterNewHeaderBlock(opts *bind.FilterOpts, proposer []libcommon.Address, headerBlockId []*big.Int, reward []*big.Int) (*TestRootChainNewHeaderBlockIterator, error) {
+
+ var proposerRule []interface{}
+ for _, proposerItem := range proposer {
+ proposerRule = append(proposerRule, proposerItem)
+ }
+ var headerBlockIdRule []interface{}
+ for _, headerBlockIdItem := range headerBlockId {
+ headerBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)
+ }
+ var rewardRule []interface{}
+ for _, rewardItem := range reward {
+ rewardRule = append(rewardRule, rewardItem)
+ }
+
+ logs, sub, err := _TestRootChain.contract.FilterLogs(opts, "NewHeaderBlock", proposerRule, headerBlockIdRule, rewardRule)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChainNewHeaderBlockIterator{contract: _TestRootChain.contract, event: "NewHeaderBlock", logs: logs, sub: sub}, nil
+}
+
+// WatchNewHeaderBlock is a free log subscription operation binding the contract event 0xba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb527.
+//
+// Solidity: event NewHeaderBlock(address indexed proposer, uint256 indexed headerBlockId, uint256 indexed reward, uint256 start, uint256 end, bytes32 root)
+func (_TestRootChain *TestRootChainFilterer) WatchNewHeaderBlock(opts *bind.WatchOpts, sink chan<- *TestRootChainNewHeaderBlock, proposer []libcommon.Address, headerBlockId []*big.Int, reward []*big.Int) (event.Subscription, error) {
+
+ var proposerRule []interface{}
+ for _, proposerItem := range proposer {
+ proposerRule = append(proposerRule, proposerItem)
+ }
+ var headerBlockIdRule []interface{}
+ for _, headerBlockIdItem := range headerBlockId {
+ headerBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)
+ }
+ var rewardRule []interface{}
+ for _, rewardItem := range reward {
+ rewardRule = append(rewardRule, rewardItem)
+ }
+
+ logs, sub, err := _TestRootChain.contract.WatchLogs(opts, "NewHeaderBlock", proposerRule, headerBlockIdRule, rewardRule)
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new(TestRootChainNewHeaderBlock)
+ if err := _TestRootChain.contract.UnpackLog(event, "NewHeaderBlock", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+// ParseNewHeaderBlock is a log parse operation binding the contract event 0xba5de06d22af2685c6c7765f60067f7d2b08c2d29f53cdf14d67f6d1c9bfb527.
+//
+// Solidity: event NewHeaderBlock(address indexed proposer, uint256 indexed headerBlockId, uint256 indexed reward, uint256 start, uint256 end, bytes32 root)
+func (_TestRootChain *TestRootChainFilterer) ParseNewHeaderBlock(log types.Log) (*TestRootChainNewHeaderBlock, error) {
+ event := new(TestRootChainNewHeaderBlock)
+ if err := _TestRootChain.contract.UnpackLog(event, "NewHeaderBlock", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
+
+// TestRootChainResetHeaderBlockIterator is returned from FilterResetHeaderBlock and is used to iterate over the raw logs and unpacked data for ResetHeaderBlock events raised by the TestRootChain contract.
+type TestRootChainResetHeaderBlockIterator struct {
+ Event *TestRootChainResetHeaderBlock // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *TestRootChainResetHeaderBlockIterator) Next() bool {
+ // If the iterator failed, stop iterating
+ if it.fail != nil {
+ return false
+ }
+ // If the iterator completed, deliver directly whatever's available
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(TestRootChainResetHeaderBlock)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+ // Iterator still in progress, wait for either a data or an error event
+ select {
+ case log := <-it.logs:
+ it.Event = new(TestRootChainResetHeaderBlock)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *TestRootChainResetHeaderBlockIterator) Error() error {
+ return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *TestRootChainResetHeaderBlockIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+// TestRootChainResetHeaderBlock represents a ResetHeaderBlock event raised by the TestRootChain contract.
+type TestRootChainResetHeaderBlock struct {
+ Proposer libcommon.Address
+ HeaderBlockId *big.Int
+ Raw types.Log // Blockchain specific contextual infos
+}
+
+// FilterResetHeaderBlock is a free log retrieval operation binding the contract event 0xca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a488117205.
+//
+// Solidity: event ResetHeaderBlock(address indexed proposer, uint256 indexed headerBlockId)
+func (_TestRootChain *TestRootChainFilterer) FilterResetHeaderBlock(opts *bind.FilterOpts, proposer []libcommon.Address, headerBlockId []*big.Int) (*TestRootChainResetHeaderBlockIterator, error) {
+
+ var proposerRule []interface{}
+ for _, proposerItem := range proposer {
+ proposerRule = append(proposerRule, proposerItem)
+ }
+ var headerBlockIdRule []interface{}
+ for _, headerBlockIdItem := range headerBlockId {
+ headerBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)
+ }
+
+ logs, sub, err := _TestRootChain.contract.FilterLogs(opts, "ResetHeaderBlock", proposerRule, headerBlockIdRule)
+ if err != nil {
+ return nil, err
+ }
+ return &TestRootChainResetHeaderBlockIterator{contract: _TestRootChain.contract, event: "ResetHeaderBlock", logs: logs, sub: sub}, nil
+}
+
+// WatchResetHeaderBlock is a free log subscription operation binding the contract event 0xca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a488117205.
+//
+// Solidity: event ResetHeaderBlock(address indexed proposer, uint256 indexed headerBlockId)
+func (_TestRootChain *TestRootChainFilterer) WatchResetHeaderBlock(opts *bind.WatchOpts, sink chan<- *TestRootChainResetHeaderBlock, proposer []libcommon.Address, headerBlockId []*big.Int) (event.Subscription, error) {
+
+ var proposerRule []interface{}
+ for _, proposerItem := range proposer {
+ proposerRule = append(proposerRule, proposerItem)
+ }
+ var headerBlockIdRule []interface{}
+ for _, headerBlockIdItem := range headerBlockId {
+ headerBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)
+ }
+
+ logs, sub, err := _TestRootChain.contract.WatchLogs(opts, "ResetHeaderBlock", proposerRule, headerBlockIdRule)
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new(TestRootChainResetHeaderBlock)
+ if err := _TestRootChain.contract.UnpackLog(event, "ResetHeaderBlock", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+// ParseResetHeaderBlock is a log parse operation binding the contract event 0xca1d8316287f938830e225956a7bb10fd5a1a1506dd2eb3a476751a488117205.
+//
+// Solidity: event ResetHeaderBlock(address indexed proposer, uint256 indexed headerBlockId)
+func (_TestRootChain *TestRootChainFilterer) ParseResetHeaderBlock(log types.Log) (*TestRootChainResetHeaderBlock, error) {
+ event := new(TestRootChainResetHeaderBlock)
+ if err := _TestRootChain.contract.UnpackLog(event, "ResetHeaderBlock", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
diff --git a/cmd/devnet/contracts/lib/exitpayloadreader.sol b/cmd/devnet/contracts/lib/exitpayloadreader.sol
new file mode 100644
index 00000000000..3a59a3429d1
--- /dev/null
+++ b/cmd/devnet/contracts/lib/exitpayloadreader.sol
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {RLPReader} from "./rlpreader.sol";
+
+library ExitPayloadReader {
+ using RLPReader for bytes;
+ using RLPReader for RLPReader.RLPItem;
+
+ uint8 constant WORD_SIZE = 32;
+
+ struct ExitPayload {
+ RLPReader.RLPItem[] data;
+ }
+
+ struct Receipt {
+ RLPReader.RLPItem[] data;
+ bytes raw;
+ uint256 logIndex;
+ }
+
+ struct Log {
+ RLPReader.RLPItem data;
+ RLPReader.RLPItem[] list;
+ }
+
+ struct LogTopics {
+ RLPReader.RLPItem[] data;
+ }
+
+ // copy paste of private copy() from RLPReader to avoid changing of existing contracts
+ function copy(uint256 src, uint256 dest, uint256 len) private pure {
+ if (len == 0) return;
+
+ // copy as many word sizes as possible
+ for (; len >= WORD_SIZE; len -= WORD_SIZE) {
+ assembly {
+ mstore(dest, mload(src))
+ }
+
+ src += WORD_SIZE;
+ dest += WORD_SIZE;
+ }
+
+ if (len == 0) return;
+
+ // left over bytes. Mask is used to remove unwanted bytes from the word
+ uint256 mask = 256 ** (WORD_SIZE - len) - 1;
+ assembly {
+ let srcpart := and(mload(src), not(mask)) // zero out src
+ let destpart := and(mload(dest), mask) // retrieve the bytes
+ mstore(dest, or(destpart, srcpart))
+ }
+ }
+
+ function toExitPayload(bytes memory data) internal pure returns (ExitPayload memory) {
+ RLPReader.RLPItem[] memory payloadData = data.toRlpItem().toList();
+
+ return ExitPayload(payloadData);
+ }
+
+ function getHeaderNumber(ExitPayload memory payload) internal pure returns (uint256) {
+ return payload.data[0].toUint();
+ }
+
+ function getBlockProof(ExitPayload memory payload) internal pure returns (bytes memory) {
+ return payload.data[1].toBytes();
+ }
+
+ function getBlockNumber(ExitPayload memory payload) internal pure returns (uint256) {
+ return payload.data[2].toUint();
+ }
+
+ function getBlockTime(ExitPayload memory payload) internal pure returns (uint256) {
+ return payload.data[3].toUint();
+ }
+
+ function getTxRoot(ExitPayload memory payload) internal pure returns (bytes32) {
+ return bytes32(payload.data[4].toUint());
+ }
+
+ function getReceiptRoot(ExitPayload memory payload) internal pure returns (bytes32) {
+ return bytes32(payload.data[5].toUint());
+ }
+
+ function getReceipt(ExitPayload memory payload) internal pure returns (Receipt memory receipt) {
+ receipt.raw = payload.data[6].toBytes();
+ RLPReader.RLPItem memory receiptItem = receipt.raw.toRlpItem();
+
+ if (receiptItem.isList()) {
+ // legacy tx
+ receipt.data = receiptItem.toList();
+ } else {
+ // pop first byte before parsing receipt
+ bytes memory typedBytes = receipt.raw;
+ bytes memory result = new bytes(typedBytes.length - 1);
+ uint256 srcPtr;
+ uint256 destPtr;
+ assembly {
+ srcPtr := add(33, typedBytes)
+ destPtr := add(0x20, result)
+ }
+
+ copy(srcPtr, destPtr, result.length);
+ receipt.data = result.toRlpItem().toList();
+ }
+
+ receipt.logIndex = getReceiptLogIndex(payload);
+ return receipt;
+ }
+
+ function getReceiptProof(ExitPayload memory payload) internal pure returns (bytes memory) {
+ return payload.data[7].toBytes();
+ }
+
+ function getBranchMaskAsBytes(ExitPayload memory payload) internal pure returns (bytes memory) {
+ return payload.data[8].toBytes();
+ }
+
+ function getBranchMaskAsUint(ExitPayload memory payload) internal pure returns (uint256) {
+ return payload.data[8].toUint();
+ }
+
+ function getReceiptLogIndex(ExitPayload memory payload) internal pure returns (uint256) {
+ return payload.data[9].toUint();
+ }
+
+ // Receipt methods
+ function toBytes(Receipt memory receipt) internal pure returns (bytes memory) {
+ return receipt.raw;
+ }
+
+ function getLog(Receipt memory receipt) internal pure returns (Log memory) {
+ RLPReader.RLPItem memory logData = receipt.data[3].toList()[receipt.logIndex];
+ return Log(logData, logData.toList());
+ }
+
+ // Log methods
+ function getEmitter(Log memory log) internal pure returns (address) {
+ return RLPReader.toAddress(log.list[0]);
+ }
+
+ function getTopics(Log memory log) internal pure returns (LogTopics memory) {
+ return LogTopics(log.list[1].toList());
+ }
+
+ function getData(Log memory log) internal pure returns (bytes memory) {
+ return log.list[2].toBytes();
+ }
+
+ function toRlpBytes(Log memory log) internal pure returns (bytes memory) {
+ return log.data.toRlpBytes();
+ }
+
+ // LogTopics methods
+ function getField(LogTopics memory topics, uint256 index) internal pure returns (RLPReader.RLPItem memory) {
+ return topics.data[index];
+ }
+}
\ No newline at end of file
diff --git a/cmd/devnet/contracts/lib/merkle.sol b/cmd/devnet/contracts/lib/merkle.sol
new file mode 100644
index 00000000000..876988ce2d7
--- /dev/null
+++ b/cmd/devnet/contracts/lib/merkle.sol
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+library Merkle {
+ function checkMembership(
+ bytes32 leaf,
+ uint256 index,
+ bytes32 rootHash,
+ bytes memory proof
+ ) internal pure returns (bool) {
+ require(proof.length % 32 == 0, "Invalid proof length");
+ uint256 proofHeight = proof.length / 32;
+ // Proof of size n means, height of the tree is n+1.
+ // In a tree of height n+1, max #leafs possible is 2 ^ n
+ require(index < 2 ** proofHeight, "Leaf index is too big");
+
+ bytes32 proofElement;
+ bytes32 computedHash = leaf;
+ for (uint256 i = 32; i <= proof.length; i += 32) {
+ assembly {
+ proofElement := mload(add(proof, i))
+ }
+
+ if (index % 2 == 0) {
+ computedHash = keccak256(abi.encodePacked(computedHash, proofElement));
+ } else {
+ computedHash = keccak256(abi.encodePacked(proofElement, computedHash));
+ }
+
+ index = index / 2;
+ }
+ return computedHash == rootHash;
+ }
+}
\ No newline at end of file
diff --git a/cmd/devnet/contracts/lib/merklepatriciaproof.sol b/cmd/devnet/contracts/lib/merklepatriciaproof.sol
new file mode 100644
index 00000000000..41dbc50cce7
--- /dev/null
+++ b/cmd/devnet/contracts/lib/merklepatriciaproof.sol
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {RLPReader} from "./RLPReader.sol";
+
+library MerklePatriciaProof {
+ /*
+ * @dev Verifies a merkle patricia proof.
+ * @param value The terminating value in the trie.
+ * @param encodedPath The path in the trie leading to value.
+ * @param rlpParentNodes The rlp encoded stack of nodes.
+ * @param root The root hash of the trie.
+ * @return The boolean validity of the proof.
+ */
+ function verify(
+ bytes memory value,
+ bytes memory encodedPath,
+ bytes memory rlpParentNodes,
+ bytes32 root
+ ) internal pure returns (bool verified) {
+ RLPReader.RLPItem memory item = RLPReader.toRlpItem(rlpParentNodes);
+ RLPReader.RLPItem[] memory parentNodes = RLPReader.toList(item);
+
+ bytes memory currentNode;
+ RLPReader.RLPItem[] memory currentNodeList;
+
+ bytes32 nodeKey = root;
+ uint256 pathPtr = 0;
+
+ bytes memory path = _getNibbleArray(encodedPath);
+ if (path.length == 0) {
+ return false;
+ }
+
+ for (uint256 i = 0; i < parentNodes.length; i++) {
+ if (pathPtr > path.length) {
+ return false;
+ }
+
+ currentNode = RLPReader.toRlpBytes(parentNodes[i]);
+ if (nodeKey != keccak256(currentNode)) {
+ return false;
+ }
+ currentNodeList = RLPReader.toList(parentNodes[i]);
+
+ if (currentNodeList.length == 17) {
+ if (pathPtr == path.length) {
+ if (keccak256(RLPReader.toBytes(currentNodeList[16])) == keccak256(value)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ uint8 nextPathNibble = uint8(path[pathPtr]);
+ if (nextPathNibble > 16) {
+ return false;
+ }
+ nodeKey = bytes32(RLPReader.toUintStrict(currentNodeList[nextPathNibble]));
+ pathPtr += 1;
+ } else if (currentNodeList.length == 2) {
+ uint256 traversed = _nibblesToTraverse(RLPReader.toBytes(currentNodeList[0]), path, pathPtr);
+ if (pathPtr + traversed == path.length) {
+ //leaf node
+ if (keccak256(RLPReader.toBytes(currentNodeList[1])) == keccak256(value)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ //extension node
+ if (traversed == 0) {
+ return false;
+ }
+
+ pathPtr += traversed;
+ nodeKey = bytes32(RLPReader.toUintStrict(currentNodeList[1]));
+ } else {
+ return false;
+ }
+ }
+ }
+
+ function _nibblesToTraverse(
+ bytes memory encodedPartialPath,
+ bytes memory path,
+ uint256 pathPtr
+ ) private pure returns (uint256) {
+ uint256 len = 0;
+ // encodedPartialPath has elements that are each two hex characters (1 byte), but partialPath
+ // and slicedPath have elements that are each one hex character (1 nibble)
+ bytes memory partialPath = _getNibbleArray(encodedPartialPath);
+ bytes memory slicedPath = new bytes(partialPath.length);
+
+ // pathPtr counts nibbles in path
+ // partialPath.length is a number of nibbles
+ for (uint256 i = pathPtr; i < pathPtr + partialPath.length; i++) {
+ bytes1 pathNibble = path[i];
+ slicedPath[i - pathPtr] = pathNibble;
+ }
+
+ if (keccak256(partialPath) == keccak256(slicedPath)) {
+ len = partialPath.length;
+ } else {
+ len = 0;
+ }
+ return len;
+ }
+
+ // bytes b must be hp encoded
+ function _getNibbleArray(bytes memory b) internal pure returns (bytes memory) {
+ bytes memory nibbles = "";
+ if (b.length > 0) {
+ uint8 offset;
+ uint8 hpNibble = uint8(_getNthNibbleOfBytes(0, b));
+ if (hpNibble == 1 || hpNibble == 3) {
+ nibbles = new bytes(b.length * 2 - 1);
+ bytes1 oddNibble = _getNthNibbleOfBytes(1, b);
+ nibbles[0] = oddNibble;
+ offset = 1;
+ } else {
+ nibbles = new bytes(b.length * 2 - 2);
+ offset = 0;
+ }
+
+ for (uint256 i = offset; i < nibbles.length; i++) {
+ nibbles[i] = _getNthNibbleOfBytes(i - offset + 2, b);
+ }
+ }
+ return nibbles;
+ }
+
+ function _getNthNibbleOfBytes(uint256 n, bytes memory str) private pure returns (bytes1) {
+ return bytes1(n % 2 == 0 ? uint8(str[n / 2]) / 0x10 : uint8(str[n / 2]) % 0x10);
+ }
+}
\ No newline at end of file
diff --git a/cmd/devnet/contracts/lib/rlpreader.sol b/cmd/devnet/contracts/lib/rlpreader.sol
new file mode 100644
index 00000000000..b2760333e77
--- /dev/null
+++ b/cmd/devnet/contracts/lib/rlpreader.sol
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+ * @author Hamdi Allam hamdi.allam97@gmail.com
+ * Please reach out with any questions or concerns
+ */
+
+pragma solidity ^0.8.0;
+
+library RLPReader {
+ uint8 constant STRING_SHORT_START = 0x80;
+ uint8 constant STRING_LONG_START = 0xb8;
+ uint8 constant LIST_SHORT_START = 0xc0;
+ uint8 constant LIST_LONG_START = 0xf8;
+ uint8 constant WORD_SIZE = 32;
+
+ struct RLPItem {
+ uint256 len;
+ uint256 memPtr;
+ }
+
+ struct Iterator {
+ RLPItem item; // Item that's being iterated over.
+ uint256 nextPtr; // Position of the next item in the list.
+ }
+
+ /*
+ * @dev Returns the next element in the iteration. Reverts if it has not next element.
+ * @param self The iterator.
+ * @return The next element in the iteration.
+ */
+ function next(Iterator memory self) internal pure returns (RLPItem memory) {
+ require(hasNext(self));
+
+ uint256 ptr = self.nextPtr;
+ uint256 itemLength = _itemLength(ptr);
+ self.nextPtr = ptr + itemLength;
+
+ return RLPItem(itemLength, ptr);
+ }
+
+ /*
+ * @dev Returns true if the iteration has more elements.
+ * @param self The iterator.
+ * @return true if the iteration has more elements.
+ */
+ function hasNext(Iterator memory self) internal pure returns (bool) {
+ RLPItem memory item = self.item;
+ return self.nextPtr < item.memPtr + item.len;
+ }
+
+ /*
+ * @param item RLP encoded bytes
+ */
+ function toRlpItem(bytes memory item) internal pure returns (RLPItem memory) {
+ uint256 memPtr;
+ assembly {
+ memPtr := add(item, 0x20)
+ }
+
+ return RLPItem(item.length, memPtr);
+ }
+
+ /*
+ * @dev Create an iterator. Reverts if item is not a list.
+ * @param self The RLP item.
+ * @return An 'Iterator' over the item.
+ */
+ function iterator(RLPItem memory self) internal pure returns (Iterator memory) {
+ require(isList(self));
+
+ uint256 ptr = self.memPtr + _payloadOffset(self.memPtr);
+ return Iterator(self, ptr);
+ }
+
+ /*
+ * @param item RLP encoded bytes
+ */
+ function rlpLen(RLPItem memory item) internal pure returns (uint256) {
+ return item.len;
+ }
+
+ /*
+ * @param item RLP encoded bytes
+ */
+ function payloadLen(RLPItem memory item) internal pure returns (uint256) {
+ return item.len - _payloadOffset(item.memPtr);
+ }
+
+ /*
+ * @param item RLP encoded list in bytes
+ */
+ function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) {
+ require(isList(item));
+
+ uint256 items = numItems(item);
+ RLPItem[] memory result = new RLPItem[](items);
+
+ uint256 memPtr = item.memPtr + _payloadOffset(item.memPtr);
+ uint256 dataLen;
+ for (uint256 i = 0; i < items; i++) {
+ dataLen = _itemLength(memPtr);
+ result[i] = RLPItem(dataLen, memPtr);
+ memPtr = memPtr + dataLen;
+ }
+
+ return result;
+ }
+
+ // @return indicator whether encoded payload is a list. negate this function call for isData.
+ function isList(RLPItem memory item) internal pure returns (bool) {
+ if (item.len == 0) return false;
+
+ uint8 byte0;
+ uint256 memPtr = item.memPtr;
+ assembly {
+ byte0 := byte(0, mload(memPtr))
+ }
+
+ if (byte0 < LIST_SHORT_START) return false;
+ return true;
+ }
+
+ /*
+ * @dev A cheaper version of keccak256(toRlpBytes(item)) that avoids copying memory.
+ * @return keccak256 hash of RLP encoded bytes.
+ */
+ function rlpBytesKeccak256(RLPItem memory item) internal pure returns (bytes32) {
+ uint256 ptr = item.memPtr;
+ uint256 len = item.len;
+ bytes32 result;
+ assembly {
+ result := keccak256(ptr, len)
+ }
+ return result;
+ }
+
+ function payloadLocation(RLPItem memory item) internal pure returns (uint256, uint256) {
+ uint256 offset = _payloadOffset(item.memPtr);
+ uint256 memPtr = item.memPtr + offset;
+ uint256 len = item.len - offset; // data length
+ return (memPtr, len);
+ }
+
+ /*
+ * @dev A cheaper version of keccak256(toBytes(item)) that avoids copying memory.
+ * @return keccak256 hash of the item payload.
+ */
+ function payloadKeccak256(RLPItem memory item) internal pure returns (bytes32) {
+ (uint256 memPtr, uint256 len) = payloadLocation(item);
+ bytes32 result;
+ assembly {
+ result := keccak256(memPtr, len)
+ }
+ return result;
+ }
+
+ /** RLPItem conversions into data types **/
+
+ // @returns raw rlp encoding in bytes
+ function toRlpBytes(RLPItem memory item) internal pure returns (bytes memory) {
+ bytes memory result = new bytes(item.len);
+ if (result.length == 0) return result;
+
+ uint256 ptr;
+ assembly {
+ ptr := add(0x20, result)
+ }
+
+ copy(item.memPtr, ptr, item.len);
+ return result;
+ }
+
+ // any non-zero byte < 128 is considered true
+ function toBoolean(RLPItem memory item) internal pure returns (bool) {
+ require(item.len == 1);
+ uint256 result;
+ uint256 memPtr = item.memPtr;
+ assembly {
+ result := byte(0, mload(memPtr))
+ }
+
+ return result == 0 ? false : true;
+ }
+
+ function toAddress(RLPItem memory item) internal pure returns (address) {
+ // 1 byte for the length prefix
+ require(item.len == 21);
+
+ return address(uint160(toUint(item)));
+ }
+
+ function toUint(RLPItem memory item) internal pure returns (uint256) {
+ require(item.len > 0 && item.len <= 33);
+
+ uint256 offset = _payloadOffset(item.memPtr);
+ uint256 len = item.len - offset;
+
+ uint256 result;
+ uint256 memPtr = item.memPtr + offset;
+ assembly {
+ result := mload(memPtr)
+
+ // shift to the correct location if neccesary
+ if lt(len, 32) {
+ result := div(result, exp(256, sub(32, len)))
+ }
+ }
+
+ return result;
+ }
+
+ // enforces 32 byte length
+ function toUintStrict(RLPItem memory item) internal pure returns (uint256) {
+ // one byte prefix
+ require(item.len == 33);
+
+ uint256 result;
+ uint256 memPtr = item.memPtr + 1;
+ assembly {
+ result := mload(memPtr)
+ }
+
+ return result;
+ }
+
+ function toBytes(RLPItem memory item) internal pure returns (bytes memory) {
+ require(item.len > 0);
+
+ uint256 offset = _payloadOffset(item.memPtr);
+ uint256 len = item.len - offset; // data length
+ bytes memory result = new bytes(len);
+
+ uint256 destPtr;
+ assembly {
+ destPtr := add(0x20, result)
+ }
+
+ copy(item.memPtr + offset, destPtr, len);
+ return result;
+ }
+
+ /*
+ * Private Helpers
+ */
+
+ // @return number of payload items inside an encoded list.
+ function numItems(RLPItem memory item) private pure returns (uint256) {
+ if (item.len == 0) return 0;
+
+ uint256 count = 0;
+ uint256 currPtr = item.memPtr + _payloadOffset(item.memPtr);
+ uint256 endPtr = item.memPtr + item.len;
+ while (currPtr < endPtr) {
+ currPtr = currPtr + _itemLength(currPtr); // skip over an item
+ count++;
+ }
+
+ return count;
+ }
+
+ // @return entire rlp item byte length
+ function _itemLength(uint256 memPtr) private pure returns (uint256) {
+ uint256 itemLen;
+ uint256 byte0;
+ assembly {
+ byte0 := byte(0, mload(memPtr))
+ }
+
+ if (byte0 < STRING_SHORT_START) itemLen = 1;
+ else if (byte0 < STRING_LONG_START) itemLen = byte0 - STRING_SHORT_START + 1;
+ else if (byte0 < LIST_SHORT_START) {
+ assembly {
+ let byteLen := sub(byte0, 0xb7) // # of bytes the actual length is
+ memPtr := add(memPtr, 1) // skip over the first byte
+ /* 32 byte word size */
+ let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to get the len
+ itemLen := add(dataLen, add(byteLen, 1))
+ }
+ } else if (byte0 < LIST_LONG_START) {
+ itemLen = byte0 - LIST_SHORT_START + 1;
+ } else {
+ assembly {
+ let byteLen := sub(byte0, 0xf7)
+ memPtr := add(memPtr, 1)
+
+ let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to the correct length
+ itemLen := add(dataLen, add(byteLen, 1))
+ }
+ }
+
+ return itemLen;
+ }
+
+ // @return number of bytes until the data
+ function _payloadOffset(uint256 memPtr) private pure returns (uint256) {
+ uint256 byte0;
+ assembly {
+ byte0 := byte(0, mload(memPtr))
+ }
+
+ if (byte0 < STRING_SHORT_START) return 0;
+ else if (byte0 < STRING_LONG_START || (byte0 >= LIST_SHORT_START && byte0 < LIST_LONG_START)) return 1;
+ else if (byte0 < LIST_SHORT_START)
+ // being explicit
+ return byte0 - (STRING_LONG_START - 1) + 1;
+ else return byte0 - (LIST_LONG_START - 1) + 1;
+ }
+
+ /*
+ * @param src Pointer to source
+ * @param dest Pointer to destination
+ * @param len Amount of memory to copy from the source
+ */
+ function copy(uint256 src, uint256 dest, uint256 len) private pure {
+ if (len == 0) return;
+
+ // copy as many word sizes as possible
+ for (; len >= WORD_SIZE; len -= WORD_SIZE) {
+ assembly {
+ mstore(dest, mload(src))
+ }
+
+ src += WORD_SIZE;
+ dest += WORD_SIZE;
+ }
+
+ if (len == 0) return;
+
+ // left over bytes. Mask is used to remove unwanted bytes from the word
+ uint256 mask = 256 ** (WORD_SIZE - len) - 1;
+
+ assembly {
+ let srcpart := and(mload(src), not(mask)) // zero out src
+ let destpart := and(mload(dest), mask) // retrieve the bytes
+ mstore(dest, or(destpart, srcpart))
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/devnet/contracts/lib/safemath.sol b/cmd/devnet/contracts/lib/safemath.sol
new file mode 100644
index 00000000000..0a83a12b8ba
--- /dev/null
+++ b/cmd/devnet/contracts/lib/safemath.sol
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+// https://github.com/ConsenSysMesh/openzeppelin-solidity/blob/master/contracts/math/SafeMath.sol
+pragma solidity ^0.8.0;
+
+
+/**
+ * @title SafeMath
+ * @dev Math operations with safety checks that throw on error
+ */
+library SafeMath {
+
+ /**
+ * @dev Multiplies two numbers, throws on overflow.
+ */
+ function mul(uint256 a, uint256 b) internal pure returns (uint256 c) {
+ if (a == 0) {
+ return 0;
+ }
+ c = a * b;
+ assert(c / a == b);
+ return c;
+ }
+
+ /**
+ * @dev Integer division of two numbers, truncating the quotient.
+ */
+ function div(uint256 a, uint256 b) internal pure returns (uint256) {
+ // assert(b > 0); // Solidity automatically throws when dividing by 0
+ // uint256 c = a / b;
+ // assert(a == b * c + a % b); // There is no case in which this doesn't hold
+ return a / b;
+ }
+
+ /**
+ * @dev Subtracts two numbers, throws on overflow (i.e. if subtrahend is greater than minuend).
+ */
+ function sub(uint256 a, uint256 b) internal pure returns (uint256) {
+ assert(b <= a);
+ return a - b;
+ }
+
+ /**
+ * @dev Adds two numbers, throws on overflow.
+ */
+ function add(uint256 a, uint256 b) internal pure returns (uint256 c) {
+ c = a + b;
+ assert(c >= a);
+ return c;
+ }
+}
diff --git a/cmd/devnet/contracts/rootreceiver.sol b/cmd/devnet/contracts/rootreceiver.sol
new file mode 100644
index 00000000000..855b042af1b
--- /dev/null
+++ b/cmd/devnet/contracts/rootreceiver.sol
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {RLPReader} from "lib/rlpreader.sol";
+import {MerklePatriciaProof} from "lib/merklepatriciaproof.sol";
+import {Merkle} from "lib/Merkle.sol";
+import "lib/exitpayloadreader.sol";
+
+contract ICheckpointManager {
+ struct HeaderBlock {
+ bytes32 root;
+ uint256 start;
+ uint256 end;
+ uint256 createdAt;
+ address proposer;
+ }
+
+ /**
+ * @notice mapping of checkpoint header numbers to block details
+ * @dev These checkpoints are submited by plasma contracts
+ */
+ mapping(uint256 => HeaderBlock) public headerBlocks;
+}
+
+contract RootReceiver {
+ using RLPReader for RLPReader.RLPItem;
+ using Merkle for bytes32;
+ using ExitPayloadReader for bytes;
+ using ExitPayloadReader for ExitPayloadReader.ExitPayload;
+ using ExitPayloadReader for ExitPayloadReader.Log;
+ using ExitPayloadReader for ExitPayloadReader.LogTopics;
+ using ExitPayloadReader for ExitPayloadReader.Receipt;
+
+ // keccak256(MessageSent(bytes))
+ bytes32 public constant SEND_MESSAGE_EVENT_SIG = 0x8c5261668696ce22758910d05bab8f186d6eb247ceac2af2e82c7dc17669b036;
+
+ // root chain manager
+ ICheckpointManager public checkpointManager;
+
+ // storage to avoid duplicate exits
+ mapping(bytes32 => bool) public processedExits;
+ mapping(address => uint) public senders;
+
+ event received(address _source, uint256 _amount);
+
+ constructor(address _checkpointManager) {
+ checkpointManager = ICheckpointManager(_checkpointManager);
+ }
+
+ function _validateAndExtractMessage(bytes memory inputData) internal returns (address, bytes memory) {
+ ExitPayloadReader.ExitPayload memory payload = inputData.toExitPayload();
+
+ bytes memory branchMaskBytes = payload.getBranchMaskAsBytes();
+ uint256 blockNumber = payload.getBlockNumber();
+ // checking if exit has already been processed
+ // unique exit is identified using hash of (blockNumber, branchMask, receiptLogIndex)
+ bytes32 exitHash = keccak256(
+ abi.encodePacked(
+ blockNumber,
+ // first 2 nibbles are dropped while generating nibble array
+ // this allows branch masks that are valid but bypass exitHash check (changing first 2 nibbles only)
+ // so converting to nibble array and then hashing it
+ MerklePatriciaProof._getNibbleArray(branchMaskBytes),
+ payload.getReceiptLogIndex()
+ )
+ );
+ require(processedExits[exitHash] == false, "FxRootTunnel: EXIT_ALREADY_PROCESSED");
+ processedExits[exitHash] = true;
+
+ ExitPayloadReader.Receipt memory receipt = payload.getReceipt();
+ ExitPayloadReader.Log memory log = receipt.getLog();
+
+ // check child tunnel
+ //require(fxChildTunnel == log.getEmitter(), "FxRootTunnel: INVALID_FX_CHILD_TUNNEL");
+
+ bytes32 receiptRoot = payload.getReceiptRoot();
+ // verify receipt inclusion
+ require(
+ MerklePatriciaProof.verify(receipt.toBytes(), branchMaskBytes, payload.getReceiptProof(), receiptRoot),
+ "RootTunnel: INVALID_RECEIPT_PROOF"
+ );
+
+ // verify checkpoint inclusion
+ _checkBlockMembershipInCheckpoint(
+ blockNumber,
+ payload.getBlockTime(),
+ payload.getTxRoot(),
+ receiptRoot,
+ payload.getHeaderNumber(),
+ payload.getBlockProof()
+ );
+
+ ExitPayloadReader.LogTopics memory topics = log.getTopics();
+
+ require(
+ bytes32(topics.getField(0).toUint()) == SEND_MESSAGE_EVENT_SIG, // topic0 is event sig
+ "FxRootTunnel: INVALID_SIGNATURE"
+ );
+
+ // received message data
+ bytes memory message = abi.decode(log.getData(), (bytes)); // event decodes params again, so decoding bytes to get message
+ return (log.getEmitter(), message);
+ }
+
+ function _checkBlockMembershipInCheckpoint(
+ uint256 blockNumber,
+ uint256 blockTime,
+ bytes32 txRoot,
+ bytes32 receiptRoot,
+ uint256 headerNumber,
+ bytes memory blockProof
+ ) private view {
+ (bytes32 headerRoot, uint256 startBlock, , , ) = checkpointManager.headerBlocks(headerNumber);
+
+ require(
+ keccak256(abi.encodePacked(blockNumber, blockTime, txRoot, receiptRoot)).checkMembership(
+ blockNumber - startBlock,
+ headerRoot,
+ blockProof
+ ),
+ "FxRootTunnel: INVALID_HEADER"
+ );
+ }
+
+ /**
+ * @notice receive message from L2 to L1, validated by proof
+ * @dev This function verifies if the transaction actually happened on child chain
+ *
+ * @param inputData RLP encoded data of the reference tx containing following list of fields
+ * 0 - headerNumber - Checkpoint header block number containing the reference tx
+ * 1 - blockProof - Proof that the block header (in the child chain) is a leaf in the submitted merkle root
+ * 2 - blockNumber - Block number containing the reference tx on child chain
+ * 3 - blockTime - Reference tx block time
+ * 4 - txRoot - Transactions root of block
+ * 5 - receiptRoot - Receipts root of block
+ * 6 - receipt - Receipt of the reference transaction
+ * 7 - receiptProof - Merkle proof of the reference receipt
+ * 8 - branchMask - 32 bits denoting the path of receipt in merkle tree
+ * 9 - receiptLogIndex - Log Index to read from the receipt
+ */
+ function receiveMessage(bytes memory inputData) public virtual {
+ (address sender, bytes memory message) = _validateAndExtractMessage(inputData);
+ _processMessageFromChild(sender, message);
+ }
+
+ function _processMessageFromChild(address /*sender*/, bytes memory data) internal {
+ (address receiver, address from, uint amount) = abi.decode(data, (address, address, uint));
+ require(receiver == address(this), "Invalid receiver");
+ uint total = senders[from];
+ senders[from] = total + amount;
+
+ emit received(from, amount);
+ }
+}
\ No newline at end of file
diff --git a/cmd/devnet/contracts/steps/l1l2transfers.go b/cmd/devnet/contracts/steps/l1l2transfers.go
index 3cf6d41b771..74cb162a02f 100644
--- a/cmd/devnet/contracts/steps/l1l2transfers.go
+++ b/cmd/devnet/contracts/steps/l1l2transfers.go
@@ -8,6 +8,7 @@ import (
"math"
"math/big"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/accounts/abi"
"github.com/ledgerwatch/erigon/accounts/abi/bind"
@@ -18,7 +19,7 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
"github.com/ledgerwatch/erigon/cmd/devnet/services"
- "github.com/ledgerwatch/erigon/params/networkname"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
)
@@ -27,8 +28,8 @@ func init() {
scenarios.StepHandler(DeployChildChainReceiver),
scenarios.StepHandler(DeployRootChainSender),
scenarios.StepHandler(GenerateSyncEvents),
- scenarios.StepHandler(ProcessTransfers),
- scenarios.StepHandler(BatchProcessTransfers),
+ scenarios.StepHandler(ProcessRootTransfers),
+ scenarios.StepHandler(BatchProcessRootTransfers),
)
}
@@ -44,9 +45,6 @@ func GenerateSyncEvents(ctx context.Context, senderName string, numberOfTransfer
heimdall := services.Heimdall(ctx)
- waiter, cancel := blocks.BlockWaiter(ctx, contracts.DeploymentChecker)
- defer cancel()
-
stateSender := heimdall.StateSenderContract()
receiver, _ := scenarios.Param[*contracts.ChildReceiver](ctx, "childReceiver")
@@ -69,56 +67,67 @@ func GenerateSyncEvents(ctx context.Context, senderName string, numberOfTransfer
{Name: "amount", Type: Uint256},
}
- //for i := 0; i < numberOfTransfers; i++ {
- sendData, err := args.Pack(sender.Address, big.NewInt(int64(minTransfer)))
+ for i := 0; i < numberOfTransfers; i++ {
+ err := func() error {
+ sendData, err := args.Pack(sender.Address, big.NewInt(int64(minTransfer)))
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
+ }
- transaction, err := stateSender.SyncState(auth, receiverAddress, sendData)
+ waiter, cancel := blocks.BlockWaiter(ctx, contracts.DeploymentChecker)
+ defer cancel()
- if err != nil {
- return err
- }
+ transaction, err := stateSender.SyncState(auth, receiverAddress, sendData)
- block, err := waiter.Await(transaction.Hash())
+ if err != nil {
+ return err
+ }
- if err != nil {
- return fmt.Errorf("Failed to wait for sync block: %w", err)
- }
+ block, err := waiter.Await(transaction.Hash())
- blockNum := block.BlockNumber.Uint64()
+ if err != nil {
+ return fmt.Errorf("Failed to wait for sync block: %w", err)
+ }
- logs, err := stateSender.FilterStateSynced(&bind.FilterOpts{
- Start: blockNum,
- End: &blockNum,
- }, nil, nil)
+ blockNum := block.Number.Uint64()
- if err != nil {
- return fmt.Errorf("Failed to get post sync logs: %w", err)
- }
+ logs, err := stateSender.FilterStateSynced(&bind.FilterOpts{
+ Start: blockNum,
+ End: &blockNum,
+ }, nil, nil)
- sendConfirmed := false
+ if err != nil {
+ return fmt.Errorf("Failed to get post sync logs: %w", err)
+ }
- for logs.Next() {
- if logs.Event.ContractAddress != receiverAddress {
- return fmt.Errorf("Receiver address mismatched: expected: %s, got: %s", receiverAddress, logs.Event.ContractAddress)
- }
+ sendConfirmed := false
- if !bytes.Equal(logs.Event.Data, sendData) {
- return fmt.Errorf("Send data mismatched: expected: %s, got: %s", sendData, logs.Event.Data)
- }
+ for logs.Next() {
+ if logs.Event.ContractAddress != receiverAddress {
+ return fmt.Errorf("Receiver address mismatched: expected: %s, got: %s", receiverAddress, logs.Event.ContractAddress)
+ }
- sendConfirmed = true
- }
+ if !bytes.Equal(logs.Event.Data, sendData) {
+ return fmt.Errorf("Send data mismatched: expected: %s, got: %s", sendData, logs.Event.Data)
+ }
- if !sendConfirmed {
- return fmt.Errorf("No post sync log received")
- }
+ sendConfirmed = true
+ }
+
+ if !sendConfirmed {
+ return fmt.Errorf("No post sync log received")
+ }
- // auth.Nonce = (&big.Int{}).Add(auth.Nonce, big.NewInt(1))
- //}
+ auth.Nonce = (&big.Int{}).Add(auth.Nonce, big.NewInt(1))
+
+ return nil
+ }()
+
+ if err != nil {
+ return err
+ }
+ }
receivedCount := 0
@@ -134,7 +143,7 @@ func GenerateSyncEvents(ctx context.Context, senderName string, numberOfTransfer
}
receivedCount++
- if receivedCount == 1 /*numberOfTransfers*/ {
+ if receivedCount == numberOfTransfers {
break
}
}
@@ -171,7 +180,7 @@ func DeployRootChainSender(ctx context.Context, deployerName string) (context.Co
return nil, err
}
- devnet.Logger(ctx).Info("RootSender deployed", "chain", networkname.BorDevnetChainName, "block", block.BlockNumber, "addr", address)
+ devnet.Logger(ctx).Info("RootSender deployed", "chain", networkname.DevChainName, "block", block.Number, "addr", address)
return scenarios.WithParam(ctx, "rootSenderAddress", address).
WithParam("rootSender", contract), nil
@@ -196,13 +205,13 @@ func DeployChildChainReceiver(ctx context.Context, deployerName string) (context
return nil, err
}
- devnet.Logger(ctx).Info("ChildReceiver deployed", "chain", networkname.BorDevnetChainName, "block", block.BlockNumber, "addr", address)
+ devnet.Logger(ctx).Info("ChildReceiver deployed", "chain", networkname.BorDevnetChainName, "block", block.Number, "addr", address)
return scenarios.WithParam(ctx, "childReceiverAddress", address).
WithParam("childReceiver", contract), nil
}
-func ProcessTransfers(ctx context.Context, sourceName string, numberOfTransfers int, minTransfer int, maxTransfer int) error {
+func ProcessRootTransfers(ctx context.Context, sourceName string, numberOfTransfers int, minTransfer int, maxTransfer int) error {
source := accounts.GetAccount(sourceName)
ctx = devnet.WithCurrentNetwork(ctx, networkname.DevChainName)
@@ -260,7 +269,7 @@ func ProcessTransfers(ctx context.Context, sourceName string, numberOfTransfers
}
for _, traceResult := range traceResults {
- callResults, err := node.TraceCall(string(block.BlockNumber), ethapi.CallArgs{
+ callResults, err := node.TraceCall(rpc.AsBlockReference(block.Number), ethapi.CallArgs{
From: &traceResult.Action.From,
To: &traceResult.Action.To,
Data: &traceResult.Action.Input,
@@ -277,7 +286,7 @@ func ProcessTransfers(ctx context.Context, sourceName string, numberOfTransfers
return terr
}
- blockNum := block.BlockNumber.Uint64()
+ blockNum := block.Number.Uint64()
logs, err := stateSender.FilterStateSynced(&bind.FilterOpts{
Start: blockNum,
@@ -352,7 +361,7 @@ func ProcessTransfers(ctx context.Context, sourceName string, numberOfTransfers
return nil
}
-func BatchProcessTransfers(ctx context.Context, sourceName string, batches int, transfersPerBatch, minTransfer int, maxTransfer int) error {
+func BatchProcessRootTransfers(ctx context.Context, sourceName string, batches int, transfersPerBatch, minTransfer int, maxTransfer int) error {
source := accounts.GetAccount(sourceName)
ctx = devnet.WithCurrentNetwork(ctx, networkname.DevChainName)
@@ -416,7 +425,7 @@ func BatchProcessTransfers(ctx context.Context, sourceName string, batches int,
endBlock := uint64(0)
for _, block := range blocks {
- blockNum := block.BlockNumber.Uint64()
+ blockNum := block.Number.Uint64()
if blockNum < startBlock {
startBlock = blockNum
diff --git a/cmd/devnet/contracts/steps/l2l1transfers.go b/cmd/devnet/contracts/steps/l2l1transfers.go
new file mode 100644
index 00000000000..59481c8d022
--- /dev/null
+++ b/cmd/devnet/contracts/steps/l2l1transfers.go
@@ -0,0 +1,297 @@
+package contracts_steps
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/blocks"
+ "github.com/ledgerwatch/erigon/cmd/devnet/contracts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
+)
+
+func init() {
+ scenarios.MustRegisterStepHandlers(
+ scenarios.StepHandler(DeployChildChainSender),
+ scenarios.StepHandler(DeployRootChainReceiver),
+ scenarios.StepHandler(ProcessChildTransfers),
+ )
+}
+
+func DeployChildChainSender(ctx context.Context, deployerName string) (context.Context, error) {
+ deployer := accounts.GetAccount(deployerName)
+ ctx = devnet.WithCurrentNetwork(ctx, networkname.BorDevnetChainName)
+
+ auth, backend, err := contracts.DeploymentTransactor(ctx, deployer.Address)
+
+ if err != nil {
+ return nil, err
+ }
+
+ receiverAddress, _ := scenarios.Param[libcommon.Address](ctx, "rootReceiverAddress")
+
+ waiter, cancel := blocks.BlockWaiter(ctx, contracts.DeploymentChecker)
+ defer cancel()
+
+ address, transaction, contract, err := contracts.DeployChildSender(auth, backend, receiverAddress)
+
+ if err != nil {
+ return nil, err
+ }
+
+ block, err := waiter.Await(transaction.Hash())
+
+ if err != nil {
+ return nil, err
+ }
+
+ devnet.Logger(ctx).Info("ChildSender deployed", "chain", networkname.BorDevnetChainName, "block", block.Number, "addr", address)
+
+ return scenarios.WithParam(ctx, "childSenderAddress", address).
+ WithParam("childSender", contract), nil
+}
+
+func DeployRootChainReceiver(ctx context.Context, deployerName string) (context.Context, error) {
+ deployer := accounts.GetAccount(deployerName)
+ ctx = devnet.WithCurrentNetwork(ctx, networkname.DevChainName)
+
+ auth, backend, err := contracts.DeploymentTransactor(ctx, deployer.Address)
+
+ if err != nil {
+ return nil, err
+ }
+
+ waiter, cancel := blocks.BlockWaiter(ctx, contracts.DeploymentChecker)
+ defer cancel()
+
+ heimdall := services.Heimdall(ctx)
+
+ address, transaction, contract, err := contracts.DeployChildSender(auth, backend, heimdall.RootChainAddress())
+
+ if err != nil {
+ return nil, err
+ }
+
+ block, err := waiter.Await(transaction.Hash())
+
+ if err != nil {
+ return nil, err
+ }
+
+ devnet.Logger(ctx).Info("RootReceiver deployed", "chain", networkname.BorDevnetChainName, "block", block.Number, "addr", address)
+
+ return scenarios.WithParam(ctx, "rootReceiverAddress", address).
+ WithParam("rootReceiver", contract), nil
+}
+
+func ProcessChildTransfers(ctx context.Context, sourceName string, numberOfTransfers int, minTransfer int, maxTransfer int) error {
+ source := accounts.GetAccount(sourceName)
+ ctx = devnet.WithCurrentNetwork(ctx, networkname.DevChainName)
+
+ auth, err := contracts.TransactOpts(ctx, source.Address)
+
+ if err != nil {
+ return err
+ }
+
+ sender, _ := scenarios.Param[*contracts.ChildSender](ctx, "childSender")
+
+ receiver, _ := scenarios.Param[*contracts.RootReceiver](ctx, "rootReceiver")
+ receiverAddress, _ := scenarios.Param[libcommon.Address](ctx, "rootReceiverAddress")
+
+ receivedChan := make(chan *contracts.RootReceiverReceived)
+ receiverSubscription, err := receiver.WatchReceived(&bind.WatchOpts{}, receivedChan)
+
+ if err != nil {
+ return fmt.Errorf("Receiver subscription failed: %w", err)
+ }
+
+ defer receiverSubscription.Unsubscribe()
+
+ Uint256, _ := abi.NewType("uint256", "", nil)
+ Address, _ := abi.NewType("address", "", nil)
+
+ args := abi.Arguments{
+ {Name: "from", Type: Address},
+ {Name: "amount", Type: Uint256},
+ }
+
+ heimdall := services.Heimdall(ctx)
+ proofGenerator := services.ProofGenerator(ctx)
+
+ var sendTxHashes []libcommon.Hash
+ var lastTxBlockNum *big.Int
+ var receiptTopic libcommon.Hash
+
+ zeroHash := libcommon.Hash{}
+
+ for i := 0; i < numberOfTransfers; i++ {
+ amount := accounts.EtherAmount(float64(minTransfer))
+
+ err = func() error {
+ waiter, cancel := blocks.BlockWaiter(ctx, blocks.CompletionChecker)
+ defer cancel()
+
+ transaction, err := sender.SendToRoot(auth, amount)
+
+ if err != nil {
+ return err
+ }
+
+ block, terr := waiter.Await(transaction.Hash())
+
+ if terr != nil {
+ node := devnet.SelectBlockProducer(ctx)
+
+ traceResults, err := node.TraceTransaction(transaction.Hash())
+
+ if err != nil {
+ return fmt.Errorf("Send transaction failure: transaction trace failed: %w", err)
+ }
+
+ for _, traceResult := range traceResults {
+ callResults, err := node.TraceCall(rpc.AsBlockReference(block.Number), ethapi.CallArgs{
+ From: &traceResult.Action.From,
+ To: &traceResult.Action.To,
+ Data: &traceResult.Action.Input,
+ }, requests.TraceOpts.StateDiff, requests.TraceOpts.Trace, requests.TraceOpts.VmTrace)
+
+ if err != nil {
+ return fmt.Errorf("Send transaction failure: trace call failed: %w", err)
+ }
+
+ results, _ := json.MarshalIndent(callResults, " ", " ")
+ fmt.Println(string(results))
+ }
+
+ return terr
+ }
+
+ sendTxHashes = append(sendTxHashes, transaction.Hash())
+ lastTxBlockNum = block.Number
+
+ blockNum := block.Number.Uint64()
+
+ logs, err := sender.FilterMessageSent(&bind.FilterOpts{
+ Start: blockNum,
+ End: &blockNum,
+ })
+
+ if err != nil {
+ return fmt.Errorf("Failed to get post sync logs: %w", err)
+ }
+
+ for logs.Next() {
+ values, err := args.Unpack(logs.Event.Message)
+
+ if err != nil {
+ return fmt.Errorf("Failed unpack log args: %w", err)
+ }
+
+ recceiverAddressValue, ok := values[0].(libcommon.Address)
+
+ if !ok {
+ return fmt.Errorf("Unexpected arg type: expected: %T, got %T", libcommon.Address{}, values[0])
+ }
+
+ sender, ok := values[1].(libcommon.Address)
+
+ if !ok {
+ return fmt.Errorf("Unexpected arg type: expected: %T, got %T", libcommon.Address{}, values[0])
+ }
+
+ sentAmount, ok := values[1].(*big.Int)
+
+ if !ok {
+ return fmt.Errorf("Unexpected arg type: expected: %T, got %T", &big.Int{}, values[1])
+ }
+
+ if recceiverAddressValue != receiverAddress {
+ return fmt.Errorf("Unexpected sender: expected: %s, got %s", receiverAddress, recceiverAddressValue)
+ }
+
+ if sender != source.Address {
+ return fmt.Errorf("Unexpected sender: expected: %s, got %s", source.Address, sender)
+ }
+
+ if amount.Cmp(sentAmount) != 0 {
+ return fmt.Errorf("Unexpected sent amount: expected: %s, got %s", amount, sentAmount)
+ }
+
+ if receiptTopic == zeroHash {
+ receiptTopic = logs.Event.Raw.Topics[0]
+ }
+ }
+
+ return nil
+ }()
+
+ if err != nil {
+ return err
+ }
+
+ auth.Nonce = (&big.Int{}).Add(auth.Nonce, big.NewInt(1))
+ }
+
+ devnet.Logger(ctx).Info("Waiting for checkpoint")
+
+ err = heimdall.AwaitCheckpoint(ctx, lastTxBlockNum)
+
+ if err != nil {
+ return err
+ }
+
+ for _, hash := range sendTxHashes {
+ payload, err := proofGenerator.GenerateExitPayload(ctx, hash, receiptTopic, 0)
+
+ waiter, cancel := blocks.BlockWaiter(ctx, blocks.CompletionChecker)
+ defer cancel()
+
+ if err != nil {
+ return err
+ }
+
+ transaction, err := receiver.ReceiveMessage(auth, payload)
+
+ if err != nil {
+ return err
+ }
+
+ if _, err := waiter.Await(transaction.Hash()); err != nil {
+ return err
+ }
+
+ }
+
+ receivedCount := 0
+
+ devnet.Logger(ctx).Info("Waiting for receive events")
+
+ for received := range receivedChan {
+ if received.Source != source.Address {
+ return fmt.Errorf("Source address mismatched: expected: %s, got: %s", source.Address, received.Source)
+ }
+
+ if received.Amount.Cmp(accounts.EtherAmount(float64(minTransfer))) != 0 {
+ return fmt.Errorf("Amount mismatched: expected: %s, got: %s", accounts.EtherAmount(float64(minTransfer)), received.Amount)
+ }
+
+ receivedCount++
+ if receivedCount == numberOfTransfers {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/cmd/devnet/contracts/steps/subscriber.go b/cmd/devnet/contracts/steps/subscriber.go
index 9fd85b50709..bf9299116b4 100644
--- a/cmd/devnet/contracts/steps/subscriber.go
+++ b/cmd/devnet/contracts/steps/subscriber.go
@@ -5,6 +5,8 @@ import (
"fmt"
"math/big"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
ethereum "github.com/ledgerwatch/erigon"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
@@ -18,8 +20,8 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
"github.com/ledgerwatch/erigon/cmd/devnet/transactions"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
)
func init() {
@@ -60,7 +62,7 @@ func DeployAndCallLogSubscriber(ctx context.Context, deployer string) (*libcommo
blockNum := txToBlockMap[eventHash]
- block, err := node.GetBlockByNumber(blockNum, true)
+ block, err := node.GetBlockByNumber(ctx, rpc.AsBlockNumber(blockNum), true)
if err != nil {
return nil, err
@@ -126,7 +128,7 @@ func EmitFallbackEvent(node devnet.Node, subContract *contracts.Subscription, op
// initializeTransactOps initializes the transactOpts object for a contract transaction
func initializeTransactOps(node devnet.Node, transactor libcommon.Address) (*bind.TransactOpts, error) {
- count, err := node.GetTransactionCount(transactor, requests.BlockNumbers.Latest)
+ count, err := node.GetTransactionCount(transactor, rpc.LatestBlock)
if err != nil {
return nil, fmt.Errorf("failed to get transaction count for address 0x%x: %v", transactor, err)
diff --git a/cmd/devnet/contracts/testrootchain.sol b/cmd/devnet/contracts/testrootchain.sol
new file mode 100644
index 00000000000..1e11bf8c233
--- /dev/null
+++ b/cmd/devnet/contracts/testrootchain.sol
@@ -0,0 +1,329 @@
+
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {RLPReader} from "lib/rlpreader.sol";
+import {SafeMath} from "lib/safemath.sol";
+
+interface IRootChain {
+ function slash() external;
+
+ function submitHeaderBlock(bytes calldata data, bytes calldata sigs)
+ external;
+
+ function submitCheckpoint(bytes calldata data, uint[3][] calldata sigs)
+ external;
+
+ function getLastChildBlock() external view returns (uint256);
+
+ function currentHeaderBlock() external view returns (uint256);
+}
+
+contract RootChainHeader {
+ event NewHeaderBlock(
+ address indexed proposer,
+ uint256 indexed headerBlockId,
+ uint256 indexed reward,
+ uint256 start,
+ uint256 end,
+ bytes32 root
+ );
+ // housekeeping event
+ event ResetHeaderBlock(address indexed proposer, uint256 indexed headerBlockId);
+ struct HeaderBlock {
+ bytes32 root;
+ uint256 start;
+ uint256 end;
+ uint256 createdAt;
+ address proposer;
+ }
+}
+
+contract ProxyStorage {
+ address internal proxyTo;
+}
+
+contract ChainIdMixin {
+ bytes constant public networkId = hex"0539";
+ uint256 constant public CHAINID = 1337;
+}
+
+interface IGovernance {
+ function update(address target, bytes calldata data) external;
+}
+
+contract Governable {
+ IGovernance public governance;
+
+ constructor(address _governance) {
+ governance = IGovernance(_governance);
+ }
+
+ modifier onlyGovernance() {
+ _assertGovernance();
+ _;
+ }
+
+ function _assertGovernance() private view {
+ require(
+ msg.sender == address(governance),
+ "Only governance contract is authorized"
+ );
+ }
+}
+
+contract Registry is Governable {
+ // @todo hardcode constants
+ bytes32 private constant WETH_TOKEN = keccak256("wethToken");
+ bytes32 private constant DEPOSIT_MANAGER = keccak256("depositManager");
+ bytes32 private constant STAKE_MANAGER = keccak256("stakeManager");
+ bytes32 private constant VALIDATOR_SHARE = keccak256("validatorShare");
+ bytes32 private constant WITHDRAW_MANAGER = keccak256("withdrawManager");
+ bytes32 private constant CHILD_CHAIN = keccak256("childChain");
+ bytes32 private constant STATE_SENDER = keccak256("stateSender");
+ bytes32 private constant SLASHING_MANAGER = keccak256("slashingManager");
+
+ address public erc20Predicate;
+ address public erc721Predicate;
+
+ mapping(bytes32 => address) public contractMap;
+ mapping(address => address) public rootToChildToken;
+ mapping(address => address) public childToRootToken;
+ mapping(address => bool) public proofValidatorContracts;
+ mapping(address => bool) public isERC721;
+
+ enum Type {Invalid, ERC20, ERC721, Custom}
+ struct Predicate {
+ Type _type;
+ }
+ mapping(address => Predicate) public predicates;
+
+ event TokenMapped(address indexed rootToken, address indexed childToken);
+ event ProofValidatorAdded(address indexed validator, address indexed from);
+ event ProofValidatorRemoved(address indexed validator, address indexed from);
+ event PredicateAdded(address indexed predicate, address indexed from);
+ event PredicateRemoved(address indexed predicate, address indexed from);
+ event ContractMapUpdated(bytes32 indexed key, address indexed previousContract, address indexed newContract);
+
+ constructor(address _governance) Governable(_governance) {}
+
+ function updateContractMap(bytes32 _key, address _address) external onlyGovernance {
+ emit ContractMapUpdated(_key, contractMap[_key], _address);
+ contractMap[_key] = _address;
+ }
+
+ /**
+ * @dev Map root token to child token
+ * @param _rootToken Token address on the root chain
+ * @param _childToken Token address on the child chain
+ * @param _isERC721 Is the token being mapped ERC721
+ */
+ function mapToken(
+ address _rootToken,
+ address _childToken,
+ bool _isERC721
+ ) external onlyGovernance {
+ require(_rootToken != address(0x0) && _childToken != address(0x0), "INVALID_TOKEN_ADDRESS");
+ rootToChildToken[_rootToken] = _childToken;
+ childToRootToken[_childToken] = _rootToken;
+ isERC721[_rootToken] = _isERC721;
+ //IWithdrawManager(contractMap[WITHDRAW_MANAGER]).createExitQueue(_rootToken);
+ emit TokenMapped(_rootToken, _childToken);
+ }
+
+ function addErc20Predicate(address predicate) public onlyGovernance {
+ require(predicate != address(0x0), "Can not add null address as predicate");
+ erc20Predicate = predicate;
+ addPredicate(predicate, Type.ERC20);
+ }
+
+ function addErc721Predicate(address predicate) public onlyGovernance {
+ erc721Predicate = predicate;
+ addPredicate(predicate, Type.ERC721);
+ }
+
+ function addPredicate(address predicate, Type _type) public onlyGovernance {
+ require(predicates[predicate]._type == Type.Invalid, "Predicate already added");
+ predicates[predicate]._type = _type;
+ emit PredicateAdded(predicate, msg.sender);
+ }
+
+ function removePredicate(address predicate) public onlyGovernance {
+ require(predicates[predicate]._type != Type.Invalid, "Predicate does not exist");
+ delete predicates[predicate];
+ emit PredicateRemoved(predicate, msg.sender);
+ }
+
+ function getValidatorShareAddress() public view returns (address) {
+ return contractMap[VALIDATOR_SHARE];
+ }
+
+ function getWethTokenAddress() public view returns (address) {
+ return contractMap[WETH_TOKEN];
+ }
+
+ function getDepositManagerAddress() public view returns (address) {
+ return contractMap[DEPOSIT_MANAGER];
+ }
+
+ function getStakeManagerAddress() public view returns (address) {
+ return contractMap[STAKE_MANAGER];
+ }
+
+ function getSlashingManagerAddress() public view returns (address) {
+ return contractMap[SLASHING_MANAGER];
+ }
+
+ function getWithdrawManagerAddress() public view returns (address) {
+ return contractMap[WITHDRAW_MANAGER];
+ }
+
+ function getChildChainAndStateSender() public view returns (address, address) {
+ return (contractMap[CHILD_CHAIN], contractMap[STATE_SENDER]);
+ }
+
+ function isTokenMapped(address _token) public view returns (bool) {
+ return rootToChildToken[_token] != address(0x0);
+ }
+
+ function isTokenMappedAndIsErc721(address _token) public view returns (bool) {
+ require(isTokenMapped(_token), "TOKEN_NOT_MAPPED");
+ return isERC721[_token];
+ }
+
+ function isTokenMappedAndGetPredicate(address _token) public view returns (address) {
+ if (isTokenMappedAndIsErc721(_token)) {
+ return erc721Predicate;
+ }
+ return erc20Predicate;
+ }
+
+ function isChildTokenErc721(address childToken) public view returns (bool) {
+ address rootToken = childToRootToken[childToken];
+ require(rootToken != address(0x0), "Child token is not mapped");
+ return isERC721[rootToken];
+ }
+}
+
+contract RootChainStorage is ProxyStorage, RootChainHeader, ChainIdMixin {
+ bytes32 public heimdallId;
+ uint8 public constant VOTE_TYPE = 2;
+
+ uint16 internal constant MAX_DEPOSITS = 10000;
+ uint256 public _nextHeaderBlock = MAX_DEPOSITS;
+ uint256 internal _blockDepositId = 1;
+ mapping(uint256 => HeaderBlock) public headerBlocks;
+ Registry internal registry;
+}
+
+contract TestRootChain is RootChainStorage, IRootChain {
+ using SafeMath for uint256;
+ using RLPReader for bytes;
+ using RLPReader for RLPReader.RLPItem;
+
+ modifier onlyDepositManager() {
+ require(msg.sender == registry.getDepositManagerAddress(), "UNAUTHORIZED_DEPOSIT_MANAGER_ONLY");
+ _;
+ }
+
+ function submitHeaderBlock(bytes calldata /*data*/, bytes calldata /*sigs*/) external pure {
+ revert();
+ }
+
+ function submitCheckpoint(bytes calldata data, uint[3][] calldata /*sigs*/) external {
+ (address proposer, uint256 start, uint256 end, bytes32 rootHash, bytes32 accountHash, uint256 borChainID) =
+ abi.decode(data, (address, uint256, uint256, bytes32, bytes32, uint256));
+ require(CHAINID == borChainID, "Invalid bor chain id");
+
+ require(_buildHeaderBlock(proposer, start, end, rootHash), "INCORRECT_HEADER_DATA");
+
+ // check if it is better to keep it in local storage instead
+ /*IStakeManager stakeManager = IStakeManager(registry.getStakeManagerAddress());
+ uint256 _reward = stakeManager.checkSignatures(
+ end.sub(start).add(1),
+ *//**
+ prefix 01 to data
+ 01 represents positive vote on data and 00 is negative vote
+ malicious validator can try to send 2/3 on negative vote so 01 is appended
+ *//*
+ keccak256(abi.encodePacked(bytes(hex"01"), data)),
+ accountHash,
+ proposer,
+ sigs
+ );*/
+
+ //require(_reward != 0, "Invalid checkpoint");
+ emit NewHeaderBlock(proposer, _nextHeaderBlock, 0 /*_reward*/, start, end, rootHash);
+ _nextHeaderBlock = _nextHeaderBlock.add(MAX_DEPOSITS);
+ _blockDepositId = 1;
+ }
+
+ function updateDepositId(uint256 numDeposits) external onlyDepositManager returns (uint256 depositId) {
+ depositId = currentHeaderBlock().add(_blockDepositId);
+ // deposit ids will be (_blockDepositId, _blockDepositId + 1, .... _blockDepositId + numDeposits - 1)
+ _blockDepositId = _blockDepositId.add(numDeposits);
+ require(
+ // Since _blockDepositId is initialized to 1; only (MAX_DEPOSITS - 1) deposits per header block are allowed
+ _blockDepositId <= MAX_DEPOSITS,
+ "TOO_MANY_DEPOSITS"
+ );
+ }
+
+ function getLastChildBlock() external view returns (uint256) {
+ return headerBlocks[currentHeaderBlock()].end;
+ }
+
+ function slash() external {
+ //TODO: future implementation
+ }
+
+ function currentHeaderBlock() public view returns (uint256) {
+ return _nextHeaderBlock.sub(MAX_DEPOSITS);
+ }
+
+ function _buildHeaderBlock(
+ address proposer,
+ uint256 start,
+ uint256 end,
+ bytes32 rootHash
+ ) private returns (bool) {
+ uint256 nextChildBlock;
+ /*
+ The ID of the 1st header block is MAX_DEPOSITS.
+ if _nextHeaderBlock == MAX_DEPOSITS, then the first header block is yet to be submitted, hence nextChildBlock = 0
+ */
+ if (_nextHeaderBlock > MAX_DEPOSITS) {
+ nextChildBlock = headerBlocks[currentHeaderBlock()].end + 1;
+ }
+ if (nextChildBlock != start) {
+ return false;
+ }
+
+ HeaderBlock memory headerBlock = HeaderBlock({
+ root: rootHash,
+ start: nextChildBlock,
+ end: end,
+ createdAt: block.timestamp,
+ proposer: proposer
+ });
+
+ headerBlocks[_nextHeaderBlock] = headerBlock;
+ return true;
+ }
+
+ // Housekeeping function. @todo remove later
+ function setNextHeaderBlock(uint256 _value) public /*onlyOwner*/ {
+ require(_value % MAX_DEPOSITS == 0, "Invalid value");
+ for (uint256 i = _value; i < _nextHeaderBlock; i += MAX_DEPOSITS) {
+ delete headerBlocks[i];
+ }
+ _nextHeaderBlock = _value;
+ _blockDepositId = 1;
+ emit ResetHeaderBlock(msg.sender, _nextHeaderBlock);
+ }
+
+ // Housekeeping function. @todo remove later
+ function setHeimdallId(string memory _heimdallId) public /*onlyOwner*/ {
+ heimdallId = keccak256(abi.encodePacked(_heimdallId));
+ }
+}
diff --git a/cmd/devnet/contracts/util.go b/cmd/devnet/contracts/util.go
index 5eb72a11b48..c9f0f173a77 100644
--- a/cmd/devnet/contracts/util.go
+++ b/cmd/devnet/contracts/util.go
@@ -10,6 +10,8 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
)
func TransactOpts(ctx context.Context, sender libcommon.Address) (*bind.TransactOpts, error) {
@@ -21,7 +23,7 @@ func TransactOpts(ctx context.Context, sender libcommon.Address) (*bind.Transact
return nil, err
}
- count, err := node.GetTransactionCount(sender, requests.BlockNumbers.Pending)
+ count, err := node.GetTransactionCount(sender, rpc.PendingBlock)
if err != nil {
return nil, err
@@ -57,7 +59,7 @@ func Deploy[C any](ctx context.Context, deployer libcommon.Address, deploy func(
func DeployWithOps[C any](ctx context.Context, auth *bind.TransactOpts, deploy func(auth *bind.TransactOpts, backend bind.ContractBackend) (libcommon.Address, types.Transaction, *C, error)) (libcommon.Address, types.Transaction, *C, error) {
node := devnet.SelectNode(ctx)
- count, err := node.GetTransactionCount(auth.From, requests.BlockNumbers.Pending)
+ count, err := node.GetTransactionCount(auth.From, rpc.PendingBlock)
if err != nil {
return libcommon.Address{}, nil, nil, err
@@ -72,7 +74,7 @@ func DeployWithOps[C any](ctx context.Context, auth *bind.TransactOpts, deploy f
}
var DeploymentChecker = blocks.BlockHandlerFunc(
- func(ctx context.Context, node devnet.Node, block *requests.BlockResult, transaction *requests.Transaction) error {
+ func(ctx context.Context, node devnet.Node, block *requests.Block, transaction *jsonrpc.RPCTransaction) error {
if err := blocks.CompletionChecker(ctx, node, block, transaction); err != nil {
return nil
}
diff --git a/cmd/devnet/devnet/context.go b/cmd/devnet/devnet/context.go
index 549bae33f66..54d9faccbc7 100644
--- a/cmd/devnet/devnet/context.go
+++ b/cmd/devnet/devnet/context.go
@@ -1,11 +1,10 @@
package devnet
import (
- context "context"
+ "context"
"math/big"
"github.com/ledgerwatch/log/v3"
- "github.com/urfave/cli/v2"
)
type ctxKey int
@@ -14,7 +13,6 @@ const (
ckLogger ctxKey = iota
ckNetwork
ckNode
- ckCliContext
ckDevnet
)
@@ -71,14 +69,30 @@ type cnet struct {
network *Network
}
-func WithDevnet(ctx context.Context, cliCtx *cli.Context, devnet Devnet, logger log.Logger) Context {
- return WithCliContext(
- context.WithValue(
- context.WithValue(ctx, ckDevnet, devnet),
- ckLogger, logger), cliCtx)
+func WithDevnet(ctx context.Context, devnet Devnet, logger log.Logger) Context {
+ ctx = context.WithValue(ctx, ckDevnet, devnet)
+ ctx = context.WithValue(ctx, ckLogger, logger)
+ return devnetContext{ctx}
}
func WithCurrentNetwork(ctx context.Context, selector interface{}) Context {
+ if current := CurrentNetwork(ctx); current != nil {
+ if devnet, ok := ctx.Value(ckDevnet).(Devnet); ok {
+ selected := devnet.SelectNetwork(ctx, selector)
+
+ if selected == current {
+ if ctx, ok := ctx.(devnetContext); ok {
+ return ctx
+ }
+ return devnetContext{ctx}
+ }
+ }
+ }
+
+ if current := CurrentNode(ctx); current != nil {
+ ctx = context.WithValue(ctx, ckNode, nil)
+ }
+
return devnetContext{context.WithValue(ctx, ckNetwork, &cnet{selector: selector})}
}
@@ -90,14 +104,6 @@ func WithCurrentNode(ctx context.Context, selector interface{}) Context {
return devnetContext{context.WithValue(ctx, ckNode, &cnode{selector: selector})}
}
-func WithCliContext(ctx context.Context, cliCtx *cli.Context) Context {
- return devnetContext{context.WithValue(ctx, ckCliContext, cliCtx)}
-}
-
-func CliContext(ctx context.Context) *cli.Context {
- return ctx.Value(ckCliContext).(*cli.Context)
-}
-
func CurrentChainID(ctx context.Context) *big.Int {
if network := CurrentNetwork(ctx); network != nil {
return network.ChainID()
@@ -134,7 +140,7 @@ func CurrentNetwork(ctx context.Context) *Network {
}
if current := CurrentNode(ctx); current != nil {
- if n, ok := current.(*node); ok {
+ if n, ok := current.(*devnetNode); ok {
return n.network
}
}
diff --git a/cmd/devnet/devnet/devnet.go b/cmd/devnet/devnet/devnet.go
index 310db056802..adb8030945e 100644
--- a/cmd/devnet/devnet/devnet.go
+++ b/cmd/devnet/devnet/devnet.go
@@ -1,13 +1,12 @@
package devnet
import (
- context "context"
+ "context"
"math/big"
"regexp"
"sync"
"github.com/ledgerwatch/log/v3"
- "github.com/urfave/cli/v2"
)
type Devnet []*Network
@@ -22,12 +21,12 @@ func (f NetworkSelectorFunc) Test(ctx context.Context, network *Network) bool {
return f(ctx, network)
}
-func (d Devnet) Start(ctx *cli.Context, logger log.Logger) (Context, error) {
+func (d Devnet) Start(logger log.Logger) (Context, error) {
var wg sync.WaitGroup
errors := make(chan error, len(d))
- runCtx := WithDevnet(context.Background(), ctx, d, logger)
+ runCtx := WithDevnet(context.Background(), d, logger)
for _, network := range d {
wg.Add(1)
diff --git a/cmd/devnet/devnet/network.go b/cmd/devnet/devnet/network.go
index 95b85ee5799..29eee727cdf 100644
--- a/cmd/devnet/devnet/network.go
+++ b/cmd/devnet/devnet/network.go
@@ -1,21 +1,19 @@
package devnet
import (
- context "context"
- "errors"
+ "context"
"fmt"
"math/big"
- "net"
- "net/url"
"os"
"reflect"
"strings"
"sync"
"time"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+
"github.com/ledgerwatch/erigon-lib/common/dbg"
- "github.com/ledgerwatch/erigon/cmd/devnet/args"
- "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
+ devnet_args "github.com/ledgerwatch/erigon/cmd/devnet/args"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/params"
@@ -40,9 +38,13 @@ type Network struct {
BorStateSyncDelay time.Duration
BorPeriod time.Duration
BorMinBlockSize int
+ BorWithMilestones *bool
wg sync.WaitGroup
peers []string
namedNodes map[string]Node
+
+ // max number of blocks to look for a transaction in
+ MaxNumberOfEmptyBlockChecks int
}
func (nw *Network) ChainID() *big.Int {
@@ -55,11 +57,6 @@ func (nw *Network) ChainID() *big.Int {
// Start starts the process for multiple erigon nodes running on the dev chain
func (nw *Network) Start(ctx context.Context) error {
-
- type configurable interface {
- Configure(baseNode args.Node, nodeNumber int) (int, interface{}, error)
- }
-
for _, service := range nw.Services {
if err := service.Start(ctx); err != nil {
nw.Stop()
@@ -67,7 +64,7 @@ func (nw *Network) Start(ctx context.Context) error {
}
}
- baseNode := args.Node{
+ baseNode := devnet_args.NodeArgs{
DataDir: nw.DataDir,
Chain: nw.Chain,
Port: nw.BasePort,
@@ -76,35 +73,33 @@ func (nw *Network) Start(ctx context.Context) error {
Snapshots: nw.Snapshots,
}
- cliCtx := CliContext(ctx)
+ if nw.BorWithMilestones != nil {
+ baseNode.WithHeimdallMilestones = *nw.BorWithMilestones
+ } else {
+ baseNode.WithHeimdallMilestones = utils.WithHeimdallMilestones.Value
+ }
- metricsEnabled := cliCtx.Bool("metrics")
- metricsNode := cliCtx.Int("metrics.node")
nw.namedNodes = map[string]Node{}
- for i, node := range nw.Nodes {
- if configurable, ok := node.(configurable); ok {
+ for i, nodeArgs := range nw.Nodes {
+ {
+ baseNode.StaticPeers = strings.Join(nw.peers, ",")
- base := baseNode
-
- if metricsEnabled && metricsNode == i {
- base.Metrics = true
- base.MetricsPort = cliCtx.Int("metrics.port")
- }
-
- nodePort, args, err := configurable.Configure(base, i)
-
- if err == nil {
- node, err = nw.createNode(fmt.Sprintf("%s:%d", nw.BaseRPCHost, nodePort), args)
+ err := nodeArgs.Configure(baseNode, i)
+ if err != nil {
+ nw.Stop()
+ return err
}
+ node, err := nw.createNode(nodeArgs)
if err != nil {
nw.Stop()
return err
}
nw.Nodes[i] = node
- nw.namedNodes[node.Name()] = node
+ nw.namedNodes[node.GetName()] = node
+ nw.peers = append(nw.peers, nodeArgs.GetEnodeURL())
for _, service := range nw.Services {
service.NodeCreated(ctx, node)
@@ -114,7 +109,6 @@ func (nw *Network) Start(ctx context.Context) error {
for _, node := range nw.Nodes {
err := nw.startNode(node)
-
if err != nil {
nw.Stop()
return err
@@ -123,26 +117,6 @@ func (nw *Network) Start(ctx context.Context) error {
for _, service := range nw.Services {
service.NodeStarted(ctx, node)
}
-
- // get the enode of the node
- // - note this has the side effect of waiting for the node to start
- enode, err := getEnode(node)
-
- if err != nil {
- if errors.Is(err, devnetutils.ErrInvalidEnodeString) {
- continue
- }
-
- nw.Stop()
- return err
- }
-
- nw.peers = append(nw.peers, enode)
- baseNode.StaticPeers = strings.Join(nw.peers, ",")
-
- // TODO do we need to call AddPeer to the nodes to make them aware of this one
- // the current model only works for an appending node network where the peers gossip
- // connections - not sure if this is the case ?
}
return nil
@@ -150,11 +124,13 @@ func (nw *Network) Start(ctx context.Context) error {
var blockProducerFunds = (&big.Int{}).Mul(big.NewInt(1000), big.NewInt(params.Ether))
-func (nw *Network) createNode(nodeAddr string, cfg interface{}) (Node, error) {
- n := &node{
+func (nw *Network) createNode(nodeArgs Node) (Node, error) {
+ nodeAddr := fmt.Sprintf("%s:%d", nw.BaseRPCHost, nodeArgs.GetHttpPort())
+
+ n := &devnetNode{
sync.Mutex{},
requests.NewRequestGenerator(nodeAddr, nw.Logger),
- cfg,
+ nodeArgs,
&nw.wg,
nw,
make(chan error),
@@ -199,16 +175,15 @@ func copyFlags(flags []cli.Flag) []cli.Flag {
func (nw *Network) startNode(n Node) error {
nw.wg.Add(1)
- node := n.(*node)
-
- args, err := args.AsArgs(node.args)
+ node := n.(*devnetNode)
+ args, err := devnet_args.AsArgs(node.nodeArgs)
if err != nil {
return err
}
go func() {
- nw.Logger.Info("Running node", "name", node.Name(), "args", args)
+ nw.Logger.Info("Running node", "name", node.GetName(), "args", args)
// catch any errors and avoid panics if an error occurs
defer func() {
@@ -217,17 +192,17 @@ func (nw *Network) startNode(n Node) error {
return
}
- nw.Logger.Error("catch panic", "node", node.Name(), "err", panicResult, "stack", dbg.Stack())
+ nw.Logger.Error("catch panic", "node", node.GetName(), "err", panicResult, "stack", dbg.Stack())
nw.Stop()
os.Exit(1)
}()
// cli flags are not thread safe and assume only one copy of a flag
// variable is needed per process - which does not work here
- app := erigonapp.MakeApp(node.Name(), node.run, copyFlags(erigoncli.DefaultFlags))
+ app := erigonapp.MakeApp(node.GetName(), node.run, copyFlags(erigoncli.DefaultFlags))
if err := app.Run(args); err != nil {
- nw.Logger.Warn("App run returned error", "node", node.Name(), "err", err)
+ nw.Logger.Warn("App run returned error", "node", node.GetName(), "err", err)
}
}()
@@ -238,50 +213,6 @@ func (nw *Network) startNode(n Node) error {
return nil
}
-// getEnode returns the enode of the netowrk node
-func getEnode(n Node) (string, error) {
- reqCount := 0
-
- for {
- nodeInfo, err := n.AdminNodeInfo()
-
- if err != nil {
- if r, ok := n.(*node); ok {
- if !r.running() {
- return "", err
- }
- }
-
- if reqCount < 10 {
- var urlErr *url.Error
- if errors.As(err, &urlErr) {
- var opErr *net.OpError
- if errors.As(urlErr.Err, &opErr) {
- var callErr *os.SyscallError
- if errors.As(opErr.Err, &callErr) {
- if strings.HasPrefix(callErr.Syscall, "connect") {
- reqCount++
- time.Sleep(time.Duration(devnetutils.RandomInt(5)) * time.Second)
- continue
- }
- }
- }
- }
- }
-
- return "", err
- }
-
- enode, err := devnetutils.UniqueIDFromEnode(nodeInfo.Enode)
-
- if err != nil {
- return "", err
- }
-
- return enode, nil
- }
-}
-
func (nw *Network) Stop() {
type stoppable interface {
Stop()
diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go
index 5f4c62c4de6..4c372721a03 100644
--- a/cmd/devnet/devnet/node.go
+++ b/cmd/devnet/devnet/node.go
@@ -1,15 +1,17 @@
package devnet
import (
- context "context"
+ "context"
"fmt"
"math/big"
+ "net/http"
"sync"
"github.com/c2h5oh/datasize"
"github.com/ledgerwatch/erigon/cmd/devnet/accounts"
"github.com/ledgerwatch/erigon/cmd/devnet/args"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/node/nodecfg"
"github.com/ledgerwatch/erigon/params"
@@ -21,10 +23,14 @@ import (
type Node interface {
requests.RequestGenerator
- Name() string
+ GetName() string
ChainID() *big.Int
+ GetHttpPort() int
+ GetEnodeURL() string
Account() *accounts.Account
IsBlockProducer() bool
+ Configure(baseNode args.NodeArgs, nodeNumber int) error
+ EnableMetrics(port int)
}
type NodeSelector interface {
@@ -38,7 +44,7 @@ func (f NodeSelectorFunc) Test(ctx context.Context, node Node) bool {
}
func HTTPHost(n Node) string {
- if n, ok := n.(*node); ok {
+ if n, ok := n.(*devnetNode); ok {
host := n.nodeCfg.Http.HttpListenAddress
if host == "" {
@@ -51,10 +57,10 @@ func HTTPHost(n Node) string {
return ""
}
-type node struct {
+type devnetNode struct {
sync.Mutex
requests.RequestGenerator
- args interface{}
+ nodeArgs Node
wg *sync.WaitGroup
network *Network
startErr chan error
@@ -63,7 +69,7 @@ type node struct {
ethNode *enode.ErigonNode
}
-func (n *node) Stop() {
+func (n *devnetNode) Stop() {
var toClose *enode.ErigonNode
n.Lock()
@@ -80,13 +86,13 @@ func (n *node) Stop() {
n.done()
}
-func (n *node) running() bool {
+func (n *devnetNode) running() bool {
n.Lock()
defer n.Unlock()
return n.startErr == nil && n.ethNode != nil
}
-func (n *node) done() {
+func (n *devnetNode) done() {
n.Lock()
defer n.Unlock()
if n.wg != nil {
@@ -96,39 +102,43 @@ func (n *node) done() {
}
}
-func (n *node) IsBlockProducer() bool {
- _, isBlockProducer := n.args.(args.BlockProducer)
- return isBlockProducer
+func (n *devnetNode) Configure(args.NodeArgs, int) error {
+ return nil
}
-func (n *node) Account() *accounts.Account {
- if miner, ok := n.args.(args.BlockProducer); ok {
- return miner.Account()
- }
+func (n *devnetNode) IsBlockProducer() bool {
+ return n.nodeArgs.IsBlockProducer()
+}
- return nil
+func (n *devnetNode) Account() *accounts.Account {
+ return n.nodeArgs.Account()
}
-func (n *node) Name() string {
- if named, ok := n.args.(interface{ Name() string }); ok {
- return named.Name()
- }
+func (n *devnetNode) GetName() string {
+ return n.nodeArgs.GetName()
+}
- return ""
+func (n *devnetNode) ChainID() *big.Int {
+ return n.nodeArgs.ChainID()
}
-func (n *node) ChainID() *big.Int {
- if n.ethCfg != nil {
- return n.ethCfg.Genesis.Config.ChainID
- }
+func (n *devnetNode) GetHttpPort() int {
+ return n.nodeArgs.GetHttpPort()
+}
- return nil
+func (n *devnetNode) GetEnodeURL() string {
+ return n.nodeArgs.GetEnodeURL()
+}
+
+func (n *devnetNode) EnableMetrics(int) {
+ panic("not implemented")
}
// run configures, creates and serves an erigon node
-func (n *node) run(ctx *cli.Context) error {
+func (n *devnetNode) run(ctx *cli.Context) error {
var logger log.Logger
var err error
+ var metricsMux *http.ServeMux
defer n.done()
defer func() {
@@ -141,7 +151,7 @@ func (n *node) run(ctx *cli.Context) error {
n.Unlock()
}()
- if logger, err = debug.Setup(ctx, false /* rootLogger */); err != nil {
+ if logger, metricsMux, err = debug.Setup(ctx, false /* rootLogger */); err != nil {
return err
}
@@ -164,7 +174,11 @@ func (n *node) run(ctx *cli.Context) error {
n.ethCfg.Bor.StateSyncConfirmationDelay = map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())}
}
- n.ethNode, err = enode.New(n.nodeCfg, n.ethCfg, logger)
+ n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger)
+
+ if metricsMux != nil {
+ diagnostics.Setup(ctx, metricsMux, n.ethNode)
+ }
n.Lock()
if n.startErr != nil {
diff --git a/cmd/devnet/devnet/service.go b/cmd/devnet/devnet/service.go
index 5ec41a16fa0..520ce3fe740 100644
--- a/cmd/devnet/devnet/service.go
+++ b/cmd/devnet/devnet/service.go
@@ -1,6 +1,6 @@
package devnet
-import context "context"
+import "context"
type Service interface {
Start(context context.Context) error
diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go
index 5e204a41771..6993eb90da4 100644
--- a/cmd/devnet/devnetutils/utils.go
+++ b/cmd/devnet/devnetutils/utils.go
@@ -5,7 +5,6 @@ import (
"encoding/binary"
"errors"
"fmt"
- "io/ioutil"
"net"
"os"
"path/filepath"
@@ -24,7 +23,7 @@ var ErrInvalidEnodeString = errors.New("invalid enode string")
func ClearDevDB(dataDir string, logger log.Logger) error {
logger.Info("Deleting nodes' data folders")
- files, err := ioutil.ReadDir(dataDir)
+ files, err := os.ReadDir(dataDir)
if err != nil {
return err
diff --git a/cmd/devnet/main.go b/cmd/devnet/main.go
index 841ac79f253..69f66e7a795 100644
--- a/cmd/devnet/main.go
+++ b/cmd/devnet/main.go
@@ -1,39 +1,34 @@
package main
import (
- "context"
"fmt"
"os"
"os/signal"
"path/filepath"
- "strings"
-
dbg "runtime/debug"
+ "strings"
"syscall"
"time"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon-lib/common/metrics"
"github.com/ledgerwatch/erigon/cmd/devnet/accounts"
_ "github.com/ledgerwatch/erigon/cmd/devnet/accounts/steps"
_ "github.com/ledgerwatch/erigon/cmd/devnet/admin"
_ "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps"
- account_services "github.com/ledgerwatch/erigon/cmd/devnet/services/accounts"
- "github.com/ledgerwatch/erigon/cmd/devnet/services/bor"
- "github.com/ledgerwatch/erigon/cmd/devnet/transactions"
- "github.com/ledgerwatch/erigon/core/types"
-
- "github.com/ledgerwatch/erigon-lib/common/metrics"
- "github.com/ledgerwatch/erigon/cmd/devnet/args"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
- "github.com/ledgerwatch/erigon/cmd/devnet/services"
- "github.com/ledgerwatch/erigon/params/networkname"
+ "github.com/ledgerwatch/erigon/cmd/devnet/tests"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/cmd/utils/flags"
"github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/turbo/app"
+ erigon_app "github.com/ledgerwatch/erigon/turbo/app"
"github.com/ledgerwatch/erigon/turbo/debug"
"github.com/ledgerwatch/erigon/turbo/logging"
"github.com/urfave/cli/v2"
@@ -59,6 +54,18 @@ var (
Value: "dynamic-tx-node-0",
}
+ BaseRpcHostFlag = cli.StringFlag{
+ Name: "rpc.host",
+ Usage: "The host of the base RPC service",
+ Value: "localhost",
+ }
+
+ BaseRpcPortFlag = cli.IntFlag{
+ Name: "rpc.port",
+ Usage: "The port of the base RPC service",
+ Value: 8545,
+ }
+
WithoutHeimdallFlag = cli.BoolFlag{
Name: "bor.withoutheimdall",
Usage: "Run without Heimdall service",
@@ -69,10 +76,10 @@ var (
Usage: "Run with a devnet local Heimdall service",
}
- HeimdallgRPCAddressFlag = cli.StringFlag{
+ HeimdallGrpcAddressFlag = cli.StringFlag{
Name: "bor.heimdallgRPC",
Usage: "Address of Heimdall gRPC service",
- Value: "localhost:8540",
+ Value: polygon.HeimdallGrpcAddressDefault,
}
BorSprintSizeFlag = cli.IntFlag{
@@ -98,8 +105,8 @@ var (
}
DiagnosticsURLFlag = cli.StringFlag{
- Name: "diagnostics.url",
- Usage: "URL of the diagnostics system provided by the support team, include unique session PIN",
+ Name: "diagnostics.addr",
+ Usage: "Address of the diagnostics system provided by the support team, include unique session PIN",
}
insecureFlag = cli.BoolFlag{
@@ -108,7 +115,7 @@ var (
}
metricsURLsFlag = cli.StringSliceFlag{
- Name: "metrics.urls",
+ Name: "debug.urls",
Usage: "internal flag",
}
@@ -123,26 +130,24 @@ type PanicHandler struct {
func (ph PanicHandler) Log(r *log.Record) error {
fmt.Printf("Msg: %s\nStack: %s\n", r.Msg, dbg.Stack())
- os.Exit(1)
+ os.Exit(2)
return nil
}
func main() {
-
- debug.RaiseFdLimit()
-
app := cli.NewApp()
app.Version = params.VersionWithCommit(params.GitCommit)
- app.Action = func(ctx *cli.Context) error {
- return action(ctx)
- }
+ app.Action = mainContext
+
app.Flags = []cli.Flag{
&DataDirFlag,
&ChainFlag,
&ScenariosFlag,
+ &BaseRpcHostFlag,
+ &BaseRpcPortFlag,
&WithoutHeimdallFlag,
&LocalHeimdallFlag,
- &HeimdallgRPCAddressFlag,
+ &HeimdallGrpcAddressFlag,
&BorSprintSizeFlag,
&MetricsEnabledFlag,
&MetricsNodeFlag,
@@ -156,27 +161,18 @@ func main() {
&logging.LogDirVerbosityFlag,
}
- app.After = func(ctx *cli.Context) error {
- // unsubscribe from all the subscriptions made
- services.UnsubscribeAll()
- return nil
- }
if err := app.Run(os.Args); err != nil {
- fmt.Fprintln(os.Stderr, err)
+ _, _ = fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
}
}
-const (
- recipientAddress = "0x71562b71999873DB5b286dF957af199Ec94617F7"
- sendValue uint64 = 10000
-)
-
-func action(ctx *cli.Context) error {
- dataDir := ctx.String("datadir")
+func setupLogger(ctx *cli.Context) (log.Logger, error) {
+ dataDir := ctx.String(DataDirFlag.Name)
logsDir := filepath.Join(dataDir, "logs")
if err := os.MkdirAll(logsDir, 0755); err != nil {
- return err
+ return nil, err
}
logger := logging.SetupLoggerCtx("devnet", ctx, false /* rootLogger */)
@@ -184,65 +180,92 @@ func action(ctx *cli.Context) error {
// Make root logger fail
log.Root().SetHandler(PanicHandler{})
+ return logger, nil
+}
+
+func handleTerminationSignals(stopFunc func(), logger log.Logger) {
+ signalCh := make(chan os.Signal, 1)
+ signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)
+
+ switch s := <-signalCh; s {
+ case syscall.SIGTERM:
+ logger.Info("Stopping networks")
+ stopFunc()
+ case syscall.SIGINT:
+ logger.Info("Terminating network")
+ os.Exit(-int(syscall.SIGINT))
+ }
+}
+
+func connectDiagnosticsIfEnabled(ctx *cli.Context, logger log.Logger) {
+ metricsEnabled := ctx.Bool(MetricsEnabledFlag.Name)
+ diagnosticsUrl := ctx.String(DiagnosticsURLFlag.Name)
+ if metricsEnabled && len(diagnosticsUrl) > 0 {
+ err := erigon_app.ConnectDiagnostics(ctx, logger)
+ if err != nil {
+ logger.Error("app.ConnectDiagnostics failed", "err", err)
+ }
+ }
+}
+
+func mainContext(ctx *cli.Context) error {
+ debug.RaiseFdLimit()
+
+ logger, err := setupLogger(ctx)
+ if err != nil {
+ return err
+ }
+
// clear all the dev files
+ dataDir := ctx.String(DataDirFlag.Name)
if err := devnetutils.ClearDevDB(dataDir, logger); err != nil {
return err
}
network, err := initDevnet(ctx, logger)
-
if err != nil {
return err
}
- metrics := ctx.Bool("metrics")
-
- if metrics {
- // TODO should get this from the network as once we have multiple nodes we'll need to iterate the
- // nodes and create a series of urls - for the moment only one is supported
- ctx.Set("metrics.urls", fmt.Sprintf("http://localhost:%d/debug/metrics/", ctx.Int("metrics.port")))
+ if err = initDevnetMetrics(ctx, network); err != nil {
+ return err
}
- // start the network with each node in a go routine
logger.Info("Starting Devnet")
-
- runCtx, err := network.Start(ctx, logger)
-
+ runCtx, err := network.Start(logger)
if err != nil {
- return fmt.Errorf("Devnet start failed: %w", err)
+ return fmt.Errorf("devnet start failed: %w", err)
}
- go func() {
- signalCh := make(chan os.Signal, 1)
- signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)
-
- switch s := <-signalCh; s {
- case syscall.SIGTERM:
- logger.Info("Stopping networks")
- network.Stop()
- case syscall.SIGINT:
- logger.Info("Terminating network")
- os.Exit(-int(syscall.SIGINT))
- }
- }()
-
- diagnosticsUrl := ctx.String("diagnostics.url")
+ go handleTerminationSignals(network.Stop, logger)
+ go connectDiagnosticsIfEnabled(ctx, logger)
- if metrics && len(diagnosticsUrl) > 0 {
- go func() {
- app.ConnectDiagnostics(ctx, logger)
- }()
+ enabledScenarios := strings.Split(ctx.String(ScenariosFlag.Name), ",")
+ if err = allScenarios(runCtx).Run(runCtx, enabledScenarios...); err != nil {
+ return err
}
- if ctx.String(ChainFlag.Name) == networkname.DevChainName {
- transactions.MaxNumberOfEmptyBlockChecks = 30
+ if ctx.Bool(WaitFlag.Name) {
+ logger.Info("Waiting")
+ network.Wait()
+ } else {
+ logger.Info("Stopping Networks")
+ network.Stop()
}
- scenarios.Scenarios{
+ return nil
+}
+
+func allScenarios(runCtx devnet.Context) scenarios.Scenarios {
+ // unsubscribe from all the subscriptions made
+ defer services.UnsubscribeAll()
+
+ const recipientAddress = "0x71562b71999873DB5b286dF957af199Ec94617F7"
+ const sendValue uint64 = 10000
+
+ return scenarios.Scenarios{
"dynamic-tx-node-0": {
- Context: runCtx.
- WithCurrentNetwork(0).
- WithCurrentNode(0),
+ Context: runCtx.WithCurrentNetwork(0).WithCurrentNode(0),
Steps: []*scenarios.Step{
{Text: "InitSubscriptions", Args: []any{[]requests.SubMethod{requests.Methods.ETHNewHeads}}},
{Text: "PingErigonRpc"},
@@ -276,187 +299,66 @@ func action(ctx *cli.Context) error {
{Text: "DeployChildChainReceiver", Args: []any{"child-funder"}},
{Text: "DeployRootChainSender", Args: []any{"root-funder"}},
{Text: "GenerateSyncEvents", Args: []any{"root-funder", 10, 2, 2}},
- {Text: "ProcessTransfers", Args: []any{"root-funder", 10, 2, 2}},
- {Text: "BatchProcessTransfers", Args: []any{"root-funder", 1, 10, 2, 2}},
+ {Text: "ProcessRootTransfers", Args: []any{"root-funder", 10, 2, 2}},
+ {Text: "BatchProcessRootTransfers", Args: []any{"root-funder", 1, 10, 2, 2}},
+ },
+ },
+ "child-chain-exit": {
+ Steps: []*scenarios.Step{
+ {Text: "CreateAccountWithFunds", Args: []any{networkname.DevChainName, "root-funder", 200.0}},
+ {Text: "CreateAccountWithFunds", Args: []any{networkname.BorDevnetChainName, "child-funder", 200.0}},
+ {Text: "DeployRootChainReceiver", Args: []any{"root-funder"}},
+ {Text: "DeployChildChainSender", Args: []any{"child-funder"}},
+ {Text: "ProcessChildTransfers", Args: []any{"child-funder", 1, 2, 2}},
+ //{Text: "BatchProcessTransfers", Args: []any{"child-funder", 1, 10, 2, 2}},
},
},
- }.Run(runCtx, strings.Split(ctx.String("scenarios"), ",")...)
-
- if ctx.Bool("wait") || (metrics && len(diagnosticsUrl) > 0) {
- logger.Info("Waiting")
- network.Wait()
- } else {
- logger.Info("Stopping Networks")
- network.Stop()
}
-
- return nil
}
func initDevnet(ctx *cli.Context, logger log.Logger) (devnet.Devnet, error) {
dataDir := ctx.String(DataDirFlag.Name)
- chain := ctx.String(ChainFlag.Name)
-
- faucetSource := accounts.NewAccount("faucet-source")
+ chainName := ctx.String(ChainFlag.Name)
+ baseRpcHost := ctx.String(BaseRpcHostFlag.Name)
+ baseRpcPort := ctx.Int(BaseRpcPortFlag.Name)
- switch chain {
+ switch chainName {
case networkname.BorDevnetChainName:
if ctx.Bool(WithoutHeimdallFlag.Name) {
- return []*devnet.Network{
- {
- DataDir: dataDir,
- Chain: networkname.BorDevnetChainName,
- Logger: logger,
- BasePort: 30303,
- BasePrivateApiAddr: "localhost:10090",
- BaseRPCHost: "localhost",
- BaseRPCPort: 8545,
- //Snapshots: true,
- Alloc: types.GenesisAlloc{
- faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
- },
- Services: []devnet.Service{
- account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource),
- },
- Nodes: []devnet.Node{
- args.BlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- WithoutHeimdall: true,
- },
- AccountSlots: 200,
- },
- args.NonBlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- WithoutHeimdall: true,
- },
- },
- },
- }}, nil
+ return tests.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil
+ } else if ctx.Bool(LocalHeimdallFlag.Name) {
+ heimdallGrpcAddr := ctx.String(HeimdallGrpcAddressFlag.Name)
+ sprintSize := uint64(ctx.Int(BorSprintSizeFlag.Name))
+ return tests.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil
} else {
- var heimdallGrpc string
- var services []devnet.Service
+ return tests.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil
+ }
- if ctx.Bool(LocalHeimdallFlag.Name) {
- config := *params.BorDevnetChainConfig
+ case networkname.DevChainName:
+ return tests.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil
- if sprintSize := uint64(ctx.Int(BorSprintSizeFlag.Name)); sprintSize > 0 {
- config.Bor.Sprint = map[string]uint64{"0": sprintSize}
- }
+ default:
+ return nil, fmt.Errorf("unknown network: '%s'", chainName)
+ }
+}
- services = append(services, bor.NewHeimdall(&config, logger))
+func initDevnetMetrics(ctx *cli.Context, network devnet.Devnet) error {
+ metricsEnabled := ctx.Bool(MetricsEnabledFlag.Name)
+ metricsNode := ctx.Int(MetricsNodeFlag.Name)
+ metricsPort := ctx.Int(MetricsPortFlag.Name)
- heimdallGrpc = bor.HeimdallGRpc(devnet.WithCliContext(context.Background(), ctx))
- }
+ if !metricsEnabled {
+ return nil
+ }
- return []*devnet.Network{
- {
- DataDir: dataDir,
- Chain: networkname.BorDevnetChainName,
- Logger: logger,
- BasePort: 30303,
- BasePrivateApiAddr: "localhost:10090",
- BaseRPCHost: "localhost",
- BaseRPCPort: 8545,
- BorStateSyncDelay: 30 * time.Second,
- Services: append(services, account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource)),
- Alloc: types.GenesisAlloc{
- faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
- },
- Nodes: []devnet.Node{
- args.BlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- HeimdallGRpc: heimdallGrpc,
- },
- AccountSlots: 200,
- },
- args.BlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- HeimdallGRpc: heimdallGrpc,
- },
- AccountSlots: 200,
- },
- args.NonBlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- HeimdallGRpc: heimdallGrpc,
- },
- },
- },
- },
- {
- DataDir: dataDir,
- Chain: networkname.DevChainName,
- Logger: logger,
- BasePort: 30403,
- BasePrivateApiAddr: "localhost:10190",
- BaseRPCHost: "localhost",
- BaseRPCPort: 8645,
- Services: append(services, account_services.NewFaucet(networkname.DevChainName, faucetSource)),
- Alloc: types.GenesisAlloc{
- faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
- },
- Nodes: []devnet.Node{
- args.BlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- VMDebug: true,
- HttpCorsDomain: "*",
- },
- DevPeriod: 5,
- AccountSlots: 200,
- },
- args.NonBlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "3",
- },
- },
- },
- }}, nil
+ for _, nw := range network {
+ for i, nodeArgs := range nw.Nodes {
+ if metricsEnabled && (metricsNode == i) {
+ nodeArgs.EnableMetrics(metricsPort)
+ return nil
+ }
}
+ }
- case networkname.DevChainName:
- return []*devnet.Network{
- {
- DataDir: dataDir,
- Chain: networkname.DevChainName,
- Logger: logger,
- BasePrivateApiAddr: "localhost:10090",
- BaseRPCHost: "localhost",
- BaseRPCPort: 8545,
- Alloc: types.GenesisAlloc{
- faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
- },
- Services: []devnet.Service{
- account_services.NewFaucet(networkname.DevChainName, faucetSource),
- },
- Nodes: []devnet.Node{
- args.BlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- },
- AccountSlots: 200,
- },
- args.NonBlockProducer{
- Node: args.Node{
- ConsoleVerbosity: "0",
- DirVerbosity: "5",
- },
- },
- },
- }}, nil
- }
-
- return nil, fmt.Errorf(`Unknown network: "%s"`, chain)
+ return fmt.Errorf("initDevnetMetrics: not found %s=%d", MetricsNodeFlag.Name, metricsNode)
}
diff --git a/cmd/devnet/requests/account.go b/cmd/devnet/requests/account.go
index 329f10b4662..247bb6c9f35 100644
--- a/cmd/devnet/requests/account.go
+++ b/cmd/devnet/requests/account.go
@@ -1,24 +1,16 @@
package requests
import (
+ "context"
"fmt"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
+ "github.com/ledgerwatch/erigon/rpc"
)
-type EthBalance struct {
- CommonResponse
- Balance hexutil.Big `json:"result"`
-}
-
-type EthCode struct {
- CommonResponse
- Code hexutility.Bytes `json:"result"`
-}
-
type DebugAccountAt struct {
CommonResponse
Result AccountResult `json:"result"`
@@ -42,71 +34,41 @@ type StorageResult struct {
Proof []string `json:"proof"`
}
-func (reqGen *requestGenerator) GetCode(address libcommon.Address, blockRef BlockNumber) (hexutility.Bytes, error) {
- var b EthCode
+func (reqGen *requestGenerator) GetCode(address libcommon.Address, blockRef rpc.BlockReference) (hexutility.Bytes, error) {
+ var result hexutility.Bytes
- method, body := reqGen.getCode(address, blockRef)
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return hexutility.Bytes{}, fmt.Errorf("failed to get code: %w", res.Err)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHGetCode, address, blockRef); err != nil {
+ return nil, err
}
- if b.Error != nil {
- return hexutility.Bytes{}, fmt.Errorf("Failed to get code: rpc failed: %w", b.Error)
- }
-
- return b.Code, nil
-}
-
-func (req *requestGenerator) getCode(address libcommon.Address, blockRef BlockNumber) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["0x%x","%s"],"id":%d}`
- return Methods.ETHGetCode, fmt.Sprintf(template, Methods.ETHGetCode, address, blockRef, req.reqID)
+ return result, nil
}
-func (reqGen *requestGenerator) GetBalance(address libcommon.Address, blockNum BlockNumber) (*big.Int, error) {
- var b EthBalance
+func (reqGen *requestGenerator) GetBalance(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error) {
+ var result hexutil.Big
- method, body := reqGen.getBalance(address, blockNum)
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return &big.Int{}, fmt.Errorf("failed to get balance: %w", res.Err)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHGetBalance, address, blockRef); err != nil {
+ return nil, err
}
- if b.Error != nil {
- return &big.Int{}, fmt.Errorf("Failed to get balance: rpc failed: %w", b.Error)
- }
-
- return b.Balance.ToInt(), nil
-}
-
-func (req *requestGenerator) getBalance(address libcommon.Address, blockNum BlockNumber) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["0x%x","%v"],"id":%d}`
- return Methods.ETHGetBalance, fmt.Sprintf(template, Methods.ETHGetBalance, address, blockNum, req.reqID)
+ return result.ToInt(), nil
}
-func (reqGen *requestGenerator) GetTransactionCount(address libcommon.Address, blockNum BlockNumber) (*big.Int, error) {
- var b EthGetTransactionCount
+func (reqGen *requestGenerator) GetTransactionCount(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error) {
+ var result hexutil.Big
- method, body := reqGen.getTransactionCount(address, blockNum)
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return nil, fmt.Errorf("error getting transaction count: %w", res.Err)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHGetTransactionCount, address, blockRef); err != nil {
+ return nil, err
}
- if b.Error != nil {
- return nil, fmt.Errorf("error populating response object: %w", b.Error)
- }
-
- return big.NewInt(int64(b.Result)), nil
-}
-
-func (req *requestGenerator) getTransactionCount(address libcommon.Address, blockNum BlockNumber) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["0x%x","%v"],"id":%d}`
- return Methods.ETHGetTransactionCount, fmt.Sprintf(template, Methods.ETHGetTransactionCount, address, blockNum, req.reqID)
+ return result.ToInt(), nil
}
func (reqGen *requestGenerator) DebugAccountAt(blockHash libcommon.Hash, txIndex uint64, account libcommon.Address) (*AccountResult, error) {
var b DebugAccountAt
method, body := reqGen.debugAccountAt(blockHash, txIndex, account)
- if res := reqGen.call(method, body, &b); res.Err != nil {
+ if res := reqGen.rpcCallJSON(method, body, &b); res.Err != nil {
return nil, fmt.Errorf("failed to get account: %v", res.Err)
}
diff --git a/cmd/devnet/requests/admin.go b/cmd/devnet/requests/admin.go
index cba12a5720f..eca0309b08b 100644
--- a/cmd/devnet/requests/admin.go
+++ b/cmd/devnet/requests/admin.go
@@ -1,29 +1,17 @@
package requests
import (
- "fmt"
+ "context"
"github.com/ledgerwatch/erigon/p2p"
)
-// AdminNodeInfoResponse is the response for calls made to admin_nodeInfo
-type AdminNodeInfoResponse struct {
- CommonResponse
- Result p2p.NodeInfo `json:"result"`
-}
-
func (reqGen *requestGenerator) AdminNodeInfo() (p2p.NodeInfo, error) {
- var b AdminNodeInfoResponse
+ var result p2p.NodeInfo
- method, body := reqGen.adminNodeInfo()
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return p2p.NodeInfo{}, fmt.Errorf("failed to get admin node info: %w", res.Err)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.AdminNodeInfo); err != nil {
+ return p2p.NodeInfo{}, err
}
- return b.Result, nil
-}
-
-func (req *requestGenerator) adminNodeInfo() (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"id":%d}`
- return Methods.AdminNodeInfo, fmt.Sprintf(template, Methods.AdminNodeInfo, req.reqID)
+ return result, nil
}
diff --git a/cmd/devnet/requests/block.go b/cmd/devnet/requests/block.go
index 4f061bae510..67c86c7d716 100644
--- a/cmd/devnet/requests/block.go
+++ b/cmd/devnet/requests/block.go
@@ -1,21 +1,20 @@
package requests
import (
- "fmt"
+ "context"
+ "encoding/json"
"math/big"
+ hexutil2 "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/common/math"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
)
-type EthBlockNumber struct {
- CommonResponse
- Number hexutil.Uint64 `json:"result"`
-}
-
type BlockNumber string
func (bn BlockNumber) Uint64() uint64 {
@@ -27,7 +26,7 @@ func (bn BlockNumber) Uint64() uint64 {
}
func AsBlockNumber(n *big.Int) BlockNumber {
- return BlockNumber(hexutil.EncodeBig(n))
+ return BlockNumber(hexutil2.EncodeBig(n))
}
var BlockNumbers = struct {
@@ -43,107 +42,104 @@ var BlockNumbers = struct {
Pending: "pending",
}
-type EthBlockByNumber struct {
- CommonResponse
- Result BlockResult `json:"result"`
+type BlockWithTxHashes struct {
+ *types.Header
+ Hash libcommon.Hash `json:"hash"`
+ TransactionHashes []libcommon.Hash
}
-type BlockResult struct {
- BlockNumber BlockNumber `json:"number"`
- Difficulty hexutil.Big `json:"difficulty"`
- Miner libcommon.Address `json:"miner"`
- Transactions []Transaction `json:"transactions"`
- TxRoot libcommon.Hash `json:"transactionsRoot"`
- Hash libcommon.Hash `json:"hash"`
-}
+func (b *BlockWithTxHashes) UnmarshalJSON(input []byte) error {
+ var header types.Header
+ if err := json.Unmarshal(input, &header); err != nil {
+ return err
+ }
-type Transaction struct {
- From libcommon.Address `json:"from"`
- To *libcommon.Address `json:"to"` // Pointer because it might be missing
- Hash string `json:"hash"`
- Gas hexutil.Big `json:"gas"`
- GasPrice hexutil.Big `json:"gasPrice"`
- Input hexutility.Bytes `json:"input"`
- Value hexutil.Big `json:"value"`
-}
+ var bd struct {
+ Hash libcommon.Hash `json:"hash"`
+ TransactionHashes []libcommon.Hash `json:"transactions"`
+ }
+ if err := json.Unmarshal(input, &bd); err != nil {
+ return err
+ }
-type EthGetTransactionCount struct {
- CommonResponse
- Result hexutil.Uint64 `json:"result"`
+ b.Header = &header
+ b.Hash = bd.Hash
+ b.TransactionHashes = bd.TransactionHashes
+
+ return nil
}
-type EthSendRawTransaction struct {
- CommonResponse
- TxnHash libcommon.Hash `json:"result"`
+type Block struct {
+ BlockWithTxHashes
+ Transactions []*jsonrpc.RPCTransaction `json:"transactions"`
}
-func (reqGen *requestGenerator) BlockNumber() (uint64, error) {
- var b EthBlockNumber
+func (b *Block) UnmarshalJSON(input []byte) error {
+ var header types.Header
+ if err := json.Unmarshal(input, &header); err != nil {
+ return err
+ }
- method, body := reqGen.blockNumber()
- res := reqGen.call(method, body, &b)
- number := uint64(b.Number)
+ var bd struct {
+ Hash libcommon.Hash `json:"hash"`
+ Transactions []*jsonrpc.RPCTransaction `json:"transactions"`
+ }
+ if err := json.Unmarshal(input, &bd); err != nil {
+ return err
+ }
- if res.Err != nil {
- return number, fmt.Errorf("error getting current block number: %v", res.Err)
+ b.Header = &header
+ b.Hash = bd.Hash
+ b.Transactions = bd.Transactions
+
+ if bd.Transactions != nil {
+ b.TransactionHashes = make([]libcommon.Hash, len(b.Transactions))
+ for _, t := range bd.Transactions {
+ b.TransactionHashes = append(b.TransactionHashes, t.Hash)
+ }
}
- return number, nil
+ return nil
}
-func (req *requestGenerator) blockNumber() (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"id":%d}`
- return Methods.ETHBlockNumber, fmt.Sprintf(template, Methods.ETHBlockNumber, req.reqID)
+type EthGetTransactionCount struct {
+ CommonResponse
+ Result hexutil2.Uint64 `json:"result"`
}
-func (reqGen *requestGenerator) GetBlockByNumber(blockNum uint64, withTxs bool) (*BlockResult, error) {
- var b EthBlockByNumber
-
- method, body := reqGen.getBlockByNumber(blockNum, withTxs)
- res := reqGen.call(method, body, &b)
- if res.Err != nil {
- return nil, fmt.Errorf("error getting block by number: %v", res.Err)
- }
+func (reqGen *requestGenerator) BlockNumber() (uint64, error) {
+ var result hexutil2.Uint64
- if b.Error != nil {
- return nil, fmt.Errorf("error populating response object: %v", b.Error)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHBlockNumber); err != nil {
+ return 0, err
}
- b.Result.BlockNumber = BlockNumber(fmt.Sprint(blockNum))
-
- return &b.Result, nil
-}
-
-func (req *requestGenerator) getBlockByNumber(blockNum uint64, withTxs bool) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["0x%x",%t],"id":%d}`
- return Methods.ETHGetBlockByNumber, fmt.Sprintf(template, Methods.ETHGetBlockByNumber, blockNum, withTxs, req.reqID)
+ return uint64(result), nil
}
-func (req *requestGenerator) getBlockByNumberI(blockNum string, withTxs bool) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["%s",%t],"id":%d}`
- return Methods.ETHGetBlockByNumber, fmt.Sprintf(template, Methods.ETHGetBlockByNumber, blockNum, withTxs, req.reqID)
-}
+func (reqGen *requestGenerator) GetBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, withTxs bool) (*Block, error) {
+ var result Block
+ var err error
-func (reqGen *requestGenerator) GetBlockDetailsByNumber(blockNum string, withTxs bool) (map[string]interface{}, error) {
- var b struct {
- CommonResponse
- Result interface{} `json:"result"`
+ if withTxs {
+ err = reqGen.rpcCall(ctx, &result, Methods.ETHGetBlockByNumber, blockNum, withTxs)
+ } else {
+ err = reqGen.rpcCall(ctx, &result.BlockWithTxHashes, Methods.ETHGetBlockByNumber, blockNum, withTxs)
}
- method, body := reqGen.getBlockByNumberI(blockNum, withTxs)
- res := reqGen.call(method, body, &b)
- if res.Err != nil {
- return nil, fmt.Errorf("error getting block by number: %v", res.Err)
+ if err != nil {
+ return nil, err
}
- if b.Error != nil {
- return nil, fmt.Errorf("error populating response object: %v", b.Error)
- }
+ return &result, nil
+}
+
+func (req *requestGenerator) GetRootHash(ctx context.Context, startBlock uint64, endBlock uint64) (libcommon.Hash, error) {
+ var result string
- m, ok := b.Result.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("cannot convert type")
+ if err := req.rpcCall(ctx, &result, Methods.BorGetRootHash, startBlock, endBlock); err != nil {
+ return libcommon.Hash{}, err
}
- return m, nil
+ return libcommon.HexToHash(result), nil
}
diff --git a/cmd/devnet/requests/event.go b/cmd/devnet/requests/event.go
index 8e116e63f30..2e50a500f8e 100644
--- a/cmd/devnet/requests/event.go
+++ b/cmd/devnet/requests/event.go
@@ -5,10 +5,11 @@ import (
"encoding/json"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
ethereum "github.com/ledgerwatch/erigon"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
)
@@ -33,11 +34,6 @@ func Compare(expected types.Log, actual types.Log) ([]error, bool) {
return errs, len(errs) == 0
}
-type EthGetLogs struct {
- CommonResponse
- Result []types.Log `json:"result"`
-}
-
func NewLog(hash libcommon.Hash, blockNum uint64, address libcommon.Address, topics []libcommon.Hash, data hexutility.Bytes, txIndex uint, blockHash libcommon.Hash, index hexutil.Uint, removed bool) types.Log {
return types.Log{
Address: address,
@@ -53,14 +49,13 @@ func NewLog(hash libcommon.Hash, blockNum uint64, address libcommon.Address, top
}
func (reqGen *requestGenerator) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
- var b EthGetLogs
+ var result []types.Log
- method, body := reqGen.getLogs(query)
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return nil, fmt.Errorf("failed to fetch logs: %v", res.Err)
+ if err := reqGen.rpcCall(ctx, &result, Methods.ETHGetLogs, query); err != nil {
+ return nil, err
}
- return b.Result, nil
+ return result, nil
}
func (reqGen *requestGenerator) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
@@ -90,18 +85,3 @@ func hashSlicesAreEqual(s1, s2 []libcommon.Hash) bool {
return true
}
-
-func (req *requestGenerator) getLogs(query ethereum.FilterQuery) (RPCMethod, string) {
- if len(query.Addresses) == 0 {
- const template = `{"jsonrpc":"2.0","method":%q,"params":[{"fromBlock":"0x%x","toBlock":"0x%x"}],"id":%d}`
- return Methods.ETHGetLogs, fmt.Sprintf(template, Methods.ETHGetLogs, query.FromBlock.Uint64(), query.ToBlock.Uint64(), req.reqID)
- }
-
- const template = `{"jsonrpc":"2.0","method":%q,"params":[{"fromBlock":"0x%x","toBlock":"0x%x","address":"0x%x"}],"id":%d}`
- return Methods.ETHGetLogs, fmt.Sprintf(template, Methods.ETHGetLogs, query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses[0], req.reqID)
-}
-
-func (req *requestGenerator) subscribeLogs(query ethereum.FilterQuery) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":[{"fromBlock":"0x%x","toBlock":"0x%x","address":"0x%x"}],"id":%d}`
- return Methods.ETHGetLogs, fmt.Sprintf(template, Methods.ETHGetLogs, query.FromBlock.Uint64(), query.ToBlock.Uint64(), query.Addresses[0], req.reqID)
-}
diff --git a/cmd/devnet/requests/nopgenerator.go b/cmd/devnet/requests/nopgenerator.go
new file mode 100644
index 00000000000..6385876c4d1
--- /dev/null
+++ b/cmd/devnet/requests/nopgenerator.go
@@ -0,0 +1,105 @@
+package requests
+
+import (
+ "context"
+ "errors"
+ "math/big"
+
+ ethereum "github.com/ledgerwatch/erigon"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/p2p"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
+)
+
+var ErrNotImplemented = errors.New("not implemented")
+
+type NopRequestGenerator struct {
+}
+
+func (n NopRequestGenerator) PingErigonRpc() PingResult {
+ return PingResult{}
+}
+
+func (n NopRequestGenerator) GetBalance(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) AdminNodeInfo() (p2p.NodeInfo, error) {
+ return p2p.NodeInfo{}, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, withTxs bool) (*Block, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetTransactionByHash(hash libcommon.Hash) (*jsonrpc.RPCTransaction, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetTransactionReceipt(ctx context.Context, hash libcommon.Hash) (*types.Receipt, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) TraceTransaction(hash libcommon.Hash) ([]TransactionTrace, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetTransactionCount(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) BlockNumber() (uint64, error) {
+ return 0, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) SendTransaction(signedTx types.Transaction) (libcommon.Hash, error) {
+ return libcommon.Hash{}, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) Subscribe(ctx context.Context, method SubMethod, subChan interface{}, args ...interface{}) (ethereum.Subscription, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) TxpoolContent() (int, int, int, error) {
+ return 0, 0, 0, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) Call(args ethapi.CallArgs, blockRef rpc.BlockReference, overrides *ethapi.StateOverrides) ([]byte, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) TraceCall(blockRef rpc.BlockReference, args ethapi.CallArgs, traceOpts ...TraceOpt) (*TraceCallResult, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) DebugAccountAt(blockHash libcommon.Hash, txIndex uint64, account libcommon.Address) (*AccountResult, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetCode(address libcommon.Address, blockRef rpc.BlockReference) (hexutility.Bytes, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) EstimateGas(args ethereum.CallMsg, blockNum BlockNumber) (uint64, error) {
+ return 0, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GasPrice() (*big.Int, error) {
+ return nil, ErrNotImplemented
+}
+
+func (n NopRequestGenerator) GetRootHash(ctx context.Context, startBlock uint64, endBlock uint64) (libcommon.Hash, error) {
+ return libcommon.Hash{}, ErrNotImplemented
+}
diff --git a/cmd/devnet/requests/request_generator.go b/cmd/devnet/requests/request_generator.go
index 90e2898347e..1c1e04628d1 100644
--- a/cmd/devnet/requests/request_generator.go
+++ b/cmd/devnet/requests/request_generator.go
@@ -3,9 +3,11 @@ package requests
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"math/big"
+ "net"
"net/http"
"strings"
"sync"
@@ -19,6 +21,7 @@ import (
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
"github.com/ledgerwatch/log/v3"
"github.com/valyala/fastjson"
)
@@ -51,23 +54,27 @@ func (e EthError) Error() string {
type RequestGenerator interface {
PingErigonRpc() PingResult
- GetBalance(address libcommon.Address, blockNum BlockNumber) (*big.Int, error)
+ GetBalance(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error)
AdminNodeInfo() (p2p.NodeInfo, error)
- GetBlockDetailsByNumber(blockNum string, withTxs bool) (map[string]interface{}, error)
- GetBlockByNumber(blockNum uint64, withTxs bool) (*BlockResult, error)
+ GetBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, withTxs bool) (*Block, error)
+ GetTransactionByHash(hash libcommon.Hash) (*jsonrpc.RPCTransaction, error)
+ GetTransactionReceipt(ctx context.Context, hash libcommon.Hash) (*types.Receipt, error)
+ TraceTransaction(hash libcommon.Hash) ([]TransactionTrace, error)
+ GetTransactionCount(address libcommon.Address, blockRef rpc.BlockReference) (*big.Int, error)
BlockNumber() (uint64, error)
- GetTransactionCount(address libcommon.Address, blockNum BlockNumber) (*big.Int, error)
SendTransaction(signedTx types.Transaction) (libcommon.Hash, error)
FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
- TxpoolContent() (int, int, int, error)
Subscribe(ctx context.Context, method SubMethod, subChan interface{}, args ...interface{}) (ethereum.Subscription, error)
- TraceCall(blockRef string, args ethapi.CallArgs, traceOpts ...TraceOpt) (*TraceCallResult, error)
- TraceTransaction(hash libcommon.Hash) ([]TransactionTrace, error)
+ TxpoolContent() (int, int, int, error)
+ Call(args ethapi.CallArgs, blockRef rpc.BlockReference, overrides *ethapi.StateOverrides) ([]byte, error)
+ TraceCall(blockRef rpc.BlockReference, args ethapi.CallArgs, traceOpts ...TraceOpt) (*TraceCallResult, error)
DebugAccountAt(blockHash libcommon.Hash, txIndex uint64, account libcommon.Address) (*AccountResult, error)
- GetCode(address libcommon.Address, blockNum BlockNumber) (hexutility.Bytes, error)
+ GetCode(address libcommon.Address, blockRef rpc.BlockReference) (hexutility.Bytes, error)
EstimateGas(args ethereum.CallMsg, blockNum BlockNumber) (uint64, error)
GasPrice() (*big.Int, error)
+
+ GetRootHash(ctx context.Context, startBlock uint64, endBlock uint64) (libcommon.Hash, error)
}
type requestGenerator struct {
@@ -75,6 +82,7 @@ type requestGenerator struct {
reqID int
client *http.Client
subscriptionClient *rpc.Client
+ requestClient *rpc.Client
logger log.Logger
target string
}
@@ -108,40 +116,52 @@ var Methods = struct {
// OTSGetBlockDetails represents the ots_getBlockDetails method
OTSGetBlockDetails RPCMethod
// ETHNewHeads represents the eth_newHeads sub method
- ETHNewHeads SubMethod
- ETHLogs SubMethod
- TraceCall RPCMethod
- TraceTransaction RPCMethod
- DebugAccountAt RPCMethod
- ETHGetCode RPCMethod
- ETHEstimateGas RPCMethod
- ETHGasPrice RPCMethod
+ ETHNewHeads SubMethod
+ ETHLogs SubMethod
+ TraceCall RPCMethod
+ TraceTransaction RPCMethod
+ DebugAccountAt RPCMethod
+ ETHGetCode RPCMethod
+ ETHEstimateGas RPCMethod
+ ETHGasPrice RPCMethod
+ ETHGetTransactionByHash RPCMethod
+ ETHGetTransactionReceipt RPCMethod
+ BorGetRootHash RPCMethod
+ ETHCall RPCMethod
}{
- ETHGetTransactionCount: "eth_getTransactionCount",
- ETHGetBalance: "eth_getBalance",
- ETHSendRawTransaction: "eth_sendRawTransaction",
- ETHGetBlockByNumber: "eth_getBlockByNumber",
- ETHGetBlock: "eth_getBlock",
- ETHGetLogs: "eth_getLogs",
- ETHBlockNumber: "eth_blockNumber",
- AdminNodeInfo: "admin_nodeInfo",
- TxpoolContent: "txpool_content",
- OTSGetBlockDetails: "ots_getBlockDetails",
- ETHNewHeads: "eth_newHeads",
- ETHLogs: "eth_logs",
- TraceCall: "trace_call",
- TraceTransaction: "trace_transaction",
- DebugAccountAt: "debug_accountAt",
- ETHGetCode: "eth_getCode",
- ETHEstimateGas: "eth_estimateGas",
- ETHGasPrice: "eth_gasPrice",
+ ETHGetTransactionCount: "eth_getTransactionCount",
+ ETHGetBalance: "eth_getBalance",
+ ETHSendRawTransaction: "eth_sendRawTransaction",
+ ETHGetBlockByNumber: "eth_getBlockByNumber",
+ ETHGetBlock: "eth_getBlock",
+ ETHGetLogs: "eth_getLogs",
+ ETHBlockNumber: "eth_blockNumber",
+ AdminNodeInfo: "admin_nodeInfo",
+ TxpoolContent: "txpool_content",
+ OTSGetBlockDetails: "ots_getBlockDetails",
+ ETHNewHeads: "eth_newHeads",
+ ETHLogs: "eth_logs",
+ TraceCall: "trace_call",
+ TraceTransaction: "trace_transaction",
+ DebugAccountAt: "debug_accountAt",
+ ETHGetCode: "eth_getCode",
+ ETHEstimateGas: "eth_estimateGas",
+ ETHGasPrice: "eth_gasPrice",
+ ETHGetTransactionByHash: "eth_getTransactionByHash",
+ ETHGetTransactionReceipt: "eth_getTransactionReceipt",
+ BorGetRootHash: "bor_getRootHash",
+ ETHCall: "eth_call",
}
-func (req *requestGenerator) call(method RPCMethod, body string, response interface{}) callResult {
+func (req *requestGenerator) rpcCallJSON(method RPCMethod, body string, response interface{}) callResult {
+ ctx := context.Background()
+ req.reqID++
start := time.Now()
targetUrl := "http://" + req.target
- err := post(req.client, targetUrl, string(method), body, response, req.logger)
- req.reqID++
+
+ err := retryConnects(ctx, func(ctx context.Context) error {
+ return post(ctx, req.client, targetUrl, string(method), body, response, req.logger)
+ })
return callResult{
RequestBody: body,
@@ -153,6 +173,57 @@ func (req *requestGenerator) call(method RPCMethod, body string, response interf
}
}
+func (req *requestGenerator) rpcCall(ctx context.Context, result interface{}, method RPCMethod, args ...interface{}) error {
+ client, err := req.rpcClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ return retryConnects(ctx, func(ctx context.Context) error {
+ return client.CallContext(ctx, result, string(method), args...)
+ })
+}
+
+const connectionTimeout = time.Second * 5
+
+func isConnectionError(err error) bool {
+ var opErr *net.OpError
+ if errors.As(err, &opErr) {
+ return opErr.Op == "dial"
+ }
+ return false
+}
+
+func retryConnects(ctx context.Context, op func(context.Context) error) error {
+ ctx, cancel := context.WithTimeout(ctx, connectionTimeout)
+ defer cancel()
+ return retry(ctx, op, isConnectionError, time.Millisecond*200, nil)
+}
+
+func retry(ctx context.Context, op func(context.Context) error, isRecoverableError func(error) bool, delay time.Duration, lastErr error) error {
+ err := op(ctx)
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, context.DeadlineExceeded) && lastErr != nil {
+ return lastErr
+ }
+ if !isRecoverableError(err) {
+ return err
+ }
+
+ delayTimer := time.NewTimer(delay)
+ select {
+ case <-delayTimer.C:
+ return retry(ctx, op, isRecoverableError, delay, err)
+ case <-ctx.Done():
+ if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+ return err
+ }
+ return ctx.Err()
+ }
+}
+
type PingResult callResult
func (req *requestGenerator) PingErigonRpc() PingResult {
@@ -207,20 +278,40 @@ func NewRequestGenerator(target string, logger log.Logger) RequestGenerator {
client: &http.Client{
Timeout: time.Second * 10,
},
- reqID: 1,
logger: logger,
target: target,
}
}
-func post(client *http.Client, url, method, request string, response interface{}, logger log.Logger) error {
+func (req *requestGenerator) rpcClient(ctx context.Context) (*rpc.Client, error) {
+ if req.requestClient == nil {
+ var err error
+ req.requestClient, err = rpc.DialContext(ctx, "http://"+req.target, req.logger)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return req.requestClient, nil
+}
+
+func post(ctx context.Context, client *http.Client, url, method, request string, response interface{}, logger log.Logger) error {
start := time.Now()
- r, err := client.Post(url, "application/json", strings.NewReader(request)) // nolint:bodyclose
+
+ req, err := http.NewRequest("POST", url, strings.NewReader(request))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req = req.WithContext(ctx)
+
+ r, err := client.Do(req) // nolint:bodyclose
if err != nil {
return fmt.Errorf("client failed to make post request: %w", err)
}
- defer func(Body io.ReadCloser) {
- closeErr := Body.Close()
+
+ defer func(body io.ReadCloser) {
+ closeErr := body.Close()
if closeErr != nil {
logger.Warn("body close", "err", closeErr)
}
@@ -251,11 +342,12 @@ func post(client *http.Client, url, method, request string, response interface{}
// subscribe connects to a websocket client and returns the subscription handler and a channel buffer
func (req *requestGenerator) Subscribe(ctx context.Context, method SubMethod, subChan interface{}, args ...interface{}) (ethereum.Subscription, error) {
- var err error
-
if req.subscriptionClient == nil {
- req.subscriptionClient, err = rpc.DialWebsocket(ctx, "ws://"+req.target, "", req.logger)
-
+ err := retryConnects(ctx, func(ctx context.Context) error {
+ var err error
+ req.subscriptionClient, err = rpc.DialWebsocket(ctx, "ws://"+req.target, "", req.logger)
+ return err
+ })
if err != nil {
return nil, fmt.Errorf("failed to dial websocket: %v", err)
}
diff --git a/cmd/devnet/requests/request_generator_test.go b/cmd/devnet/requests/request_generator_test.go
index a937b85ffb1..d48b328057b 100644
--- a/cmd/devnet/requests/request_generator_test.go
+++ b/cmd/devnet/requests/request_generator_test.go
@@ -1,11 +1,8 @@
package requests
import (
- "math/big"
"testing"
- ethereum "github.com/ledgerwatch/erigon"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/stretchr/testify/require"
)
@@ -16,197 +13,6 @@ func MockRequestGenerator(reqId int) *requestGenerator {
}
}
-func TestRequestGenerator_GetAdminNodeInfo(t *testing.T) {
- testCases := []struct {
- reqId int
- expected string
- }{
- {1, `{"jsonrpc":"2.0","method":"admin_nodeInfo","id":1}`},
- {2, `{"jsonrpc":"2.0","method":"admin_nodeInfo","id":2}`},
- {3, `{"jsonrpc":"2.0","method":"admin_nodeInfo","id":3}`},
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.adminNodeInfo()
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
-func TestRequestGenerator_GetBalance(t *testing.T) {
- testCases := []struct {
- reqId int
- address libcommon.Address
- blockNum BlockNumber
- expected string
- }{
- {
- 1,
- libcommon.HexToAddress("0x67b1d87101671b127f5f8714789c7192f7ad340e"),
- BlockNumbers.Latest,
- `{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x67b1d87101671b127f5f8714789c7192f7ad340e","latest"],"id":1}`,
- },
- {
- 2,
- libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7"),
- BlockNumbers.Earliest,
- `{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x71562b71999873db5b286df957af199ec94617f7","earliest"],"id":2}`,
- },
- {
- 3,
- libcommon.HexToAddress("0x1b5fd2fed153fa7fac43300273c70c068bfa406a"),
- BlockNumbers.Pending,
- `{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x1b5fd2fed153fa7fac43300273c70c068bfa406a","pending"],"id":3}`,
- },
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.getBalance(testCase.address, testCase.blockNum)
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
-func TestRequestGenerator_GetBlockByNumber(t *testing.T) {
- testCases := []struct {
- reqId int
- blockNum uint64
- withTxs bool
- expected string
- }{
- {
- 1,
- 2,
- false,
- `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x2",false],"id":1}`,
- },
- {
- 2,
- 16,
- false,
- `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x10",false],"id":2}`,
- },
- {
- 3,
- 100,
- true,
- `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x64",true],"id":3}`,
- },
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.getBlockByNumber(testCase.blockNum, testCase.withTxs)
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
-func TestRequestGenerator_GetLogs(t *testing.T) {
- testCases := []struct {
- reqId int
- fromBlock uint64
- toBlock uint64
- address libcommon.Address
- expected string
- }{
- {
- 1,
- 1843,
- 1848,
- libcommon.HexToAddress("0x67b1d87101671b127f5f8714789c7192f7ad340e"),
- `{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"fromBlock":"0x733","toBlock":"0x738","address":"0x67b1d87101671b127f5f8714789c7192f7ad340e"}],"id":1}`,
- },
- {
- 2,
- 12,
- 12,
- libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7"),
- `{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"fromBlock":"0xc","toBlock":"0xc","address":"0x71562b71999873db5b286df957af199ec94617f7"}],"id":2}`,
- },
- {
- 3,
- 0,
- 123456789,
- libcommon.HexToAddress("0x1b5fd2fed153fa7fac43300273c70c068bfa406a"),
- `{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"fromBlock":"0x0","toBlock":"0x75bcd15","address":"0x1b5fd2fed153fa7fac43300273c70c068bfa406a"}],"id":3}`,
- },
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.getLogs(ethereum.FilterQuery{
- FromBlock: big.NewInt(int64(testCase.fromBlock)),
- ToBlock: big.NewInt(int64(testCase.toBlock)),
- Addresses: []libcommon.Address{testCase.address},
- })
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
-func TestRequestGenerator_GetTransactionCount(t *testing.T) {
- testCases := []struct {
- reqId int
- address libcommon.Address
- blockNum BlockNumber
- expected string
- }{
- {
- 1,
- libcommon.HexToAddress("0x67b1d87101671b127f5f8714789c7192f7ad340e"),
- BlockNumbers.Latest,
- `{"jsonrpc":"2.0","method":"eth_getTransactionCount","params":["0x67b1d87101671b127f5f8714789c7192f7ad340e","latest"],"id":1}`,
- },
- {
- 2,
- libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7"),
- BlockNumbers.Earliest,
- `{"jsonrpc":"2.0","method":"eth_getTransactionCount","params":["0x71562b71999873db5b286df957af199ec94617f7","earliest"],"id":2}`,
- },
- {
- 3,
- libcommon.HexToAddress("0x1b5fd2fed153fa7fac43300273c70c068bfa406a"),
- BlockNumbers.Pending,
- `{"jsonrpc":"2.0","method":"eth_getTransactionCount","params":["0x1b5fd2fed153fa7fac43300273c70c068bfa406a","pending"],"id":3}`,
- },
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.getTransactionCount(testCase.address, testCase.blockNum)
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
-func TestRequestGenerator_SendRawTransaction(t *testing.T) {
- testCases := []struct {
- reqId int
- signedTx []byte
- expected string
- }{
- {
- 1,
- libcommon.HexToHash("0x1cd73c7adf5b31f3cf94c67b9e251e699559d91c27664463fb5978b97f8b2d1b").Bytes(),
- `{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x1cd73c7adf5b31f3cf94c67b9e251e699559d91c27664463fb5978b97f8b2d1b"],"id":1}`,
- },
- {
- 2,
- libcommon.HexToHash("0x1cfe7ce95a1694d8969365cb472ce4a0d3eed812c540fd7708bbe6941e34c4de").Bytes(),
- `{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x1cfe7ce95a1694d8969365cb472ce4a0d3eed812c540fd7708bbe6941e34c4de"],"id":2}`,
- },
- {
- 3,
- libcommon.HexToHash("0x6f9e34c00812a80fa87df26208bbe69411e36d6a9f00b35444ef4181f6c483ca").Bytes(),
- `{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x6f9e34c00812a80fa87df26208bbe69411e36d6a9f00b35444ef4181f6c483ca"],"id":3}`,
- },
- }
-
- for _, testCase := range testCases {
- reqGen := MockRequestGenerator(testCase.reqId)
- _, got := reqGen.sendRawTransaction(testCase.signedTx)
- require.EqualValues(t, testCase.expected, got)
- }
-}
-
func TestRequestGenerator_TxpoolContent(t *testing.T) {
testCases := []struct {
reqId int
diff --git a/cmd/devnet/requests/trace.go b/cmd/devnet/requests/trace.go
index 1409e28e7ea..415cfb2ea11 100644
--- a/cmd/devnet/requests/trace.go
+++ b/cmd/devnet/requests/trace.go
@@ -1,12 +1,15 @@
package requests
import (
+ "context"
"encoding/json"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
)
@@ -63,11 +66,6 @@ type TraceCallStateDiffStorage struct {
To libcommon.Hash `json:"to"`
}
-type TraceTransaction struct {
- CommonResponse
- Result []TransactionTrace `json:"result"`
-}
-
type TransactionTrace struct {
Type string `json:"type"`
Action TraceCallAction `json:"action"`
@@ -93,7 +91,7 @@ var TraceOpts = struct {
StateDiff: "stateDiff",
}
-func (reqGen *requestGenerator) TraceCall(blockRef string, args ethapi.CallArgs, traceOpts ...TraceOpt) (*TraceCallResult, error) {
+func (reqGen *requestGenerator) TraceCall(blockRef rpc.BlockReference, args ethapi.CallArgs, traceOpts ...TraceOpt) (*TraceCallResult, error) {
var b TraceCall
if args.Data == nil {
@@ -117,7 +115,7 @@ func (reqGen *requestGenerator) TraceCall(blockRef string, args ethapi.CallArgs,
}
method, body := reqGen.traceCall(blockRef, string(argsVal), string(optsVal))
- res := reqGen.call(method, body, &b)
+ res := reqGen.rpcCallJSON(method, body, &b)
if res.Err != nil {
return nil, fmt.Errorf("TraceCall rpc failed: %w", res.Err)
@@ -130,29 +128,17 @@ func (reqGen *requestGenerator) TraceCall(blockRef string, args ethapi.CallArgs,
return &b.Result, nil
}
-func (req *requestGenerator) traceCall(blockRef string, callArgs string, traceOpts string) (RPCMethod, string) {
+func (req *requestGenerator) traceCall(blockRef rpc.BlockReference, callArgs string, traceOpts string) (RPCMethod, string) {
const template = `{"jsonrpc":"2.0","method":%q,"params":[%s,%s,"%s"],"id":%d}`
- return Methods.TraceCall, fmt.Sprintf(template, Methods.TraceCall, callArgs, traceOpts, blockRef, req.reqID)
+ return Methods.TraceCall, fmt.Sprintf(template, Methods.TraceCall, callArgs, traceOpts, blockRef.String(), req.reqID)
}
func (reqGen *requestGenerator) TraceTransaction(hash libcommon.Hash) ([]TransactionTrace, error) {
- var b TraceTransaction
-
- method, body := reqGen.traceTransaction(hash)
- res := reqGen.call(method, body, &b)
-
- if res.Err != nil {
- return nil, fmt.Errorf("TraceTransaction rpc failed: %w", res.Err)
- }
+ var result []TransactionTrace
- if b.Error != nil {
- return nil, fmt.Errorf("TraceTransaction rpc failed: %w", b.Error)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.TraceTransaction, hash); err != nil {
+ return nil, err
}
- return b.Result, nil
-}
-
-func (req *requestGenerator) traceTransaction(hash libcommon.Hash) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":[%q],"id":%d}`
- return Methods.TraceTransaction, fmt.Sprintf(template, Methods.TraceTransaction, hash.Hex(), req.reqID)
+ return result, nil
}
diff --git a/cmd/devnet/requests/transaction.go b/cmd/devnet/requests/transaction.go
index 789a3f84823..e463265e010 100644
--- a/cmd/devnet/requests/transaction.go
+++ b/cmd/devnet/requests/transaction.go
@@ -2,16 +2,20 @@ package requests
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"math/big"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
ethereum "github.com/ledgerwatch/erigon"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/adapter/ethapi"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
)
type ETHEstimateGas struct {
@@ -19,11 +23,6 @@ type ETHEstimateGas struct {
Number hexutil.Uint64 `json:"result"`
}
-type ETHGasPrice struct {
- CommonResponse
- Price hexutil.Big `json:"result"`
-}
-
func (reqGen *requestGenerator) EstimateGas(args ethereum.CallMsg, blockRef BlockNumber) (uint64, error) {
var b ETHEstimateGas
@@ -81,7 +80,7 @@ func (reqGen *requestGenerator) EstimateGas(args ethereum.CallMsg, blockRef Bloc
}
method, body := reqGen.estimateGas(string(argsVal), blockRef)
- res := reqGen.call(method, body, &b)
+ res := reqGen.rpcCallJSON(method, body, &b)
if res.Err != nil {
return 0, fmt.Errorf("EstimateGas rpc failed: %w", res.Err)
@@ -101,41 +100,40 @@ func (req *requestGenerator) estimateGas(callArgs string, blockRef BlockNumber)
}
func (reqGen *requestGenerator) GasPrice() (*big.Int, error) {
- var b ETHGasPrice
+ var result hexutil.Big
- method, body := reqGen.gasPrice()
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return nil, fmt.Errorf("failed to get gas price: %w", res.Err)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHGasPrice); err != nil {
+ return nil, err
}
- return b.Price.ToInt(), nil
+ return result.ToInt(), nil
}
-func (req *requestGenerator) gasPrice() (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"id":%d}`
- return Methods.ETHGasPrice, fmt.Sprintf(template, Methods.ETHGasPrice, req.reqID)
+func (reqGen *requestGenerator) Call(args ethapi.CallArgs, blockRef rpc.BlockReference, overrides *ethapi.StateOverrides) ([]byte, error) {
+ var result hexutility.Bytes
+
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHCall, args, blockRef, overrides); err != nil {
+ return nil, err
+ }
+
+ return result, nil
}
func (reqGen *requestGenerator) SendTransaction(signedTx types.Transaction) (libcommon.Hash, error) {
- var b EthSendRawTransaction
+ var result libcommon.Hash
var buf bytes.Buffer
if err := signedTx.MarshalBinary(&buf); err != nil {
return libcommon.Hash{}, fmt.Errorf("failed to marshal binary: %v", err)
}
- method, body := reqGen.sendRawTransaction(buf.Bytes())
- if res := reqGen.call(method, body, &b); res.Err != nil {
- return libcommon.Hash{}, fmt.Errorf("could not make to request to eth_sendRawTransaction: %v", res.Err)
- }
-
- if b.Error != nil {
- return libcommon.Hash{}, fmt.Errorf("SendTransaction rpc failed: %w", b.Error)
+ if err := reqGen.rpcCall(context.Background(), &result, Methods.ETHSendRawTransaction, hexutility.Bytes(buf.Bytes())); err != nil {
+ return libcommon.Hash{}, err
}
zeroHash := true
- for _, hb := range b.TxnHash {
+ for _, hb := range result {
if hb != 0 {
zeroHash = false
break
@@ -143,13 +141,28 @@ func (reqGen *requestGenerator) SendTransaction(signedTx types.Transaction) (lib
}
if zeroHash {
- return libcommon.Hash{}, fmt.Errorf("Request: %d, hash: %s, nonce %d: returned a zero transaction hash", b.RequestId, signedTx.Hash().Hex(), signedTx.GetNonce())
+ return libcommon.Hash{}, fmt.Errorf("hash: %s, nonce %d: returned a zero transaction hash", signedTx.Hash().Hex(), signedTx.GetNonce())
}
- return b.TxnHash, nil
+ return result, nil
}
-func (req *requestGenerator) sendRawTransaction(signedTx []byte) (RPCMethod, string) {
- const template = `{"jsonrpc":"2.0","method":%q,"params":["0x%x"],"id":%d}`
- return Methods.ETHSendRawTransaction, fmt.Sprintf(template, Methods.ETHSendRawTransaction, signedTx, req.reqID)
+func (req *requestGenerator) GetTransactionByHash(hash libcommon.Hash) (*jsonrpc.RPCTransaction, error) {
+ var result jsonrpc.RPCTransaction
+
+ if err := req.rpcCall(context.Background(), &result, Methods.ETHGetTransactionByHash, hash); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (req *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libcommon.Hash) (*types.Receipt, error) {
+ var result types.Receipt
+
+ if err := req.rpcCall(ctx, &result, Methods.ETHGetTransactionReceipt, hash); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
}
diff --git a/cmd/devnet/requests/tx.go b/cmd/devnet/requests/tx.go
index 2d32a776221..24e4b3ebb37 100644
--- a/cmd/devnet/requests/tx.go
+++ b/cmd/devnet/requests/tx.go
@@ -18,14 +18,14 @@ func (reqGen *requestGenerator) TxpoolContent() (int, int, int, error) {
)
method, body := reqGen.txpoolContent()
- if res := reqGen.call(method, body, &b); res.Err != nil {
+ if res := reqGen.rpcCallJSON(method, body, &b); res.Err != nil {
return len(pending), len(queued), len(baseFee), fmt.Errorf("failed to fetch txpool content: %v", res.Err)
}
resp, ok := b.Result.(map[string]interface{})
if !ok {
- return 0, 0, 0, fmt.Errorf("Unexpected result type: %T", b.Result)
+ return 0, 0, 0, fmt.Errorf("unexpected result type: %T", b.Result)
}
pendingLen := 0
diff --git a/cmd/devnet/services/accounts/faucet.go b/cmd/devnet/services/accounts/faucet.go
index 65d4d4870fa..5a0b88b6dd4 100644
--- a/cmd/devnet/services/accounts/faucet.go
+++ b/cmd/devnet/services/accounts/faucet.go
@@ -13,7 +13,7 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/blocks"
"github.com/ledgerwatch/erigon/cmd/devnet/contracts"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
- "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/rpc"
)
type Faucet struct {
@@ -64,7 +64,7 @@ func (d *deployer) deploy(ctx context.Context, node devnet.Node) {
return
}
- logger.Info("Faucet deployed", "chain", d.faucet.chainName, "block", block.BlockNumber, "addr", address)
+ logger.Info("Faucet deployed", "chain", d.faucet.chainName, "block", block.Number, "addr", address)
d.faucet.contractAddress = address
d.faucet.contract = contract
@@ -96,7 +96,7 @@ func (d *deployer) deploy(ctx context.Context, node devnet.Node) {
return
}
- logger.Info("Faucet funded", "chain", d.faucet.chainName, "block", block.BlockNumber, "addr", address, "received", received)
+ logger.Info("Faucet funded", "chain", d.faucet.chainName, "block", block.Number, "addr", address, "received", received)
d.faucet.Lock()
defer d.faucet.Unlock()
@@ -139,7 +139,12 @@ func (f *Faucet) Balance(ctx context.Context) (*big.Int, error) {
}
node := devnet.SelectBlockProducer(devnet.WithCurrentNetwork(ctx, f.chainName))
- return node.GetBalance(f.contractAddress, requests.BlockNumbers.Latest)
+
+ if node == nil {
+ return nil, fmt.Errorf("%s has no block producers", f.chainName)
+ }
+
+ return node.GetBalance(f.contractAddress, rpc.LatestBlock)
}
func (f *Faucet) Send(ctx context.Context, destination *accounts.Account, eth float64) (*big.Int, libcommon.Hash, error) {
@@ -157,7 +162,7 @@ func (f *Faucet) Send(ctx context.Context, destination *accounts.Account, eth fl
node := devnet.SelectNode(ctx)
- count, err := node.GetTransactionCount(f.source.Address, requests.BlockNumbers.Pending)
+ count, err := node.GetTransactionCount(f.source.Address, rpc.PendingBlock)
if err != nil {
return nil, libcommon.Hash{}, err
@@ -184,7 +189,7 @@ func (f *Faucet) Receive(ctx context.Context, source *accounts.Account, eth floa
return nil, libcommon.Hash{}, err
}
- count, err := node.GetTransactionCount(f.source.Address, requests.BlockNumbers.Pending)
+ count, err := node.GetTransactionCount(f.source.Address, rpc.PendingBlock)
if err != nil {
return nil, libcommon.Hash{}, err
@@ -209,7 +214,7 @@ func (f *Faucet) NodeCreated(ctx context.Context, node devnet.Node) {
func (f *Faucet) NodeStarted(ctx context.Context, node devnet.Node) {
logger := devnet.Logger(ctx)
- if strings.HasPrefix(node.Name(), f.chainName) && node.IsBlockProducer() {
+ if strings.HasPrefix(node.GetName(), f.chainName) && node.IsBlockProducer() {
f.Lock()
defer f.Unlock()
diff --git a/cmd/devnet/services/bor/heimdall.go b/cmd/devnet/services/bor/heimdall.go
deleted file mode 100644
index fc4e5fdcb7b..00000000000
--- a/cmd/devnet/services/bor/heimdall.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package bor
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
-
- ethereum "github.com/ledgerwatch/erigon"
- "github.com/ledgerwatch/erigon-lib/chain"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/accounts/abi/bind"
- "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
- "github.com/ledgerwatch/erigon/cmd/devnet/blocks"
- "github.com/ledgerwatch/erigon/cmd/devnet/contracts"
- "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
- "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
- "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
- "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc"
- "github.com/ledgerwatch/erigon/consensus/bor/valset"
- "github.com/ledgerwatch/log/v3"
-)
-
-type BridgeEvent string
-
-var BridgeEvents = struct {
- StakingEvent BridgeEvent
- TopupEvent BridgeEvent
- ClerkEvent BridgeEvent
- SlashingEvent BridgeEvent
-}{
- StakingEvent: "staking",
- TopupEvent: "topup",
- ClerkEvent: "clerk",
- SlashingEvent: "slashing",
-}
-
-type syncRecordKey struct {
- hash libcommon.Hash
- index uint64
-}
-
-type Heimdall struct {
- sync.Mutex
- currentSpan *span.HeimdallSpan
- chainConfig *chain.Config
- validatorSet *valset.ValidatorSet
- spans map[uint64]*span.HeimdallSpan
- logger log.Logger
- cancelFunc context.CancelFunc
- syncChan chan *contracts.TestStateSenderStateSynced
- syncContractAddress libcommon.Address
- syncContractBinding *contracts.TestStateSender
- syncSubscription ethereum.Subscription
- pendingSyncRecords map[syncRecordKey]*EventRecordWithBlock
-}
-
-func NewHeimdall(chainConfig *chain.Config, logger log.Logger) *Heimdall {
- return &Heimdall{
- chainConfig: chainConfig,
- spans: map[uint64]*span.HeimdallSpan{},
- pendingSyncRecords: map[syncRecordKey]*EventRecordWithBlock{},
- logger: logger}
-}
-
-func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) {
- h.Lock()
- defer h.Unlock()
-
- if span, ok := h.spans[spanID]; ok {
- h.currentSpan = span
- return span, nil
- }
-
- var nextSpan = span.Span{
- ID: spanID,
- }
-
- if h.currentSpan == nil || spanID == 0 {
- nextSpan.StartBlock = 1 //256
- } else {
- if spanID != h.currentSpan.ID+1 {
- return nil, fmt.Errorf("Can't initialize span: non consecutive span")
- }
-
- nextSpan.StartBlock = h.currentSpan.EndBlock + 1
- }
-
- nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1
-
- // TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/
-
- selectedProducers := make([]valset.Validator, len(h.validatorSet.Validators))
-
- for i, v := range h.validatorSet.Validators {
- selectedProducers[i] = *v
- }
-
- h.currentSpan = &span.HeimdallSpan{
- Span: nextSpan,
- ValidatorSet: *h.validatorSet,
- SelectedProducers: selectedProducers,
- ChainID: h.chainConfig.ChainID.String(),
- }
-
- h.spans[h.currentSpan.ID] = h.currentSpan
-
- return h.currentSpan, nil
-}
-
-func (h *Heimdall) currentSprintLength() int {
- if h.currentSpan != nil {
- return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock))
- }
-
- return int(h.chainConfig.Bor.CalculateSprint(256))
-}
-
-func (h *Heimdall) getSpanOverrideHeight() uint64 {
- return 0
- //MainChain: 8664000
- //MumbaiChain: 10205000
-}
-
-func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) {
- return nil, fmt.Errorf("TODO")
-}
-
-func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) {
- return 0, fmt.Errorf("TODO")
-}
-
-func (h *Heimdall) Close() {
-}
-
-func (h *Heimdall) StateSenderAddress() libcommon.Address {
- return h.syncContractAddress
-}
-
-func (f *Heimdall) StateSenderContract() *contracts.TestStateSender {
- return f.syncContractBinding
-}
-
-func (h *Heimdall) NodeCreated(ctx context.Context, node devnet.Node) {
- h.Lock()
- defer h.Unlock()
-
- if strings.HasPrefix(node.Name(), "bor") && node.IsBlockProducer() && node.Account() != nil {
- // TODO configurable voting power
- h.addValidator(node.Account().Address, 1000, 0)
- }
-}
-
-func (h *Heimdall) NodeStarted(ctx context.Context, node devnet.Node) {
- if !strings.HasPrefix(node.Name(), "bor") && node.IsBlockProducer() {
- h.Lock()
- defer h.Unlock()
-
- if h.syncChan != nil {
- return
- }
-
- h.syncChan = make(chan *contracts.TestStateSenderStateSynced, 100)
-
- go func() {
- transactOpts, err := bind.NewKeyedTransactorWithChainID(accounts.SigKey(node.Account().Address), node.ChainID())
-
- if err != nil {
- h.Lock()
- defer h.Unlock()
-
- h.syncChan = nil
- h.logger.Error("Failed to deploy state sender", "err", err)
- return
- }
-
- deployCtx := devnet.WithCurrentNode(ctx, node)
- waiter, cancel := blocks.BlockWaiter(deployCtx, contracts.DeploymentChecker)
- defer cancel()
-
- // deploy the contract and get the contract handler
- address, transaction, contract, err := contracts.DeployWithOps(deployCtx, transactOpts, contracts.DeployTestStateSender)
-
- if err != nil {
- h.Lock()
- defer h.Unlock()
-
- h.syncChan = nil
- h.logger.Error("Failed to deploy state sender", "err", err)
- return
- }
-
- h.syncContractAddress = address
- h.syncContractBinding = contract
-
- block, err := waiter.Await(transaction.Hash())
-
- if err != nil {
- h.Lock()
- defer h.Unlock()
-
- h.syncChan = nil
- h.logger.Error("Failed to deploy state sender", "err", err)
- return
- }
-
- h.logger.Info("StateSender deployed", "chain", h.chainConfig.ChainName, "block", block.BlockNumber, "addr", address)
-
- h.syncSubscription, err = contract.WatchStateSynced(&bind.WatchOpts{}, h.syncChan, nil, nil)
-
- if err != nil {
- h.Lock()
- defer h.Unlock()
-
- h.syncChan = nil
- h.logger.Error("Failed to subscribe to sync events", "err", err)
- return
- }
-
- for stateSyncedEvent := range h.syncChan {
- if err := h.handleStateSynced(stateSyncedEvent); err != nil {
- h.logger.Error("L1 sync event processing failed", "event", stateSyncedEvent.Raw.Index, "err", err)
- }
- }
-
- h.logger.Info("Sync event channel closed")
- }()
- }
-}
-
-func (h *Heimdall) addValidator(validatorAddress libcommon.Address, votingPower int64, proposerPriority int64) {
-
- if h.validatorSet == nil {
- h.validatorSet = valset.NewValidatorSet([]*valset.Validator{
- {
- ID: 1,
- Address: validatorAddress,
- VotingPower: votingPower,
- ProposerPriority: proposerPriority,
- },
- }, h.logger)
- } else {
- h.validatorSet.UpdateWithChangeSet([]*valset.Validator{
- {
- ID: uint64(len(h.validatorSet.Validators) + 1),
- Address: validatorAddress,
- VotingPower: votingPower,
- ProposerPriority: proposerPriority,
- },
- }, h.logger)
- }
-}
-
-func (h *Heimdall) Start(ctx context.Context) error {
- h.Lock()
- if h.cancelFunc != nil {
- h.Unlock()
- return nil
- }
- ctx, h.cancelFunc = context.WithCancel(ctx)
- h.Unlock()
-
- if h.syncSubscription != nil {
- h.syncSubscription.Unsubscribe()
- h.syncSubscription = nil
- }
-
- if h.syncChan != nil {
- close(h.syncChan)
- h.syncChan = nil
- }
-
- return heimdallgrpc.StartHeimdallServer(ctx, h, HeimdallGRpc(ctx), h.logger)
-}
-
-func HeimdallGRpc(ctx context.Context) string {
- addr := "localhost:8540"
-
- if cli := devnet.CliContext(ctx); cli != nil {
- if grpcAddr := cli.String("bor.heimdallgRPC"); len(grpcAddr) > 0 {
- addr = grpcAddr
- }
- }
-
- return addr
-}
-
-func (h *Heimdall) Stop() {
- var cancel context.CancelFunc
-
- h.Lock()
- if h.cancelFunc != nil {
- cancel = h.cancelFunc
- h.cancelFunc = nil
- }
-
- h.Unlock()
-
- if cancel != nil {
- cancel()
- }
-}
diff --git a/cmd/devnet/services/context.go b/cmd/devnet/services/context.go
index 6dba14cd964..625cdb6ce38 100644
--- a/cmd/devnet/services/context.go
+++ b/cmd/devnet/services/context.go
@@ -5,7 +5,7 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/services/accounts"
- "github.com/ledgerwatch/erigon/cmd/devnet/services/bor"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
)
type ctxKey int
@@ -26,10 +26,10 @@ func Faucet(ctx context.Context) *accounts.Faucet {
return nil
}
-func Heimdall(ctx context.Context) *bor.Heimdall {
+func Heimdall(ctx context.Context) *polygon.Heimdall {
if network := devnet.CurrentNetwork(ctx); network != nil {
for _, service := range network.Services {
- if heimdall, ok := service.(*bor.Heimdall); ok {
+ if heimdall, ok := service.(*polygon.Heimdall); ok {
return heimdall
}
}
@@ -37,3 +37,15 @@ func Heimdall(ctx context.Context) *bor.Heimdall {
return nil
}
+
+func ProofGenerator(ctx context.Context) *polygon.ProofGenerator {
+ if network := devnet.CurrentNetwork(ctx); network != nil {
+ for _, service := range network.Services {
+ if proofGenerator, ok := service.(*polygon.ProofGenerator); ok {
+ return proofGenerator
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/cmd/devnet/services/polygon/checkpoint.go b/cmd/devnet/services/polygon/checkpoint.go
new file mode 100644
index 00000000000..5a39652cdf3
--- /dev/null
+++ b/cmd/devnet/services/polygon/checkpoint.go
@@ -0,0 +1,596 @@
+package polygon
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/blocks"
+ "github.com/ledgerwatch/erigon/cmd/devnet/contracts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/crypto"
+)
+
+type CheckpointBlock struct {
+ Proposer libcommon.Address `json:"proposer"`
+ StartBlock uint64 `json:"start_block"`
+ EndBlock uint64 `json:"end_block"`
+ RootHash libcommon.Hash `json:"root_hash"`
+ AccountRootHash libcommon.Hash `json:"account_root_hash"`
+ BorChainID string `json:"bor_chain_id"`
+}
+
+func (c CheckpointBlock) GetSigners() []byte {
+ return c.Proposer[:]
+}
+
+func (c CheckpointBlock) GetSignBytes() ([]byte, error) {
+ /*b, err := ModuleCdc.MarshalJSON(msg)
+
+ if err != nil {
+ nil, err
+ }
+
+ return sdk.SortJSON(b)*/
+ return nil, fmt.Errorf("TODO")
+}
+
+type CheckpointAck struct {
+ From libcommon.Address `json:"from"`
+ Number uint64 `json:"number"`
+ Proposer libcommon.Address `json:"proposer"`
+ StartBlock uint64 `json:"start_block"`
+ EndBlock uint64 `json:"end_block"`
+ RootHash libcommon.Hash `json:"root_hash"`
+ TxHash libcommon.Hash `json:"tx_hash"`
+ LogIndex uint64 `json:"log_index"`
+}
+
+var zeroHash libcommon.Hash
+var zeroAddress libcommon.Address
+
+func (c CheckpointBlock) ValidateBasic() error {
+
+ if c.RootHash == zeroHash {
+ return fmt.Errorf("Invalid rootHash %v", c.RootHash.String())
+ }
+
+ if c.Proposer == zeroAddress {
+ return fmt.Errorf("Invalid proposer %v", c.Proposer.String())
+ }
+
+ if c.StartBlock >= c.EndBlock || c.EndBlock == 0 {
+ return fmt.Errorf("Invalid startBlock %v or/and endBlock %v", c.StartBlock, c.EndBlock)
+ }
+
+ return nil
+}
+
+func (c CheckpointBlock) GetSideSignBytes() []byte {
+ borChainID, _ := strconv.ParseUint(c.BorChainID, 10, 64)
+
+ return appendBytes32(
+ c.Proposer.Bytes(),
+ (&big.Int{}).SetUint64(c.StartBlock).Bytes(),
+ (&big.Int{}).SetUint64(c.EndBlock).Bytes(),
+ c.RootHash.Bytes(),
+ c.AccountRootHash.Bytes(),
+ (&big.Int{}).SetUint64(borChainID).Bytes(),
+ )
+}
+
+func appendBytes32(data ...[]byte) []byte {
+ var result []byte
+
+ for _, v := range data {
+ l := len(v)
+
+ var padded [32]byte
+
+ if l > 0 && l <= 32 {
+ copy(padded[32-l:], v)
+ }
+
+ result = append(result, padded[:]...)
+ }
+
+ return result
+}
+
+func (h *Heimdall) startChildHeaderSubscription(ctx context.Context) {
+
+ node := devnet.SelectBlockProducer(ctx)
+
+ var err error
+
+ childHeaderChan := make(chan *types.Header)
+ h.childHeaderSub, err = node.Subscribe(ctx, requests.Methods.ETHNewHeads, childHeaderChan)
+
+ if err != nil {
+ h.unsubscribe()
+ h.logger.Error("Failed to subscribe to child chain headers", "err", err)
+ }
+
+ for childHeader := range childHeaderChan {
+ if err := h.handleChildHeader(ctx, childHeader); err != nil {
+ if errors.Is(err, notEnoughChildChainTxConfirmationsError) {
+ h.logger.Info("L2 header processing skipped", "header", childHeader.Number, "err", err)
+ } else {
+ h.logger.Error("L2 header processing failed", "header", childHeader.Number, "err", err)
+ }
+ }
+ }
+}
+
+func (h *Heimdall) startRootHeaderBlockSubscription() {
+ var err error
+
+ rootHeaderBlockChan := make(chan *contracts.TestRootChainNewHeaderBlock)
+ h.rootHeaderBlockSub, err = h.rootChainBinding.WatchNewHeaderBlock(&bind.WatchOpts{}, rootHeaderBlockChan, nil, nil, nil)
+
+ if err != nil {
+ h.unsubscribe()
+ h.logger.Error("Failed to subscribe to root chain header blocks", "err", err)
+ }
+
+ for rootHeaderBlock := range rootHeaderBlockChan {
+ if err := h.handleRootHeaderBlock(rootHeaderBlock); err != nil {
+ h.logger.Error("L1 header block processing failed", "block", rootHeaderBlock.HeaderBlockId, "err", err)
+ }
+ }
+}
+
+var notEnoughChildChainTxConfirmationsError = errors.New("the chain doesn't have enough blocks for ChildChainTxConfirmations")
+
+func (h *Heimdall) handleChildHeader(ctx context.Context, header *types.Header) error {
+
+ h.logger.Debug("no of checkpoint confirmations required", "childChainTxConfirmations", h.checkpointConfig.ChildChainTxConfirmations)
+
+ latestConfirmedChildBlock := header.Number.Int64() - int64(h.checkpointConfig.ChildChainTxConfirmations)
+
+ if latestConfirmedChildBlock <= 0 {
+ return notEnoughChildChainTxConfirmationsError
+ }
+
+ timeStamp := uint64(time.Now().Unix())
+ checkpointBufferTime := uint64(h.checkpointConfig.CheckpointBufferTime.Seconds())
+
+ if h.pendingCheckpoint == nil {
+ expectedCheckpointState, err := h.nextExpectedCheckpoint(ctx, uint64(latestConfirmedChildBlock))
+
+ if err != nil {
+ h.logger.Error("Error while calculate next expected checkpoint", "error", err)
+ return err
+ }
+
+ h.pendingCheckpoint = &checkpoint.Checkpoint{
+ Timestamp: timeStamp,
+ StartBlock: big.NewInt(int64(expectedCheckpointState.newStart)),
+ EndBlock: big.NewInt(int64(expectedCheckpointState.newEnd)),
+ }
+ }
+
+ if header.Number.Cmp(h.pendingCheckpoint.EndBlock) < 0 {
+ return nil
+ }
+
+ h.pendingCheckpoint.EndBlock = header.Number
+
+ if !(h.pendingCheckpoint.Timestamp == 0 ||
+ ((timeStamp > h.pendingCheckpoint.Timestamp) && timeStamp-h.pendingCheckpoint.Timestamp >= checkpointBufferTime)) {
+ h.logger.Debug("Pendiing checkpoint awaiting buffer expiry",
+ "start", h.pendingCheckpoint.StartBlock,
+ "end", h.pendingCheckpoint.EndBlock,
+ "expiry", time.Unix(int64(h.pendingCheckpoint.Timestamp+checkpointBufferTime), 0))
+ return nil
+ }
+
+ start := h.pendingCheckpoint.StartBlock.Uint64()
+ end := h.pendingCheckpoint.EndBlock.Uint64()
+
+ shouldSend, err := h.shouldSendCheckpoint(start, end)
+
+ if err != nil {
+ return err
+ }
+
+ if shouldSend {
+ // TODO simulate tendermint chain stats
+ txHash := libcommon.Hash{}
+ blockHeight := int64(0)
+
+ if err := h.createAndSendCheckpointToRootchain(ctx, start, end, blockHeight, txHash); err != nil {
+ h.logger.Error("Error sending checkpoint to rootchain", "error", err)
+ return err
+ }
+
+ h.pendingCheckpoint = nil
+ }
+
+ return nil
+}
+
+type ContractCheckpoint struct {
+ newStart uint64
+ newEnd uint64
+ currentHeaderBlock *HeaderBlock
+}
+
+type HeaderBlock struct {
+ start uint64
+ end uint64
+ number *big.Int
+ checkpointTime uint64
+}
+
+func (h *Heimdall) nextExpectedCheckpoint(ctx context.Context, latestChildBlock uint64) (*ContractCheckpoint, error) {
+
+ // fetch current header block from mainchain contract
+ currentHeaderBlock, err := h.currentHeaderBlock(h.checkpointConfig.ChildBlockInterval)
+
+ if err != nil {
+ h.logger.Error("Error while fetching current header block number from rootchain", "error", err)
+ return nil, err
+ }
+
+ // current header block
+ currentHeaderBlockNumber := big.NewInt(0).SetUint64(currentHeaderBlock)
+
+ // get header info
+ _, currentStart, currentEnd, lastCheckpointTime, _, err := h.getHeaderInfo(currentHeaderBlockNumber.Uint64())
+
+ if err != nil {
+ h.logger.Error("Error while fetching current header block object from rootchain", "error", err)
+ return nil, err
+ }
+
+ // find next start/end
+ var start, end uint64
+ start = currentEnd
+
+ // add 1 if start > 0
+ if start > 0 {
+ start = start + 1
+ }
+
+ // get diff
+ diff := int(latestChildBlock - start + 1)
+ // process if diff > 0 (positive)
+ if diff > 0 {
+ expectedDiff := diff - diff%int(h.checkpointConfig.AvgCheckpointLength)
+ if expectedDiff > 0 {
+ expectedDiff = expectedDiff - 1
+ }
+ // cap with max checkpoint length
+ if expectedDiff > int(h.checkpointConfig.MaxCheckpointLength-1) {
+ expectedDiff = int(h.checkpointConfig.MaxCheckpointLength - 1)
+ }
+ // get end result
+ end = uint64(expectedDiff) + start
+ h.logger.Debug("Calculating checkpoint eligibility",
+ "latest", latestChildBlock,
+ "start", start,
+ "end", end,
+ )
+ }
+
+ // Handle when block producers go down
+ if end == 0 || end == start || (0 < diff && diff < int(h.checkpointConfig.AvgCheckpointLength)) {
+ h.logger.Debug("Fetching last header block to calculate time")
+
+ currentTime := time.Now().UTC().Unix()
+ defaultForcePushInterval := h.checkpointConfig.MaxCheckpointLength * 2 // in seconds (1024 * 2 seconds)
+
+ if currentTime-int64(lastCheckpointTime) > int64(defaultForcePushInterval) {
+ end = latestChildBlock
+ h.logger.Info("Force push checkpoint",
+ "currentTime", currentTime,
+ "lastCheckpointTime", lastCheckpointTime,
+ "defaultForcePushInterval", defaultForcePushInterval,
+ "start", start,
+ "end", end,
+ )
+ }
+ }
+
+ return &ContractCheckpoint{
+ newStart: start,
+ newEnd: end,
+ currentHeaderBlock: &HeaderBlock{
+ start: currentStart,
+ end: currentEnd,
+ number: currentHeaderBlockNumber,
+ checkpointTime: lastCheckpointTime,
+ }}, nil
+}
+
+func (h *Heimdall) currentHeaderBlock(childBlockInterval uint64) (uint64, error) {
+ currentHeaderBlock, err := h.rootChainBinding.CurrentHeaderBlock(nil)
+
+ if err != nil {
+ h.logger.Error("Could not fetch current header block from rootChain contract", "error", err)
+ return 0, err
+ }
+
+ return currentHeaderBlock.Uint64() / childBlockInterval, nil
+}
+
+func (h *Heimdall) fetchDividendAccountRoot() (libcommon.Hash, error) {
+ //TODO
+ return crypto.Keccak256Hash([]byte("dividendaccountroot")), nil
+}
+
+func (h *Heimdall) getHeaderInfo(number uint64) (
+ root libcommon.Hash,
+ start uint64,
+ end uint64,
+ createdAt uint64,
+ proposer libcommon.Address,
+ err error,
+) {
+ // get header from rootChain
+ checkpointBigInt := big.NewInt(0).Mul(big.NewInt(0).SetUint64(number), big.NewInt(0).SetUint64(h.checkpointConfig.ChildBlockInterval))
+
+ headerBlock, err := h.rootChainBinding.HeaderBlocks(nil, checkpointBigInt)
+
+ if err != nil {
+ return root, start, end, createdAt, proposer, errors.New("unable to fetch checkpoint block")
+ }
+
+ createdAt = headerBlock.CreatedAt.Uint64()
+
+ if createdAt == 0 {
+ createdAt = uint64(h.startTime.Unix())
+ }
+
+ return headerBlock.Root,
+ headerBlock.Start.Uint64(),
+ headerBlock.End.Uint64(),
+ createdAt,
+ libcommon.BytesToAddress(headerBlock.Proposer.Bytes()),
+ nil
+}
+
+func (h *Heimdall) getRootHash(ctx context.Context, start uint64, end uint64) (libcommon.Hash, error) {
+ noOfBlock := end - start + 1
+
+ if start > end {
+ return libcommon.Hash{}, errors.New("start is greater than end")
+ }
+
+ if noOfBlock > h.checkpointConfig.MaxCheckpointLength {
+ return libcommon.Hash{}, errors.New("number of headers requested exceeds")
+ }
+
+ return devnet.SelectBlockProducer(devnet.WithCurrentNetwork(ctx, networkname.BorDevnetChainName)).GetRootHash(ctx, start, end)
+}
+
+func (h *Heimdall) shouldSendCheckpoint(start uint64, end uint64) (bool, error) {
+
+ // current child block from contract
+ lastChildBlock, err := h.rootChainBinding.GetLastChildBlock(nil)
+
+ if err != nil {
+ h.logger.Error("Error fetching current child block", "currentChildBlock", lastChildBlock, "error", err)
+ return false, err
+ }
+
+ h.logger.Debug("Fetched current child block", "currentChildBlock", lastChildBlock)
+
+ currentChildBlock := lastChildBlock.Uint64()
+
+ shouldSend := false
+ // validate if checkpoint needs to be pushed to rootchain and submit
+ h.logger.Info("Validating if checkpoint needs to be pushed", "commitedLastBlock", currentChildBlock, "startBlock", start)
+ // check if we need to send checkpoint or not
+ if ((currentChildBlock + 1) == start) || (currentChildBlock == 0 && start == 0) {
+ h.logger.Info("Checkpoint Valid", "startBlock", start)
+
+ shouldSend = true
+ } else if currentChildBlock > start {
+ h.logger.Info("Start block does not match, checkpoint already sent", "commitedLastBlock", currentChildBlock, "startBlock", start)
+ } else if currentChildBlock > end {
+ h.logger.Info("Checkpoint already sent", "commitedLastBlock", currentChildBlock, "startBlock", start)
+ } else {
+ h.logger.Info("No need to send checkpoint")
+ }
+
+ return shouldSend, nil
+}
+
+func (h *Heimdall) createAndSendCheckpointToRootchain(ctx context.Context, start uint64, end uint64, height int64, txHash libcommon.Hash) error {
+ h.logger.Info("Preparing checkpoint to be pushed on chain", "height", height, "txHash", txHash, "start", start, "end", end)
+
+ /*
+ // proof
+ tx, err := helper.QueryTxWithProof(cp.cliCtx, txHash)
+ if err != nil {
+ h.logger.Error("Error querying checkpoint tx proof", "txHash", txHash)
+ return err
+ }
+
+ // fetch side txs sigs
+ decoder := helper.GetTxDecoder(authTypes.ModuleCdc)
+
+ stdTx, err := decoder(tx.Tx)
+ if err != nil {
+ h.logger.Error("Error while decoding checkpoint tx", "txHash", tx.Tx.Hash(), "error", err)
+ return err
+ }
+
+ cmsg := stdTx.GetMsgs()[0]
+
+ sideMsg, ok := cmsg.(hmTypes.SideTxMsg)
+ if !ok {
+ h.logger.Error("Invalid side-tx msg", "txHash", tx.Tx.Hash())
+ return err
+ }
+ */
+
+ shouldSend, err := h.shouldSendCheckpoint(start, end)
+
+ if err != nil {
+ return err
+ }
+
+ if shouldSend {
+ accountRoot, err := h.fetchDividendAccountRoot()
+
+ if err != nil {
+ return err
+ }
+
+ h.pendingCheckpoint.RootHash, err = h.getRootHash(ctx, start, end)
+
+ if err != nil {
+ return err
+ }
+
+ checkpoint := CheckpointBlock{
+ Proposer: h.checkpointConfig.CheckpointAccount.Address,
+ StartBlock: start,
+ EndBlock: end,
+ RootHash: h.pendingCheckpoint.RootHash,
+ AccountRootHash: accountRoot,
+ BorChainID: h.chainConfig.ChainID.String(),
+ }
+
+ // side-tx data
+ sideTxData := checkpoint.GetSideSignBytes()
+
+ // get sigs
+ sigs /*, err*/ := [][3]*big.Int{} //helper.FetchSideTxSigs(cp.httpClient, height, tx.Tx.Hash(), sideTxData)
+
+ /*
+ if err != nil {
+ h.logger.Error("Error fetching votes for checkpoint tx", "height", height)
+ return err
+ }*/
+
+ if err := h.sendCheckpoint(ctx, sideTxData, sigs); err != nil {
+ h.logger.Info("Error submitting checkpoint to rootchain", "error", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (h *Heimdall) sendCheckpoint(ctx context.Context, signedData []byte, sigs [][3]*big.Int) error {
+
+ s := make([]string, 0)
+ for i := 0; i < len(sigs); i++ {
+ s = append(s, fmt.Sprintf("[%s,%s,%s]", sigs[i][0].String(), sigs[i][1].String(), sigs[i][2].String()))
+ }
+
+ h.logger.Debug("Sending new checkpoint",
+ "sigs", strings.Join(s, ","),
+ "data", hex.EncodeToString(signedData),
+ )
+
+ node := devnet.SelectBlockProducer(ctx)
+
+ auth, err := bind.NewKeyedTransactorWithChainID(accounts.SigKey(h.checkpointConfig.CheckpointAccount.Address), node.ChainID())
+
+ if err != nil {
+ h.logger.Error("Error while getting auth to submit checkpoint", "err", err)
+ return err
+ }
+
+ waiter, cancel := blocks.BlockWaiter(ctx, blocks.CompletionChecker)
+ defer cancel()
+
+ tx, err := h.rootChainBinding.SubmitCheckpoint(auth, signedData, sigs)
+
+ if err != nil {
+ h.logger.Error("Error while submitting checkpoint", "err", err)
+ return err
+ }
+
+ block, err := waiter.Await(tx.Hash())
+
+ if err != nil {
+ h.logger.Error("Error while submitting checkpoint", "err", err)
+ return err
+ }
+
+ h.logger.Info("Submitted new checkpoint to rootchain successfully", "txHash", tx.Hash().String(), "block", block.Number)
+
+ return nil
+}
+
+func (h *Heimdall) handleRootHeaderBlock(event *contracts.TestRootChainNewHeaderBlock) error {
+ h.logger.Info("Received root header")
+
+ checkpointNumber := big.NewInt(0).Div(event.HeaderBlockId, big.NewInt(0).SetUint64(h.checkpointConfig.ChildBlockInterval))
+
+ h.logger.Info(
+ "✅ Received checkpoint-ack for heimdall",
+ "event", "NewHeaderBlock",
+ "start", event.Start,
+ "end", event.End,
+ "reward", event.Reward,
+ "root", hexutility.Bytes(event.Root[:]),
+ "proposer", event.Proposer.Hex(),
+ "checkpointNumber", checkpointNumber,
+ "txHash", event.Raw.TxHash,
+ "logIndex", uint64(event.Raw.Index),
+ )
+
+ // event checkpoint is older than or equal to latest checkpoint
+ if h.latestCheckpoint != nil && h.latestCheckpoint.EndBlock >= event.End.Uint64() {
+ h.logger.Debug("Checkpoint ack is already submitted", "start", event.Start, "end", event.End)
+ return nil
+ }
+
+ // create msg checkpoint ack message
+ ack := CheckpointAck{
+ //From libcommon.Address `json:"from"`
+ Number: checkpointNumber.Uint64(),
+ Proposer: event.Proposer,
+ StartBlock: event.Start.Uint64(),
+ EndBlock: event.End.Uint64(),
+ RootHash: event.Root,
+ TxHash: event.Raw.TxHash,
+ LogIndex: uint64(event.Raw.Index),
+ }
+
+ if ack.StartBlock != h.pendingCheckpoint.StartBlock.Uint64() {
+ h.logger.Error("Invalid start block", "startExpected", h.pendingCheckpoint.StartBlock, "startReceived", ack.StartBlock)
+ return fmt.Errorf("Invalid Checkpoint Ack: Invalid start block")
+ }
+
+ // Return err if start and end matches but contract root hash doesn't match
+ if ack.StartBlock == h.pendingCheckpoint.StartBlock.Uint64() &&
+ ack.EndBlock == h.pendingCheckpoint.EndBlock.Uint64() && ack.RootHash != h.pendingCheckpoint.RootHash {
+ h.logger.Error("Invalid ACK",
+ "startExpected", h.pendingCheckpoint.StartBlock,
+ "startReceived", ack.StartBlock,
+ "endExpected", h.pendingCheckpoint.EndBlock,
+ "endReceived", ack.StartBlock,
+ "rootExpected", h.pendingCheckpoint.RootHash.String(),
+ "rootRecieved", ack.RootHash.String(),
+ )
+
+ return fmt.Errorf("Invalid Checkpoint Ack: Invalid root hash")
+ }
+
+ h.latestCheckpoint = &ack
+
+ h.ackWaiter.Broadcast()
+
+ return nil
+}
diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go
new file mode 100644
index 00000000000..0581a703949
--- /dev/null
+++ b/cmd/devnet/services/polygon/heimdall.go
@@ -0,0 +1,435 @@
+package polygon
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync"
+ "time"
+
+ ethereum "github.com/ledgerwatch/erigon"
+ "github.com/ledgerwatch/erigon-lib/chain"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/blocks"
+ "github.com/ledgerwatch/erigon/cmd/devnet/contracts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc"
+ "github.com/ledgerwatch/erigon/consensus/bor/valset"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type BridgeEvent string
+
+var BridgeEvents = struct {
+ StakingEvent BridgeEvent
+ TopupEvent BridgeEvent
+ ClerkEvent BridgeEvent
+ SlashingEvent BridgeEvent
+}{
+ StakingEvent: "staking",
+ TopupEvent: "topup",
+ ClerkEvent: "clerk",
+ SlashingEvent: "slashing",
+}
+
+type syncRecordKey struct {
+ hash libcommon.Hash
+ index uint64
+}
+
+const (
+ DefaultRootChainTxConfirmations uint64 = 6
+ DefaultChildChainTxConfirmations uint64 = 10
+ DefaultAvgCheckpointLength uint64 = 256
+ DefaultMaxCheckpointLength uint64 = 1024
+ DefaultChildBlockInterval uint64 = 10000
+ DefaultCheckpointBufferTime time.Duration = 1000 * time.Second
+)
+
+const HeimdallGrpcAddressDefault = "localhost:8540"
+
+type CheckpointConfig struct {
+ RootChainTxConfirmations uint64
+ ChildChainTxConfirmations uint64
+ ChildBlockInterval uint64
+ AvgCheckpointLength uint64
+ MaxCheckpointLength uint64
+ CheckpointBufferTime time.Duration
+ CheckpointAccount *accounts.Account
+}
+
+type Heimdall struct {
+ sync.Mutex
+ chainConfig *chain.Config
+ grpcAddr string
+ validatorSet *valset.ValidatorSet
+ pendingCheckpoint *checkpoint.Checkpoint
+ latestCheckpoint *CheckpointAck
+ ackWaiter *sync.Cond
+ currentSpan *span.HeimdallSpan
+ spans map[uint64]*span.HeimdallSpan
+ logger log.Logger
+ cancelFunc context.CancelFunc
+ syncSenderAddress libcommon.Address
+ syncSenderBinding *contracts.TestStateSender
+ rootChainAddress libcommon.Address
+ rootChainBinding *contracts.TestRootChain
+ syncSubscription ethereum.Subscription
+ rootHeaderBlockSub ethereum.Subscription
+ childHeaderSub ethereum.Subscription
+ pendingSyncRecords map[syncRecordKey]*EventRecordWithBlock
+ checkpointConfig CheckpointConfig
+ startTime time.Time
+}
+
+func NewHeimdall(
+ chainConfig *chain.Config,
+ grpcAddr string,
+ checkpointConfig *CheckpointConfig,
+ logger log.Logger,
+) *Heimdall {
+ heimdall := &Heimdall{
+ chainConfig: chainConfig,
+ grpcAddr: grpcAddr,
+ checkpointConfig: *checkpointConfig,
+ spans: map[uint64]*span.HeimdallSpan{},
+ pendingSyncRecords: map[syncRecordKey]*EventRecordWithBlock{},
+ logger: logger}
+
+ heimdall.ackWaiter = sync.NewCond(heimdall)
+
+ if heimdall.checkpointConfig.RootChainTxConfirmations == 0 {
+ heimdall.checkpointConfig.RootChainTxConfirmations = DefaultRootChainTxConfirmations
+ }
+
+ if heimdall.checkpointConfig.ChildChainTxConfirmations == 0 {
+ heimdall.checkpointConfig.ChildChainTxConfirmations = DefaultChildChainTxConfirmations
+ }
+
+ if heimdall.checkpointConfig.ChildBlockInterval == 0 {
+ heimdall.checkpointConfig.ChildBlockInterval = DefaultChildBlockInterval
+ }
+
+ if heimdall.checkpointConfig.AvgCheckpointLength == 0 {
+ heimdall.checkpointConfig.AvgCheckpointLength = DefaultAvgCheckpointLength
+ }
+
+ if heimdall.checkpointConfig.MaxCheckpointLength == 0 {
+ heimdall.checkpointConfig.MaxCheckpointLength = DefaultMaxCheckpointLength
+ }
+
+ if heimdall.checkpointConfig.CheckpointBufferTime == 0 {
+ heimdall.checkpointConfig.CheckpointBufferTime = DefaultCheckpointBufferTime
+ }
+
+ if heimdall.checkpointConfig.CheckpointAccount == nil {
+ heimdall.checkpointConfig.CheckpointAccount = accounts.NewAccount("checkpoint-owner")
+ }
+
+ return heimdall
+}
+
+func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ if span, ok := h.spans[spanID]; ok {
+ h.currentSpan = span
+ return span, nil
+ }
+
+ var nextSpan = span.Span{
+ ID: spanID,
+ }
+
+ if h.currentSpan == nil || spanID == 0 {
+ nextSpan.StartBlock = 1 //256
+ } else {
+ if spanID != h.currentSpan.ID+1 {
+ return nil, fmt.Errorf("Can't initialize span: non consecutive span")
+ }
+
+ nextSpan.StartBlock = h.currentSpan.EndBlock + 1
+ }
+
+ nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1
+
+ // TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/
+
+ selectedProducers := make([]valset.Validator, len(h.validatorSet.Validators))
+
+ for i, v := range h.validatorSet.Validators {
+ selectedProducers[i] = *v
+ }
+
+ h.currentSpan = &span.HeimdallSpan{
+ Span: nextSpan,
+ ValidatorSet: *h.validatorSet,
+ SelectedProducers: selectedProducers,
+ ChainID: h.chainConfig.ChainID.String(),
+ }
+
+ h.spans[h.currentSpan.ID] = h.currentSpan
+
+ return h.currentSpan, nil
+}
+
+func (h *Heimdall) currentSprintLength() int {
+ if h.currentSpan != nil {
+ return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock))
+ }
+
+ return int(h.chainConfig.Bor.CalculateSprint(256))
+}
+
+func (h *Heimdall) getSpanOverrideHeight() uint64 {
+ return 0
+ //MainChain: 8664000
+ //MumbaiChain: 10205000
+}
+
+func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) {
+ return nil, fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) {
+ return 0, fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) {
+ return nil, fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) {
+ return 0, fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error {
+ return fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) {
+ return "", fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error {
+ return fmt.Errorf("TODO")
+}
+
+func (h *Heimdall) Close() {
+ h.unsubscribe()
+}
+
+func (h *Heimdall) unsubscribe() {
+ h.Lock()
+ defer h.Unlock()
+
+ if h.syncSubscription != nil {
+ syncSubscription := h.syncSubscription
+ h.syncSubscription = nil
+ syncSubscription.Unsubscribe()
+ }
+
+ if h.rootHeaderBlockSub != nil {
+ rootHeaderBlockSub := h.rootHeaderBlockSub
+ h.rootHeaderBlockSub = nil
+ rootHeaderBlockSub.Unsubscribe()
+ }
+
+ if h.childHeaderSub != nil {
+ childHeaderSub := h.childHeaderSub
+ h.childHeaderSub = nil
+ childHeaderSub.Unsubscribe()
+ }
+}
+
+func (h *Heimdall) StateSenderAddress() libcommon.Address {
+ return h.syncSenderAddress
+}
+
+func (f *Heimdall) StateSenderContract() *contracts.TestStateSender {
+ return f.syncSenderBinding
+}
+
+func (h *Heimdall) RootChainAddress() libcommon.Address {
+ return h.rootChainAddress
+}
+
+func (h *Heimdall) NodeCreated(ctx context.Context, node devnet.Node) {
+ h.Lock()
+ defer h.Unlock()
+
+ if strings.HasPrefix(node.GetName(), "bor") && node.IsBlockProducer() && node.Account() != nil {
+ // TODO configurable voting power
+ h.addValidator(node.Account().Address, 1000, 0)
+ }
+}
+
+func (h *Heimdall) NodeStarted(ctx context.Context, node devnet.Node) {
+ if h.validatorSet == nil {
+ panic("Heimdall devnet service: unexpected empty validator set! Call addValidator() before starting nodes.")
+ }
+
+ if !strings.HasPrefix(node.GetName(), "bor") && node.IsBlockProducer() {
+ h.Lock()
+ defer h.Unlock()
+
+ if h.syncSenderBinding != nil {
+ return
+ }
+
+ h.startTime = time.Now().UTC()
+
+ transactOpts, err := bind.NewKeyedTransactorWithChainID(accounts.SigKey(node.Account().Address), node.ChainID())
+
+ if err != nil {
+ h.Unlock()
+ h.unsubscribe()
+ h.Lock()
+ h.logger.Error("Failed to deploy state sender", "err", err)
+ return
+ }
+
+ deployCtx := devnet.WithCurrentNode(ctx, node)
+ waiter, cancel := blocks.BlockWaiter(deployCtx, contracts.DeploymentChecker)
+
+ address, syncTx, syncContract, err := contracts.DeployWithOps(deployCtx, transactOpts, contracts.DeployTestStateSender)
+
+ if err != nil {
+ h.logger.Error("Failed to deploy state sender", "err", err)
+ cancel()
+ return
+ }
+
+ h.syncSenderAddress = address
+ h.syncSenderBinding = syncContract
+
+ address, rootChainTx, rootChainContract, err := contracts.DeployWithOps(deployCtx, transactOpts, contracts.DeployTestRootChain)
+
+ if err != nil {
+ h.syncSenderBinding = nil
+ h.logger.Error("Failed to deploy root chain", "err", err)
+ cancel()
+ return
+ }
+
+ h.rootChainAddress = address
+ h.rootChainBinding = rootChainContract
+
+ go func() {
+ defer cancel()
+ blocks, err := waiter.AwaitMany(syncTx.Hash(), rootChainTx.Hash())
+
+ if err != nil {
+ h.syncSenderBinding = nil
+ h.logger.Error("Failed to deploy root contracts", "err", err)
+ return
+ }
+
+ h.logger.Info("RootChain deployed", "chain", h.chainConfig.ChainName, "block", blocks[syncTx.Hash()].Number, "addr", h.rootChainAddress)
+ h.logger.Info("StateSender deployed", "chain", h.chainConfig.ChainName, "block", blocks[syncTx.Hash()].Number, "addr", h.syncSenderAddress)
+
+ go h.startStateSyncSubscription()
+ go h.startChildHeaderSubscription(deployCtx)
+ go h.startRootHeaderBlockSubscription()
+ }()
+ }
+}
+
+func (h *Heimdall) addValidator(validatorAddress libcommon.Address, votingPower int64, proposerPriority int64) {
+
+ if h.validatorSet == nil {
+ h.validatorSet = valset.NewValidatorSet([]*valset.Validator{
+ {
+ ID: 1,
+ Address: validatorAddress,
+ VotingPower: votingPower,
+ ProposerPriority: proposerPriority,
+ },
+ }, h.logger)
+ } else {
+ h.validatorSet.UpdateWithChangeSet([]*valset.Validator{
+ {
+ ID: uint64(len(h.validatorSet.Validators) + 1),
+ Address: validatorAddress,
+ VotingPower: votingPower,
+ ProposerPriority: proposerPriority,
+ },
+ }, h.logger)
+ }
+}
+
+func (h *Heimdall) Start(ctx context.Context) error {
+ h.Lock()
+ if h.cancelFunc != nil {
+ h.Unlock()
+ return nil
+ }
+ ctx, h.cancelFunc = context.WithCancel(ctx)
+ h.Unlock()
+
+ // if this is a restart
+ h.unsubscribe()
+
+ return heimdallgrpc.StartHeimdallServer(ctx, h, h.grpcAddr, h.logger)
+}
+
+func (h *Heimdall) Stop() {
+ var cancel context.CancelFunc
+
+ h.Lock()
+ if h.cancelFunc != nil {
+ cancel = h.cancelFunc
+ h.cancelFunc = nil
+ }
+
+ h.Unlock()
+
+ if cancel != nil {
+ cancel()
+ }
+}
+
+func (h *Heimdall) AwaitCheckpoint(ctx context.Context, blockNumber *big.Int) error {
+ h.Lock()
+ defer h.Unlock()
+
+ if ctx.Done() != nil {
+ go func() {
+ defer h.ackWaiter.Broadcast()
+ <-ctx.Done()
+ }()
+ }
+
+ for h.latestCheckpoint == nil || h.latestCheckpoint.EndBlock < blockNumber.Uint64() {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ h.ackWaiter.Wait()
+ }
+
+ return nil
+}
+
+func (h *Heimdall) isOldTx(txHash libcommon.Hash, logIndex uint64, eventType BridgeEvent, event interface{}) (bool, error) {
+
+ // define the endpoint based on the type of event
+ var status bool
+
+ switch eventType {
+ case BridgeEvents.StakingEvent:
+ case BridgeEvents.TopupEvent:
+ case BridgeEvents.ClerkEvent:
+ _, status = h.pendingSyncRecords[syncRecordKey{txHash, logIndex}]
+ case BridgeEvents.SlashingEvent:
+ }
+
+ return status, nil
+}
diff --git a/cmd/devnet/services/polygon/proofgenerator.go b/cmd/devnet/services/polygon/proofgenerator.go
new file mode 100644
index 00000000000..5b8d207af91
--- /dev/null
+++ b/cmd/devnet/services/polygon/proofgenerator.go
@@ -0,0 +1,594 @@
+package polygon
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "strings"
+ "sync"
+
+ "github.com/ledgerwatch/erigon/cl/merkle_tree"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/crypto"
+ "github.com/ledgerwatch/erigon/rlp"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
+ "github.com/ledgerwatch/erigon/turbo/trie"
+)
+
+var ErrTokenIndexOutOfRange = errors.New("Index is grater than the number of tokens in transaction")
+
+type ProofGenerator struct {
+ heimdall *Heimdall
+}
+
+func NewProofGenerator() *ProofGenerator {
+ return &ProofGenerator{}
+}
+
+func (pg *ProofGenerator) NodeCreated(ctx context.Context, node devnet.Node) {
+
+ if pg.heimdall == nil {
+ if strings.HasPrefix(node.GetName(), "bor") {
+ if network := devnet.CurrentNetwork(ctx); network != nil {
+ for _, service := range network.Services {
+ if heimdall, ok := service.(*Heimdall); ok {
+ pg.heimdall = heimdall
+ }
+ }
+ }
+ }
+ }
+}
+
+func (pg *ProofGenerator) NodeStarted(ctx context.Context, node devnet.Node) {
+}
+
+func (pg *ProofGenerator) Start(ctx context.Context) error {
+ return nil
+}
+
+func (pg *ProofGenerator) Stop() {
+}
+
+func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash libcommon.Hash, eventSignature libcommon.Hash, tokenIndex int) ([]byte, error) {
+ logger := devnet.Logger(ctx)
+
+ if pg.heimdall == nil || pg.heimdall.rootChainBinding == nil {
+ return nil, fmt.Errorf("ProofGenerator not initialized")
+ }
+
+ logger.Info("Checking for checkpoint status", "hash", burnTxHash)
+
+ isCheckpointed, err := pg.isCheckPointed(ctx, burnTxHash)
+
+ if err != nil {
+ return nil, fmt.Errorf("error getting burn transaction: %w", err)
+ }
+
+ if !isCheckpointed {
+ return nil, fmt.Errorf("eurn transaction has not been checkpointed yet")
+ }
+
+ // build payload for exit
+ result, err := pg.buildPayloadForExit(ctx, burnTxHash, eventSignature, tokenIndex)
+
+ if err != nil {
+ if errors.Is(err, ErrTokenIndexOutOfRange) {
+ return nil, fmt.Errorf("block not included: %w", err)
+ }
+
+ return nil, fmt.Errorf("null receipt received")
+ }
+
+ if len(result) == 0 {
+ return nil, fmt.Errorf("null result received")
+ }
+
+ return result, nil
+}
+
+func (pg *ProofGenerator) getChainBlockInfo(ctx context.Context, burnTxHash libcommon.Hash) (uint64, uint64, error) {
+ childNode := devnet.SelectBlockProducer(devnet.WithCurrentNetwork(ctx, networkname.BorDevnetChainName))
+
+ var wg sync.WaitGroup
+
+ var lastChild *big.Int
+ var burnTransaction *jsonrpc.RPCTransaction
+ var err [2]error
+
+ // err group
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ lastChild, err[0] = pg.heimdall.rootChainBinding.GetLastChildBlock(&bind.CallOpts{})
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ burnTransaction, err[1] = childNode.GetTransactionByHash(burnTxHash)
+ }()
+
+ wg.Wait()
+
+ for _, err := range err {
+ if err != nil {
+ return 0, 0, err
+ }
+ }
+
+ return lastChild.Uint64(), burnTransaction.BlockNumber.Uint64(), nil
+}
+
+// lastchild block is greater equal to transacton block number;
+func (pg *ProofGenerator) isCheckPointed(ctx context.Context, burnTxHash libcommon.Hash) (bool, error) {
+ lastChildBlockNum, burnTxBlockNum, err := pg.getChainBlockInfo(ctx, burnTxHash)
+
+ if err != nil {
+ return false, err
+ }
+
+ return lastChildBlockNum >= burnTxBlockNum, nil
+}
+
+func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash libcommon.Hash, logEventSig libcommon.Hash, index int) ([]byte, error) {
+
+ node := devnet.SelectBlockProducer(ctx)
+
+ if node == nil {
+ return nil, fmt.Errorf("no node available")
+ }
+
+ if index < 0 {
+ return nil, fmt.Errorf("index must not negative")
+ }
+
+ var receipt *types.Receipt
+ var block *requests.Block
+
+ // step 1 - Get Block number from transaction hash
+ lastChildBlockNum, txBlockNum, err := pg.getChainBlockInfo(ctx, burnTxHash)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if lastChildBlockNum < txBlockNum {
+ return nil, fmt.Errorf("burn transaction has not been checkpointed as yet")
+ }
+
+ // step 2- get transaction receipt from txhash and
+ // block information from block number
+
+ g, gctx := errgroup.WithContext(ctx)
+ g.SetLimit(2)
+
+ g.Go(func() error {
+ var err error
+ receipt, err = node.GetTransactionReceipt(gctx, burnTxHash)
+ return err
+ })
+
+ g.Go(func() error {
+ var err error
+ block, err = node.GetBlockByNumber(gctx, rpc.AsBlockNumber(txBlockNum), true)
+ return err
+ })
+
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+
+ // step 3 - get information about block saved in parent chain
+ // step 4 - build block proof
+ var rootBlockNumber uint64
+ var start, end uint64
+
+ rootBlockNumber, start, end, err = pg.getRootBlockInfo(txBlockNum)
+
+ if err != nil {
+ return nil, err
+ }
+
+ blockProofs, err := getBlockProofs(ctx, node, txBlockNum, start, end)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // step 5- create receipt proof
+ receiptProof, err := getReceiptProof(ctx, node, receipt, block, nil)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // step 6 - encode payload, convert into hex
+ var logIndex int
+
+ if index > 0 {
+ logIndices := getAllLogIndices(logEventSig, receipt)
+
+ if index >= len(logIndices) {
+ return nil, ErrTokenIndexOutOfRange
+ }
+
+ logIndex = logIndices[index]
+ } else {
+ logIndex = getLogIndex(logEventSig, receipt)
+ }
+
+ if logIndex < 0 {
+ return nil, fmt.Errorf("log not found in receipt")
+ }
+
+ parentNodesBytes, err := rlp.EncodeToBytes(receiptProof.parentNodes)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return rlp.EncodeToBytes(
+ []interface{}{
+ rootBlockNumber,
+ hexutility.Encode(bytes.Join(blockProofs, []byte{})),
+ block.Number.Uint64(),
+ block.Time,
+ hexutility.Encode(block.TxHash[:]),
+ hexutility.Encode(block.ReceiptHash[:]),
+ hexutility.Encode(getReceiptBytes(receipt)), //rpl encoded
+ hexutility.Encode(parentNodesBytes),
+ hexutility.Encode(append([]byte{0}, receiptProof.path...)),
+ logIndex,
+ })
+}
+
+type receiptProof struct {
+ blockHash libcommon.Hash
+ parentNodes [][]byte
+ root []byte
+ path []byte
+ value interface{}
+}
+
+func getReceiptProof(ctx context.Context, node requests.RequestGenerator, receipt *types.Receipt, block *requests.Block, receipts []*types.Receipt) (*receiptProof, error) {
+ stateSyncTxHash := types.ComputeBorTxHash(block.Number.Uint64(), block.Hash)
+ receiptsTrie := trie.New(trie.EmptyRoot)
+
+ if len(receipts) == 0 {
+ g, gctx := errgroup.WithContext(ctx)
+ g.SetLimit(len(block.Transactions))
+
+ var lock sync.Mutex
+
+ for _, transaction := range block.Transactions {
+ if transaction.Hash == stateSyncTxHash {
+ // ignore if tx hash is bor state-sync tx
+ continue
+ }
+
+ hash := transaction.Hash
+ g.Go(func() error {
+ receipt, err := node.GetTransactionReceipt(gctx, hash)
+
+ if err != nil {
+ return err
+ }
+
+ path, _ := rlp.EncodeToBytes(receipt.TransactionIndex)
+ rawReceipt := getReceiptBytes(receipt)
+ lock.Lock()
+ defer lock.Unlock()
+ receiptsTrie.Update(path, rawReceipt)
+
+ return nil
+ })
+ }
+
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+ } else {
+ for _, receipt := range receipts {
+ path, _ := rlp.EncodeToBytes(receipt.TransactionIndex)
+ rawReceipt := getReceiptBytes(receipt)
+ receiptsTrie.Update(path, rawReceipt)
+ }
+ }
+
+ path, _ := rlp.EncodeToBytes(receipt.TransactionIndex)
+ result, parents, ok := receiptsTrie.FindPath(path)
+
+ if !ok {
+ return nil, fmt.Errorf("node does not contain the key")
+ }
+
+ var nodeValue any
+
+ if isTypedReceipt(receipt) {
+ nodeValue = result
+ } else {
+ rlp.DecodeBytes(result, nodeValue)
+ }
+
+ return &receiptProof{
+ blockHash: receipt.BlockHash,
+ parentNodes: parents,
+ root: block.ReceiptHash[:],
+ path: path,
+ value: nodeValue,
+ }, nil
+}
+
+func getBlockProofs(ctx context.Context, node requests.RequestGenerator, blockNumber, startBlock, endBlock uint64) ([][]byte, error) {
+ merkleTreeDepth := int(math.Ceil(math.Log2(float64(endBlock - startBlock + 1))))
+
+ // We generate the proof root down, whereas we need from leaf up
+ var reversedProof [][]byte
+
+ offset := startBlock
+ targetIndex := blockNumber - offset
+ leftBound := uint64(0)
+ rightBound := endBlock - offset
+
+ // console.log("Searching for", targetIndex);
+ for depth := 0; depth < merkleTreeDepth; depth++ {
+ nLeaves := uint64(2) << (merkleTreeDepth - depth)
+
+ // The pivot leaf is the last leaf which is included in the left subtree
+ pivotLeaf := leftBound + nLeaves/2 - 1
+
+ if targetIndex > pivotLeaf {
+ // Get the root hash to the merkle subtree to the left
+ newLeftBound := pivotLeaf + 1
+ subTreeMerkleRoot, err := node.GetRootHash(ctx, offset+leftBound, offset+pivotLeaf)
+
+ if err != nil {
+ return nil, err
+ }
+
+ reversedProof = append(reversedProof, subTreeMerkleRoot[:])
+ leftBound = newLeftBound
+ } else {
+ // Things are more complex when querying to the right.
+ // Root hash may come some layers down so we need to build a full tree by padding with zeros
+ // Some trees may be completely empty
+
+ var newRightBound uint64
+
+ if rightBound <= pivotLeaf {
+ newRightBound = rightBound
+ } else {
+ newRightBound = pivotLeaf
+ }
+
+ // Expect the merkle tree to have a height one less than the current layer
+ expectedHeight := merkleTreeDepth - (depth + 1)
+ if rightBound <= pivotLeaf {
+ // Tree is empty so we repeatedly hash zero to correct height
+ subTreeMerkleRoot := recursiveZeroHash(expectedHeight)
+ reversedProof = append(reversedProof, subTreeMerkleRoot[:])
+ } else {
+ // Height of tree given by RPC node
+ subTreeHeight := int(math.Ceil(math.Log2(float64(rightBound - pivotLeaf))))
+
+ // Find the difference in height between this and the subtree we want
+ heightDifference := expectedHeight - subTreeHeight
+
+ // For every extra layer we need to fill 2*n leaves filled with the merkle root of a zero-filled Merkle tree
+ // We need to build a tree which has heightDifference layers
+
+ // The first leaf will hold the root hash as returned by the RPC
+ remainingNodesHash, err := node.GetRootHash(ctx, offset+pivotLeaf+1, offset+rightBound)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // The remaining leaves will hold the merkle root of a zero-filled tree of height subTreeHeight
+ leafRoots := recursiveZeroHash(subTreeHeight)
+
+ // Build a merkle tree of correct size for the subtree using these merkle roots
+ var leafCount int
+
+ if heightDifference > 0 {
+ leafCount = 2 << heightDifference
+ } else {
+ leafCount = 1
+ }
+
+ leaves := make([]interface{}, leafCount)
+
+ leaves[0] = remainingNodesHash[:]
+
+ for i := 1; i < len(leaves); i++ {
+ leaves[i] = leafRoots[:]
+ }
+
+ subTreeMerkleRoot, err := merkle_tree.HashTreeRoot(leaves...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ reversedProof = append(reversedProof, subTreeMerkleRoot[:])
+ }
+
+ rightBound = newRightBound
+ }
+ }
+
+ for i, j := 0, len(reversedProof)-1; i < j; i, j = i+1, j-1 {
+ reversedProof[i], reversedProof[j] = reversedProof[j], reversedProof[i]
+ }
+
+ return reversedProof, nil
+}
+
+func recursiveZeroHash(n int) libcommon.Hash {
+ if n == 0 {
+ return libcommon.Hash{}
+ }
+
+ subHash := recursiveZeroHash(n - 1)
+ bytes, _ := rlp.EncodeToBytes([]libcommon.Hash{subHash, subHash})
+ return crypto.Keccak256Hash(bytes)
+}
+
+func getAllLogIndices(logEventSig libcommon.Hash, receipt *types.Receipt) []int {
+ var logIndices []int
+
+ switch logEventSig.Hex() {
+ case "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef":
+ case "0xf94915c6d1fd521cee85359239227480c7e8776d7caf1fc3bacad5c269b66a14":
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig &&
+ log.Topics[2] == zeroHash {
+ logIndices = append(logIndices, index)
+ }
+ }
+ case "0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62":
+ case "0x4a39dc06d4c0dbc64b70af90fd698a233a518aa5d07e595d983b8c0526c8f7fb":
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig &&
+ log.Topics[3] == zeroHash {
+ logIndices = append(logIndices, index)
+ }
+ }
+
+ case "0xf871896b17e9cb7a64941c62c188a4f5c621b86800e3d15452ece01ce56073df":
+ for index, log := range receipt.Logs {
+ if strings.EqualFold(hexutility.Encode(log.Topics[0][:]), "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") &&
+ log.Topics[2] == zeroHash {
+ logIndices = append(logIndices, index)
+ }
+ }
+
+ default:
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig {
+ logIndices = append(logIndices, index)
+ }
+ }
+ }
+
+ return logIndices
+}
+
+func getLogIndex(logEventSig libcommon.Hash, receipt *types.Receipt) int {
+ switch logEventSig.Hex() {
+ case "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef":
+ case "0xf94915c6d1fd521cee85359239227480c7e8776d7caf1fc3bacad5c269b66a14":
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig &&
+ log.Topics[2] == zeroHash {
+ return index
+ }
+ }
+
+ case "0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62":
+ case "0x4a39dc06d4c0dbc64b70af90fd698a233a518aa5d07e595d983b8c0526c8f7fb":
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig &&
+ log.Topics[3] == zeroHash {
+ return index
+ }
+ }
+
+ default:
+ for index, log := range receipt.Logs {
+ if log.Topics[0] == logEventSig {
+ return index
+ }
+ }
+ }
+
+ return -1
+}
+
+func (pg *ProofGenerator) getRootBlockInfo(txBlockNumber uint64) (rootBlockNumber uint64, start uint64, end uint64, err error) {
+ // find in which block child was included in parent
+ rootBlockNumber, err = pg.findRootBlockFromChild(txBlockNumber)
+
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ headerBlock, err := pg.heimdall.rootChainBinding.HeaderBlocks(&bind.CallOpts{}, big.NewInt(int64(rootBlockNumber)))
+
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ return rootBlockNumber, headerBlock.Start.Uint64(), headerBlock.End.Uint64(), nil
+}
+
+const checkPointInterval = uint64(10000)
+
+func (pg *ProofGenerator) findRootBlockFromChild(childBlockNumber uint64) (uint64, error) {
+ // first checkpoint id = start * 10000
+ start := uint64(1)
+
+ currentHeaderBlock, err := pg.heimdall.rootChainBinding.CurrentHeaderBlock(&bind.CallOpts{})
+
+ if err != nil {
+ return 0, err
+ }
+
+ end := currentHeaderBlock.Uint64() / checkPointInterval
+
+ // binary search on all the checkpoints to find the checkpoint that contains the childBlockNumber
+ var ans uint64
+
+ for start <= end {
+ if start == end {
+ ans = start
+ break
+ }
+
+ mid := (start + end) / 2
+ headerBlock, err := pg.heimdall.rootChainBinding.HeaderBlocks(&bind.CallOpts{}, big.NewInt(int64(mid*checkPointInterval)))
+
+ if err != nil {
+ return 0, err
+ }
+ headerStart := headerBlock.Start.Uint64()
+ headerEnd := headerBlock.End.Uint64()
+
+ if headerStart <= childBlockNumber && childBlockNumber <= headerEnd {
+ // if childBlockNumber is between the upper and lower bounds of the headerBlock, we found our answer
+ ans = mid
+ break
+ } else if headerStart > childBlockNumber {
+ // childBlockNumber was checkpointed before this header
+ end = mid - 1
+ } else if headerEnd < childBlockNumber {
+ // childBlockNumber was checkpointed after this header
+ start = mid + 1
+ }
+ }
+
+ return ans * checkPointInterval, nil
+}
+
+func isTypedReceipt(receipt *types.Receipt) bool {
+ return receipt.Status != 0 && receipt.Type != 0
+}
+
+func getReceiptBytes(receipt *types.Receipt) []byte {
+ buffer := &bytes.Buffer{}
+ receipt.EncodeRLP(buffer)
+ return buffer.Bytes()
+}
diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go
new file mode 100644
index 00000000000..f5e53cbd58d
--- /dev/null
+++ b/cmd/devnet/services/polygon/proofgenerator_test.go
@@ -0,0 +1,465 @@
+package polygon
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+ "math"
+ "math/big"
+ "sync"
+ "testing"
+
+ "github.com/holiman/uint256"
+ "github.com/ledgerwatch/erigon-lib/chain"
+ "github.com/ledgerwatch/erigon-lib/common"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
+ "github.com/ledgerwatch/erigon/cmd/devnet/blocks"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/consensus"
+ "github.com/ledgerwatch/erigon/consensus/bor"
+ "github.com/ledgerwatch/erigon/consensus/bor/contract"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
+ "github.com/ledgerwatch/erigon/consensus/bor/valset"
+ "github.com/ledgerwatch/erigon/core"
+ "github.com/ledgerwatch/erigon/core/rawdb"
+ "github.com/ledgerwatch/erigon/core/state"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/core/vm"
+ "github.com/ledgerwatch/erigon/crypto"
+ "github.com/ledgerwatch/erigon/params"
+ "github.com/ledgerwatch/erigon/rlp"
+ "github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/jsonrpc"
+ "github.com/ledgerwatch/erigon/turbo/services"
+ "github.com/ledgerwatch/erigon/turbo/stages/mock"
+ "github.com/ledgerwatch/erigon/turbo/transactions"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/pion/randutil"
+)
+
+type requestGenerator struct {
+ sync.Mutex
+ requests.NopRequestGenerator
+ sentry *mock.MockSentry
+ bor *bor.Bor
+ chain *core.ChainPack
+ txBlockMap map[libcommon.Hash]*types.Block
+}
+
+func newRequestGenerator(sentry *mock.MockSentry, chain *core.ChainPack) (*requestGenerator, error) {
+ db := memdb.New("")
+
+ if err := db.Update(context.Background(), func(tx kv.RwTx) error {
+ if err := rawdb.WriteHeader(tx, chain.TopBlock.Header()); err != nil {
+ return err
+ }
+ if err := rawdb.WriteHeadHeaderHash(tx, chain.TopBlock.Header().Hash()); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ reader := blockReader{
+ chain: chain,
+ }
+
+ return &requestGenerator{
+ chain: chain,
+ sentry: sentry,
+ bor: bor.NewRo(params.BorDevnetChainConfig, db, reader,
+ &spanner{
+ span.NewChainSpanner(contract.ValidatorSet(), params.BorDevnetChainConfig, false, log.Root()),
+ libcommon.Address{},
+ span.Span{}},
+ genesisContract{}, log.Root()),
+ txBlockMap: map[libcommon.Hash]*types.Block{},
+ }, nil
+}
+
+func (rg *requestGenerator) GetRootHash(ctx context.Context, startBlock uint64, endBlock uint64) (libcommon.Hash, error) {
+ tx, err := rg.bor.DB.BeginRo(context.Background())
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+ defer tx.Rollback()
+
+ result, err := rg.bor.GetRootHash(ctx, tx, startBlock, endBlock)
+
+ if err != nil {
+ return libcommon.Hash{}, err
+ }
+
+ return libcommon.HexToHash(result), nil
+}
+
+func (rg *requestGenerator) GetBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, withTxs bool) (*requests.Block, error) {
+ if bn := int(blockNum.Uint64()); bn < len(rg.chain.Blocks) {
+ block := rg.chain.Blocks[bn]
+
+ transactions := make([]*jsonrpc.RPCTransaction, len(block.Transactions()))
+
+ for i, tx := range block.Transactions() {
+ rg.txBlockMap[tx.Hash()] = block
+ transactions[i] = jsonrpc.NewRPCTransaction(tx, block.Hash(), blockNum.Uint64(), uint64(i), block.BaseFee())
+ }
+
+ return &requests.Block{
+ BlockWithTxHashes: requests.BlockWithTxHashes{
+ Header: block.Header(),
+ Hash: block.Hash(),
+ },
+ Transactions: transactions,
+ }, nil
+ }
+
+ return nil, fmt.Errorf("block %d not found", blockNum.Uint64())
+}
+
+func (rg *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libcommon.Hash) (*types.Receipt, error) {
+ rg.Lock()
+ defer rg.Unlock()
+
+ block, ok := rg.txBlockMap[hash]
+
+ if !ok {
+ return nil, fmt.Errorf("can't find block to tx: %s", hash)
+ }
+
+ engine := rg.bor
+ chainConfig := params.BorDevnetChainConfig
+
+ reader := blockReader{
+ chain: rg.chain,
+ }
+
+ tx, err := rg.sentry.DB.BeginRo(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ defer tx.Rollback()
+
+ _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, reader, tx, 0, false)
+
+ if err != nil {
+ return nil, err
+ }
+
+ var usedGas uint64
+ var usedBlobGas uint64
+
+ gp := new(core.GasPool).AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock())
+
+ noopWriter := state.NewNoopWriter()
+
+ getHeader := func(hash common.Hash, number uint64) *types.Header {
+ h, e := reader.Header(ctx, tx, hash, number)
+ if e != nil {
+ log.Error("getHeader error", "number", number, "hash", hash, "err", e)
+ }
+ return h
+ }
+
+ header := block.Header()
+
+ for i, txn := range block.Transactions() {
+
+ ibs.SetTxContext(txn.Hash(), block.Hash(), i)
+
+ receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, noopWriter, header, txn, &usedGas, &usedBlobGas, vm.Config{})
+
+ if err != nil {
+ return nil, err
+ }
+
+ if txn.Hash() == hash {
+ receipt.BlockHash = block.Hash()
+ return receipt, nil
+ }
+ }
+
+ return nil, fmt.Errorf("tx not found in block")
+}
+
+type blockReader struct {
+ services.FullBlockReader
+ chain *core.ChainPack
+}
+
+func (reader blockReader) BlockByNumber(ctx context.Context, db kv.Tx, number uint64) (*types.Block, error) {
+ if int(number) < len(reader.chain.Blocks) {
+ return reader.chain.Blocks[number], nil
+ }
+
+ return nil, fmt.Errorf("block not found")
+}
+
+func (reader blockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockNum uint64) (*types.Header, error) {
+ if int(blockNum) < len(reader.chain.Headers) {
+ return reader.chain.Headers[blockNum], nil
+ }
+
+ return nil, fmt.Errorf("header not found")
+}
+
+func TestMerkle(t *testing.T) {
+ startBlock := 1600
+ endBlock := 3200
+
+ if depth := int(math.Ceil(math.Log2(float64(endBlock - startBlock + 1)))); depth != 11 {
+ t.Fatal("Unexpected depth:", depth)
+ }
+
+ startBlock = 0
+ endBlock = 100000
+
+ if depth := int(math.Ceil(math.Log2(float64(endBlock - startBlock + 1)))); depth != 17 {
+ t.Fatal("Unexpected depth:", depth)
+ }
+
+ startBlock = 0
+ endBlock = 500000
+
+ if depth := int(math.Ceil(math.Log2(float64(endBlock - startBlock + 1)))); depth != 19 {
+ t.Fatal("Unexpected depth:", depth)
+ }
+}
+
+func TestBlockGeneration(t *testing.T) {
+
+ _, chain, err := generateBlocks(t, 1600)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reader := blockReader{
+ chain: chain,
+ }
+
+ for number := uint64(0); number < 1600; number++ {
+ _, err = reader.BlockByNumber(context.Background(), nil, number)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ header, err := reader.HeaderByNumber(context.Background(), nil, number)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if header == nil {
+ t.Fatalf("block header not found: %d", number)
+ }
+ }
+}
+
+type genesisContract struct {
+}
+
+func (g genesisContract) CommitState(event rlp.RawValue, syscall consensus.SystemCall) error {
+ return nil
+}
+
+func (g genesisContract) LastStateId(syscall consensus.SystemCall) (*big.Int, error) {
+ return big.NewInt(0), nil
+}
+
+type spanner struct {
+ *span.ChainSpanner
+ validatorAddress libcommon.Address
+ currentSpan span.Span
+}
+
+func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*span.Span, error) {
+ return &c.currentSpan, nil
+}
+
+func (c *spanner) CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.SystemCall) error {
+ c.currentSpan = heimdallSpan.Span
+ return nil
+}
+
+func (c *spanner) GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) {
+ return []*valset.Validator{
+ {
+ ID: 1,
+ Address: c.validatorAddress,
+ VotingPower: 1000,
+ ProposerPriority: 1,
+ }}, nil
+}
+
+func TestBlockProof(t *testing.T) {
+ sentry, chain, err := generateBlocks(t, 1600)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rg, err := newRequestGenerator(sentry, chain)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = rg.GetRootHash(context.Background(), 0, 1599)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blockProofs, err := getBlockProofs(context.Background(), rg, 10, 0, 1599)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len := len(blockProofs); len != 11 {
+ t.Fatal("Unexpected block depth:", len)
+ }
+
+ if len := len(bytes.Join(blockProofs, []byte{})); len != 352 {
+ t.Fatal("Unexpected proof len:", len)
+ }
+}
+
+func TestReceiptProof(t *testing.T) {
+ sentry, chain, err := generateBlocks(t, 10)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rg, err := newRequestGenerator(sentry, chain)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var block *requests.Block
+ var blockNo uint64
+
+ for block == nil {
+ block, err = rg.GetBlockByNumber(context.Background(), rpc.AsBlockNumber(blockNo), true)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(block.Transactions) == 0 {
+ block = nil
+ blockNo++
+ }
+ }
+
+ receipt, err := rg.GetTransactionReceipt(context.Background(), block.Transactions[len(block.Transactions)-1].Hash)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ receiptProof, err := getReceiptProof(context.Background(), rg, receipt, block, nil)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parentNodesBytes, err := rlp.EncodeToBytes(receiptProof.parentNodes)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fmt.Println(hexutility.Encode(parentNodesBytes), hexutility.Encode(append([]byte{0}, receiptProof.path...)))
+}
+
+func generateBlocks(t *testing.T, number int) (*mock.MockSentry, *core.ChainPack, error) {
+
+ data := getGenesis(3)
+
+ rand := randutil.NewMathRandomGenerator()
+
+ return blocks.GenerateBlocks(t, data.genesisSpec, number, map[int]blocks.TxGen{
+ 0: {
+ Fn: getBlockTx(data.addresses[0], data.addresses[1], uint256.NewInt(uint64(rand.Intn(5000))+1)),
+ Key: data.keys[0],
+ },
+ 1: {
+ Fn: getBlockTx(data.addresses[1], data.addresses[2], uint256.NewInt(uint64(rand.Intn(5000))+1)),
+ Key: data.keys[1],
+ },
+ 2: {
+ Fn: getBlockTx(data.addresses[2], data.addresses[0], uint256.NewInt(uint64(rand.Intn(5000))+1)),
+ Key: data.keys[2],
+ },
+ }, func(_ int) int {
+ return rand.Intn(10)
+ })
+}
+
+func getBlockTx(from libcommon.Address, to libcommon.Address, amount *uint256.Int) blocks.TxFn {
+ return func(block *core.BlockGen, _ bind.ContractBackend) (types.Transaction, bool) {
+ return types.NewTransaction(block.TxNonce(from), to, amount, 21000, new(uint256.Int), nil), false
+ }
+}
+
+type initialData struct {
+ keys []*ecdsa.PrivateKey
+ addresses []libcommon.Address
+ transactOpts []*bind.TransactOpts
+ genesisSpec *types.Genesis
+}
+
+func getGenesis(accounts int, funds ...*big.Int) initialData {
+ accountFunds := big.NewInt(1000000000)
+ if len(funds) > 0 {
+ accountFunds = funds[0]
+ }
+
+ keys := make([]*ecdsa.PrivateKey, accounts)
+
+ for i := 0; i < accounts; i++ {
+ keys[i], _ = crypto.GenerateKey()
+ }
+
+ addresses := make([]libcommon.Address, 0, len(keys))
+ transactOpts := make([]*bind.TransactOpts, 0, len(keys))
+ allocs := types.GenesisAlloc{}
+ for _, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ addresses = append(addresses, addr)
+ to, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1))
+ if err != nil {
+ panic(err)
+ }
+ transactOpts = append(transactOpts, to)
+
+ allocs[addr] = types.GenesisAccount{Balance: accountFunds}
+ }
+
+ return initialData{
+ keys: keys,
+ addresses: addresses,
+ transactOpts: transactOpts,
+ genesisSpec: &types.Genesis{
+ Config: &chain.Config{
+ ChainID: big.NewInt(1),
+ HomesteadBlock: new(big.Int),
+ TangerineWhistleBlock: new(big.Int),
+ SpuriousDragonBlock: big.NewInt(1),
+ ByzantiumBlock: big.NewInt(1),
+ ConstantinopleBlock: big.NewInt(1),
+ },
+ Alloc: allocs,
+ },
+ }
+}
diff --git a/cmd/devnet/services/bor/statesync.go b/cmd/devnet/services/polygon/statesync.go
similarity index 77%
rename from cmd/devnet/services/bor/statesync.go
rename to cmd/devnet/services/polygon/statesync.go
index d212fdd4bfe..0429f5085db 100644
--- a/cmd/devnet/services/bor/statesync.go
+++ b/cmd/devnet/services/polygon/statesync.go
@@ -1,4 +1,4 @@
-package bor
+package polygon
import (
"context"
@@ -7,7 +7,7 @@ import (
"sort"
"time"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/accounts/abi/bind"
"github.com/ledgerwatch/erigon/cmd/devnet/contracts"
"github.com/ledgerwatch/erigon/consensus/bor/clerk"
)
@@ -23,6 +23,25 @@ type EventRecordWithBlock struct {
BlockNumber uint64
}
+func (h *Heimdall) startStateSyncSubscription() {
+ var err error
+ syncChan := make(chan *contracts.TestStateSenderStateSynced, 100)
+
+ h.syncSubscription, err = h.syncSenderBinding.WatchStateSynced(&bind.WatchOpts{}, syncChan, nil, nil)
+
+ if err != nil {
+ h.unsubscribe()
+ h.logger.Error("Failed to subscribe to sync events", "err", err)
+ return
+ }
+
+ for stateSyncedEvent := range syncChan {
+ if err := h.handleStateSynced(stateSyncedEvent); err != nil {
+ h.logger.Error("L1 sync event processing failed", "event", stateSyncedEvent.Raw.Index, "err", err)
+ }
+ }
+}
+
func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64, limit int) (uint64, []*clerk.EventRecordWithTime, error) {
h.Lock()
defer h.Unlock()
@@ -31,11 +50,19 @@ func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64,
//var removalKeys []syncRecordKey
+ var minEventTime *time.Time
+
for _ /*key*/, event := range h.pendingSyncRecords {
if event.ID >= fromID {
- if event.Time.Unix() <= to {
+ if event.Time.Unix() < to {
events = append(events, event)
}
+
+ eventTime := event.Time.Round(1 * time.Second)
+
+ if minEventTime == nil || eventTime.Before(*minEventTime) {
+ minEventTime = &eventTime
+ }
}
//else {
//removalKeys = append(removalKeys, key)
@@ -43,7 +70,8 @@ func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64,
}
if len(events) == 0 {
- h.logger.Info("Processed sync request", "from", fromID, "to", time.Unix(to, 0), "pending", len(h.pendingSyncRecords), "filtered", len(events))
+ h.logger.Info("Processed sync request", "from", fromID, "to", time.Unix(to, 0), "min-time", minEventTime,
+ "pending", len(h.pendingSyncRecords), "filtered", len(events))
return 0, nil, nil
}
@@ -66,7 +94,7 @@ func (h *Heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64,
//}
h.logger.Info("Processed sync request",
- "from", fromID, "to", time.Unix(to, 0),
+ "from", fromID, "to", time.Unix(to, 0), "min-time", minEventTime,
"pending", len(h.pendingSyncRecords), "filtered", len(events),
"sent", fmt.Sprintf("%d-%d", events[0].ID, events[len(events)-1].ID))
@@ -132,19 +160,3 @@ func (h *Heimdall) handleStateSynced(event *contracts.TestStateSenderStateSynced
return nil
}
-
-func (h *Heimdall) isOldTx(txHash libcommon.Hash, logIndex uint64, eventType BridgeEvent, event interface{}) (bool, error) {
-
- // define the endpoint based on the type of event
- var status bool
-
- switch eventType {
- case BridgeEvents.StakingEvent:
- case BridgeEvents.TopupEvent:
- case BridgeEvents.ClerkEvent:
- _, status = h.pendingSyncRecords[syncRecordKey{txHash, logIndex}]
- case BridgeEvents.SlashingEvent:
- }
-
- return status, nil
-}
diff --git a/cmd/devnet/services/bor/util.go b/cmd/devnet/services/polygon/util.go
similarity index 99%
rename from cmd/devnet/services/bor/util.go
rename to cmd/devnet/services/polygon/util.go
index 386e63da5ba..2c5f2799313 100644
--- a/cmd/devnet/services/bor/util.go
+++ b/cmd/devnet/services/polygon/util.go
@@ -1,4 +1,4 @@
-package bor
+package polygon
import (
"errors"
diff --git a/cmd/devnet/tests/bor/devnet_test.go b/cmd/devnet/tests/bor/devnet_test.go
new file mode 100644
index 00000000000..ad43f982c28
--- /dev/null
+++ b/cmd/devnet/tests/bor/devnet_test.go
@@ -0,0 +1,88 @@
+//go:build integration
+
+package bor
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ accounts_steps "github.com/ledgerwatch/erigon/cmd/devnet/accounts/steps"
+ contracts_steps "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services"
+ "github.com/ledgerwatch/erigon/cmd/devnet/tests"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStateSync(t *testing.T) {
+ t.Skip("FIXME: hangs in GenerateSyncEvents without any visible progress")
+
+ runCtx, err := tests.ContextStart(t, networkname.BorDevnetChainName)
+ require.Nil(t, err)
+ var ctx context.Context = runCtx
+
+ t.Run("InitSubscriptions", func(t *testing.T) {
+ services.InitSubscriptions(ctx, []requests.SubMethod{requests.Methods.ETHNewHeads})
+ })
+ t.Run("CreateAccountWithFunds", func(t *testing.T) {
+ _, err := accounts_steps.CreateAccountWithFunds(ctx, networkname.DevChainName, "root-funder", 200.0)
+ require.Nil(t, err)
+ })
+ t.Run("CreateAccountWithFunds", func(t *testing.T) {
+ _, err := accounts_steps.CreateAccountWithFunds(ctx, networkname.BorDevnetChainName, "child-funder", 200.0)
+ require.Nil(t, err)
+ })
+ t.Run("DeployChildChainReceiver", func(t *testing.T) {
+ var err error
+ ctx, err = contracts_steps.DeployChildChainReceiver(ctx, "child-funder")
+ require.Nil(t, err)
+ })
+ t.Run("DeployRootChainSender", func(t *testing.T) {
+ var err error
+ ctx, err = contracts_steps.DeployRootChainSender(ctx, "root-funder")
+ require.Nil(t, err)
+ })
+ t.Run("GenerateSyncEvents", func(t *testing.T) {
+ require.Nil(t, contracts_steps.GenerateSyncEvents(ctx, "root-funder", 10, 2, 2))
+ })
+ t.Run("ProcessRootTransfers", func(t *testing.T) {
+ require.Nil(t, contracts_steps.ProcessRootTransfers(ctx, "root-funder", 10, 2, 2))
+ })
+ t.Run("BatchProcessRootTransfers", func(t *testing.T) {
+ require.Nil(t, contracts_steps.BatchProcessRootTransfers(ctx, "root-funder", 1, 10, 2, 2))
+ })
+}
+
+func TestChildChainExit(t *testing.T) {
+ t.Skip("FIXME: step CreateAccountWithFunds fails: Failed to get transfer tx: failed to search reserves for hashes: no block heads subscription")
+
+ runCtx, err := tests.ContextStart(t, networkname.BorDevnetChainName)
+ require.Nil(t, err)
+ var ctx context.Context = runCtx
+
+ t.Run("CreateAccountWithFunds", func(t *testing.T) {
+ _, err := accounts_steps.CreateAccountWithFunds(ctx, networkname.DevChainName, "root-funder", 200.0)
+ require.Nil(t, err)
+ })
+ t.Run("CreateAccountWithFunds", func(t *testing.T) {
+ _, err := accounts_steps.CreateAccountWithFunds(ctx, networkname.BorDevnetChainName, "child-funder", 200.0)
+ require.Nil(t, err)
+ })
+ t.Run("DeployRootChainReceiver", func(t *testing.T) {
+ var err error
+ ctx, err = contracts_steps.DeployRootChainReceiver(ctx, "root-funder")
+ require.Nil(t, err)
+ })
+ t.Run("DeployChildChainSender", func(t *testing.T) {
+ var err error
+ ctx, err = contracts_steps.DeployChildChainSender(ctx, "child-funder")
+ require.Nil(t, err)
+ })
+ t.Run("ProcessChildTransfers", func(t *testing.T) {
+ require.Nil(t, contracts_steps.ProcessChildTransfers(ctx, "child-funder", 1, 2, 2))
+ })
+ //t.Run("BatchProcessTransfers", func(t *testing.T) {
+ // require.Nil(t, contracts_steps.BatchProcessTransfers(ctx, "child-funder", 1, 10, 2, 2))
+ //})
+}
diff --git a/cmd/devnet/tests/context.go b/cmd/devnet/tests/context.go
new file mode 100644
index 00000000000..7a1a27f645b
--- /dev/null
+++ b/cmd/devnet/tests/context.go
@@ -0,0 +1,66 @@
+package tests
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
+ "github.com/ledgerwatch/erigon/turbo/debug"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Devnet, error) {
+ const baseRpcHost = "localhost"
+ const baseRpcPort = 8545
+
+ switch chainName {
+ case networkname.BorDevnetChainName:
+ heimdallGrpcAddr := polygon.HeimdallGrpcAddressDefault
+ const sprintSize uint64 = 0
+ return NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil
+
+ case networkname.DevChainName:
+ return NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil
+
+ case "":
+ envChainName, _ := os.LookupEnv("DEVNET_CHAIN")
+ if envChainName == "" {
+ envChainName = networkname.DevChainName
+ }
+ return initDevnet(envChainName, dataDir, logger)
+
+ default:
+ return nil, fmt.Errorf("unknown network: '%s'", chainName)
+ }
+}
+
+func ContextStart(t *testing.T, chainName string) (devnet.Context, error) {
+ if runtime.GOOS == "windows" {
+ t.Skip("FIXME: TempDir RemoveAll cleanup error: remove dev-0\\clique\\db\\clique\\mdbx.dat: The process cannot access the file because it is being used by another process")
+ }
+
+ debug.RaiseFdLimit()
+ logger := log.New()
+ dataDir := t.TempDir()
+
+ var network devnet.Devnet
+ network, err := initDevnet(chainName, dataDir, logger)
+ if err != nil {
+ return nil, fmt.Errorf("ContextStart initDevnet failed: %w", err)
+ }
+
+ runCtx, err := network.Start(logger)
+ if err != nil {
+ return nil, fmt.Errorf("ContextStart devnet start failed: %w", err)
+ }
+
+ t.Cleanup(services.UnsubscribeAll)
+ t.Cleanup(network.Stop)
+
+ return runCtx, nil
+}
diff --git a/cmd/devnet/tests/devnet_bor.go b/cmd/devnet/tests/devnet_bor.go
new file mode 100644
index 00000000000..003c662742b
--- /dev/null
+++ b/cmd/devnet/tests/devnet_bor.go
@@ -0,0 +1,222 @@
+package tests
+
+import (
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/args"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ account_services "github.com/ledgerwatch/erigon/cmd/devnet/services/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/params"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func NewBorDevnetWithoutHeimdall(
+ dataDir string,
+ baseRpcHost string,
+ baseRpcPort int,
+ logger log.Logger,
+) devnet.Devnet {
+ faucetSource := accounts.NewAccount("faucet-source")
+
+ network := devnet.Network{
+ DataDir: dataDir,
+ Chain: networkname.BorDevnetChainName,
+ Logger: logger,
+ BasePort: 40303,
+ BasePrivateApiAddr: "localhost:10090",
+ BaseRPCHost: baseRpcHost,
+ BaseRPCPort: baseRpcPort,
+ //Snapshots: true,
+ Alloc: types.GenesisAlloc{
+ faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
+ },
+ Services: []devnet.Service{
+ account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource),
+ },
+ Nodes: []devnet.Node{
+ &args.BlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ WithoutHeimdall: true,
+ },
+ AccountSlots: 200,
+ },
+ &args.NonBlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ WithoutHeimdall: true,
+ },
+ },
+ },
+ }
+
+ return devnet.Devnet{&network}
+}
+
+func NewBorDevnetWithHeimdall(
+ dataDir string,
+ baseRpcHost string,
+ baseRpcPort int,
+ heimdall *polygon.Heimdall,
+ heimdallGrpcAddr string,
+ checkpointOwner *accounts.Account,
+ withMilestones bool,
+ logger log.Logger,
+) devnet.Devnet {
+ faucetSource := accounts.NewAccount("faucet-source")
+
+ var services []devnet.Service
+ if heimdall != nil {
+ services = append(services, heimdall)
+ }
+
+ borNetwork := devnet.Network{
+ DataDir: dataDir,
+ Chain: networkname.BorDevnetChainName,
+ Logger: logger,
+ BasePort: 40303,
+ BasePrivateApiAddr: "localhost:10090",
+ BaseRPCHost: baseRpcHost,
+ BaseRPCPort: baseRpcPort,
+ BorStateSyncDelay: 5 * time.Second,
+ BorWithMilestones: &withMilestones,
+ Services: append(services, account_services.NewFaucet(networkname.BorDevnetChainName, faucetSource)),
+ Alloc: types.GenesisAlloc{
+ faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
+ },
+ Nodes: []devnet.Node{
+ &args.BlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ HeimdallGrpcAddr: heimdallGrpcAddr,
+ },
+ AccountSlots: 200,
+ },
+ &args.BlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ HeimdallGrpcAddr: heimdallGrpcAddr,
+ },
+ AccountSlots: 200,
+ },
+ /*&args.BlockProducer{
+ Node: args.Node{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ HeimdallGrpcAddr: heimdallGrpcAddr,
+ },
+ AccountSlots: 200,
+ },*/
+ &args.NonBlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ HeimdallGrpcAddr: heimdallGrpcAddr,
+ },
+ },
+ },
+ }
+
+ devNetwork := devnet.Network{
+ DataDir: dataDir,
+ Chain: networkname.DevChainName,
+ Logger: logger,
+ BasePort: 30403,
+ BasePrivateApiAddr: "localhost:10190",
+ BaseRPCHost: baseRpcHost,
+ BaseRPCPort: baseRpcPort + 1000,
+ Services: append(services, account_services.NewFaucet(networkname.DevChainName, faucetSource)),
+ Alloc: types.GenesisAlloc{
+ faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
+ checkpointOwner.Address: {Balance: accounts.EtherAmount(10_000)},
+ },
+ Nodes: []devnet.Node{
+ &args.BlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ VMDebug: true,
+ HttpCorsDomain: "*",
+ },
+ DevPeriod: 5,
+ AccountSlots: 200,
+ },
+ &args.NonBlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "3",
+ },
+ },
+ },
+ }
+
+ return devnet.Devnet{
+ &borNetwork,
+ &devNetwork,
+ }
+}
+
+func NewBorDevnetWithRemoteHeimdall(
+ dataDir string,
+ baseRpcHost string,
+ baseRpcPort int,
+ logger log.Logger,
+) devnet.Devnet {
+ heimdallGrpcAddr := ""
+ checkpointOwner := accounts.NewAccount("checkpoint-owner")
+ withMilestones := utils.WithHeimdallMilestones.Value
+ return NewBorDevnetWithHeimdall(
+ dataDir,
+ baseRpcHost,
+ baseRpcPort,
+ nil,
+ heimdallGrpcAddr,
+ checkpointOwner,
+ withMilestones,
+ logger)
+}
+
+func NewBorDevnetWithLocalHeimdall(
+ dataDir string,
+ baseRpcHost string,
+ baseRpcPort int,
+ heimdallGrpcAddr string,
+ sprintSize uint64,
+ logger log.Logger,
+) devnet.Devnet {
+ config := *params.BorDevnetChainConfig
+ if sprintSize > 0 {
+ config.Bor.Sprint = map[string]uint64{"0": sprintSize}
+ }
+
+ checkpointOwner := accounts.NewAccount("checkpoint-owner")
+
+ heimdall := polygon.NewHeimdall(
+ &config,
+ heimdallGrpcAddr,
+ &polygon.CheckpointConfig{
+ CheckpointBufferTime: 60 * time.Second,
+ CheckpointAccount: checkpointOwner,
+ },
+ logger)
+
+ return NewBorDevnetWithHeimdall(
+ dataDir,
+ baseRpcHost,
+ baseRpcPort,
+ heimdall,
+ heimdallGrpcAddr,
+ checkpointOwner,
+ // milestones are not supported yet on the local heimdall
+ false,
+ logger)
+}
diff --git a/cmd/devnet/tests/devnet_dev.go b/cmd/devnet/tests/devnet_dev.go
new file mode 100644
index 00000000000..f4aeed1d0f7
--- /dev/null
+++ b/cmd/devnet/tests/devnet_dev.go
@@ -0,0 +1,53 @@
+package tests
+
+import (
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/args"
+ "github.com/ledgerwatch/erigon/cmd/devnet/devnet"
+ account_services "github.com/ledgerwatch/erigon/cmd/devnet/services/accounts"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func NewDevDevnet(
+ dataDir string,
+ baseRpcHost string,
+ baseRpcPort int,
+ logger log.Logger,
+) devnet.Devnet {
+ faucetSource := accounts.NewAccount("faucet-source")
+
+ network := devnet.Network{
+ DataDir: dataDir,
+ Chain: networkname.DevChainName,
+ Logger: logger,
+ BasePrivateApiAddr: "localhost:10090",
+ BaseRPCHost: baseRpcHost,
+ BaseRPCPort: baseRpcPort,
+ Alloc: types.GenesisAlloc{
+ faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
+ },
+ Services: []devnet.Service{
+ account_services.NewFaucet(networkname.DevChainName, faucetSource),
+ },
+ MaxNumberOfEmptyBlockChecks: 30,
+ Nodes: []devnet.Node{
+ &args.BlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ },
+ AccountSlots: 200,
+ },
+ &args.NonBlockProducer{
+ NodeArgs: args.NodeArgs{
+ ConsoleVerbosity: "0",
+ DirVerbosity: "5",
+ },
+ },
+ },
+ }
+
+ return devnet.Devnet{&network}
+}
diff --git a/cmd/devnet/tests/generic/devnet_test.go b/cmd/devnet/tests/generic/devnet_test.go
new file mode 100644
index 00000000000..8f0f944ab85
--- /dev/null
+++ b/cmd/devnet/tests/generic/devnet_test.go
@@ -0,0 +1,67 @@
+//go:build integration
+
+package generic
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/ledgerwatch/erigon/cmd/devnet/accounts"
+ "github.com/ledgerwatch/erigon/cmd/devnet/admin"
+ "github.com/ledgerwatch/erigon/cmd/devnet/contracts/steps"
+ "github.com/ledgerwatch/erigon/cmd/devnet/requests"
+ "github.com/ledgerwatch/erigon/cmd/devnet/services"
+ "github.com/ledgerwatch/erigon/cmd/devnet/tests"
+ "github.com/ledgerwatch/erigon/cmd/devnet/transactions"
+ "github.com/stretchr/testify/require"
+)
+
+func testDynamicTx(t *testing.T, ctx context.Context) {
+ t.Run("InitSubscriptions", func(t *testing.T) {
+ services.InitSubscriptions(ctx, []requests.SubMethod{requests.Methods.ETHNewHeads})
+ })
+ t.Run("PingErigonRpc", func(t *testing.T) {
+ require.Nil(t, admin.PingErigonRpc(ctx))
+ })
+ t.Run("CheckTxPoolContent", func(t *testing.T) {
+ transactions.CheckTxPoolContent(ctx, 0, 0, 0)
+ })
+ t.Run("SendTxWithDynamicFee", func(t *testing.T) {
+ const recipientAddress = "0x71562b71999873DB5b286dF957af199Ec94617F7"
+ const sendValue uint64 = 10000
+ _, err := transactions.SendTxWithDynamicFee(ctx, recipientAddress, accounts.DevAddress, sendValue)
+ require.Nil(t, err)
+ })
+ t.Run("AwaitBlocks", func(t *testing.T) {
+ require.Nil(t, transactions.AwaitBlocks(ctx, 2*time.Second))
+ })
+}
+
+func TestDynamicTxNode0(t *testing.T) {
+ runCtx, err := tests.ContextStart(t, "")
+ require.Nil(t, err)
+ testDynamicTx(t, runCtx.WithCurrentNetwork(0).WithCurrentNode(0))
+}
+
+func TestDynamicTxAnyNode(t *testing.T) {
+ runCtx, err := tests.ContextStart(t, "")
+ require.Nil(t, err)
+ testDynamicTx(t, runCtx.WithCurrentNetwork(0))
+}
+
+func TestCallContract(t *testing.T) {
+ t.Skip("FIXME: DeployAndCallLogSubscriber step fails: Log result is incorrect expected txIndex: 1, actual txIndex 2")
+
+ runCtx, err := tests.ContextStart(t, "")
+ require.Nil(t, err)
+ ctx := runCtx.WithCurrentNetwork(0)
+
+ t.Run("InitSubscriptions", func(t *testing.T) {
+ services.InitSubscriptions(ctx, []requests.SubMethod{requests.Methods.ETHNewHeads})
+ })
+ t.Run("DeployAndCallLogSubscriber", func(t *testing.T) {
+ _, err := contracts_steps.DeployAndCallLogSubscriber(ctx, accounts.DevAddress)
+ require.Nil(t, err)
+ })
+}
diff --git a/cmd/devnet/transactions/block.go b/cmd/devnet/transactions/block.go
index cf53d0292c5..cad6a359906 100644
--- a/cmd/devnet/transactions/block.go
+++ b/cmd/devnet/transactions/block.go
@@ -5,6 +5,8 @@ import (
"fmt"
"time"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/log/v3"
@@ -12,12 +14,11 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/services"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/rpc"
)
-// MaxNumberOfBlockChecks is the max number of blocks to look for a transaction in
-var MaxNumberOfEmptyBlockChecks = 25
+// max number of blocks to look for a transaction in
+const defaultMaxNumberOfEmptyBlockChecks = 25
func AwaitTransactions(ctx context.Context, hashes ...libcommon.Hash) (map[libcommon.Hash]uint64, error) {
devnet.Logger(ctx).Info("Awaiting transactions in confirmed blocks...")
@@ -28,7 +29,13 @@ func AwaitTransactions(ctx context.Context, hashes ...libcommon.Hash) (map[libco
hashmap[hash] = true
}
- m, err := searchBlockForHashes(ctx, hashmap)
+ maxNumberOfEmptyBlockChecks := defaultMaxNumberOfEmptyBlockChecks
+ network := devnet.CurrentNetwork(ctx)
+ if (network != nil) && (network.MaxNumberOfEmptyBlockChecks > 0) {
+ maxNumberOfEmptyBlockChecks = network.MaxNumberOfEmptyBlockChecks
+ }
+
+ m, err := searchBlockForHashes(ctx, hashmap, maxNumberOfEmptyBlockChecks)
if err != nil {
return nil, fmt.Errorf("failed to search reserves for hashes: %v", err)
}
@@ -36,7 +43,11 @@ func AwaitTransactions(ctx context.Context, hashes ...libcommon.Hash) (map[libco
return m, nil
}
-func searchBlockForHashes(ctx context.Context, hashmap map[libcommon.Hash]bool) (map[libcommon.Hash]uint64, error) {
+func searchBlockForHashes(
+ ctx context.Context,
+ hashmap map[libcommon.Hash]bool,
+ maxNumberOfEmptyBlockChecks int,
+) (map[libcommon.Hash]uint64, error) {
logger := devnet.Logger(ctx)
if len(hashmap) == 0 {
@@ -72,7 +83,7 @@ func searchBlockForHashes(ctx context.Context, hashmap map[libcommon.Hash]bool)
blockCount++ // increment the number of blocks seen to check against the max number of blocks to iterate over
}
- if blockCount == MaxNumberOfEmptyBlockChecks {
+ if blockCount == maxNumberOfEmptyBlockChecks {
for h := range hashmap {
logger.Error("Missing Tx", "txHash", h)
}
diff --git a/cmd/devnet/transactions/tx.go b/cmd/devnet/transactions/tx.go
index e6ba3f9586e..f56775094e9 100644
--- a/cmd/devnet/transactions/tx.go
+++ b/cmd/devnet/transactions/tx.go
@@ -13,8 +13,8 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/blocks"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
- "github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/holiman/uint256"
@@ -41,7 +41,7 @@ func CheckTxPoolContent(ctx context.Context, expectedPendingSize, expectedQueued
}
if expectedPendingSize >= 0 && pendingSize != expectedPendingSize {
- logger.Error("FAILURE mismatched pending subpool size", "expected", expectedPendingSize, "got", pendingSize)
+ logger.Debug("FAILURE mismatched pending subpool size", "expected", expectedPendingSize, "got", pendingSize)
return
}
@@ -51,7 +51,7 @@ func CheckTxPoolContent(ctx context.Context, expectedPendingSize, expectedQueued
}
if expectedBaseFeeSize >= 0 && baseFeeSize != expectedBaseFeeSize {
- logger.Error("FAILURE mismatched basefee subpool size", "expected", expectedBaseFeeSize, "got", baseFeeSize)
+ logger.Debug("FAILURE mismatched basefee subpool size", "expected", expectedBaseFeeSize, "got", baseFeeSize)
}
logger.Info("Subpool sizes", "pending", pendingSize, "queued", queuedSize, "basefee", baseFeeSize)
@@ -219,7 +219,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T
return nil, libcommon.Address{}, fmt.Errorf("Unknown from account: %s", from)
}
- res, err := node.GetTransactionCount(fromAccount.Address, requests.BlockNumbers.Pending)
+ res, err := node.GetTransactionCount(fromAccount.Address, rpc.PendingBlock)
if err != nil {
return nil, libcommon.Address{}, fmt.Errorf("failed to get transaction count for address 0x%x: %v", fromAccount.Address, err)
@@ -241,7 +241,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T
func signEIP1559TxsLowerAndHigherThanBaseFee2(ctx context.Context, amountLower, amountHigher int, baseFeePerGas uint64, toAddress libcommon.Address, fromAddress libcommon.Address) ([]types.Transaction, []types.Transaction, error) {
node := devnet.SelectNode(ctx)
- res, err := node.GetTransactionCount(fromAddress, requests.BlockNumbers.Pending)
+ res, err := node.GetTransactionCount(fromAddress, rpc.PendingBlock)
if err != nil {
return nil, nil, fmt.Errorf("failed to get transaction count for address 0x%x: %v", fromAddress, err)
@@ -335,7 +335,12 @@ func signEIP1559TxsHigherThanBaseFee(ctx context.Context, n int, baseFeePerGas u
devnet.Logger(ctx).Info("HIGHER", "transaction", i, "nonce", transaction.Nonce, "value", transaction.Value, "feecap", transaction.FeeCap)
- signedTransaction, err := types.SignTx(transaction, signer, accounts.SigKey(fromAddress))
+ signerKey := accounts.SigKey(fromAddress)
+ if signerKey == nil {
+ return nil, fmt.Errorf("devnet.signEIP1559TxsHigherThanBaseFee failed to SignTx: private key not found for address %s", fromAddress)
+ }
+
+ signedTransaction, err := types.SignTx(transaction, signer, signerKey)
if err != nil {
return nil, err
}
diff --git a/cmd/downloader/downloadernat/nat.go b/cmd/downloader/downloadernat/nat.go
index 5aa0f29dbe9..f3b7075f599 100644
--- a/cmd/downloader/downloadernat/nat.go
+++ b/cmd/downloader/downloadernat/nat.go
@@ -1,12 +1,13 @@
package downloadernat
import (
- "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/anacrolix/torrent"
"github.com/ledgerwatch/erigon/p2p/nat"
"github.com/ledgerwatch/log/v3"
)
-func DoNat(natif nat.Interface, cfg *downloadercfg.Cfg, logger log.Logger) {
+// DoNat can mutate `cfg` parameter
+func DoNat(natif nat.Interface, cfg *torrent.ClientConfig, logger log.Logger) {
switch natif.(type) {
case nil:
// No NAT interface, do nothing.
diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go
index 53de0e035f8..7dc310a0aa6 100644
--- a/cmd/downloader/main.go
+++ b/cmd/downloader/main.go
@@ -7,24 +7,24 @@ import (
"net"
"os"
"path/filepath"
+ "runtime"
+ "strings"
"time"
"github.com/anacrolix/torrent/metainfo"
"github.com/c2h5oh/datasize"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
+ "github.com/ledgerwatch/erigon-lib/chain/snapcfg"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/downloader"
downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
- "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat"
- "github.com/ledgerwatch/erigon/cmd/utils"
- "github.com/ledgerwatch/erigon/common/paths"
- "github.com/ledgerwatch/erigon/p2p/nat"
- "github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/turbo/debug"
- "github.com/ledgerwatch/erigon/turbo/logging"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/log/v3"
"github.com/pelletier/go-toml/v2"
"github.com/spf13/cobra"
@@ -34,12 +34,34 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/reflection"
+
+ "github.com/ledgerwatch/erigon/cmd/downloader/downloadernat"
+ "github.com/ledgerwatch/erigon/cmd/hack/tool"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon/common/paths"
+ "github.com/ledgerwatch/erigon/p2p/nat"
+ "github.com/ledgerwatch/erigon/params"
+ "github.com/ledgerwatch/erigon/turbo/debug"
+ "github.com/ledgerwatch/erigon/turbo/logging"
)
+func main() {
+ ctx, cancel := common.RootContext()
+ defer cancel()
+
+ if err := rootCmd.ExecuteContext(ctx); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
var (
- datadirCli string
+ webseeds string
+ datadirCli, chain string
+ filePath string
forceRebuild bool
forceVerify bool
+ forceVerifyFiles []string
downloaderApiAddr string
natSetting string
torrentVerbosity int
@@ -52,13 +74,15 @@ var (
targetFile string
disableIPV6 bool
disableIPV4 bool
+ seedbox bool
)
func init() {
utils.CobraFlags(rootCmd, debug.Flags, utils.MetricFlags, logging.Flags)
withDataDir(rootCmd)
-
+ rootCmd.Flags().StringVar(&chain, utils.ChainFlag.Name, utils.ChainFlag.Value, utils.ChainFlag.Usage)
+ rootCmd.Flags().StringVar(&webseeds, utils.WebSeedsFlag.Name, utils.WebSeedsFlag.Value, utils.WebSeedsFlag.Usage)
rootCmd.Flags().StringVar(&natSetting, "nat", utils.NATFlag.Value, utils.NATFlag.Usage)
rootCmd.Flags().StringVar(&downloaderApiAddr, "downloader.api.addr", "127.0.0.1:9093", "external downloader api network address, for example: 127.0.0.1:9093 serves remote downloader interface")
rootCmd.Flags().StringVar(&downloadRateStr, "torrent.download.rate", utils.TorrentDownloadRateFlag.Value, utils.TorrentDownloadRateFlag.Usage)
@@ -71,7 +95,16 @@ func init() {
rootCmd.Flags().StringVar(&staticPeersStr, utils.TorrentStaticPeersFlag.Name, utils.TorrentStaticPeersFlag.Value, utils.TorrentStaticPeersFlag.Usage)
rootCmd.Flags().BoolVar(&disableIPV6, "downloader.disable.ipv6", utils.DisableIPV6.Value, utils.DisableIPV6.Usage)
rootCmd.Flags().BoolVar(&disableIPV4, "downloader.disable.ipv4", utils.DisableIPV4.Value, utils.DisableIPV6.Usage)
- rootCmd.PersistentFlags().BoolVar(&forceVerify, "verify", false, "Force verify data files if have .torrent files")
+ rootCmd.Flags().BoolVar(&seedbox, "seedbox", false, "Turns downloader into independent (doesn't need Erigon) software which discover/download/seed new files - useful for Erigon network, and can work on very cheap hardware. It will: 1) download .torrent from webseed 2) download new files after upgrade 3) we planing add discovery of new files soon")
+ rootCmd.PersistentFlags().BoolVar(&forceVerify, "verify", false, "Verify files. All by default, or passed by --verify.files")
+ rootCmd.PersistentFlags().StringArrayVar(&forceVerifyFiles, "verify.files", nil, "Limit list of files to verify")
+
+ withDataDir(createTorrent)
+ withFile(createTorrent)
+ rootCmd.AddCommand(createTorrent)
+
+ rootCmd.AddCommand(torrentCat)
+ rootCmd.AddCommand(torrentMagnet)
withDataDir(printTorrentHashes)
printTorrentHashes.PersistentFlags().BoolVar(&forceRebuild, "rebuild", false, "Force re-create .torrent files")
@@ -79,8 +112,8 @@ func init() {
if err := printTorrentHashes.MarkFlagFilename("targetfile"); err != nil {
panic(err)
}
-
rootCmd.AddCommand(printTorrentHashes)
+
}
func withDataDir(cmd *cobra.Command) {
@@ -89,17 +122,14 @@ func withDataDir(cmd *cobra.Command) {
panic(err)
}
}
-
-func main() {
- ctx, cancel := common.RootContext()
- defer cancel()
-
- if err := rootCmd.ExecuteContext(ctx); err != nil {
- fmt.Println(err)
- os.Exit(1)
+func withFile(cmd *cobra.Command) {
+ cmd.Flags().StringVar(&filePath, "file", "", "")
+ if err := cmd.MarkFlagFilename(utils.DataDirFlag.Name); err != nil {
+ panic(err)
}
}
+var logger log.Logger
var rootCmd = &cobra.Command{
Use: "",
Short: "snapshot downloader",
@@ -107,8 +137,13 @@ var rootCmd = &cobra.Command{
PersistentPostRun: func(cmd *cobra.Command, args []string) {
debug.Exit()
},
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ if cmd.Name() != "torrent_cat" {
+ logger = debug.SetupCobra(cmd, "downloader")
+ logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
+ }
+ },
Run: func(cmd *cobra.Command, args []string) {
- logger := debug.SetupCobra(cmd, "integration")
if err := Downloader(cmd.Context(), logger); err != nil {
if !errors.Is(err, context.Canceled) {
logger.Error(err.Error())
@@ -120,6 +155,12 @@ var rootCmd = &cobra.Command{
func Downloader(ctx context.Context, logger log.Logger) error {
dirs := datadir.New(datadirCli)
+ if err := datadir.ApplyMigrations(dirs); err != nil {
+ return err
+ }
+ if err := checkChainName(ctx, dirs, chain); err != nil {
+ return err
+ }
torrentLogLevel, _, err := downloadercfg2.Int2LogLevel(torrentVerbosity)
if err != nil {
return err
@@ -133,32 +174,51 @@ func Downloader(ctx context.Context, logger log.Logger) error {
return err
}
- logger.Info("Run snapshot downloader", "addr", downloaderApiAddr, "datadir", dirs.DataDir, "ipv6-enabled", !disableIPV6, "ipv4-enabled", !disableIPV4, "download.rate", downloadRate.String(), "upload.rate", uploadRate.String())
- natif, err := nat.Parse(natSetting)
- if err != nil {
- return fmt.Errorf("invalid nat option %s: %w", natSetting, err)
- }
- staticPeers := utils.SplitAndTrim(staticPeersStr)
+ logger.Info("[snapshots] cli flags", "chain", chain, "addr", downloaderApiAddr, "datadir", dirs.DataDir, "ipv6-enabled", !disableIPV6, "ipv4-enabled", !disableIPV4, "download.rate", downloadRate.String(), "upload.rate", uploadRate.String(), "webseed", webseeds)
+ staticPeers := common.CliString2Array(staticPeersStr)
version := "erigon: " + params.VersionWithCommit(params.GitCommit)
- cfg, err := downloadercfg2.New(dirs.Snap, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers)
+
+ webseedsList := common.CliString2Array(webseeds)
+ if known, ok := snapcfg.KnownWebseeds[chain]; ok {
+ webseedsList = append(webseedsList, known...)
+ }
+ cfg, err := downloadercfg2.New(dirs, version, torrentLogLevel, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, staticPeers, webseedsList, chain)
if err != nil {
return err
}
+ cfg.ClientConfig.PieceHashersPerTorrent = 32 * runtime.NumCPU()
cfg.ClientConfig.DisableIPv6 = disableIPV6
cfg.ClientConfig.DisableIPv4 = disableIPV4
- downloadernat.DoNat(natif, cfg, logger)
+ natif, err := nat.Parse(natSetting)
+ if err != nil {
+ return fmt.Errorf("invalid nat option %s: %w", natSetting, err)
+ }
+ downloadernat.DoNat(natif, cfg.ClientConfig, logger)
- d, err := downloader.New(ctx, cfg)
+ cfg.DownloadTorrentFilesFromWebseed = true // enable it only for standalone mode now. feature is not fully ready yet
+ d, err := downloader.New(ctx, cfg, dirs, logger, log.LvlInfo, seedbox)
if err != nil {
return err
}
defer d.Close()
- logger.Info("[torrent] Start", "my peerID", fmt.Sprintf("%x", d.Torrent().PeerID()))
+ logger.Info("[snapshots] Start bittorrent server", "my_peer_id", fmt.Sprintf("%x", d.TorrentClient().PeerID()))
- d.MainLoopInBackground(ctx, false)
+ if forceVerify { // remove and create .torrent files (will re-read all snapshots)
+ if err = d.VerifyData(ctx, forceVerifyFiles); err != nil {
+ return err
+ }
+ logger.Info("[snapshots] Verify done")
+ return nil
+ }
+
+ d.MainLoopInBackground(false)
+
+ if err := addPreConfiguredHashes(ctx, d); err != nil {
+ return err
+ }
bittorrentServer, err := downloader.NewGrpcServer(d)
if err != nil {
@@ -171,92 +231,150 @@ func Downloader(ctx context.Context, logger log.Logger) error {
}
defer grpcServer.GracefulStop()
- if forceVerify { // remove and create .torrent files (will re-read all snapshots)
- if err = d.VerifyData(ctx); err != nil {
- return err
- }
- }
-
<-ctx.Done()
return nil
}
+var createTorrent = &cobra.Command{
+ Use: "torrent_create",
+ Example: "go run ./cmd/downloader torrent_create --datadir= --file=",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ //logger := debug.SetupCobra(cmd, "integration")
+ dirs := datadir.New(datadirCli)
+ err := downloader.BuildTorrentFilesIfNeed(cmd.Context(), dirs)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
var printTorrentHashes = &cobra.Command{
Use: "torrent_hashes",
Example: "go run ./cmd/downloader torrent_hashes --datadir ",
RunE: func(cmd *cobra.Command, args []string) error {
- logger := debug.SetupCobra(cmd, "integration")
- dirs := datadir.New(datadirCli)
- ctx := cmd.Context()
-
- if forceRebuild { // remove and create .torrent files (will re-read all snapshots)
- //removePieceCompletionStorage(snapDir)
- files, err := downloader.AllTorrentPaths(dirs.Snap)
- if err != nil {
- return err
- }
- for _, filePath := range files {
- if err := os.Remove(filePath); err != nil {
- return err
- }
- }
- if _, err := downloader.BuildTorrentFilesIfNeed(ctx, dirs.Snap); err != nil {
- return err
- }
+ logger := debug.SetupCobra(cmd, "downloader")
+ if err := doPrintTorrentHashes(cmd.Context(), logger); err != nil {
+ log.Error(err.Error())
}
+ return nil
+ },
+}
- res := map[string]string{}
- files, err := downloader.AllTorrentPaths(dirs.Snap)
+var torrentVerify = &cobra.Command{
+ Use: "torrent_verify",
+ Example: "go run ./cmd/downloader torrent_verify ",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if len(args) == 0 {
+ return fmt.Errorf("please pass .torrent file path by first argument")
+ }
+ fPath := args[0]
+ mi, err := metainfo.LoadFromFile(fPath)
if err != nil {
- return err
+ return fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath)
}
- for _, torrentFilePath := range files {
- mi, err := metainfo.LoadFromFile(torrentFilePath)
- if err != nil {
- return err
- }
- info, err := mi.UnmarshalInfo()
- if err != nil {
- return err
- }
- res[info.Name] = mi.HashInfoBytes().String()
+
+ fmt.Printf("%s\n", mi.HashInfoBytes())
+ return nil
+ },
+}
+var torrentCat = &cobra.Command{
+ Use: "torrent_cat",
+ Example: "go run ./cmd/downloader torrent_cat ",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if len(args) == 0 {
+ return fmt.Errorf("please pass .torrent file path by first argument")
}
- serialized, err := toml.Marshal(res)
+ fPath := args[0]
+ mi, err := metainfo.LoadFromFile(fPath)
if err != nil {
- return err
+ return fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath)
}
- if targetFile == "" {
- fmt.Printf("%s\n", serialized)
- return nil
+ fmt.Printf("%s\n", mi.HashInfoBytes())
+ return nil
+ },
+}
+var torrentMagnet = &cobra.Command{
+ Use: "torrent_magnet",
+ Example: "go run ./cmd/downloader torrent_magnet ",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if len(args) == 0 {
+ return fmt.Errorf("please pass .torrent file path by first argument")
+ }
+ fPath := args[0]
+ mi, err := metainfo.LoadFromFile(fPath)
+ if err != nil {
+ return fmt.Errorf("LoadFromFile: %w, file=%s", err, fPath)
}
+ fmt.Printf("%s\n", mi.Magnet(nil, nil).String())
+ return nil
+ },
+}
+
+func doPrintTorrentHashes(ctx context.Context, logger log.Logger) error {
+ dirs := datadir.New(datadirCli)
+ if err := datadir.ApplyMigrations(dirs); err != nil {
+ return err
+ }
- oldContent, err := os.ReadFile(targetFile)
+ if forceRebuild { // remove and create .torrent files (will re-read all snapshots)
+ //removePieceCompletionStorage(snapDir)
+ files, err := downloader.AllTorrentPaths(dirs)
if err != nil {
return err
}
- oldLines := map[string]string{}
- if err := toml.Unmarshal(oldContent, &oldLines); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
+ for _, filePath := range files {
+ if err := os.Remove(filePath); err != nil {
+ return err
+ }
}
- if len(oldLines) >= len(res) {
- logger.Info("amount of lines in target file is equal or greater than amount of lines in snapshot dir", "old", len(oldLines), "new", len(res))
- return nil
+ if err := downloader.BuildTorrentFilesIfNeed(ctx, dirs); err != nil {
+ return fmt.Errorf("BuildTorrentFilesIfNeed: %w", err)
}
- if err := os.WriteFile(targetFile, serialized, 0644); err != nil { // nolint
- return err
+ }
+
+ res := map[string]string{}
+ torrents, err := downloader.AllTorrentSpecs(dirs)
+ if err != nil {
+ return err
+ }
+ for _, t := range torrents {
+ // we don't release commitment history in this time. let's skip it here.
+ if strings.HasPrefix(t.DisplayName, "history/commitment") {
+ continue
}
+ if strings.HasPrefix(t.DisplayName, "idx/commitment") {
+ continue
+ }
+ res[t.DisplayName] = t.InfoHash.String()
+ }
+ serialized, err := toml.Marshal(res)
+ if err != nil {
+ return err
+ }
+
+ if targetFile == "" {
+ fmt.Printf("%s\n", serialized)
return nil
- },
-}
+ }
-// nolint
-func removePieceCompletionStorage(snapDir string) {
- _ = os.RemoveAll(filepath.Join(snapDir, "db"))
- _ = os.RemoveAll(filepath.Join(snapDir, ".torrent.db"))
- _ = os.RemoveAll(filepath.Join(snapDir, ".torrent.bolt.db"))
- _ = os.RemoveAll(filepath.Join(snapDir, ".torrent.db-shm"))
- _ = os.RemoveAll(filepath.Join(snapDir, ".torrent.db-wal"))
+ oldContent, err := os.ReadFile(targetFile)
+ if err != nil {
+ return err
+ }
+ oldLines := map[string]string{}
+ if err := toml.Unmarshal(oldContent, &oldLines); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ if len(oldLines) >= len(res) {
+ logger.Info("amount of lines in target file is equal or greater than amount of lines in snapshot dir", "old", len(oldLines), "new", len(res))
+ return nil
+ }
+ if err := os.WriteFile(targetFile, serialized, 0644); err != nil { // nolint
+ return err
+ }
+ return nil
}
func StartGrpc(snServer *downloader.GrpcServer, addr string, creds *credentials.TransportCredentials, logger log.Logger) (*grpc.Server, error) {
@@ -313,3 +431,37 @@ func StartGrpc(snServer *downloader.GrpcServer, addr string, creds *credentials.
logger.Info("Started gRPC server", "on", addr)
return grpcServer, nil
}
+
+// Add pre-configured
+func addPreConfiguredHashes(ctx context.Context, d *downloader.Downloader) error {
+ for _, it := range snapcfg.KnownCfg(chain).Preverified {
+ if err := d.AddMagnetLink(ctx, snaptype.Hex2InfoHash(it.Hash), it.Name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func checkChainName(ctx context.Context, dirs datadir.Dirs, chainName string) error {
+ if !dir.FileExist(filepath.Join(dirs.Chaindata, "mdbx.dat")) {
+ return nil
+ }
+ db, err := mdbx.NewMDBX(log.New()).
+ Path(dirs.Chaindata).Label(kv.ChainDB).
+ Accede().
+ Open(ctx)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ if err := db.View(context.Background(), func(tx kv.Tx) error {
+ cc := tool.ChainConfig(tx)
+ if cc != nil && cc.ChainName != chainName {
+ return fmt.Errorf("datadir already was configured with --chain=%s. can't change to '%s'", cc.ChainName, chainName)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cmd/downloader/readme.md b/cmd/downloader/readme.md
index 6502a59b570..61fa4203a3e 100644
--- a/cmd/downloader/readme.md
+++ b/cmd/downloader/readme.md
@@ -1,10 +1,12 @@
# Downloader
-Service to seed/download historical data (snapshots, immutable .seg files) by Bittorrent protocol
+Service to seed/download historical data (snapshots, immutable .seg files) by
+Bittorrent protocol
## Start Erigon with snapshots support
-As many other Erigon components (txpool, sentry, rpc daemon) it may be built-into Erigon or run as separated process.
+As many other Erigon components (txpool, sentry, rpc daemon) it may be
+built-into Erigon or run as separated process.
```shell
# 1. Downloader by default run inside Erigon, by `--snapshots` flag:
@@ -28,8 +30,10 @@ Use `--snap.keepblocks=true` to don't delete retired blocks from DB
Any network/chain can start with snapshot sync:
-- node will download only snapshots registered in next repo https://github.com/ledgerwatch/erigon-snapshot
-- node will move old blocks from DB to snapshots of 1K blocks size, then merge snapshots to bigger range, until
+- node will download only snapshots registered in next
+ repo https://github.com/ledgerwatch/erigon-snapshot
+- node will move old blocks from DB to snapshots of 1K blocks size, then merge
+ snapshots to bigger range, until
snapshots of 500K blocks, then automatically start seeding new snapshot
Flag `--snapshots` is compatible with `--prune` flag
@@ -43,14 +47,14 @@ Flag `--snapshots` is compatible with `--prune` flag
# It will dump blocks from Database to .seg files:
erigon snapshots retire --datadir=
-# Create .torrent files (Downloader will seed automatically all .torrent files)
+# Create .torrent files (you can think about them as "checksum")
+downloader torrent_create --datadir=
+
# output format is compatible with https://github.com/ledgerwatch/erigon-snapshot
-downloader torrent_hashes --rebuild --datadir=
+downloader torrent_hashes --datadir=
-# Start downloader (seeds automatically)
+# Start downloader (read all .torrent files, and download/seed data)
downloader --downloader.api.addr=127.0.0.1:9093 --datadir=
-
-# Erigon is not required for snapshots seeding. But Erigon with --snapshots also does seeding.
```
Additional info:
@@ -67,39 +71,88 @@ erigon snapshots index --datadir=
## Architecture
-Downloader works based on /snapshots/*.torrent files. Such files can be created 4 ways:
+Downloader works based on /snapshots/*.torrent files. Such files
+can be created 4 ways:
-- Erigon can do grpc call downloader.Download(list_of_hashes), it will trigger creation of .torrent files
-- Erigon can create new .seg file, Downloader will scan .seg file and create .torrent
-- operator can manually copy .torrent files (rsync from other server or restore from backup)
-- operator can manually copy .seg file, Downloader will scan .seg file and create .torrent
+- Erigon can do grpc call downloader.Download(list_of_hashes), it will trigger
+ creation of .torrent files
+- Erigon can create new .seg file, Downloader will scan .seg file and create
+ .torrent
+- operator can manually copy .torrent files (rsync from other server or restore
+ from backup)
+- operator can manually copy .seg file, Downloader will scan .seg file and
+ create .torrent
Erigon does:
- connect to Downloader
- share list of hashes (see https://github.com/ledgerwatch/erigon-snapshot )
- wait for download of all snapshots
-- when .seg available - automatically create .idx files - secondary indices, for example to find block by hash
-- then switch to normal staged sync (which doesn't require connection to Downloader)
-- ensure that snapshot downloading happens only once: even if new Erigon version does include new pre-verified snapshot
- hashes, Erigon will not download them (to avoid unpredictable downtime) - but Erigon may produce them by self.
+- when .seg available - automatically create .idx files - secondary indices, for
+ example to find block by hash
+- then switch to normal staged sync (which doesn't require connection to
+ Downloader)
+- ensure that snapshot downloading happens only once: even if new Erigon version
+ does include new pre-verified snapshot
+ hashes, Erigon will not download them (to avoid unpredictable downtime) - but
+ Erigon may produce them by self.
Downloader does:
- Read .torrent files, download everything described by .torrent files
-- Use https://github.com/ngosang/trackerslist see [./trackers/embed.go](../../../erigon-lib/downloader/trackers/embed.go)
+- Use https://github.com/ngosang/trackerslist
+ see [./trackers/embed.go](../../../erigon-lib/downloader/trackers/embed.go)
- automatically seeding
Technical details:
-- To prevent attack - .idx creation using random Seed - all nodes will have different .idx file (and same .seg files)
-- If you add/remove any .seg file manually, also need remove `/snapshots/db` folder
+- To prevent attack - .idx creation using random Seed - all nodes will have
+ different .idx file (and same .seg files)
+- If you add/remove any .seg file manually, also need
+ remove `/downloader` folder
## How to verify that .seg files have the same checksum as current .torrent files
```
# Use it if you see weird behavior, bugs, bans, hardware issues, etc...
downloader --verify --datadir=
+downloader --verify --verify.files=v1-1-2-transaction.seg --datadir=
+```
+
+## Create cheap seedbox
+
+Usually Erigon's network is self-sufficient - peers automatically producing and
+seeding snapshots. But new network or new type of snapshots need Bootstraping
+step - no peers yet have this files.
+
+**Seedbox** - machie which ony seeding archive files:
+
+- Doesn't need synced erigon
+- Can work on very cheap disks, cpu, ram
+- It works exactly like Erigon node - downloading archive files and seed them
+
+```
+downloader --seedbox --datadir= --chain=mainnet
+```
+
+Seedbox can fallback to **Webseed** - HTTP url to centralized infrastructure. For example: private S3 bucket with
+signed_urls, or any HTTP server with files. Main idea: erigon decentralized infrastructure has higher prioriity than
+centralized (which used as **support/fallback**).
+
+```
+# Erigon has default webseed url's - and you can create own
+downloader --datadir= --chain=mainnet --webseed=
+# See also: `downloader --help` of `--webseed` flag. There is an option to pass it by `datadir/webseed.toml` file
+```
+
+---------
+
+## Utilities
+
+```
+downloader torrent_cat /path/to.torrent
+
+downloader torrent_magnet /path/to.torrent
```
## Faster rsync
@@ -118,3 +171,4 @@ crontab -e
```
It does push to branch `auto`, before release - merge `auto` to `main` manually
+
diff --git a/cmd/erigon-el-mock/main.go b/cmd/erigon-el-mock/main.go
deleted file mode 100644
index 98ec8e66a8f..00000000000
--- a/cmd/erigon-el-mock/main.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package main
-
-import (
- "flag"
- "net"
-
- "github.com/c2h5oh/datasize"
- "github.com/ledgerwatch/erigon-lib/common/datadir"
- "github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon-lib/kv/mdbx"
- "github.com/ledgerwatch/erigon-lib/kv/memdb"
- "github.com/ledgerwatch/log/v3"
- "google.golang.org/grpc"
-
- "github.com/ledgerwatch/erigon/eth/ethconfig"
- "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
-)
-
-func main() {
- datadirPtr := flag.String("datadir2", "", "non in-memory db for EL simulation")
- flag.Parse()
- log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
- lis, err := net.Listen("tcp", "127.0.0.1:8989")
- if err != nil {
- log.Warn("[Exec] could not serve service", "reason", err)
- }
- maxReceiveSize := 500 * datasize.MB
- dirs := datadir.New(*datadirPtr)
-
- s := grpc.NewServer(grpc.MaxRecvMsgSize(int(maxReceiveSize)))
- var db kv.RwDB
- if *datadirPtr == "" {
- db = memdb.New("")
- } else {
- db, err = mdbx.Open(dirs.DataDir, log.Root(), false)
- if err != nil {
- log.Error("Could not open database", "err", err)
- return
- }
- }
- blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
- execution.RegisterExecutionServer(s, NewEth1Execution(db, blockReader))
- log.Info("Serving mock Execution layer.")
- if err := s.Serve(lis); err != nil {
- log.Error("failed to serve", "err", err)
- }
-}
diff --git a/cmd/erigon-el-mock/server.go b/cmd/erigon-el-mock/server.go
deleted file mode 100644
index 78fb78b72c2..00000000000
--- a/cmd/erigon-el-mock/server.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package main
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "math/big"
- "sync"
-
- "github.com/holiman/uint256"
- "github.com/ledgerwatch/erigon-lib/common"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/gointerfaces"
- "github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
- types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/turbo/engineapi/engine_types"
- "github.com/ledgerwatch/erigon/turbo/services"
-
- "github.com/ledgerwatch/erigon/core/rawdb"
- "github.com/ledgerwatch/erigon/core/types"
-)
-
-type Eth1Execution struct {
- execution.UnimplementedExecutionServer
-
- db kv.RwDB
- blockReader services.FullBlockReader
- mu sync.Mutex
-}
-
-func NewEth1Execution(db kv.RwDB, blockReader services.FullBlockReader) *Eth1Execution {
- return &Eth1Execution{
- db: db,
- blockReader: blockReader,
- }
-}
-
-func (e *Eth1Execution) InsertHeaders(ctx context.Context, req *execution.InsertHeadersRequest) (*execution.InsertionResult, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- tx, err := e.db.BeginRw(ctx)
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- for _, header := range req.Headers {
- h, err := HeaderRpcToHeader(header)
- if err != nil {
- return nil, err
- }
- if err := rawdb.WriteHeader(tx, h); err != nil {
- return nil, err
- }
- }
- return &execution.InsertionResult{
- Result: execution.ExecutionStatus_Success,
- }, tx.Commit()
-}
-
-func (e *Eth1Execution) InsertBodies(ctx context.Context, req *execution.InsertBodiesRequest) (*execution.InsertionResult, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- tx, err := e.db.BeginRw(ctx)
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- for _, body := range req.Bodies {
- uncles := make([]*types.Header, 0, len(body.Uncles))
- for _, uncle := range body.Uncles {
- h, err := HeaderRpcToHeader(uncle)
- if err != nil {
- return nil, err
- }
- uncles = append(uncles, h)
- }
- // Withdrawals processing
- withdrawals := make([]*types.Withdrawal, 0, len(body.Withdrawals))
- for _, withdrawal := range body.Withdrawals {
- withdrawals = append(withdrawals, &types.Withdrawal{
- Index: withdrawal.Index,
- Validator: withdrawal.ValidatorIndex,
- Address: gointerfaces.ConvertH160toAddress(withdrawal.Address),
- Amount: withdrawal.Amount,
- })
- }
- if _, err := rawdb.WriteRawBodyIfNotExists(tx, gointerfaces.ConvertH256ToHash(body.BlockHash),
- body.BlockNumber, &types.RawBody{
- Transactions: body.Transactions,
- Uncles: uncles,
- Withdrawals: withdrawals,
- }); err != nil {
- return nil, err
- }
- }
- return &execution.InsertionResult{
- Result: execution.ExecutionStatus_Success,
- }, tx.Commit()
-}
-
-type canonicalEntry struct {
- hash libcommon.Hash
- number uint64
-}
-
-func (e *Eth1Execution) UpdateForkChoice(ctx context.Context, fcu *execution.ForkChoice) (*execution.ForkChoiceReceipt, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- return &execution.ForkChoiceReceipt{
- LatestValidHash: fcu.HeadBlockHash,
- Status: execution.ExecutionStatus_Success,
- }, nil
-}
-
-func (e *Eth1Execution) GetHeader(ctx context.Context, req *execution.GetSegmentRequest) (*execution.GetHeaderResponse, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- tx, err := e.db.BeginRo(ctx)
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- // Retrieve header
- var header *types.Header
- if req.BlockHash != nil && req.BlockNumber != nil {
- blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash)
- header, err = e.blockReader.Header(ctx, tx, blockHash, *req.BlockNumber)
- if err != nil {
- return nil, err
- }
- } else if req.BlockHash != nil {
- blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash)
- header, err = e.blockReader.HeaderByHash(ctx, tx, blockHash)
- if err != nil {
- return nil, err
- }
- } else if req.BlockNumber != nil {
- header, err = e.blockReader.HeaderByNumber(ctx, tx, *req.BlockNumber)
- if err != nil {
- return nil, err
- }
- }
- if err != nil {
- return nil, err
- }
- // Got nothing? return nothing :)
- if header == nil {
- return &execution.GetHeaderResponse{}, nil
- }
-
- return &execution.GetHeaderResponse{
- Header: HeaderToHeaderRPC(header),
- }, nil
-}
-
-func (e *Eth1Execution) GetBody(ctx context.Context, req *execution.GetSegmentRequest) (*execution.GetBodyResponse, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- tx, err := e.db.BeginRo(ctx)
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
- // Retrieve header
- var body *types.Body
- if req.BlockHash != nil && req.BlockNumber != nil {
- blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash)
- if ok, _, err := rawdb.IsCanonicalHashDeprecated(tx, blockHash); err != nil {
- return nil, err
- } else if ok {
- body, err = e.blockReader.BodyWithTransactions(ctx, tx, blockHash, *req.BlockNumber)
- if err != nil {
- return nil, err
- }
- }
- } else if req.BlockHash != nil {
- blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash)
- ok, blockNumber, err := rawdb.IsCanonicalHashDeprecated(tx, blockHash)
- if err != nil {
- return nil, err
- }
- if ok {
- body, err = e.blockReader.BodyWithTransactions(ctx, tx, blockHash, *blockNumber)
- if err != nil {
- return nil, err
- }
- }
- }
- if err != nil {
- return nil, err
- }
- if body == nil {
- return nil, nil
- }
- encodedTransactions, err := types.MarshalTransactionsBinary(body.Transactions)
- if err != nil {
- return nil, err
- }
- rpcWithdrawals := engine_types.ConvertWithdrawalsToRpc(body.Withdrawals)
- unclesRpc := make([]*execution.Header, 0, len(body.Uncles))
- for _, uncle := range body.Uncles {
- unclesRpc = append(unclesRpc, HeaderToHeaderRPC(uncle))
- }
- return &execution.GetBodyResponse{
- Body: &execution.BlockBody{
- Transactions: encodedTransactions,
- Withdrawals: rpcWithdrawals,
- Uncles: unclesRpc,
- },
- }, nil
-
-}
-
-func (e *Eth1Execution) IsCanonicalHash(ctx context.Context, req *types2.H256) (*execution.IsCanonicalResponse, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- return &execution.IsCanonicalResponse{Canonical: true}, nil
-}
-
-func (e *Eth1Execution) GetHeaderHashNumber(ctx context.Context, req *types2.H256) (*execution.GetHeaderHashNumberResponse, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- tx, err := e.db.BeginRo(ctx)
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
- return &execution.GetHeaderHashNumberResponse{
- BlockNumber: rawdb.ReadHeaderNumber(tx, gointerfaces.ConvertH256ToHash(req)),
- }, nil
-}
-
-func HeaderRpcToHeader(header *execution.Header) (*types.Header, error) {
- var blockNonce types.BlockNonce
- binary.BigEndian.PutUint64(blockNonce[:], header.Nonce)
- var baseFee *big.Int
- var withdrawalHash *common.Hash
- if header.BaseFeePerGas != nil {
- baseFee = gointerfaces.ConvertH256ToUint256Int(header.BaseFeePerGas).ToBig()
- }
- if header.WithdrawalHash != nil {
- withdrawalHash = new(libcommon.Hash)
- *withdrawalHash = gointerfaces.ConvertH256ToHash(header.WithdrawalHash)
- }
- h := &types.Header{
- ParentHash: gointerfaces.ConvertH256ToHash(header.ParentHash),
- UncleHash: gointerfaces.ConvertH256ToHash(header.OmmerHash),
- Coinbase: gointerfaces.ConvertH160toAddress(header.Coinbase),
- Root: gointerfaces.ConvertH256ToHash(header.StateRoot),
- TxHash: gointerfaces.ConvertH256ToHash(header.TransactionHash),
- ReceiptHash: gointerfaces.ConvertH256ToHash(header.ReceiptRoot),
- Bloom: gointerfaces.ConvertH2048ToBloom(header.LogsBloom),
- Difficulty: gointerfaces.ConvertH256ToUint256Int(header.Difficulty).ToBig(),
- Number: big.NewInt(int64(header.BlockNumber)),
- GasLimit: header.GasLimit,
- GasUsed: header.GasUsed,
- Time: header.Timestamp,
- Extra: header.ExtraData,
- MixDigest: gointerfaces.ConvertH256ToHash(header.PrevRandao),
- Nonce: blockNonce,
- BaseFee: baseFee,
- WithdrawalsHash: withdrawalHash,
- }
-
- blockHash := gointerfaces.ConvertH256ToHash(header.BlockHash)
- if blockHash != h.Hash() {
- return nil, fmt.Errorf("block %d, %x has invalid hash. expected: %x", header.BlockNumber, h.Hash(), blockHash)
- }
- return types.CopyHeader(h), nil
-}
-
-func HeaderToHeaderRPC(header *types.Header) *execution.Header {
- difficulty := new(uint256.Int)
- difficulty.SetFromBig(header.Difficulty)
-
- var baseFeeReply *types2.H256
- if header.BaseFee != nil {
- var baseFee uint256.Int
- baseFee.SetFromBig(header.BaseFee)
- baseFeeReply = gointerfaces.ConvertUint256IntToH256(&baseFee)
- }
- var withdrawalHashReply *types2.H256
- if header.WithdrawalsHash != nil {
- withdrawalHashReply = gointerfaces.ConvertHashToH256(*header.WithdrawalsHash)
- }
- return &execution.Header{
- ParentHash: gointerfaces.ConvertHashToH256(header.ParentHash),
- Coinbase: gointerfaces.ConvertAddressToH160(header.Coinbase),
- StateRoot: gointerfaces.ConvertHashToH256(header.Root),
- TransactionHash: gointerfaces.ConvertHashToH256(header.TxHash),
- LogsBloom: gointerfaces.ConvertBytesToH2048(header.Bloom[:]),
- ReceiptRoot: gointerfaces.ConvertHashToH256(header.ReceiptHash),
- PrevRandao: gointerfaces.ConvertHashToH256(header.MixDigest),
- BlockNumber: header.Number.Uint64(),
- Nonce: header.Nonce.Uint64(),
- GasLimit: header.GasLimit,
- GasUsed: header.GasUsed,
- Timestamp: header.Time,
- ExtraData: header.Extra,
- Difficulty: gointerfaces.ConvertUint256IntToH256(difficulty),
- BlockHash: gointerfaces.ConvertHashToH256(header.Hash()),
- OmmerHash: gointerfaces.ConvertHashToH256(header.UncleHash),
- BaseFeePerGas: baseFeeReply,
- WithdrawalHash: withdrawalHashReply,
- }
-
-}
diff --git a/cmd/erigon/main.go b/cmd/erigon/main.go
index 0fd23f8709e..b3dd55dcdb5 100644
--- a/cmd/erigon/main.go
+++ b/cmd/erigon/main.go
@@ -1,21 +1,17 @@
package main
import (
- "errors"
"fmt"
+ "net/http"
"os"
- "path/filepath"
- "reflect"
- "strings"
- "github.com/VictoriaMetrics/metrics"
- "github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/log/v3"
- "github.com/pelletier/go-toml"
"github.com/urfave/cli/v2"
- "gopkg.in/yaml.v2"
- "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/diagnostics"
"github.com/ledgerwatch/erigon/params"
erigonapp "github.com/ledgerwatch/erigon/turbo/app"
erigoncli "github.com/ledgerwatch/erigon/turbo/cli"
@@ -45,88 +41,40 @@ func main() {
}
func runErigon(cliCtx *cli.Context) error {
- configFilePath := cliCtx.String(utils.ConfigFlag.Name)
- if configFilePath != "" {
- if err := setFlagsFromConfigFile(cliCtx, configFilePath); err != nil {
- log.Warn("failed setting config flags from yaml/toml file", "err", err)
- }
- }
-
var logger log.Logger
var err error
- if logger, err = debug.Setup(cliCtx, true /* root logger */); err != nil {
+ var metricsMux *http.ServeMux
+
+ if logger, metricsMux, err = debug.Setup(cliCtx, true /* root logger */); err != nil {
return err
}
// initializing the node and providing the current git commit there
logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit)
- erigonInfoGauge := metrics.GetOrCreateCounter(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit))
+ erigonInfoGauge := metrics.GetOrCreateGauge(fmt.Sprintf(`erigon_info{version="%s",commit="%s"}`, params.Version, params.GitCommit))
erigonInfoGauge.Set(1)
nodeCfg := node.NewNodConfigUrfave(cliCtx, logger)
+ if err := datadir.ApplyMigrations(nodeCfg.Dirs); err != nil {
+ return err
+ }
+
ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger)
- ethNode, err := node.New(nodeCfg, ethCfg, logger)
+ ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger)
if err != nil {
log.Error("Erigon startup", "err", err)
return err
}
+
+ if metricsMux != nil {
+ diagnostics.Setup(cliCtx, metricsMux, ethNode)
+ }
+
err = ethNode.Serve()
if err != nil {
log.Error("error while serving an Erigon node", "err", err)
}
return err
}
-
-func setFlagsFromConfigFile(ctx *cli.Context, filePath string) error {
- fileExtension := filepath.Ext(filePath)
-
- fileConfig := make(map[string]interface{})
-
- if fileExtension == ".yaml" {
- yamlFile, err := os.ReadFile(filePath)
- if err != nil {
- return err
- }
- err = yaml.Unmarshal(yamlFile, fileConfig)
- if err != nil {
- return err
- }
- } else if fileExtension == ".toml" {
- tomlFile, err := os.ReadFile(filePath)
- if err != nil {
- return err
- }
- err = toml.Unmarshal(tomlFile, &fileConfig)
- if err != nil {
- return err
- }
- } else {
- return errors.New("config files only accepted are .yaml and .toml")
- }
- // sets global flags to value in yaml/toml file
- for key, value := range fileConfig {
- if !ctx.IsSet(key) {
- if reflect.ValueOf(value).Kind() == reflect.Slice {
- sliceInterface := value.([]interface{})
- s := make([]string, len(sliceInterface))
- for i, v := range sliceInterface {
- s[i] = fmt.Sprintf("%v", v)
- }
- err := ctx.Set(key, strings.Join(s, ","))
- if err != nil {
- return fmt.Errorf("failed setting %s flag with values=%s error=%s", key, s, err)
- }
- } else {
- err := ctx.Set(key, fmt.Sprintf("%v", value))
- if err != nil {
- return fmt.Errorf("failed setting %s flag with value=%v error=%s", key, value, err)
-
- }
- }
- }
- }
-
- return nil
-}
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 2cc5b531203..14401d0f94b 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -39,7 +39,6 @@ import (
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/consensus/merge"
@@ -48,6 +47,7 @@ import (
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/crypto"
+ "github.com/ledgerwatch/erigon/eth/stagedsync"
trace_logger "github.com/ledgerwatch/erigon/eth/tracers/logger"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/tests"
@@ -308,7 +308,9 @@ func Main(ctx *cli.Context) error {
// redirects to the ethash engine based on the block number
engine := merge.New(ðash.FakeEthash{})
- result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, getTracer, log.New("t8ntool"))
+ t8logger := log.New("t8ntool")
+ chainReader := stagedsync.NewChainReaderImpl(chainConfig, tx, nil, t8logger)
+ result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, chainReader, getTracer, t8logger)
if hashError != nil {
return NewError(ErrorMissingBlockhash, fmt.Errorf("blockhash error: %v", err))
@@ -607,8 +609,8 @@ func CalculateStateRoot(tx kv.RwTx) (*libcommon.Hash, error) {
if err != nil {
return nil, err
}
- h := common.NewHasher()
- defer common.ReturnHasherToPool(h)
+ h := libcommon.NewHasher()
+ defer libcommon.ReturnHasherToPool(h)
for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
if err != nil {
return nil, fmt.Errorf("interate over plain state: %w", err)
@@ -631,11 +633,11 @@ func CalculateStateRoot(tx kv.RwTx) (*libcommon.Hash, error) {
h.Sha.Write(k[length.Addr+length.Incarnation:])
//nolint:errcheck
h.Sha.Read(newK[length.Hash+length.Incarnation:])
- if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedStorage, newK, libcommon.CopyBytes(v)); err != nil {
return nil, fmt.Errorf("insert hashed key: %w", err)
}
} else {
- if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedAccounts, newK, libcommon.CopyBytes(v)); err != nil {
return nil, fmt.Errorf("insert hashed key: %w", err)
}
}
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 547dd534d8a..cde453423ba 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -43,7 +43,6 @@ import (
"github.com/ledgerwatch/erigon/cmd/evm/internal/compiler"
"github.com/ledgerwatch/erigon/cmd/utils"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/vm"
@@ -120,10 +119,12 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by
}
func runCmd(ctx *cli.Context) error {
- log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
- //glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
- //glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
- //log.Root().SetHandler(glogger)
+ machineFriendlyOutput := ctx.Bool(MachineFlag.Name)
+ if machineFriendlyOutput {
+ log.Root().SetHandler(log.DiscardHandler())
+ } else {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+ }
logconfig := &logger.LogConfig{
DisableMemory: ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
@@ -141,7 +142,7 @@ func runCmd(ctx *cli.Context) error {
receiver = libcommon.BytesToAddress([]byte("receiver"))
genesisConfig *types.Genesis
)
- if ctx.Bool(MachineFlag.Name) {
+ if machineFriendlyOutput {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
@@ -217,7 +218,7 @@ func runCmd(ctx *cli.Context) error {
if err != nil {
return err
}
- code = common.Hex2Bytes(bin)
+ code = libcommon.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 5b0591417d8..151da18661f 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -25,10 +25,10 @@ import (
"path/filepath"
"github.com/c2h5oh/datasize"
+ mdbx2 "github.com/erigontech/mdbx-go/mdbx"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/log/v3"
- mdbx2 "github.com/torquem-ch/mdbx-go/mdbx"
"github.com/urfave/cli/v2"
"github.com/ledgerwatch/erigon/core/state"
@@ -57,8 +57,12 @@ type StatetestResult struct {
}
func stateTestCmd(ctx *cli.Context) error {
- // Configure the go-ethereum logger
- log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+ machineFriendlyOutput := ctx.Bool(MachineFlag.Name)
+ if machineFriendlyOutput {
+ log.Root().SetHandler(log.DiscardHandler())
+ } else {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+ }
// Configure the EVM logger
config := &logger.LogConfig{
@@ -70,7 +74,7 @@ func stateTestCmd(ctx *cli.Context) error {
cfg := vm.Config{
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
}
- if ctx.Bool(MachineFlag.Name) {
+ if machineFriendlyOutput {
cfg.Tracer = logger.NewJSONLogger(config, os.Stderr)
} else if ctx.Bool(DebugFlag.Name) {
cfg.Tracer = logger.NewStructLogger(config)
diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md
index 499f03d7aa7..47792de04c8 100644
--- a/cmd/evm/testdata/3/readme.md
+++ b/cmd/evm/testdata/3/readme.md
@@ -1,2 +1,2 @@
-These files examplify a transition where a transaction (excuted on block 5) requests
+These files examplify a transition where a transaction (executed on block 5) requests
the blockhash for block `1`.
diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md
index 08840d37bd9..a298c32ec77 100644
--- a/cmd/evm/testdata/4/readme.md
+++ b/cmd/evm/testdata/4/readme.md
@@ -1,3 +1,3 @@
-These files examplify a transition where a transaction (excuted on block 5) requests
+These files examplify a transition where a transaction (executed on block 5) requests
the blockhash for block `4`, but where the hash for that block is missing.
It's expected that executing these should cause `exit` with errorcode `4`.
diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md
index e2b608face9..1116bc94bd9 100644
--- a/cmd/evm/testdata/5/readme.md
+++ b/cmd/evm/testdata/5/readme.md
@@ -1 +1 @@
-These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
\ No newline at end of file
+These files examplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
\ No newline at end of file
diff --git a/cmd/evm/testdata/8/readme.md b/cmd/evm/testdata/8/readme.md
index 778fc6151ab..52289f77d69 100644
--- a/cmd/evm/testdata/8/readme.md
+++ b/cmd/evm/testdata/8/readme.md
@@ -7,7 +7,7 @@ This test contains testcases for EIP-2930, which uses transactions with access l
The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the
following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`.
-Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`.
+Essentially, this contract does `SLOAD(0)` and `SLOAD(3)`.
The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`.
@@ -33,7 +33,7 @@ dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json
```
-Simlarly, we can provide the input transactions via `stdin` instead of as file:
+Similarly, we can provide the input transactions via `stdin` instead of as file:
```
dir=./testdata/8 \
diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md
index 88f0f12aaaa..3436c8135d6 100644
--- a/cmd/evm/testdata/9/readme.md
+++ b/cmd/evm/testdata/9/readme.md
@@ -7,7 +7,7 @@ This test contains testcases for EIP-1559, which uses an new transaction type an
The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the
following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`.
-Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`.
+Essentially, this contract does `SLOAD(0)` and `SLOAD(1)`.
The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`.
diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go
index 288bcf10208..b1aeaa95855 100644
--- a/cmd/hack/db/lmdb.go
+++ b/cmd/hack/db/lmdb.go
@@ -577,7 +577,7 @@ func generate6(_ kv.RwDB, tx kv.RwTx) (bool, error) {
}
func dropT(_ kv.RwDB, tx kv.RwTx) (bool, error) {
- if err := tx.(kv.BucketMigrator).ClearBucket("t"); err != nil {
+ if err := tx.ClearBucket("t"); err != nil {
return false, err
}
return true, nil
@@ -607,14 +607,14 @@ func generate7(_ kv.RwDB, tx kv.RwTx) (bool, error) {
}
func dropT1(_ kv.RwDB, tx kv.RwTx) (bool, error) {
- if err := tx.(kv.BucketMigrator).ClearBucket("t1"); err != nil {
+ if err := tx.ClearBucket("t1"); err != nil {
return false, err
}
return true, nil
}
func dropT2(_ kv.RwDB, tx kv.RwTx) (bool, error) {
- if err := tx.(kv.BucketMigrator).ClearBucket("t2"); err != nil {
+ if err := tx.ClearBucket("t2"); err != nil {
return false, err
}
return true, nil
@@ -624,7 +624,7 @@ func dropT2(_ kv.RwDB, tx kv.RwTx) (bool, error) {
func generate8(_ kv.RwDB, tx kv.RwTx) (bool, error) {
for i := 0; i < 100; i++ {
k := fmt.Sprintf("table_%05d", i)
- if err := tx.(kv.BucketMigrator).CreateBucket(k); err != nil {
+ if err := tx.CreateBucket(k); err != nil {
return false, err
}
}
@@ -656,7 +656,7 @@ func generate9(tx kv.RwTx, entries int) error {
func dropAll(_ kv.RwDB, tx kv.RwTx) (bool, error) {
for i := 0; i < 100; i++ {
k := fmt.Sprintf("table_%05d", i)
- if err := tx.(kv.BucketMigrator).DropBucket(k); err != nil {
+ if err := tx.DropBucket(k); err != nil {
return false, err
}
}
@@ -779,7 +779,7 @@ func defragSteps(filename string, bucketsCfg kv.TableCfg, generateFs ...func(kv.
var db kv.RwDB
db, err = kv2.NewMDBX(logger).Path(dir).WithTableCfg(func(kv.TableCfg) kv.TableCfg {
return bucketsCfg
- }).Open()
+ }).Open(context.Background())
if err != nil {
return fmt.Errorf("opening database: %w", err)
}
diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index dcb757be04a..42aa5932bdf 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -8,6 +8,7 @@ import (
"encoding/json"
"flag"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"math/big"
"net/http"
_ "net/http/pprof" //nolint:gosec
@@ -20,6 +21,9 @@ import (
"github.com/RoaringBitmap/roaring/roaring64"
"github.com/holiman/uint256"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/exp/slices"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/common/length"
@@ -30,21 +34,15 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
"github.com/ledgerwatch/erigon-lib/recsplit"
"github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32"
- librlp "github.com/ledgerwatch/erigon-lib/rlp"
- "github.com/ledgerwatch/erigon/core/rawdb/blockio"
- "github.com/ledgerwatch/erigon/turbo/services"
- "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
- "github.com/ledgerwatch/log/v3"
- "golang.org/x/exp/slices"
hackdb "github.com/ledgerwatch/erigon/cmd/hack/db"
"github.com/ledgerwatch/erigon/cmd/hack/flow"
"github.com/ledgerwatch/erigon/cmd/hack/tool"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/common/paths"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
+ "github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/crypto"
@@ -56,6 +54,8 @@ import (
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/debug"
"github.com/ledgerwatch/erigon/turbo/logging"
+ "github.com/ledgerwatch/erigon/turbo/services"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
)
var (
@@ -138,7 +138,7 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
}); err != nil {
panic(err)
}
- br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+ br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
bw := blockio.NewBlockWriter(histV3)
return br, bw
}
@@ -421,7 +421,7 @@ func iterateOverCode(chaindata string) error {
if err := tx.ForEach(kv.Code, nil, func(k, v []byte) error {
if len(v) > 0 && v[0] == 0xef {
fmt.Printf("Found code with hash %x: %x\n", k, v)
- hashes[libcommon.BytesToHash(k)] = common.CopyBytes(v)
+ hashes[libcommon.BytesToHash(k)] = libcommon.CopyBytes(v)
}
return nil
}); err != nil {
@@ -1330,11 +1330,13 @@ func readSeg(chaindata string) error {
g := vDecomp.MakeGetter()
var buf []byte
var count int
+ var offset, nextPos uint64
for g.HasNext() {
- g.Next(buf[:0])
+ buf, nextPos = g.Next(buf[:0])
+ fmt.Printf("offset: %d, val: %x\n", offset, buf)
+ offset = nextPos
count++
}
- fmt.Printf("count=%d\n", count)
return nil
}
@@ -1354,33 +1356,6 @@ func dumpState(chaindata string) error {
return nil
}
-type NewPooledTransactionHashesPacket68 struct {
- Types []byte
- Sizes []uint32
- Hashes []libcommon.Hash
-}
-
-func rlptest() error {
- var p = NewPooledTransactionHashesPacket68{
- Types: []byte{44, 200},
- Sizes: []uint32{56, 57680},
- Hashes: []libcommon.Hash{{}, {}},
- }
- b, err := rlp.EncodeToBytes(&p)
- if err != nil {
- return err
- }
- fmt.Printf("%x\n", b)
- var hashes []byte
- for _, h := range p.Hashes {
- hashes = append(hashes, h[:]...)
- }
- b = make([]byte, librlp.AnnouncementsLen(p.Types, p.Sizes, hashes))
- l := librlp.EncodeAnnouncements(p.Types, p.Sizes, hashes, b)
- fmt.Printf("%x\n%d %d\n", b, len(b), l)
- return nil
-}
-
func main() {
debug.RaiseFdLimit()
flag.Parse()
@@ -1512,8 +1487,6 @@ func main() {
err = readSeg(*chaindata)
case "dumpState":
err = dumpState(*chaindata)
- case "rlptest":
- err = rlptest()
case "readAccountAtVersion":
err = readAccountAtVersion(*chaindata, *account, uint64(*block))
}
diff --git a/cmd/hack/tool/fromdb/tool.go b/cmd/hack/tool/fromdb/tool.go
index 32ae74e9652..8bcff3561ca 100644
--- a/cmd/hack/tool/fromdb/tool.go
+++ b/cmd/hack/tool/fromdb/tool.go
@@ -17,7 +17,7 @@ func ChainConfig(db kv.RoDB) (cc *chain.Config) {
})
tool.Check(err)
if cc == nil {
- panic("database is not initalized")
+ panic("database is not initialized")
}
return cc
}
diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go
index d07ee259024..ac717c8f360 100644
--- a/cmd/integration/commands/refetence_db.go
+++ b/cmd/integration/commands/refetence_db.go
@@ -160,7 +160,7 @@ func init() {
func doWarmup(ctx context.Context, chaindata string, bucket string, logger log.Logger) error {
const ThreadsLimit = 5_000
- db := mdbx2.NewMDBX(log.New()).Path(chaindata).RoTxsLimiter(semaphore.NewWeighted(ThreadsLimit)).Readonly().MustOpen()
+ db := mdbx2.NewMDBX(log.New()).Path(chaindata).RoTxsLimiter(semaphore.NewWeighted(ThreadsLimit)).MustOpen()
defer db.Close()
var total uint64
@@ -389,7 +389,7 @@ MainLoop:
if !fileScanner.Scan() {
break MainLoop
}
- k := common.CopyBytes(fileScanner.Bytes())
+ k := common2.CopyBytes(fileScanner.Bytes())
if bytes.Equal(k, endData) {
break
}
@@ -397,7 +397,7 @@ MainLoop:
if !fileScanner.Scan() {
break MainLoop
}
- v := common.CopyBytes(fileScanner.Bytes())
+ v := common2.CopyBytes(fileScanner.Bytes())
v = common.FromHex(string(v[1:]))
if casted, ok := c.(kv.RwCursorDupSort); ok {
diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go
index 0dd01f94a77..c281447bfd5 100644
--- a/cmd/integration/commands/reset_state.go
+++ b/cmd/integration/commands/reset_state.go
@@ -5,10 +5,11 @@ import (
"encoding/binary"
"errors"
"fmt"
- "github.com/ledgerwatch/erigon/turbo/backup"
"os"
"text/tabwriter"
+ "github.com/ledgerwatch/erigon/turbo/backup"
+
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
@@ -36,8 +37,9 @@ var cmdResetState = &cobra.Command{
}
ctx, _ := common.RootContext()
defer db.Close()
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, agg) }); err != nil {
@@ -119,7 +121,7 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, agg *state.Aggre
}
fmt.Fprintf(w, "--\n")
fmt.Fprintf(w, "prune distance: %s\n\n", pm.String())
- fmt.Fprintf(w, "blocks.v2: blocks=%d, segments=%d, indices=%d\n\n", snapshots.BlocksAvailable(), snapshots.SegmentsMax(), snapshots.IndicesMax())
+ fmt.Fprintf(w, "blocks.v2: %t, blocks=%d, segments=%d, indices=%d\n\n", snapshots.Cfg().Enabled, snapshots.BlocksAvailable(), snapshots.SegmentsMax(), snapshots.IndicesMax())
h3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
return err
diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go
index 911681c06df..95120c4f822 100644
--- a/cmd/integration/commands/root.go
+++ b/cmd/integration/commands/root.go
@@ -7,10 +7,8 @@ import (
"path/filepath"
"strings"
- "github.com/c2h5oh/datasize"
"github.com/ledgerwatch/log/v3"
"github.com/spf13/cobra"
- "github.com/torquem-ch/mdbx-go/mdbx"
"golang.org/x/sync/semaphore"
"github.com/ledgerwatch/erigon-lib/kv"
@@ -64,9 +62,10 @@ func dbCfg(label kv.Label, path string) kv2.MdbxOpts {
const ThreadsLimit = 9_000
limiterB := semaphore.NewWeighted(ThreadsLimit)
opts := kv2.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB)
- if label == kv.ChainDB {
- opts = opts.MapSize(8 * datasize.TB)
- }
+ // integration tool don't intent to create db, then easiest way to open db - it's pass mdbx.Accede flag, which allow
+ // to read all options from DB, instead of overriding them
+ opts = opts.Accede()
+
if databaseVerbosity != -1 {
opts = opts.DBVerbosity(kv.DBVerbosityLvl(databaseVerbosity))
}
@@ -74,10 +73,6 @@ func dbCfg(label kv.Label, path string) kv2.MdbxOpts {
}
func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB, error) {
- // integration tool don't intent to create db, then easiest way to open db - it's pass mdbx.Accede flag, which allow
- // to read all options from DB, instead of overriding them
- opts = opts.Flags(func(f uint) uint { return f | mdbx.Accede })
-
db := opts.MustOpen()
if applyMigrations {
migrator := migrations.NewMigrator(opts.GetLabel())
@@ -110,7 +105,7 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB
return nil, err
}
if h3 {
- _, agg := allSnapshots(context.Background(), db, logger)
+ _, _, agg := allSnapshots(context.Background(), db, logger)
tdb, err := temporal.New(db, agg, systemcontracts.SystemContractCodeLookup[chain])
if err != nil {
return nil, err
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 692fcb6ae3c..f497b7786af 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -11,8 +11,14 @@ import (
"time"
"github.com/c2h5oh/datasize"
+ "github.com/erigontech/mdbx-go/mdbx"
+ lru "github.com/hashicorp/golang-lru/arc/v2"
+ "github.com/ledgerwatch/erigon/consensus/bor"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc"
"github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/node/nodecfg"
+ "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client"
"github.com/ledgerwatch/erigon/turbo/builder"
"github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
"github.com/ledgerwatch/log/v3"
@@ -23,6 +29,7 @@ import (
chain2 "github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/commitment"
common2 "github.com/ledgerwatch/erigon-lib/common"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/cmp"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dir"
@@ -31,7 +38,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb"
- "github.com/ledgerwatch/erigon/cmd/sentry/sentry"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
@@ -96,6 +102,27 @@ var cmdStageHeaders = &cobra.Command{
},
}
+var cmdStageBorHeimdall = &cobra.Command{
+ Use: "stage_bor_heimdall",
+ Short: "",
+ Run: func(cmd *cobra.Command, args []string) {
+ logger := debug.SetupCobra(cmd, "integration")
+ db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger)
+ if err != nil {
+ logger.Error("Opening DB", "error", err)
+ return
+ }
+ defer db.Close()
+
+ if err := stageBorHeimdall(db, cmd.Context(), logger); err != nil {
+ if !errors.Is(err, context.Canceled) {
+ logger.Error(err.Error())
+ }
+ return
+ }
+ },
+}
+
var cmdStageBodies = &cobra.Command{
Use: "stage_bodies",
Short: "",
@@ -291,7 +318,7 @@ var cmdPrintStages = &cobra.Command{
Short: "",
Run: func(cmd *cobra.Command, args []string) {
logger := debug.SetupCobra(cmd, "integration")
- db, err := openDB(dbCfg(kv.ChainDB, chaindata).Readonly(), false, logger)
+ db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger)
if err != nil {
logger.Error("Opening DB", "error", err)
return
@@ -352,7 +379,9 @@ var cmdRunMigrations = &cobra.Command{
Short: "",
Run: func(cmd *cobra.Command, args []string) {
logger := debug.SetupCobra(cmd, "integration")
- db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger)
+ //non-accede and exclusive mode - to apply create new tables if need.
+ cfg := dbCfg(kv.ChainDB, chaindata).Flags(func(u uint) uint { return u &^ mdbx.Accede }).Exclusive()
+ db, err := openDB(cfg, true, logger)
if err != nil {
logger.Error("Opening DB", "error", err)
return
@@ -383,7 +412,7 @@ var cmdSetPrune = &cobra.Command{
}
var cmdSetSnap = &cobra.Command{
- Use: "force_set_snapshot",
+ Use: "force_set_snap",
Short: "Override existing --snapshots flag value (if you know what you are doing)",
Run: func(cmd *cobra.Command, args []string) {
logger := debug.SetupCobra(cmd, "integration")
@@ -393,12 +422,22 @@ var cmdSetSnap = &cobra.Command{
return
}
defer db.Close()
- sn, agg := allSnapshots(cmd.Context(), db, logger)
+ sn, borSn, agg := allSnapshots(cmd.Context(), db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
+ cfg := sn.Cfg()
+ flags := cmd.Flags()
+ if flags.Lookup("snapshots") != nil {
+ cfg.Enabled, err = flags.GetBool("snapshots")
+ if err != nil {
+ panic(err)
+ }
+ }
+
if err := db.Update(context.Background(), func(tx kv.RwTx) error {
- return snap.ForceSetFlags(tx, sn.Cfg())
+ return snap.ForceSetFlags(tx, cfg)
}); err != nil {
if !errors.Is(err, context.Canceled) {
logger.Error(err.Error())
@@ -453,6 +492,7 @@ func init() {
rootCmd.AddCommand(cmdStageSnapshots)
withConfig(cmdStageHeaders)
+ withIntegrityChecks(cmdStageHeaders)
withDataDir(cmdStageHeaders)
withUnwind(cmdStageHeaders)
withReset(cmdStageHeaders)
@@ -460,6 +500,13 @@ func init() {
withHeimdall(cmdStageHeaders)
rootCmd.AddCommand(cmdStageHeaders)
+ withConfig(cmdStageBorHeimdall)
+ withDataDir(cmdStageBorHeimdall)
+ withReset(cmdStageBorHeimdall)
+ withChain(cmdStageBorHeimdall)
+ withHeimdall(cmdStageBorHeimdall)
+ rootCmd.AddCommand(cmdStageBorHeimdall)
+
withConfig(cmdStageBodies)
withDataDir(cmdStageBodies)
withUnwind(cmdStageBodies)
@@ -563,11 +610,14 @@ func init() {
withConfig(cmdSetSnap)
withDataDir2(cmdSetSnap)
withChain(cmdSetSnap)
+ cmdSetSnap.Flags().Bool("snapshots", false, "")
+ must(cmdSetSnap.MarkFlagRequired("snapshots"))
rootCmd.AddCommand(cmdSetSnap)
withConfig(cmdForceSetHistoryV3)
withDataDir2(cmdForceSetHistoryV3)
cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "")
+ must(cmdForceSetHistoryV3.MarkFlagRequired("history.v3"))
rootCmd.AddCommand(cmdForceSetHistoryV3)
withConfig(cmdSetPrune)
@@ -603,18 +653,36 @@ func stageSnapshots(db kv.RwDB, ctx context.Context, logger log.Logger) error {
}
func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
- sn, agg := allSnapshots(ctx, db, logger)
+ dirs := datadir.New(datadirCli)
+ if err := datadir.ApplyMigrations(dirs); err != nil {
+ return err
+ }
+
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
br, bw := blocksIO(db, logger)
engine, _, _, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
chainConfig, _, _ := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db)
- return db.Update(ctx, func(tx kv.RwTx) error {
- if !(unwind > 0 || reset) {
- logger.Info("This command only works with --unwind or --reset options")
+ if integritySlow {
+ if err := db.View(ctx, func(tx kv.Tx) error {
+ log.Info("[integrity] no gaps in canonical headers")
+ integrity.NoGapsInCanonicalHeaders(tx, ctx, br)
+ return nil
+ }); err != nil {
+ return err
}
+ return nil
+ }
+
+ if !(unwind > 0 || reset) {
+ logger.Error("This command only works with --unwind or --reset options")
+ return nil
+ }
+ return db.Update(ctx, func(tx kv.RwTx) error {
if reset {
dirs := datadir.New(datadirCli)
if err := reset2.ResetBlocks(tx, db, agg, br, bw, dirs, *chainConfig, engine, logger); err != nil {
@@ -675,9 +743,22 @@ func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
})
}
+func stageBorHeimdall(db kv.RwDB, ctx context.Context, logger log.Logger) error {
+ return db.Update(ctx, func(tx kv.RwTx) error {
+ if reset {
+ if err := reset2.ResetBorHeimdall(ctx, tx); err != nil {
+ return err
+ }
+ return nil
+ }
+ return nil
+ })
+}
+
func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error {
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
chainConfig, historyV3 := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db)
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
@@ -715,8 +796,9 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error {
func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
tmpdir := datadir.New(datadirCli).Tmp
chainConfig := fromdb.ChainConfig(db)
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
@@ -806,10 +888,15 @@ func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
dirs := datadir.New(datadirCli)
+ if err := datadir.ApplyMigrations(dirs); err != nil {
+ return err
+ }
+
engine, vmConfig, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.Execution))
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
if warmup {
@@ -847,7 +934,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
br, _ := blocksIO(db, logger)
cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil,
/*stateStream=*/ false,
- /*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg)
+ /*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil)
var tx kv.RwTx //nil - means lower-level code (each stage) will manage transactions
if noCommit {
@@ -889,8 +976,9 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error {
dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db)
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.IntermediateHashes))
@@ -946,8 +1034,9 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error {
func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error {
dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db)
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.HashState))
@@ -1123,8 +1212,9 @@ func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error {
if historyV3 {
return fmt.Errorf("this stage is disable in --history.v3=true")
}
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.AccountHistoryIndex))
@@ -1198,8 +1288,9 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error {
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
chainConfig := fromdb.ChainConfig(db)
must(sync.SetCurrentStage(stages.TxLookup))
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
if reset {
@@ -1247,8 +1338,9 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error {
}
func printAllStages(db kv.RoDB, ctx context.Context, logger log.Logger) error {
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, agg) })
}
@@ -1279,9 +1371,10 @@ func removeMigration(db kv.RwDB, ctx context.Context) error {
var openSnapshotOnce sync.Once
var _allSnapshotsSingleton *freezeblocks.RoSnapshots
+var _allBorSnapshotsSingleton *freezeblocks.BorRoSnapshots
var _aggSingleton *libstate.AggregatorV3
-func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *libstate.AggregatorV3) {
+func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.AggregatorV3) {
openSnapshotOnce.Do(func() {
var useSnapshots bool
_ = db.View(context.Background(), func(tx kv.Tx) error {
@@ -1293,6 +1386,7 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true)
_allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, logger)
+ _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, logger)
var err error
_aggSingleton, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger)
@@ -1309,6 +1403,10 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
panic(err)
}
_allSnapshotsSingleton.LogStat()
+ if err := _allBorSnapshotsSingleton.ReopenFolder(); err != nil {
+ panic(err)
+ }
+ _allBorSnapshotsSingleton.LogStat()
db.View(context.Background(), func(tx kv.Tx) error {
_aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 {
_, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax)
@@ -1318,7 +1416,7 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
})
}
})
- return _allSnapshotsSingleton, _aggSingleton
+ return _allSnapshotsSingleton, _allBorSnapshotsSingleton, _aggSingleton
}
var openBlockReaderOnce sync.Once
@@ -1327,9 +1425,9 @@ var _blockWriterSingleton *blockio.BlockWriter
func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) {
openBlockReaderOnce.Do(func() {
- sn, _ := allSnapshots(context.Background(), db, logger)
+ sn, borSn, _ := allSnapshots(context.Background(), db, logger)
histV3 := kvcfg.HistoryV3.FromDB(db)
- _blockReaderSingleton = freezeblocks.NewBlockReader(sn)
+ _blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn)
_blockWriterSingleton = blockio.NewBlockWriter(histV3)
})
return _blockReaderSingleton, _blockWriterSingleton
@@ -1407,7 +1505,8 @@ func newDomains(ctx context.Context, db kv.RwDB, stepSize uint64, mode libstate.
allSn, agg := allDomains(ctx, db, stepSize, mode, trie, logger)
cfg.Snapshot = allSn.Cfg()
- engine := initConsensusEngine(chainConfig, cfg.Dirs.DataDir, db, logger)
+ blockReader, _ := blocksIO(db, logger)
+ engine, _ := initConsensusEngine(ctx, chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
return engine, cfg, allSn, agg
}
@@ -1440,17 +1539,20 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
cfg.Miner = *miningConfig
}
cfg.Dirs = datadir.New(datadirCli)
- allSn, agg := allSnapshots(ctx, db, logger)
+ allSn, _, agg := allSnapshots(ctx, db, logger)
cfg.Snapshot = allSn.Cfg()
- engine := initConsensusEngine(chainConfig, cfg.Dirs.DataDir, db, logger)
-
blockReader, blockWriter := blocksIO(db, logger)
- sentryControlServer, err := sentry.NewMultiClient(
+ engine, heimdallClient := initConsensusEngine(ctx, chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
+
+ maxBlockBroadcastPeers := func(header *types.Header) uint { return 0 }
+
+ sentryControlServer, err := sentry_multi_client.NewMultiClient(
db,
"",
chainConfig,
genesisBlock.Hash(),
+ genesisBlock.Time(),
engine,
1,
nil,
@@ -1459,7 +1561,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
blockBufferSize,
false,
nil,
- ethconfig.Defaults.DropUselessPeers,
+ maxBlockBroadcastPeers,
logger,
)
if err != nil {
@@ -1469,7 +1571,18 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
notifications := &shards.Notifications{}
blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, db, notifications.Events, logger)
- stages := stages2.NewDefaultStages(context.Background(), db, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, logger)
+ var (
+ snapDb kv.RwDB
+ recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot]
+ signatures *lru.ARCCache[libcommon.Hash, libcommon.Address]
+ )
+ if bor, ok := engine.(*bor.Bor); ok {
+ snapDb = bor.DB
+ recents = bor.Recents
+ signatures = bor.Signatures
+ }
+ stages := stages2.NewDefaultStages(context.Background(), db, snapDb, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, nil,
+ heimdallClient, recents, signatures, logger)
sync := stagedsync.New(stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger)
miner := stagedsync.NewMiningState(&cfg.Miner)
@@ -1482,6 +1595,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
miningSync := stagedsync.New(
stagedsync.MiningStages(ctx,
stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, blockReader),
+ stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, recents, signatures),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil, blockReader),
stagedsync.StageHashStateCfg(db, dirs, historyV3),
stagedsync.StageTrieCfg(db, false, true, false, dirs.Tmp, blockReader, nil, historyV3, agg),
@@ -1531,20 +1645,27 @@ func overrideStorageMode(db kv.RwDB, logger log.Logger) error {
})
}
-func initConsensusEngine(cc *chain2.Config, dir string, db kv.RwDB, logger log.Logger) (engine consensus.Engine) {
+func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db kv.RwDB, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine, heimdallClient heimdall.IHeimdallClient) {
config := ethconfig.Defaults
var consensusConfig interface{}
-
if cc.Clique != nil {
consensusConfig = params.CliqueSnapshot
} else if cc.Aura != nil {
consensusConfig = &config.Aura
} else if cc.Bor != nil {
consensusConfig = &config.Bor
+ config.HeimdallURL = HeimdallURL
+ if !config.WithoutHeimdall {
+ if config.HeimdallgRPCAddress != "" {
+ heimdallClient = heimdallgrpc.NewHeimdallGRPCClient(config.HeimdallgRPCAddress, logger)
+ } else {
+ heimdallClient = heimdall.NewHeimdallClient(config.HeimdallURL, logger)
+ }
+ }
} else {
consensusConfig = &config.Ethash
}
- return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify,
- HeimdallgRPCAddress, HeimdallURL, config.WithoutHeimdall, db.ReadOnly(), logger)
+ return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify,
+ heimdallClient, config.WithoutHeimdall, blockReader, db.ReadOnly(), logger), heimdallClient
}
diff --git a/cmd/integration/commands/state_domains.go b/cmd/integration/commands/state_domains.go
index 6c83dd87cc5..fabde89f2f4 100644
--- a/cmd/integration/commands/state_domains.go
+++ b/cmd/integration/commands/state_domains.go
@@ -1,69 +1,39 @@
package commands
import (
- "bytes"
"context"
"encoding/hex"
"errors"
"fmt"
- "os"
+ "github.com/ledgerwatch/erigon-lib/metrics"
"path/filepath"
"runtime"
"strings"
"time"
- "github.com/VictoriaMetrics/metrics"
"github.com/holiman/uint256"
"github.com/ledgerwatch/log/v3"
"github.com/spf13/cobra"
- chain2 "github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/commitment"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
- "github.com/ledgerwatch/erigon-lib/common/fixedgas"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/kv"
kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
libstate "github.com/ledgerwatch/erigon-lib/state"
- "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb"
- "github.com/ledgerwatch/erigon/cmd/state/exec3"
"github.com/ledgerwatch/erigon/cmd/utils"
- "github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
- "github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/types/accounts"
- "github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/node/nodecfg"
erigoncli "github.com/ledgerwatch/erigon/turbo/cli"
"github.com/ledgerwatch/erigon/turbo/debug"
- "github.com/ledgerwatch/erigon/turbo/services"
)
func init() {
- withConfig(stateDomains)
- withDataDir(stateDomains)
- withUnwind(stateDomains)
- withUnwindEvery(stateDomains)
- withBlock(stateDomains)
- withIntegrityChecks(stateDomains)
- withChain(stateDomains)
- withHeimdall(stateDomains)
- withWorkers(stateDomains)
- withStartTx(stateDomains)
- withCommitment(stateDomains)
- withTraceFromTx(stateDomains)
-
- stateDomains.Flags().Uint64Var(&stepSize, "step-size", ethconfig.HistoryV3AggregationStep, "size of aggregation step, tx")
- stateDomains.Flags().Uint64Var(&lastStep, "last-step", 0, "step of last aggregation, step=txnum/step-size, unsigned integers only")
-
- rootCmd.AddCommand(stateDomains)
-
withDataDir(readDomains)
withChain(readDomains)
withHeimdall(readDomains)
@@ -129,7 +99,7 @@ var readDomains = &cobra.Command{
}
defer chainDb.Close()
- stateDb, err := kv2.NewMDBX(log.New()).Path(filepath.Join(dirs.DataDir, "statedb")).WriteMap().Open()
+ stateDb, err := kv2.NewMDBX(log.New()).Path(filepath.Join(dirs.DataDir, "statedb")).WriteMap().Open(ctx)
if err != nil {
return
}
@@ -216,420 +186,6 @@ func requestDomains(chainDb, stateDb kv.RwDB, ctx context.Context, readDomain st
return nil
}
-// write command to just seek and query state by addr and domain from state db and files (if any)
-var stateDomains = &cobra.Command{
- Use: "state_domains",
- Short: `Run block execution and commitment with Domains.`,
- Example: "go run ./cmd/integration state_domains --datadir=... --verbosity=3 --unwind=100 --unwind.every=100000 --block=2000000",
- Run: func(cmd *cobra.Command, args []string) {
- logger := debug.SetupCobra(cmd, "integration")
- ctx, _ := libcommon.RootContext()
- cfg := &nodecfg.DefaultConfig
- utils.SetNodeConfigCobra(cmd, cfg)
- ethConfig := ðconfig.Defaults
- ethConfig.Genesis = core.GenesisBlockByChainName(chain)
- erigoncli.ApplyFlagsForEthConfigCobra(cmd.Flags(), ethConfig)
-
- dirs := datadir.New(datadirCli)
- chainDb, err := openDB(dbCfg(kv.ChainDB, dirs.Chaindata), true, logger)
- if err != nil {
- logger.Error("Opening DB", "error", err)
- return
- }
- defer chainDb.Close()
-
- //stateDB := kv.Label(6)
- //stateOpts := dbCfg(stateDB, filepath.Join(dirs.DataDir, "statedb")).WriteMap()
- //stateOpts.MapSize(1 * datasize.TB).WriteMap().DirtySpace(dirtySpaceThreshold)
- //stateDb := openDB(stateOpts, true)
- //defer stateDb.Close()
-
- stateDb, err := kv2.NewMDBX(log.New()).Path(filepath.Join(dirs.DataDir, "statedb")).WriteMap().Open()
- if err != nil {
- return
- }
- defer stateDb.Close()
-
- if err := loopProcessDomains(chainDb, stateDb, ctx, logger); err != nil {
- if !errors.Is(err, context.Canceled) {
- logger.Error(err.Error())
- }
- return
- }
- },
-}
-
-func loopProcessDomains(chainDb, stateDb kv.RwDB, ctx context.Context, logger log.Logger) error {
- trieVariant := commitment.ParseTrieVariant(commitmentTrie)
- if trieVariant != commitment.VariantHexPatriciaTrie {
- blockRootMismatchExpected = true
- }
- mode := libstate.ParseCommitmentMode(commitmentMode)
-
- engine, _, _, agg := newDomains(ctx, chainDb, stepSize, mode, trieVariant, logger)
- defer agg.Close()
-
- histTx, err := chainDb.BeginRo(ctx)
- must(err)
- defer histTx.Rollback()
-
- stateTx, err := stateDb.BeginRw(ctx)
- must(err)
- defer stateTx.Rollback()
-
- agg.SetTx(stateTx)
- defer agg.StartWrites().FinishWrites()
-
- latestBlock, latestTx, err := agg.SeekCommitment()
- if err != nil && startTxNum != 0 {
- return fmt.Errorf("failed to seek commitment to tx %d: %w", startTxNum, err)
- }
- if latestTx < startTxNum {
- return fmt.Errorf("latest available tx to start is %d and its less than start tx %d", latestTx, startTxNum)
- }
- if latestTx > 0 {
- logger.Info("aggregator files opened", "txn", latestTx, "block", latestBlock)
- }
-
- aggWriter, aggReader := WrapAggregator(agg, stateTx)
- br, _ := blocksIO(chainDb, logger)
- proc := blockProcessor{
- chainConfig: fromdb.ChainConfig(chainDb),
- vmConfig: vm.Config{},
- engine: engine,
- reader: aggReader,
- writer: aggWriter,
- blockReader: br,
- stateTx: stateTx,
- stateDb: stateDb,
- blockNum: latestBlock,
- txNum: latestTx,
- startTxNum: latestTx,
- histTx: histTx,
- agg: agg,
- logger: log.New(),
- stat: stat4{startedAt: time.Now()},
- }
- if proc.txNum > 0 {
- proc.txNum--
- }
- if proc.blockNum == 0 {
- proc.txNum = 2
- }
-
- mergedRoots := agg.AggregatedRoots()
- go proc.PrintStatsLoop(ctx, 30*time.Second)
-
- if proc.startTxNum == 0 {
- genesis := core.GenesisBlockByChainName(chain)
- if err := proc.ApplyGenesis(genesis); err != nil {
- return err
- }
- }
-
- for {
- // Check for interrupts
- select {
- case <-ctx.Done():
- logger.Info(fmt.Sprintf("interrupted, please wait for commitment and cleanup, next time start with --tx %d", proc.txNum))
- rh, err := proc.agg.ComputeCommitment(true, false)
- if err != nil {
- logger.Error("failed to compute commitment", "err", err)
- }
- logger.Info("commitment: state root computed", "root", hex.EncodeToString(rh))
- if err := agg.Flush(ctx); err != nil {
- logger.Error("failed to flush aggregator", "err", err)
- }
- os.Exit(0)
- case <-mergedRoots: // notified with rootHash of latest aggregation
- if err := proc.commit(ctx); err != nil {
- logger.Error("chainDb commit on merge", "err", err)
- }
- default:
- }
-
- if lastStep > 0 && proc.txNum/stepSize >= lastStep {
- logger.Info("last step reached")
- // Commit transaction only when interrupted or just before computing commitment (so it can be re-done)
- break
- }
-
- err := proc.ProcessNext(ctx)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-type blockProcessor struct {
- engine consensus.Engine
- agg *libstate.Aggregator
- blockReader services.FullBlockReader
- writer *WriterWrapper4
- reader *ReaderWrapper4
- stateDb kv.RwDB
- stateTx kv.RwTx
- histTx kv.Tx
- blockNum uint64
- startTxNum uint64
- txNum uint64
- stat stat4
- trace bool
- logger log.Logger
- vmConfig vm.Config
- chainConfig *chain2.Config
-}
-
-func (b *blockProcessor) getHeader(hash libcommon.Hash, number uint64) *types.Header {
- h, err := b.blockReader.Header(context.Background(), b.histTx, hash, number)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-func (b *blockProcessor) commit(ctx context.Context) error {
- if b.stateDb == nil || b.stateTx == nil {
- return fmt.Errorf("commit failed due to invalid chainDb/rwTx")
- }
-
- s := time.Now()
- defer mxCommitTook.UpdateDuration(s)
-
- var spaceDirty uint64
- var err error
- if spaceDirty, _, err = b.stateTx.(*kv2.MdbxTx).SpaceDirty(); err != nil {
- return fmt.Errorf("retrieving spaceDirty: %w", err)
- }
- if spaceDirty >= dirtySpaceThreshold {
- b.logger.Info("Initiated tx commit", "block", b.blockNum, "space dirty", libcommon.ByteCount(spaceDirty))
- }
-
- b.logger.Info("database commitment", "block", b.blockNum, "txNum", b.txNum, "uptime", time.Since(b.stat.startedAt))
- if err := b.agg.Flush(ctx); err != nil {
- return err
- }
- if err = b.stateTx.Commit(); err != nil {
- return err
- }
-
- if b.stateTx, err = b.stateDb.BeginRw(ctx); err != nil {
- return err
- }
-
- b.agg.SetTx(b.stateTx)
- b.reader.SetTx(b.stateTx, b.agg.MakeContext())
-
- return nil
-}
-
-func (b *blockProcessor) PrintStatsLoop(ctx context.Context, interval time.Duration) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- b.stat.delta(b.blockNum, b.txNum).print(b.agg.Stats(), b.logger)
- }
- }
-}
-
-func (b *blockProcessor) ApplyGenesis(genesis *types.Genesis) error {
- b.logger.Info("apply genesis", "chain_id", genesis.Config.ChainID)
- genBlock, genesisIbs, err := core.GenesisToBlock(genesis, "")
- if err != nil {
- return err
- }
- b.agg.SetTxNum(0)
- if err = genesisIbs.CommitBlock(&chain2.Rules{}, b.writer); err != nil {
- return fmt.Errorf("cannot write state: %w", err)
- }
-
- blockRootHash, err := b.agg.ComputeCommitment(true, false)
- if err != nil {
- return err
- }
- if err = b.agg.FinishTx(); err != nil {
- return err
- }
-
- genesisRootHash := genBlock.Root()
- if !blockRootMismatchExpected && !bytes.Equal(blockRootHash, genesisRootHash[:]) {
- return fmt.Errorf("genesis root hash mismatch: expected %x got %x", genesisRootHash, blockRootHash)
- }
- return nil
-}
-
-func (b *blockProcessor) ProcessNext(ctx context.Context) error {
- b.blockNum++
- b.trace = traceFromTx > 0 && b.txNum == traceFromTx
-
- blockHash, err := b.blockReader.CanonicalHash(ctx, b.histTx, b.blockNum)
- if err != nil {
- return err
- }
-
- block, _, err := b.blockReader.BlockWithSenders(ctx, b.histTx, blockHash, b.blockNum)
- if err != nil {
- return err
- }
- if block == nil {
- b.logger.Info("history: block is nil", "block", b.blockNum)
- return fmt.Errorf("block %d is nil", b.blockNum)
- }
-
- b.agg.SetTx(b.stateTx)
- b.agg.SetTxNum(b.txNum)
- b.agg.SetBlockNum(b.blockNum)
-
- if _, err = b.applyBlock(ctx, block); err != nil {
- b.logger.Error("processing error", "block", b.blockNum, "err", err)
- return fmt.Errorf("processing block %d: %w", b.blockNum, err)
- }
- return err
-}
-
-func (b *blockProcessor) applyBlock(
- ctx context.Context,
- block *types.Block,
-) (types.Receipts, error) {
- defer mxBlockExecutionTimer.UpdateDuration(time.Now())
-
- header := block.Header()
- b.vmConfig.Debug = true
- gp := new(core.GasPool).AddGas(block.GasLimit()).AddBlobGas(fixedgas.MaxBlobGasPerBlock)
- usedGas := new(uint64)
- usedBlobGas := new(uint64)
- var receipts types.Receipts
- rules := b.chainConfig.Rules(block.NumberU64(), block.Time())
-
- b.blockNum = block.NumberU64()
- b.writer.w.SetTxNum(b.txNum)
-
- daoFork := b.txNum >= b.startTxNum && b.chainConfig.DAOForkBlock != nil && b.chainConfig.DAOForkBlock.Cmp(block.Number()) == 0
- if daoFork {
- ibs := state.New(b.reader)
- // TODO Actually add tracing to the DAO related accounts
- misc.ApplyDAOHardFork(ibs)
- if err := ibs.FinalizeTx(rules, b.writer); err != nil {
- return nil, err
- }
- if err := b.writer.w.FinishTx(); err != nil {
- return nil, fmt.Errorf("finish daoFork failed: %w", err)
- }
- }
-
- b.txNum++ // Pre-block transaction
- b.writer.w.SetTxNum(b.txNum)
- if err := b.writer.w.FinishTx(); err != nil {
- return nil, fmt.Errorf("finish pre-block tx %d (block %d) has failed: %w", b.txNum, block.NumberU64(), err)
- }
-
- getHashFn := core.GetHashFn(header, b.getHeader)
- for i, tx := range block.Transactions() {
- if b.txNum >= b.startTxNum {
- ibs := state.New(b.reader)
- ibs.SetTxContext(tx.Hash(), block.Hash(), i)
- ct := exec3.NewCallTracer()
- b.vmConfig.Tracer = ct
- receipt, _, err := core.ApplyTransaction(b.chainConfig, getHashFn, b.engine, nil, gp, ibs, b.writer, header, tx, usedGas, usedBlobGas, b.vmConfig)
- if err != nil {
- return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err)
- }
- for from := range ct.Froms() {
- if err := b.writer.w.AddTraceFrom(from[:]); err != nil {
- return nil, err
- }
- }
- for to := range ct.Tos() {
- if err := b.writer.w.AddTraceTo(to[:]); err != nil {
- return nil, err
- }
- }
- receipts = append(receipts, receipt)
- for _, log := range receipt.Logs {
- if err = b.writer.w.AddLogAddr(log.Address[:]); err != nil {
- return nil, fmt.Errorf("adding event log for addr %x: %w", log.Address, err)
- }
- for _, topic := range log.Topics {
- if err = b.writer.w.AddLogTopic(topic[:]); err != nil {
- return nil, fmt.Errorf("adding event log for topic %x: %w", topic, err)
- }
- }
- }
- if err = b.writer.w.FinishTx(); err != nil {
- return nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err)
- }
- if b.trace {
- fmt.Printf("FinishTx called for blockNum=%d, txIndex=%d, txNum=%d txHash=[%x]\n", b.blockNum, i, b.txNum, tx.Hash())
- }
- }
- b.txNum++
- b.writer.w.SetTxNum(b.txNum)
- }
-
- if b.txNum >= b.startTxNum {
- if b.chainConfig.IsByzantium(block.NumberU64()) {
- receiptSha := types.DeriveSha(receipts)
- if receiptSha != block.ReceiptHash() {
- fmt.Printf("mismatched receipt headers for block %d\n", block.NumberU64())
- for j, receipt := range receipts {
- fmt.Printf("tx %d, used gas: %d\n", j, receipt.GasUsed)
- }
- }
- }
- ibs := state.New(b.reader)
- if err := b.writer.w.AddTraceTo(block.Coinbase().Bytes()); err != nil {
- return nil, fmt.Errorf("adding coinbase trace: %w", err)
- }
- for _, uncle := range block.Uncles() {
- if err := b.writer.w.AddTraceTo(uncle.Coinbase.Bytes()); err != nil {
- return nil, fmt.Errorf("adding uncle trace: %w", err)
- }
- }
-
- // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
- if _, _, err := b.engine.Finalize(b.chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, block.Withdrawals(), nil, nil, b.logger); err != nil {
- return nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err)
- }
-
- if err := ibs.CommitBlock(rules, b.writer); err != nil {
- return nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err)
- }
-
- if err := b.writer.w.FinishTx(); err != nil {
- return nil, fmt.Errorf("failed to finish tx: %w", err)
- }
- if b.trace {
- fmt.Printf("FinishTx called for %d block %d\n", b.txNum, block.NumberU64())
- }
- }
-
- b.txNum++ // Post-block transaction
- b.writer.w.SetTxNum(b.txNum)
- if b.txNum >= b.startTxNum {
- if block.Number().Uint64()%uint64(commitmentFreq) == 0 {
- rootHash, err := b.writer.w.ComputeCommitment(true, b.trace)
- if err != nil {
- return nil, err
- }
- if !blockRootMismatchExpected && !bytes.Equal(rootHash, header.Root[:]) {
- return nil, fmt.Errorf("invalid root hash for block %d: expected %x got %x", block.NumberU64(), header.Root, rootHash)
- }
- }
-
- if err := b.writer.w.FinishTx(); err != nil {
- return nil, fmt.Errorf("finish after-block tx %d (block %d) has failed: %w", b.txNum, block.NumberU64(), err)
- }
- }
-
- return receipts, nil
-}
-
// Implements StateReader and StateWriter
type ReaderWrapper4 struct {
roTx kv.Tx
@@ -640,10 +196,6 @@ type WriterWrapper4 struct {
w *libstate.Aggregator
}
-func WrapAggregator(agg *libstate.Aggregator, roTx kv.Tx) (*WriterWrapper4, *ReaderWrapper4) {
- return &WriterWrapper4{w: agg}, &ReaderWrapper4{ac: agg.MakeContext(), roTx: roTx}
-}
-
func (rw *ReaderWrapper4) SetTx(roTx kv.Tx, ctx *libstate.AggregatorContext) {
rw.roTx = roTx
rw.ac.Close()
diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go
index 73990cd2a4b..3401cf669de 100644
--- a/cmd/integration/commands/state_stages.go
+++ b/cmd/integration/commands/state_stages.go
@@ -6,6 +6,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"os"
"sort"
"time"
@@ -24,7 +25,6 @@ import (
"github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb"
"github.com/ledgerwatch/erigon/cmd/utils"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/common/debugprint"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/state"
@@ -169,8 +169,13 @@ func init() {
func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.Context, logger1 log.Logger) error {
dirs := datadir.New(datadirCli)
- sn, agg := allSnapshots(ctx, db, logger1)
+ if err := datadir.ApplyMigrations(dirs); err != nil {
+ return err
+ }
+
+ sn, borSn, agg := allSnapshots(ctx, db, logger1)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig, logger1)
chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db)
@@ -217,7 +222,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
br, _ := blocksIO(db, logger1)
execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, false, historyV3, dirs,
- br, nil, genesis, syncCfg, agg)
+ br, nil, genesis, syncCfg, agg, nil)
execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
@@ -385,7 +390,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
}
to := execAtBlock - unwind
- stateStages.UnwindTo(to, common2.Hash{})
+ stateStages.UnwindTo(to, stagedsync.StagedUnwind)
if err := tx.Commit(); err != nil {
return err
@@ -445,8 +450,9 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *chain2.Config) {
}
func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error {
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
dirs := datadir.New(datadirCli)
@@ -518,8 +524,9 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e
func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error {
chainConfig := fromdb.ChainConfig(db)
dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db)
- sn, agg := allSnapshots(ctx, db, logger)
+ sn, borSn, agg := allSnapshots(ctx, db, logger)
defer sn.Close()
+ defer borSn.Close()
defer agg.Close()
engine, vmConfig, sync, _, _ := newSync(ctx, db, nil, logger)
@@ -553,7 +560,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger)
br, _ := blocksIO(db, logger)
cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil,
/*stateStream=*/ false,
- /*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg)
+ /*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil)
// set block limit of execute stage
sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
diff --git a/cmd/observer/database/db_sqlite.go b/cmd/observer/database/db_sqlite.go
index ba056fbe8c1..d8065f992ed 100644
--- a/cmd/observer/database/db_sqlite.go
+++ b/cmd/observer/database/db_sqlite.go
@@ -5,11 +5,11 @@ import (
"database/sql"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common"
"net"
"strings"
"time"
- "github.com/ledgerwatch/erigon/cmd/utils"
_ "modernc.org/sqlite"
)
@@ -709,7 +709,7 @@ func (db *DBSQLite) FindNeighborBucketKeys(ctx context.Context, id NodeID) ([]st
if !keysStr.Valid {
return nil, nil
}
- return utils.SplitAndTrim(keysStr.String), nil
+ return common.CliString2Array(keysStr.String), nil
}
func (db *DBSQLite) UpdateSentryCandidatesLastEventTime(ctx context.Context, value time.Time) error {
diff --git a/cmd/observer/main.go b/cmd/observer/main.go
index 12f613e1306..1e94135982a 100644
--- a/cmd/observer/main.go
+++ b/cmd/observer/main.go
@@ -16,7 +16,7 @@ import (
)
func mainWithFlags(ctx context.Context, flags observer.CommandFlags, logger log.Logger) error {
- server, err := observer.NewServer(flags, logger)
+ server, err := observer.NewServer(ctx, flags, logger)
if err != nil {
return err
}
diff --git a/cmd/observer/observer/crawler.go b/cmd/observer/observer/crawler.go
index e79b756a578..2794d0a6944 100644
--- a/cmd/observer/observer/crawler.go
+++ b/cmd/observer/observer/crawler.go
@@ -76,7 +76,11 @@ func NewCrawler(
return nil, fmt.Errorf("unknown chain %s", chain)
}
- forkFilter := forkid.NewStaticFilter(chainConfig, *genesisHash)
+ // TODO(yperbasis) This might be a problem for chains that have a time-based fork (Shanghai, Cancun, etc)
+ // in genesis already, e.g. Holesky.
+ genesisTime := uint64(0)
+
+ forkFilter := forkid.NewStaticFilter(chainConfig, *genesisHash, genesisTime)
diplomacy := NewDiplomacy(
database.NewDBRetrier(db, logger),
diff --git a/cmd/observer/observer/handshake.go b/cmd/observer/observer/handshake.go
index 041f8b42abe..c975e5086c0 100644
--- a/cmd/observer/observer/handshake.go
+++ b/cmd/observer/observer/handshake.go
@@ -1,12 +1,12 @@
package observer
import (
+ "bytes"
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"net"
- "strings"
"time"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -215,15 +215,11 @@ func readMessage(conn *rlpx.Conn, expectedMessageID uint64, decodeError Handshak
return readMessage(conn, expectedMessageID, decodeError, message)
}
if messageID == RLPxMessageIDDisconnect {
- var reason [1]p2p.DiscReason
- err = rlp.DecodeBytes(data, &reason)
- if (err != nil) && strings.Contains(err.Error(), "rlp: expected input list") {
- err = rlp.DecodeBytes(data, &reason[0])
- }
+ reason, err := p2p.DisconnectMessagePayloadDecode(bytes.NewBuffer(data))
if err != nil {
return NewHandshakeError(HandshakeErrorIDDisconnectDecode, err, 0)
}
- return NewHandshakeError(HandshakeErrorIDDisconnect, reason[0], uint64(reason[0]))
+ return NewHandshakeError(HandshakeErrorIDDisconnect, reason, uint64(reason))
}
if messageID != expectedMessageID {
return NewHandshakeError(HandshakeErrorIDUnexpectedMessage, nil, messageID)
diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go
index b10879a89ce..99c2cb4bbc2 100644
--- a/cmd/observer/observer/server.go
+++ b/cmd/observer/observer/server.go
@@ -33,7 +33,7 @@ type Server struct {
logger log.Logger
}
-func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
+func NewServer(ctx context.Context, flags CommandFlags, logger log.Logger) (*Server, error) {
nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth66")
nodeKeyConfig := p2p.NodeKeyConfig{}
@@ -42,7 +42,7 @@ func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
return nil, err
}
- localNode, err := makeLocalNode(nodeDBPath, privateKey, flags.Chain, logger)
+ localNode, err := makeLocalNode(ctx, nodeDBPath, privateKey, flags.Chain, logger)
if err != nil {
return nil, err
}
@@ -84,8 +84,8 @@ func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
return &instance, nil
}
-func makeLocalNode(nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) {
- db, err := enode.OpenDB(nodeDBPath, "")
+func makeLocalNode(ctx context.Context, nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) {
+ db, err := enode.OpenDB(ctx, nodeDBPath, "")
if err != nil {
return nil, err
}
@@ -108,7 +108,11 @@ func makeForksENREntry(chain string) (enr.Entry, error) {
return nil, fmt.Errorf("unknown chain %s", chain)
}
- heightForks, timeForks := forkid.GatherForks(chainConfig)
+ // TODO(yperbasis) This might be a problem for chains that have a time-based fork (Shanghai, Cancun, etc)
+ // in genesis already, e.g. Holesky.
+ genesisTime := uint64(0)
+
+ heightForks, timeForks := forkid.GatherForks(chainConfig, genesisTime)
return eth.CurrentENREntryFromForks(heightForks, timeForks, *genesisHash, 0, 0), nil
}
@@ -179,5 +183,5 @@ func (server *Server) Listen(ctx context.Context) (*discover.UDPv4, error) {
server.logger.Debug("Discovery UDP listener is up", "addr", realAddr)
- return discover.ListenV4(ctx, conn, server.localNode, server.discConfig)
+ return discover.ListenV4(ctx, "any", conn, server.localNode, server.discConfig)
}
diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go
index 6eb48e923b8..3f567847bbd 100644
--- a/cmd/p2psim/main.go
+++ b/cmd/p2psim/main.go
@@ -39,12 +39,12 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common"
"io"
"os"
"strings"
"text/tabwriter"
- "github.com/ledgerwatch/erigon/cmd/utils"
"github.com/ledgerwatch/erigon/turbo/logging"
"github.com/urfave/cli/v2"
@@ -292,7 +292,7 @@ func createNode(ctx *cli.Context) error {
config.PrivateKey = privKey
}
if services := ctx.String("services"); services != "" {
- config.Lifecycles = utils.SplitAndTrim(services)
+ config.Lifecycles = common.CliString2Array(services)
}
node, err := client.CreateNode(config)
if err != nil {
diff --git a/cmd/pics/state.go b/cmd/pics/state.go
index 7c999e4f878..1c387162d61 100644
--- a/cmd/pics/state.go
+++ b/cmd/pics/state.go
@@ -430,13 +430,13 @@ func initialState1() error {
// BLOCKS
for i := 0; i < chain.Length(); i++ {
- if err = m2.InsertChain(chain.Slice(i, i+1), nil); err != nil {
+ if err = m2.InsertChain(chain.Slice(i, i+1)); err != nil {
return err
}
if err = stateDatabaseComparison(m.DB, m2.DB, i+1); err != nil {
return err
}
- if err = m.InsertChain(chain.Slice(i, i+1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(i, i+1)); err != nil {
return err
}
}
diff --git a/cmd/prometheus/Readme.md b/cmd/prometheus/Readme.md
index 853eb91b2a8..d6272e4a89d 100644
--- a/cmd/prometheus/Readme.md
+++ b/cmd/prometheus/Readme.md
@@ -13,7 +13,7 @@ Env variables:
- `ERIGON_GRAFANA_CONFIG` path to custom `grafana.ini file`. Default is: `./cmd/prometheus/grafana.ini`
To add custom Erigon host: copy `./cmd/prometheus/prometheus.yml`, modify, pass new location by:
-`ERIGON_PROMETHEUS_CONFIG=/new/location/prometheus.yml docker-compose up prometheus grafana`
+`ERIGON_PROMETHEUS_CONFIG=/new/location/prometheus.yml docker compose up prometheus grafana`
## For developers
diff --git a/cmd/prometheus/dashboards/erigon.json b/cmd/prometheus/dashboards/erigon.json
index 2c37e5a02b3..ac97e232f96 100644
--- a/cmd/prometheus/dashboards/erigon.json
+++ b/cmd/prometheus/dashboards/erigon.json
@@ -24,11 +24,12 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 1,
+ "id": 2,
"links": [],
"liveNow": false,
"panels": [
{
+ "collapsed": false,
"datasource": {
"type": "prometheus"
},
@@ -38,7 +39,8 @@
"x": 0,
"y": 0
},
- "id": 171,
+ "id": 4,
+ "panels": [],
"targets": [
{
"datasource": {
@@ -47,7 +49,7 @@
"refId": "A"
}
],
- "title": "Blocks execution",
+ "title": "Blockchain",
"type": "row"
},
{
@@ -73,17 +75,15 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
- "spanNulls": false,
+ "spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
@@ -92,7 +92,6 @@
"mode": "off"
}
},
- "decimals": 1,
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -112,40 +111,55 @@
"overrides": []
},
"gridPos": {
- "h": 10,
- "w": 8,
+ "h": 11,
+ "w": 5,
"x": 0,
"y": 1
},
- "id": 196,
+ "id": 110,
+ "links": [],
"options": {
"legend": {
"calcs": [
"lastNotNull"
],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
- "mode": "multi",
+ "mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
"editorMode": "code",
- "expr": "sync{instance=~\"$instance\"}",
- "instant": false,
- "legendFormat": "{{ stage }}: {{instance}}",
+ "expr": "sync{instance=~\"$instance\",stage=\"headers\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "header: {{instance}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "chain_head_block{instance=~\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "block: {{instance}}",
+ "refId": "C"
}
],
- "title": "Sync Stages progress",
+ "title": "Chain head",
"type": "timeseries"
},
{
@@ -164,13 +178,14 @@
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -201,24 +216,24 @@
}
]
},
- "unit": "ops"
+ "unit": "short"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
+ "h": 11,
+ "w": 5,
+ "x": 5,
"y": 1
},
- "id": 158,
+ "id": 116,
"links": [],
"options": {
"legend": {
"calcs": [
"mean"
],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -234,17 +249,41 @@
"type": "prometheus"
},
"editorMode": "code",
- "exemplar": true,
- "expr": "rate(sync{instance=~\"$instance\"}[$rate_interval])",
+ "expr": "txpool_pending{instance=~\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "{{ stage }}: {{instance}}",
+ "legendFormat": "executable: {{instance}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "txpool_basefee{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "basefee: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "txpool_queued{instance=~\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "gapped: {{instance}}",
+ "refId": "B"
}
],
- "title": "Sync Stages progress rate",
+ "title": "Transaction pool",
"type": "timeseries"
},
{
@@ -263,13 +302,14 @@
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -300,22 +340,23 @@
}
]
},
- "unit": "ops"
+ "unit": "percent"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 16,
+ "h": 11,
+ "w": 7,
+ "x": 10,
"y": 1
},
- "id": 195,
+ "id": 106,
"links": [],
"options": {
"legend": {
"calcs": [
- "mean"
+ "mean",
+ "lastNotNull"
],
"displayMode": "list",
"placement": "bottom",
@@ -334,22 +375,23 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
+ "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "txs apply: {{instance}}",
+ "legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
- "title": "Exec v3: txs/s ",
+ "title": "CPU",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -369,6 +411,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -399,17 +442,17 @@
}
]
},
- "unit": "s"
+ "unit": "decbytes"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
- "y": 6
+ "h": 11,
+ "w": 7,
+ "x": 17,
+ "y": 1
},
- "id": 112,
+ "id": 154,
"links": [],
"options": {
"legend": {
@@ -417,7 +460,7 @@
"mean",
"lastNotNull"
],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -432,16 +475,94 @@
"datasource": {
"type": "prometheus"
},
+ "editorMode": "code",
"exemplar": true,
- "expr": "chain_execution_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}",
"format": "time_series",
+ "hide": true,
"interval": "",
"intervalFactor": 1,
- "legendFormat": "execution: {{instance}}",
+ "legendFormat": "stack_sys: {{ instance }}",
+ "range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "max: {{ instance }}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "stack_inuse: {{ instance }}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "mspan_sys: {{ instance }}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "mcache_sys: {{ instance }}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "current: {{ instance }}",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Block Execution speed ",
+ "title": "Memory Use",
"type": "timeseries"
},
{
@@ -467,14 +588,18 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
- "spanNulls": true,
+ "spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
@@ -483,7 +608,7 @@
"mode": "off"
}
},
- "decimals": 2,
+ "decimals": 1,
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -498,25 +623,24 @@
}
]
},
- "unit": "percentunit"
+ "unit": "short"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 16,
- "y": 6
+ "h": 19,
+ "w": 10,
+ "x": 0,
+ "y": 12
},
- "id": 194,
- "links": [],
+ "id": 196,
"options": {
"legend": {
"calcs": [
- "mean"
+ "lastNotNull"
],
- "displayMode": "list",
- "placement": "bottom",
+ "displayMode": "table",
+ "placement": "right",
"showLegend": true
},
"tooltip": {
@@ -524,65 +648,22 @@
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
"editorMode": "code",
- "exemplar": true,
- "expr": "rate(exec_repeats{instance=~\"$instance\"}[$rate_interval])/rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "repeats: {{instance}}",
+ "expr": "sync{instance=~\"$instance\"}",
+ "instant": false,
+ "legendFormat": "{{ stage }}: {{instance}}",
"range": true,
"refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(exec_triggers{instance=~\"$instance\"}[$rate_interval])/rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "triggers: {{instance}}",
- "range": true,
- "refId": "B"
}
],
- "title": "Exec v3",
+ "title": "Sync Stages progress",
"type": "timeseries"
},
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 11
- },
- "id": 17,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "Database",
- "type": "row"
- },
{
"datasource": {
"type": "prometheus"
@@ -598,14 +679,15 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 10,
+ "drawStyle": "line",
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -623,7 +705,6 @@
}
},
"mappings": [],
- "min": 0.001,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -637,21 +718,27 @@
}
]
},
- "unit": "ops"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
+ "h": 11,
+ "w": 7,
+ "x": 10,
"y": 12
},
- "id": 141,
+ "id": 77,
+ "links": [],
"options": {
"legend": {
- "calcs": [],
- "displayMode": "list",
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -666,23 +753,43 @@
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_commit_seconds_count{phase=\"total\",instance=~\"$instance\"}[$rate_interval])",
+ "expr": "p2p_peers{instance=~\"$instance\"}",
+ "format": "time_series",
"interval": "",
- "legendFormat": "commit: {{instance}}",
- "range": true,
+ "intervalFactor": 1,
+ "legendFormat": "peers: {{instance}}",
"refId": "A"
- }
- ],
- "title": "Commit",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "rate(p2p_dials{instance=~\"$instance\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "dials: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "rate(p2p_serves{instance=~\"$instance\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "serves: {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "title": "Peers",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -702,9 +809,10 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
- "pointSize": 2,
+ "pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
@@ -732,23 +840,25 @@
}
]
},
- "unit": "s"
+ "unit": "Bps"
},
"overrides": []
},
"gridPos": {
- "h": 9,
- "w": 16,
- "x": 8,
+ "h": 11,
+ "w": 7,
+ "x": 17,
"y": 12
},
- "id": 166,
+ "id": 96,
+ "links": [],
"options": {
"legend": {
"calcs": [
- "mean"
+ "mean",
+ "lastNotNull"
],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -763,172 +873,29 @@
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_commit_seconds{phase=\"total\",quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "total: {{instance}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
"exemplar": true,
- "expr": "db_commit_seconds{phase=\"gc_wall_clock\",quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
+ "expr": "rate(p2p_ingress{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
"interval": "",
- "legendFormat": "gc_wall_clock: {{instance}}",
- "range": true,
+ "intervalFactor": 1,
+ "legendFormat": "ingress: {{instance}}",
"refId": "B"
},
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
"exemplar": true,
- "expr": "db_commit_seconds{phase=\"write\",quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
+ "expr": "rate(p2p_egress{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": true,
"interval": "",
- "legendFormat": "write: {{instance}}",
- "range": true,
+ "intervalFactor": 1,
+ "legendFormat": "egress: {{instance}}",
"refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_commit_seconds{phase=\"sync\",quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "sync: {{instance}}",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_self_rtime_cpu{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_self_rtime_cpu: {{instance}}",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_work_rtime_cpu{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_rtime_cpu: {{instance}}",
- "range": true,
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_work_rtime{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_rtime: {{instance}}",
- "range": true,
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_self_rtime{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_self_rtime: {{instance}}",
- "range": true,
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_commit_seconds{phase=\"gc_cpu_time\",quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_cpu_time: {{instance}}",
- "range": true,
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_self_xtime{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_self_xtime: {{instance}}",
- "range": true,
- "refId": "J"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_work_pnl_merge_time{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "work_pnl_merge_time: {{instance}}",
- "range": true,
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_slef_pnl_merge_time{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "self_pnl_merge_time: {{instance}}",
- "range": true,
- "refId": "L"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc_work_xtime{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_xtime: {{instance}}",
- "range": true,
- "refId": "M"
}
],
- "title": "Commit speed",
+ "title": "Network Traffic",
"type": "timeseries"
},
{
@@ -954,6 +921,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -961,10 +929,10 @@
"type": "linear"
},
"showPoints": "never",
- "spanNulls": true,
+ "spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -984,21 +952,25 @@
}
]
},
- "unit": "decbytes"
+ "unit": "short"
},
"overrides": []
},
"gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 17
+ "h": 8,
+ "w": 7,
+ "x": 10,
+ "y": 23
},
- "id": 159,
+ "id": 85,
+ "links": [],
"options": {
"legend": {
- "calcs": [],
- "displayMode": "list",
+ "calcs": [
+ "mean",
+ "lastNotNull"
+ ],
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -1007,31 +979,34 @@
"sort": "none"
}
},
- "pluginVersion": "8.4.7",
+ "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
- "expr": "db_size{instance=~\"$instance\"}",
+ "exemplar": true,
+ "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
"interval": "",
- "legendFormat": "size: {{instance}}",
+ "intervalFactor": 1,
+ "legendFormat": "read: {{instance}}",
"refId": "A"
},
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "db_mi_last_pgno{instance=~\"$instance\"}",
- "hide": false,
+ "exemplar": true,
+ "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
"interval": "",
- "legendFormat": "db_mi_last_pgno: {{instance}}",
- "range": true,
+ "intervalFactor": 1,
+ "legendFormat": "write: {{instance}}",
"refId": "B"
}
],
- "title": "DB Size",
+ "title": "Disk bytes/sec",
"type": "timeseries"
},
{
@@ -1057,6 +1032,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1064,10 +1040,10 @@
"type": "linear"
},
"showPoints": "never",
- "spanNulls": false,
+ "spanNulls": true,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -1087,23 +1063,23 @@
}
]
},
- "unit": "short"
+ "unit": "decbytes"
},
"overrides": []
},
"gridPos": {
- "h": 7,
- "w": 16,
- "x": 8,
- "y": 21
+ "h": 8,
+ "w": 7,
+ "x": 17,
+ "y": 23
},
- "id": 168,
+ "id": 159,
"options": {
"legend": {
"calcs": [
- "mean"
+ "lastNotNull"
],
- "displayMode": "list",
+ "displayMode": "table",
"placement": "bottom",
"showLegend": true
},
@@ -1112,190 +1088,56 @@
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
+ "pluginVersion": "8.4.7",
"targets": [
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"newly\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
+ "expr": "db_size{instance=~\"$instance\"}",
"interval": "",
- "legendFormat": "newly: {{instance}}",
- "range": true,
+ "legendFormat": "size: {{instance}}",
"refId": "A"
},
{
"datasource": {
"type": "prometheus"
},
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"cow\", instance=~\"$instance\"}[$rate_interval])",
+ "editorMode": "code",
+ "expr": "db_mi_last_pgno{instance=~\"$instance\"}",
"hide": false,
"interval": "",
- "legendFormat": "cow: {{instance}}",
+ "legendFormat": "db_mi_last_pgno: {{instance}}",
+ "range": true,
"refId": "B"
- },
+ }
+ ],
+ "title": "DB Size",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 31
+ },
+ "id": 183,
+ "panels": [],
+ "targets": [
{
"datasource": {
"type": "prometheus"
},
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"clone\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "clone: {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"split\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "split: {{instance}}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"merge\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "merge: {{instance}}",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"spill\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "spill: {{instance}}",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"wops\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "wops: {{instance}}",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"unspill\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "unspill: {{instance}}",
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"gcrloops\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "gcrloops: {{instance}}",
- "range": true,
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"gcwloops\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "gcwloops: {{instance}}",
- "range": true,
- "refId": "J"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"gcxpages\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "gcxpages: {{instance}}",
- "range": true,
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"msync\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "msync: {{instance}}",
- "range": true,
- "refId": "L"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"fsync\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "fsync: {{instance}}",
- "range": true,
- "refId": "M"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"minicore\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "minicore: {{instance}}",
- "refId": "N"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(db_pgops{phase=\"prefault\", instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "prefault: {{instance}}",
- "refId": "O"
+ "refId": "A"
}
],
- "title": "DB Pages Ops/sec",
- "type": "timeseries"
+ "title": "RPC",
+ "type": "row"
},
{
"datasource": {
@@ -1326,8 +1168,8 @@
"scaleDistribution": {
"type": "linear"
},
- "showPoints": "never",
- "spanNulls": true,
+ "showPoints": "auto",
+ "spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
@@ -1341,8 +1183,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1350,58 +1191,57 @@
}
]
},
- "unit": "decbytes"
+ "unit": "reqps"
},
"overrides": []
},
"gridPos": {
- "h": 6,
- "w": 8,
+ "h": 8,
+ "w": 12,
"x": 0,
- "y": 22
+ "y": 32
},
- "id": 167,
+ "id": 185,
"options": {
"legend": {
"calcs": [
- "mean"
+ "mean",
+ "last"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
- "mode": "multi",
+ "mode": "single",
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "tx_limit{instance=~\"$instance\"}",
+ "exemplar": true,
+ "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"success\"}[1m])",
"interval": "",
- "legendFormat": "limit: {{instance}}",
- "range": true,
+ "legendFormat": "success {{ method }} {{ instance }} ",
"refId": "A"
},
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
- "expr": "tx_dirty{instance=~\"$instance\"}",
+ "exemplar": true,
+ "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"failure\"}[1m])",
"hide": false,
"interval": "",
- "legendFormat": "dirty: {{instance}}",
- "range": true,
+ "legendFormat": "failure {{ method }} {{ instance }} ",
"refId": "B"
}
],
- "title": "Tx Size",
+ "title": "RPS",
+ "transformations": [],
"type": "timeseries"
},
{
@@ -1437,7 +1277,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -1448,8 +1288,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1457,74 +1296,52 @@
}
]
},
- "unit": "short"
+ "unit": "s"
},
"overrides": []
},
"gridPos": {
- "h": 6,
- "w": 8,
- "x": 0,
- "y": 28
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 32
},
- "id": 169,
+ "id": 187,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "mean",
+ "last"
+ ],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
- "mode": "multi",
+ "mode": "single",
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
"exemplar": true,
- "expr": "db_gc_leaf{instance=~\"$instance\"}",
+ "expr": "rpc_duration_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
"interval": "",
- "legendFormat": "gc_leaf: {{instance}}",
+ "legendFormat": " {{ method }} {{ instance }} {{ success }}",
"refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "db_gc_overflow{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_overflow: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "exec_steps_in_db{instance=~\"$instance\"}/100",
- "hide": false,
- "interval": "",
- "legendFormat": "exec_steps_in_db: {{instance}}",
- "range": true,
- "refId": "E"
}
],
- "title": "GC and State",
+ "title": "Timings",
+ "transformations": [],
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1550,11 +1367,11 @@
"scaleDistribution": {
"type": "linear"
},
- "showPoints": "never",
+ "showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -1565,8 +1382,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1579,34 +1395,37 @@
"overrides": []
},
"gridPos": {
- "h": 6,
- "w": 16,
- "x": 8,
- "y": 28
+ "h": 8,
+ "w": 7,
+ "x": 12,
+ "y": 40
},
- "id": 150,
+ "id": 189,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "mean",
+ "last"
+ ],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
- "mode": "multi",
+ "mode": "single",
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
"type": "prometheus"
},
"exemplar": true,
- "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])",
+ "expr": "cache_keys_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
"interval": "",
- "legendFormat": "soft: {{instance}}",
+ "legendFormat": "keys: {{ instance }} ",
"refId": "A"
},
{
@@ -1614,14 +1433,36 @@
"type": "prometheus"
},
"exemplar": true,
- "expr": "rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])",
+ "expr": "cache_list_total{name=\"rpc\",instance=~\"$instance\"}",
"hide": false,
"interval": "",
- "legendFormat": "hard: {{instance}}",
+ "legendFormat": "list: {{ instance }} ",
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_code_keys_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "code_keys: {{ instance }} ",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_code_list_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "code_list: {{ instance }} ",
+ "refId": "D"
}
],
- "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults",
+ "title": "Cache keys",
"type": "timeseries"
},
{
@@ -1668,8 +1509,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1682,14 +1522,17 @@
},
"gridPos": {
"h": 8,
- "w": 16,
- "x": 8,
- "y": 34
+ "w": 5,
+ "x": 19,
+ "y": 40
},
- "id": 191,
+ "id": 184,
"options": {
"legend": {
- "calcs": [],
+ "calcs": [
+ "mean",
+ "last"
+ ],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
@@ -1699,7 +1542,6 @@
"sort": "none"
}
},
- "pluginVersion": "8.0.6",
"targets": [
{
"datasource": {
@@ -1707,4946 +1549,134 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "db_gc{phase=\"work_rxpages\", instance=~\"$instance\"}",
+ "expr": "sum(delta(cache_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
"hide": false,
"interval": "",
- "legendFormat": "work_rxpages: {{instance}}",
+ "legendFormat": "hit rate: {{ instance }} ",
"range": true,
- "refId": "B"
+ "refId": "A"
},
{
"datasource": {
"type": "prometheus"
},
- "editorMode": "code",
"exemplar": true,
- "expr": "db_gc{phase=\"self_rsteps\", instance=~\"$instance\"}",
+ "expr": "sum(delta(cache_code_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_code_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
"hide": false,
"interval": "",
- "legendFormat": "self_rsteps: {{instance}}",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"wloop\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "wloop: {{instance}}",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"coalescences\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "coalescences: {{instance}}",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"wipes\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "wipes: {{instance}}",
- "range": true,
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"flushes\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "flushes: {{instance}}",
- "range": true,
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"kicks\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "kicks: {{instance}}",
- "range": true,
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"work_rsteps\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_rsteps: {{instance}}",
- "range": true,
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"self_xpages\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "self_xpages: {{instance}}",
- "range": true,
- "refId": "J"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"work_majflt\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_majflt: {{instance}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"self_majflt\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_self_majflt: {{instance}}",
- "range": true,
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"self_counter\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_self_counter: {{instance}}",
- "range": true,
- "refId": "L"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "db_gc{phase=\"work_counter\", instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "gc_work_counter: {{instance}}",
- "range": true,
- "refId": "M"
- }
- ],
- "title": "Commit counters",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 42
- },
- "id": 134,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "Process",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": []
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 18,
- "w": 8,
- "x": 0,
- "y": 43
- },
- "id": 165,
- "options": {
- "colorMode": "value",
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "range"
- ],
- "fields": "",
- "values": false
- },
- "text": {
- "titleSize": 14,
- "valueSize": 14
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "ru_inblock{instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "inblock: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "ru_outblock{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "outblock: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "ru_minflt{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "minflt: {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "ru_majflt{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "majflt: {{instance}}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "system_disk_readbytes{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "system_disk_readbytes: {{instance}}",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "system_disk_writebytes{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "system_disk_writebytes: {{instance}}",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_newly{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_newly: {{instance}}",
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_cow{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_cow: {{instance}}",
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_clone{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_clone: {{instance}}",
- "refId": "J"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_split{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_split: {{instance}}",
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_merge{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_merge: {{instance}}",
- "refId": "L"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_spill{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_spill: {{instance}}",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_unspill{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_unspill: {{instance}}",
- "refId": "M"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_pgops_wops{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "pgops_wops: {{instance}}",
- "refId": "N"
- }
- ],
- "title": "Rusage Total (\"last value\" - \"first value\" on selected period)",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 43
- },
- "id": 155,
- "links": [],
- "options": {
- "legend": {
- "calcs": [
- "mean"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(process_io_write_syscalls_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "in: {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(process_io_read_syscalls_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "out: {{instance}}",
- "refId": "D"
- }
- ],
- "title": "Read/Write syscall/sec",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "cps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 43
- },
- "id": 153,
- "options": {
- "legend": {
- "calcs": [
- "mean"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(go_cgo_calls_count{instance=~\"$instance\"}[$rate_interval])",
- "interval": "",
- "legendFormat": "cgo_calls_count: {{instance}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "cgo calls",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 49
- },
- "id": 85,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "read: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "write: {{instance}}",
- "refId": "B"
- }
- ],
- "title": "Disk bytes/sec",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 49
- },
- "id": 128,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "expr": "go_goroutines{instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "goroutines: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "expr": "go_threads{instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "threads: {{instance}}",
- "refId": "B"
- }
- ],
- "title": "GO Goroutines and Threads",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 55
- },
- "id": 154,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "stack_sys: {{ instance }}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "sys: {{ instance }}",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "stack_inuse: {{ instance }}",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "mspan_sys: {{ instance }}",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "mcache_sys: {{ instance }}",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "heap_alloc: {{ instance }}",
- "range": true,
- "refId": "F"
- }
- ],
- "title": "go memstat",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 16,
- "y": 55
- },
- "id": 124,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])",
- "instant": false,
- "interval": "",
- "legendFormat": "",
- "refId": "A"
- }
- ],
- "title": "GC Stop the World per sec",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 61
- },
- "id": 148,
- "options": {
- "legend": {
- "calcs": [
- "max"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}",
- "hide": true,
- "interval": "",
- "legendFormat": "resident virtual mem: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}",
- "hide": true,
- "interval": "",
- "legendFormat": "resident anon mem: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "process_resident_memory_bytes{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "resident mem: {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mem_data{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "data: {{instance}}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mem_stack{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "stack: {{instance}}",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mem_locked{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "locked: {{instance}}",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mem_swap{instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "swap: {{instance}}",
- "refId": "G"
- }
- ],
- "title": "mem: resident set size",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 66
- },
- "id": 86,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "memstats_mallocs_total: {{ instance }}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "memstats_frees_total: {{ instance }}",
- "range": true,
- "refId": "B"
- }
- ],
- "title": "Process Mem: allocate objects/sec, free",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "percent"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 71
- },
- "id": 106,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "increase(process_cpu_seconds_system_total{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "system: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "increase(process_cpu_seconds_user_total{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "iowait: {{instance}}",
- "refId": "B"
- }
- ],
- "title": "CPU",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 76
- },
- "id": 82,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "System",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 77
- },
- "id": 157,
- "links": [],
- "options": {
- "legend": {
- "calcs": [
- "mean"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_total{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "total: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_available{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "available: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_used{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "used: {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_buffers{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "buffers: {{instance}}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_cached{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "cached: {{instance}}",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_writeback{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "writeback: {{instance}}",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_dirty{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "dirty: {{instance}}",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_shared{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "shared: {{instance}}",
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "vmem_mapped{instance=~\"$instance\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "mapped: {{instance}}",
- "refId": "I"
- }
- ],
- "title": "Host VMem",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 82
- },
- "id": 173,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "TxPool v2",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 83
- },
- "id": 175,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_process_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "process_remote_txs: {{ instance }}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_add_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "add_remote_txs: {{ instance }}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_new_block{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "new_block: {{ instance }}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_write_to_db{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "write_to_db: {{ instance }}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_propagate_to_new_peer{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "propagate_to_new_peer: {{ instance }}",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "pool_propagate_new_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "propagate_new_txs: {{ instance }}",
- "refId": "F"
- }
- ],
- "title": "Timings",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "reqps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 83
- },
- "id": 177,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(pool_process_remote_txs_count{instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "pool_process_remote_txs_count: {{ instance }}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(pool_add_remote_txs_count{instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "pool_add_remote_txs_count: {{ instance }}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(pool_new_block_count{instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "pool_new_block_count: {{ instance }}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(pool_write_to_db_count{instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "pool_write_to_db_count: {{ instance }}",
- "refId": "D"
- }
- ],
- "title": "RPS",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 0,
- "y": 91
- },
- "id": 176,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "sum(delta(cache_total{result=\"hit\",name=\"txpool\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])) ",
- "hide": false,
- "interval": "",
- "legendFormat": "hit rate: {{ instance }} ",
- "refId": "A"
- }
- ],
- "title": "Cache hit-rate",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 91
- },
- "id": 180,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])",
- "hide": false,
- "interval": "",
- "legendFormat": "{{ result }}: {{ instance }} ",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(cache_timeout_total{name=\"txpool\",instance=~\"$instance\"}[1m])",
- "hide": false,
- "interval": "",
- "legendFormat": "timeout: {{ instance }} ",
- "refId": "B"
- }
- ],
- "title": "Cache rps",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 91
- },
- "id": 181,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_keys_total{name=\"txpool\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "keys: {{ instance }} ",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_list_total{name=\"txpool\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "list: {{ instance }} ",
- "refId": "B"
- }
- ],
- "title": "Cache keys",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "binBps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 0,
- "y": 97
- },
- "id": 178,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(pool_write_to_db_bytes{instance=~\"$instance\"}[$rate_interval])",
- "hide": false,
- "interval": "",
- "legendFormat": "pool_write_to_db_bytes: {{ instance }}",
- "refId": "A"
- }
- ],
- "title": "DB",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 103
- },
- "id": 183,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "RPC",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "reqps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 104
- },
- "id": 185,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"success\"}[1m])",
- "interval": "",
- "legendFormat": "success {{ method }} {{ instance }} ",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"failure\"}[1m])",
- "hide": false,
- "interval": "",
- "legendFormat": "failure {{ method }} {{ instance }} ",
- "refId": "B"
- }
- ],
- "title": "RPS",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 104
- },
- "id": 186,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "db_begin_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "db_begin_seconds: {{ method }} {{ instance }}",
- "refId": "A"
- }
- ],
- "title": "DB begin",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 112
- },
- "id": 187,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rpc_duration_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": " {{ method }} {{ instance }} {{ success }}",
- "refId": "A"
- }
- ],
- "title": "Timings",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 112
- },
- "id": 188,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "go_goroutines{instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "go/goroutines: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "go_threads{instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "go/threads: {{instance}}",
- "refId": "B"
- }
- ],
- "title": "GO Goroutines and Threads",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 8,
- "y": 120
- },
- "id": 189,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_keys_total{name=\"rpc\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "keys: {{ instance }} ",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_list_total{name=\"rpc\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "list: {{ instance }} ",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_code_keys_total{name=\"rpc\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "code_keys: {{ instance }} ",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "cache_code_list_total{name=\"rpc\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "code_list: {{ instance }} ",
- "refId": "D"
- }
- ],
- "title": "Cache keys",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 16,
- "y": 120
- },
- "id": 184,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "last"
- ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "editorMode": "code",
- "exemplar": true,
- "expr": "sum(delta(cache_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
- "hide": false,
- "interval": "",
- "legendFormat": "hit rate: {{ instance }} ",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "sum(delta(cache_code_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_code_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
- "hide": false,
- "interval": "",
- "legendFormat": "code hit rate: {{ instance }} ",
- "refId": "B"
- }
- ],
- "title": "Cache hit-rate",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 126
- },
- "id": 146,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "Hidden",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 127
- },
- "hiddenSeries": false,
- "id": 122,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "sort": "avg",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "trie_subtrieloader_flatdb{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "trie_subtrieloader_flatdb: {{quantile}}, {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "trie_subtrieloader_witnessdb{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "trie_subtrieloader_witnessdb: {{quantile}}, {{instance}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "Merkle Root calculation (stage 5)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": 6,
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:431",
- "format": "ns",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:432",
- "format": "short",
- "logBase": 1,
- "show": false
- }
- ],
- "yaxis": {
- "align": false
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
- "y": 127
- },
- "hiddenSeries": false,
- "id": 162,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(db_op_set_count{instance=~\"$instance\"}[1m])",
- "interval": "",
- "legendFormat": "",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(db_op_set_range_count{instance=~\"$instance\"}[1m])",
- "interval": "",
- "legendFormat": "",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(db_op_get_count{instance=~\"$instance\"}[1m])",
- "interval": "",
- "legendFormat": "",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(db_op_get_both{instance=~\"$instance\"}[1m])",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(db_op_get_both_range_count{instance=~\"$instance\"}[1m])",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put_current{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "H"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "AutoDupsort Call/Sec",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:139",
- "format": "short",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:140",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 4,
- "w": 8,
- "x": 16,
- "y": 127
- },
- "hiddenSeries": false,
- "id": 156,
- "legend": {
- "avg": true,
- "current": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_get{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "get: {{quantile}}, {{instance}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "db.Get() latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:887",
- "format": "ns",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:888",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 0,
- "y": 132
- },
- "hiddenSeries": false,
- "id": 143,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_set{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_set_range{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_get{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_get_both{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_get_both_range{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put_current{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "db_op_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "",
- "refId": "H"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "AutoDupsort Call/Sec",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:139",
- "format": "ns",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:140",
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "description": "",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 8,
- "x": 8,
- "y": 132
- },
- "hiddenSeries": false,
- "id": 142,
- "legend": {
- "alignAsTable": false,
- "avg": true,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "sort": "avg",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_direct{quantile=\"$quantile\",instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "mdbx_put_direct: {{quantile}}, {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_direct{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_direct: {{quantile}}, {{instance}}",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_both_range{quantile=\"$quantile\",instance=~\"$instance\"}",
- "instant": false,
- "interval": "",
- "legendFormat": "mdbx_put_both_range: {{quantile}}, {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_both_range{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_both_range: {{quantile}}, {{instance}}",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_seek_exact{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "mdbx_seek_exact: {{quantile}}, {{instance}}",
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_seek_exact{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "mdbx_seek_exact: {{quantile}}, {{instance}}",
- "refId": "J"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_no_overwrite: {{quantile}}, {{instance}}",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_no_overwrite: {{quantile}}, {{instance}}",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_upsert{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_upsert: {{quantile}}, {{instance}}",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_upsert{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_upsert: {{quantile}}, {{instance}}",
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_current2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_current2: {{quantile}}, {{instance}}",
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_current2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_current2: {{quantile}}, {{instance}}",
- "refId": "L"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_upsert2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_upsert2: {{quantile}}, {{instance}}",
- "refId": "M"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_put_upsert2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_upsert2: {{quantile}}, {{instance}}",
- "refId": "N"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_del_current{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_put_current: {{quantile}}, {{instance}}",
- "refId": "O"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_del_current{quantile=\"$quantile\",instance=~\"$instance\"}",
- "interval": "",
- "legendFormat": "mdbx_del_current: {{quantile}}, {{instance}}",
- "refId": "P"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_seek_exact2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "mdbx_seek_exact2: {{quantile}}, {{instance}}",
- "refId": "Q"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "mdbx_seek_exact2{quantile=\"$quantile\",instance=~\"$instance\"}",
- "hide": false,
- "interval": "",
- "legendFormat": "mdbx_seek_exact2: {{quantile}}, {{instance}}",
- "refId": "R"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "AutoDupsort Put latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:139",
- "format": "ns",
- "logBase": 1,
- "show": true
- },
- {
- "$$hashKey": "object:140",
- "format": "short",
- "logBase": 1,
- "show": false
- }
- ],
- "yaxis": {
- "align": false
- }
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 137
- },
- "id": 75,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "Network",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "Bps"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 138
- },
- "id": 96,
- "links": [],
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "lastNotNull",
- "max",
- "min"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(p2p_ingress{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "ingress: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "rate(p2p_egress{instance=~\"$instance\"}[$rate_interval])",
- "format": "time_series",
- "hide": true,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "egress: {{instance}}",
- "refId": "C"
- }
- ],
- "title": "Traffic",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 138
- },
- "id": 77,
- "links": [],
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "lastNotNull",
- "max",
- "min"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "p2p_peers{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "peers: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(p2p_dials{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "dials: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(p2p_serves{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "serves: {{instance}}",
- "refId": "C"
- }
- ],
- "title": "Peers",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 144
- },
- "id": 4,
- "panels": [],
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "refId": "A"
- }
- ],
- "title": "Blockchain",
- "type": "row"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "mappings": [
- {
- "options": {
- "match": "null",
- "result": {
- "text": "N/A"
- }
- },
- "type": "special"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 5,
- "x": 0,
- "y": 145
- },
- "id": 108,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "fieldOptions": {
- "calcs": [
- "lastNotNull"
- ]
- },
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "text": {},
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "sync{instance=~\"$instance\",stage=\"headers\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "Header: {{instance}}",
- "refId": "A"
- }
- ],
- "title": "Latest header",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "mappings": [
- {
- "options": {
- "match": "null",
- "result": {
- "text": "N/A"
- }
- },
- "type": "special"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 5,
- "x": 5,
- "y": 145
- },
- "id": 109,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "fieldOptions": {
- "calcs": [
- "lastNotNull"
- ]
- },
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "text": {},
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "sync{stage=\"headers\", instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "blocks:{{instance}}",
- "refId": "A"
- }
- ],
- "title": "Latest block",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "mappings": [
- {
- "options": {
- "match": "null",
- "result": {
- "text": "N/A"
- }
- },
- "type": "special"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 4,
- "x": 12,
- "y": 145
- },
- "id": 113,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "fieldOptions": {
- "calcs": [
- "lastNotNull"
- ]
- },
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "text": {},
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "txpool_pending{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{instance}}",
- "refId": "A"
- }
- ],
- "title": "Executable transactions",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "mappings": [
- {
- "options": {
- "match": "null",
- "result": {
- "text": "N/A"
- }
- },
- "type": "special"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 4,
- "x": 16,
- "y": 145
- },
- "id": 114,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "fieldOptions": {
- "calcs": [
- "lastNotNull"
- ]
- },
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "text": {},
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "txpool_queued{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{instance}}",
- "refId": "A"
- }
- ],
- "title": "Gapped transactions",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "mappings": [
- {
- "options": {
- "match": "null",
- "result": {
- "text": "N/A"
- }
- },
- "type": "special"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 4,
- "x": 20,
- "y": 145
- },
- "id": 115,
- "links": [],
- "maxDataPoints": 100,
- "options": {
- "colorMode": "value",
- "fieldOptions": {
- "calcs": [
- "lastNotNull"
- ]
- },
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [
- "mean"
- ],
- "fields": "",
- "values": false
- },
- "text": {},
- "textMode": "auto"
- },
- "pluginVersion": "10.0.1",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "exemplar": true,
- "expr": "txpool_local{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{instance}}",
- "refId": "A"
- }
- ],
- "title": "Local transactions",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 148
- },
- "id": 110,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "chain_head_header{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "header: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "chain_head_receipt{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "receipt: {{instance}}",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "chain_head_block{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "block: {{instance}}",
- "refId": "C"
- }
- ],
- "title": "Chain head",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 100,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 148
- },
- "id": 116,
- "links": [],
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "txpool_pending{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "executable: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "txpool_queued{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "gapped: {{instance}}",
+ "legendFormat": "code hit rate: {{ instance }} ",
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "txpool_local{instance=~\"$instance\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "local: {{instance}}",
- "refId": "C"
}
],
- "title": "Transaction pool",
+ "title": "Cache hit-rate",
"type": "timeseries"
},
{
+ "collapsed": true,
"datasource": {
"type": "prometheus"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "never",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
"gridPos": {
- "h": 7,
+ "h": 1,
"w": 24,
"x": 0,
- "y": 154
+ "y": 48
},
- "id": 117,
- "links": [],
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "lastNotNull",
- "max",
- "min"
+ "id": 138,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 124
+ },
+ "hiddenSeries": false,
+ "id": 136,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "sum by (grpc_service, grpc_method, instance) (rate(grpc_server_started_total{instance=~\"$instance\"}[1m]))",
+ "interval": "",
+ "legendFormat": "Calls: {{grpc_service}}.{{grpc_method}}, {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "sum by (grpc_service, grpc_method, instance) (rate(grpc_server_handled_total{instance=~\"$instance\",grpc_code!=\"OK\"}[1m])) ",
+ "interval": "",
+ "legendFormat": "Errors: {{grpc_service}}.{{grpc_method}}, {{instance}}",
+ "refId": "B"
+ }
],
- "displayMode": "table",
- "placement": "right",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "8.0.6",
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_valid{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "valid: {{instance}}",
- "refId": "K"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_invalid{instance=~\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "invalid: {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_underpriced{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "underpriced",
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_pending_discard{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "executable discard",
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_pending_replace{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "executable replace",
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_pending_ratelimit{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "executable ratelimit",
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_pending_nofunds{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "executable nofunds",
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_queued_discard{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "gapped discard",
- "refId": "G"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_queued_replace{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "gapped replace",
- "refId": "H"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_queued_ratelimit{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "gapped ratelimit",
- "refId": "I"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "rate(txpool_queued_nofunds{instance=\"$instance\"}[1m])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "gapped nofunds",
- "refId": "J"
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "gRPC call, error rates ",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
}
],
- "title": "Transaction propagation",
- "type": "timeseries"
- },
- {
- "collapsed": false,
- "datasource": {
- "type": "prometheus"
- },
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 161
- },
- "id": 138,
- "panels": [],
"targets": [
{
"datasource": {
@@ -6657,98 +1687,6 @@
],
"title": "Private api",
"type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": {
- "type": "prometheus"
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 162
- },
- "hiddenSeries": false,
- "id": 136,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "10.0.1",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "sum by (grpc_service, grpc_method, instance) (rate(grpc_server_started_total{instance=~\"$instance\"}[1m]))",
- "interval": "",
- "legendFormat": "Calls: {{grpc_service}}.{{grpc_method}}, {{instance}}",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus"
- },
- "expr": "sum by (grpc_service, grpc_method, instance) (rate(grpc_server_handled_total{instance=~\"$instance\",grpc_code!=\"OK\"}[1m])) ",
- "interval": "",
- "legendFormat": "Errors: {{grpc_service}}.{{grpc_method}}, {{instance}}",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeRegions": [],
- "title": "gRPC call, error rates ",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "logBase": 1,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
}
],
"refresh": "30s",
@@ -6814,10 +1752,10 @@
"current": {
"selected": true,
"text": [
- "All"
+ "mumbai3-2:6061"
],
"value": [
- "$__all"
+ "mumbai3-2:6061"
]
},
"datasource": {
@@ -6921,7 +1859,7 @@
]
},
"time": {
- "from": "now-24h",
+ "from": "now-1h",
"to": "now"
},
"timepicker": {
@@ -6949,8 +1887,8 @@
]
},
"timezone": "",
- "title": "Erigon Prometheus",
+ "title": "Erigon",
"uid": "FPpjH6Hik",
- "version": 113,
+ "version": 7,
"weekStart": ""
}
\ No newline at end of file
diff --git a/cmd/prometheus/dashboards/erigon_internals.json b/cmd/prometheus/dashboards/erigon_internals.json
new file mode 100644
index 00000000000..e2a279cec43
--- /dev/null
+++ b/cmd/prometheus/dashboards/erigon_internals.json
@@ -0,0 +1,5512 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 1,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 171,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Blocks execution",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 1,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 8,
+ "x": 0,
+ "y": 1
+ },
+ "id": 196,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "sync{instance=~\"$instance\"}",
+ "instant": false,
+ "legendFormat": "{{ stage }}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Sync Stages progress",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 1
+ },
+ "id": 158,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(sync{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ stage }}: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Sync Stages progress rate",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 1
+ },
+ "id": 195,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "txs apply: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Exec v3: txs/s ",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisGridShow": true,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 6
+ },
+ "id": 112,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "idelta(domain_collate_took_sum{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "instant": false,
+ "legendFormat": "collation took: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "idelta(domain_step_took_sum{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "step took: {{instance}}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "idelta(domain_prune_took_sum{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "prune took [{{type}}]: {{instance}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "idelta(domain_commitment_took_sum{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "commitment took: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "idelta(domain_commitment_write_took_sum{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "commitment update write took: {{instance}}",
+ "range": true,
+ "refId": "F"
+ }
+ ],
+ "title": "Time took",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 6
+ },
+ "id": 194,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(exec_repeats{instance=~\"$instance\"}[$rate_interval])/rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "repeats: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(exec_triggers{instance=~\"$instance\"}[$rate_interval])/rate(exec_txs_done{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "triggers: {{instance}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Exec v3",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 11
+ },
+ "id": 199,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "chain_execution_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "execution: {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Block Execution speed ",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 5,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 4,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "keys committed: mainnet-dev-awskii:6061"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 11
+ },
+ "id": 197,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "irate(domain_collation_size{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "collated [domain]: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "irate(domain_collation_hist_size{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "collated [history]: {{instance}}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(domain_commitment_keys[$rate_interval])) by (instance)",
+ "hide": false,
+ "legendFormat": "keys committed: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "irate(domain_commitment_updates{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "commitment node updates: {{instance}}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "irate(domain_commitment_updates_applied{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "commitment trie node updates: {{instance}}",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "irate(domain_prune_size{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "legendFormat": "pruned keys [{{type}}]: {{instance}}",
+ "range": true,
+ "refId": "G"
+ }
+ ],
+ "title": "Collate/Prune/Merge/Commitment",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "smooth",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 11
+ },
+ "id": 198,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_running_merges{instance=~\"$instance\"}",
+ "legendFormat": "running merges: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_running_collations{instance=~\"$instance\"}",
+ "hide": false,
+ "legendFormat": "running collations: {{instance}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_pruning_progress{instance=~\"$instance\"}",
+ "hide": false,
+ "legendFormat": "running prunes: {{instance}}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_running_commitment{instance=~\"$instance\"}",
+ "hide": false,
+ "legendFormat": "running commitment: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_running_files_building{instance=~\"$instance\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "running files building: {{instance}}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "domain_wal_flushes{instance=~\"$instance\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "WAL flushes {{instance}}",
+ "range": true,
+ "refId": "F"
+ }
+ ],
+ "title": "Running Collations / Merges / Prunes",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 17,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Database",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0.001,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 17
+ },
+ "id": 141,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_commit_seconds_count{phase=\"total\",instance=~\"$instance\"}[$rate_interval])",
+ "interval": "",
+ "legendFormat": "commit: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Commit",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 2,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 16,
+ "x": 8,
+ "y": 17
+ },
+ "id": 166,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_commit_seconds{phase=\"total\",quantile=\"$quantile\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "total: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_commit_seconds{phase=\"gc_wall_clock\",quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_wall_clock: {{instance}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_commit_seconds{phase=\"write\",quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "write: {{instance}}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_commit_seconds{phase=\"sync\",quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "sync: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_self_rtime_cpu{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_self_rtime_cpu: {{instance}}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_work_rtime_cpu{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_rtime_cpu: {{instance}}",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_work_rtime{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_rtime: {{instance}}",
+ "range": true,
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_self_rtime{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_self_rtime: {{instance}}",
+ "range": true,
+ "refId": "H"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_commit_seconds{phase=\"gc_cpu_time\",quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_cpu_time: {{instance}}",
+ "range": true,
+ "refId": "I"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_self_xtime{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_self_xtime: {{instance}}",
+ "range": true,
+ "refId": "J"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_work_pnl_merge_time{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "work_pnl_merge_time: {{instance}}",
+ "range": true,
+ "refId": "K"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_slef_pnl_merge_time{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "self_pnl_merge_time: {{instance}}",
+ "range": true,
+ "refId": "L"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc_work_xtime{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_xtime: {{instance}}",
+ "range": true,
+ "refId": "M"
+ }
+ ],
+ "title": "Commit speed",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 22
+ },
+ "id": 159,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_size{instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "size: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "db_mi_last_pgno{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "db_mi_last_pgno: {{instance}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "DB Size",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 16,
+ "x": 8,
+ "y": 26
+ },
+ "id": 168,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"newly\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "newly: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"cow\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "cow: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"clone\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "clone: {{instance}}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"split\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "split: {{instance}}",
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"merge\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "merge: {{instance}}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"spill\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "spill: {{instance}}",
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"wops\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "wops: {{instance}}",
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"unspill\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "unspill: {{instance}}",
+ "refId": "H"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"gcrloops\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gcrloops: {{instance}}",
+ "range": true,
+ "refId": "I"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"gcwloops\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gcwloops: {{instance}}",
+ "range": true,
+ "refId": "J"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"gcxpages\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gcxpages: {{instance}}",
+ "range": true,
+ "refId": "K"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"msync\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "msync: {{instance}}",
+ "range": true,
+ "refId": "L"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"fsync\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "fsync: {{instance}}",
+ "range": true,
+ "refId": "M"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"minicore\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "minicore: {{instance}}",
+ "refId": "N"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(db_pgops{phase=\"prefault\", instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "prefault: {{instance}}",
+ "refId": "O"
+ }
+ ],
+ "title": "DB Pages Ops/sec",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 27
+ },
+ "id": 167,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "tx_limit{instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "limit: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "tx_dirty{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "dirty: {{instance}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Tx Size",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "gc_overflow: mainnet2-1:6061"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 33
+ },
+ "id": 169,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "db_gc_leaf{instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "gc_leaf: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "db_gc_overflow{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_overflow: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "exec_steps_in_db{instance=~\"$instance\"}/100",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "exec_steps_in_db: {{instance}}",
+ "range": true,
+ "refId": "E"
+ }
+ ],
+ "title": "GC and State",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 16,
+ "x": 8,
+ "y": 33
+ },
+ "id": 150,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])",
+ "interval": "",
+ "legendFormat": "soft: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "hard: {{instance}}",
+ "refId": "B"
+ }
+ ],
+ "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 16,
+ "x": 8,
+ "y": 39
+ },
+ "id": 191,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"work_rxpages\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "work_rxpages: {{instance}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"self_rsteps\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "self_rsteps: {{instance}}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"wloop\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "wloop: {{instance}}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"coalescences\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "coalescences: {{instance}}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"wipes\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "wipes: {{instance}}",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"flushes\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "flushes: {{instance}}",
+ "range": true,
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"kicks\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "kicks: {{instance}}",
+ "range": true,
+ "refId": "H"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"work_rsteps\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_rsteps: {{instance}}",
+ "range": true,
+ "refId": "I"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"self_xpages\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "self_xpages: {{instance}}",
+ "range": true,
+ "refId": "J"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"work_majflt\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_majflt: {{instance}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"self_majflt\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_self_majflt: {{instance}}",
+ "range": true,
+ "refId": "K"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"self_counter\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_self_counter: {{instance}}",
+ "range": true,
+ "refId": "L"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "db_gc{phase=\"work_counter\", instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "gc_work_counter: {{instance}}",
+ "range": true,
+ "refId": "M"
+ }
+ ],
+ "title": "Commit counters",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 47
+ },
+ "id": 134,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Process",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": []
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 18,
+ "w": 8,
+ "x": 0,
+ "y": 48
+ },
+ "id": 165,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "range"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "titleSize": 14,
+ "valueSize": 14
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.1.4",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "ru_inblock{instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "inblock: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "ru_outblock{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "outblock: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "ru_minflt{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "minflt: {{instance}}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "ru_majflt{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "majflt: {{instance}}",
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "system_disk_readbytes{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "system_disk_readbytes: {{instance}}",
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "system_disk_writebytes{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "system_disk_writebytes: {{instance}}",
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_newly{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_newly: {{instance}}",
+ "refId": "H"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_cow{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_cow: {{instance}}",
+ "refId": "I"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_clone{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_clone: {{instance}}",
+ "refId": "J"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_split{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_split: {{instance}}",
+ "refId": "K"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_merge{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_merge: {{instance}}",
+ "refId": "L"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_spill{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_spill: {{instance}}",
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_unspill{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_unspill: {{instance}}",
+ "refId": "M"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "db_pgops_wops{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pgops_wops: {{instance}}",
+ "refId": "N"
+ }
+ ],
+ "title": "Rusage Total (\"last value\" - \"first value\" on selected period)",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 48
+ },
+ "id": 155,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_io_write_syscalls_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "in: {{instance}}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_io_read_syscalls_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "out: {{instance}}",
+ "refId": "D"
+ }
+ ],
+ "title": "Read/Write syscall/sec",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "cps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 48
+ },
+ "id": 153,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(go_cgo_calls_count{instance=~\"$instance\"}[$rate_interval])",
+ "interval": "",
+ "legendFormat": "cgo_calls_count: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "cgo calls",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 54
+ },
+ "id": 85,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "read: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "write: {{instance}}",
+ "refId": "B"
+ }
+ ],
+ "title": "Disk bytes/sec",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 54
+ },
+ "id": 128,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "go_goroutines{instance=~\"$instance\"}",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "goroutines: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "go_threads{instance=~\"$instance\"}",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "threads: {{instance}}",
+ "refId": "B"
+ }
+ ],
+ "title": "GO Goroutines and Threads",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 60
+ },
+ "id": 154,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "stack_sys: {{ instance }}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "sys: {{ instance }}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "stack_inuse: {{ instance }}",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "mspan_sys: {{ instance }}",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "mcache_sys: {{ instance }}",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "heap_alloc: {{ instance }}",
+ "range": true,
+ "refId": "F"
+ }
+ ],
+ "title": "go memstat",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 60
+ },
+ "id": 124,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "GC Stop the World per sec",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 66
+ },
+ "id": 148,
+ "options": {
+ "legend": {
+ "calcs": [
+ "max"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}",
+ "hide": true,
+ "interval": "",
+ "legendFormat": "resident virtual mem: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}",
+ "hide": true,
+ "interval": "",
+ "legendFormat": "resident anon mem: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "process_resident_memory_bytes{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "resident mem: {{instance}}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "mem_data{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "data: {{instance}}",
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "mem_stack{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "stack: {{instance}}",
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "mem_locked{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "locked: {{instance}}",
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "mem_swap{instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "swap: {{instance}}",
+ "refId": "G"
+ }
+ ],
+ "title": "mem: resident set size",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 71
+ },
+ "id": 86,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "memstats_mallocs_total: {{ instance }}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "memstats_frees_total: {{ instance }}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Process Mem: allocate objects/sec, free",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 76
+ },
+ "id": 106,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "system: {{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "CPU",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 81
+ },
+ "id": 173,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "TxPool",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 82
+ },
+ "id": 175,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_process_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "process_remote_txs: {{ instance }}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_add_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "add_remote_txs: {{ instance }}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_new_block{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "new_block: {{ instance }}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_write_to_db{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "write_to_db: {{ instance }}",
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_propagate_to_new_peer{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "propagate_to_new_peer: {{ instance }}",
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "pool_propagate_new_txs{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "propagate_new_txs: {{ instance }}",
+ "refId": "F"
+ }
+ ],
+ "title": "Timings",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "reqps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 82
+ },
+ "id": 177,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(pool_process_remote_txs_count{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pool_process_remote_txs_count: {{ instance }}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(pool_add_remote_txs_count{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pool_add_remote_txs_count: {{ instance }}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(pool_new_block_count{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pool_new_block_count: {{ instance }}",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(pool_write_to_db_count{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pool_write_to_db_count: {{ instance }}",
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(pool_p2p_out{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "E"
+ }
+ ],
+ "title": "RPS",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 90
+ },
+ "id": 176,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "sum(delta(cache_total{result=\"hit\",name=\"txpool\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])) ",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "hit rate: {{ instance }} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Cache hit-rate",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 90
+ },
+ "id": 180,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{ result }}: {{ instance }} ",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(cache_timeout_total{name=\"txpool\",instance=~\"$instance\"}[1m])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "timeout: {{ instance }} ",
+ "refId": "B"
+ }
+ ],
+ "title": "Cache rps",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 90
+ },
+ "id": 181,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_keys_total{name=\"txpool\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "keys: {{ instance }} ",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_list_total{name=\"txpool\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "list: {{ instance }} ",
+ "refId": "B"
+ }
+ ],
+ "title": "Cache keys",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "binBps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 96
+ },
+ "id": 178,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(pool_write_to_db_bytes{instance=~\"$instance\"}[$rate_interval])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "pool_write_to_db_bytes: {{ instance }}",
+ "refId": "A"
+ }
+ ],
+ "title": "DB",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 102
+ },
+ "id": 183,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "RPC",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "reqps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 103
+ },
+ "id": 185,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"success\"}[1m])",
+ "interval": "",
+ "legendFormat": "success {{ method }} {{ instance }} ",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"failure\"}[1m])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "failure {{ method }} {{ instance }} ",
+ "refId": "B"
+ }
+ ],
+ "title": "RPS",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 103
+ },
+ "id": 186,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "db_begin_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "db_begin_seconds: {{ method }} {{ instance }}",
+ "refId": "A"
+ }
+ ],
+ "title": "DB begin",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 111
+ },
+ "id": 187,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "rpc_duration_seconds{quantile=\"$quantile\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": " {{ method }} {{ instance }} {{ success }}",
+ "refId": "A"
+ }
+ ],
+ "title": "Timings",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 111
+ },
+ "id": 188,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "go_goroutines{instance=~\"$instance\"}",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "go/goroutines: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "go_threads{instance=~\"$instance\"}",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "go/threads: {{instance}}",
+ "refId": "B"
+ }
+ ],
+ "title": "GO Goroutines and Threads",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 119
+ },
+ "id": 189,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_keys_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "keys: {{ instance }} ",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_list_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "list: {{ instance }} ",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_code_keys_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "code_keys: {{ instance }} ",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "cache_code_list_total{name=\"rpc\",instance=~\"$instance\"}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "code_list: {{ instance }} ",
+ "refId": "D"
+ }
+ ],
+ "title": "Cache keys",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 119
+ },
+ "id": 184,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "last"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "sum(delta(cache_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "hit rate: {{ instance }} ",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "exemplar": true,
+ "expr": "sum(delta(cache_code_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_code_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "code hit rate: {{ instance }} ",
+ "refId": "B"
+ }
+ ],
+ "title": "Cache hit-rate",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 125
+ },
+ "id": 75,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Network",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "egress: mainnet2-1:6061"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 126
+ },
+ "id": 96,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(p2p_ingress{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "ingress: {{instance}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "rate(p2p_egress{instance=~\"$instance\"}[$rate_interval])",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "egress: {{instance}}",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Traffic",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 126
+ },
+ "id": 77,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.0.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "p2p_peers{instance=~\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "peers: {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "rate(p2p_dials{instance=~\"$instance\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "dials: {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus"
+ },
+ "expr": "rate(p2p_serves{instance=~\"$instance\"}[1m])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "serves: {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "title": "Peers",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "",
+ "revision": 1,
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "0.97",
+ "value": "0.97"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "multi": false,
+ "name": "quantile",
+ "options": [
+ {
+ "selected": false,
+ "text": "0.0",
+ "value": "0.0"
+ },
+ {
+ "selected": false,
+ "text": "0.25",
+ "value": "0.25"
+ },
+ {
+ "selected": false,
+ "text": "0.5",
+ "value": "0.5"
+ },
+ {
+ "selected": false,
+ "text": "0.9",
+ "value": "0.9"
+ },
+ {
+ "selected": true,
+ "text": "0.97",
+ "value": "0.97"
+ },
+ {
+ "selected": false,
+ "text": "0.99",
+ "value": "0.99"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ }
+ ],
+ "query": "0.0,0.25,0.5, 0.9, 0.97, 0.99, 1",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "mainnet-dev-awskii:6061"
+ ],
+ "value": [
+ "mainnet-dev-awskii:6061"
+ ]
+ },
+ "datasource": {
+ "type": "prometheus"
+ },
+ "definition": "go_goroutines",
+ "hide": 0,
+ "includeAll": true,
+ "label": "instance",
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "go_goroutines",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "regex": "/.*instance=\"([^\"]*).*/",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ "hide": 0,
+ "label": "Rate Interval",
+ "name": "rate_interval",
+ "options": [
+ {
+ "selected": true,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "3h",
+ "value": "3h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "1m,10m,30m,1h,3h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Erigon Internals",
+ "uid": "b42a61d7-02b1-416c-8ab4-b9c864356174",
+ "version": 14,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/cmd/release/go.mod b/cmd/release/go.mod
index fac38415e31..99bfb880ce5 100644
--- a/cmd/release/go.mod
+++ b/cmd/release/go.mod
@@ -1,5 +1,5 @@
module github.com/ledgerwatch/erigon/cmd/release/v2
-go 1.19
+go 1.20
require github.com/hashicorp/go-version v1.6.0
diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md
index 116ef65d3e9..7a275d2ac5b 100644
--- a/cmd/rpcdaemon/README.md
+++ b/cmd/rpcdaemon/README.md
@@ -117,7 +117,7 @@ If the healthcheck is successful it will return a 200 status code.
If the healthcheck fails for any reason a status 500 will be returned. This is true if one of the criteria requested
fails its check.
-You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the
+You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the
checks.
Available Options:
@@ -186,6 +186,38 @@ By default data pruned after 90K blocks, can change it by flags like `--prune.hi
Some methods, if not found historical data in DB, can fallback to old blocks re-execution - but it requires `h`.
+### The --http.url flag
+
+the `--http.url` flag is an optional flag which allows one to bind the HTTP server to a socket, for example, `tcp6://:8545` or `unix:///erigon_http.socket`
+
+If the `--http.url` flag is set, then `--http.addr` and `--http.port` with both be ignored.
+
+note that this is NOT geth-style IPC. for that, read the next section, IPC endpoint(geth-compatible)
+
+
+### HTTPS, HTTP2, and H2C
+
+Erigon supports HTTPS, HTTP2, and H2C out of the box. H2C is served by the default HTTP handler.
+
+To enable the HTTPS+HTTP2 server, add flag `--https.enabled`, along with providing flags `-https.cert="/path/to.cert"` and `--https.key=/path/to.key`
+
+By default, the HTTPS server will run on the HTTP port + 363. use flag `--https.port` to set the port
+
+The HTTPS server will inherit all other configuration parameters from http, for instance, enabling the websocket server, cors domains, or enabled namespaces
+
+If the `--https.url` flag is set, then `--https.addr` and `--https.port` with both be ignored.
+
+
+### IPC endpoint (geth compatible)
+
+erigon supports the geth-style unix socket IPC. you can enable this with `--socket.enabled` flag,
+and setting the `--socket.url` flag. For instance, if you wanted the socket to exist at `/var/run/erigon.ipc`,
+you would do `--socket.url=unix:///var/run/erigon.ipc`
+
+you can also use `--socket.url=tcp://:` to serve the raw jsonrpc2 protocol over tcp
+
+the socket will inherit the namespaces from `http.api`
+
### RPC Implementation Status
Label "remote" means: `--private.api.addr` flag is required.
@@ -196,6 +228,7 @@ The following table shows the current implementation status of Erigon's RPC daem
| ------------------------------------------ |---------|--------------------------------------|
| admin_nodeInfo | Yes | |
| admin_peers | Yes | |
+| admin_addPeer | Yes | |
| | | |
| web3_clientVersion | Yes | |
| web3_sha3 | Yes | |
@@ -247,7 +280,7 @@ The following table shows the current implementation status of Erigon's RPC daem
| eth_getFilterChanges | Yes | |
| eth_uninstallFilter | Yes | |
| eth_getLogs | Yes | |
-| | | |
+| interned spe | | |
| eth_accounts | No | deprecated |
| eth_sendRawTransaction | Yes | `remote`. |
| eth_sendTransaction | - | not yet implemented |
@@ -334,6 +367,7 @@ The following table shows the current implementation status of Erigon's RPC daem
| bor_getCurrentValidators | Yes | Bor only |
| bor_getSnapshotProposerSequence | Yes | Bor only |
| bor_getRootHash | Yes | Bor only |
+| bor_getVoteOnHash | Yes | Bor only |
### GraphQL
diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go
index 713ede0d79c..2d8b381a254 100644
--- a/cmd/rpcdaemon/cli/config.go
+++ b/cmd/rpcdaemon/cli/config.go
@@ -7,20 +7,28 @@ import (
"fmt"
"net"
"net/http"
+ "net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/ledgerwatch/erigon-lib/chain"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
libstate "github.com/ledgerwatch/erigon-lib/state"
+ "github.com/ledgerwatch/erigon/consensus"
+ "github.com/ledgerwatch/erigon/consensus/bor"
+ "github.com/ledgerwatch/erigon/consensus/bor/contract"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
+ "github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/core/systemcontracts"
+ "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/rpc/rpccfg"
"github.com/ledgerwatch/erigon/turbo/debug"
@@ -50,6 +58,7 @@ import (
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/health"
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices"
"github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/ledgerwatch/erigon/cmd/utils/flags"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/paths"
"github.com/ledgerwatch/erigon/core/rawdb"
@@ -80,19 +89,9 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) {
rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "Erigon's components (txpool, rpcdaemon, sentry, downloader, ...) can be deployed as independent Processes on same/another server. Then components will connect to erigon by this internal grpc API. Example: 127.0.0.1:9090")
rootCmd.PersistentFlags().StringVar(&cfg.DataDir, "datadir", "", "path to Erigon working directory")
rootCmd.PersistentFlags().BoolVar(&cfg.GraphQLEnabled, "graphql", false, "enables graphql endpoint (disabled by default)")
- rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface")
- rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake")
- rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake")
- rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake")
- rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", nodecfg.DefaultHTTPPort, "HTTP-RPC server listening port")
- rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)")
- rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", nodecfg.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.")
- rootCmd.PersistentFlags().BoolVar(&cfg.HttpCompression, "http.compression", true, "Disable http compression")
- rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon"}, "API's offered over the HTTP-RPC interface: eth,erigon,web3,net,debug,trace,txpool,db. Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon")
rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 50_000_000, "Sets a cap on gas that can be used in eth_call/estimateGas")
rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter")
- rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets - Same port as HTTP")
- rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)")
+
rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, utils.RpcAccessListFlag.Name, "", "Specify granular (method-by-method) API allowlist")
rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, utils.RpcBatchConcurrencyFlag.Name, 2, utils.RpcBatchConcurrencyFlag.Usage)
rootCmd.PersistentFlags().BoolVar(&cfg.RpcStreamingDisable, utils.RpcStreamingDisableFlag.Name, false, utils.RpcStreamingDisableFlag.Usage)
@@ -100,16 +99,38 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) {
rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines")
rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)")
rootCmd.PersistentFlags().BoolVar(&cfg.Sync.UseSnapshots, "snapshot", true, utils.SnapshotFlag.Usage)
+
rootCmd.PersistentFlags().StringVar(&stateCacheStr, "state.cache", "0MB", "Amount of data to store in StateCache (enabled if no --datadir set). Set 0 to disable StateCache. Defaults to 0MB RAM")
rootCmd.PersistentFlags().BoolVar(&cfg.GRPCServerEnabled, "grpc", false, "Enable GRPC server")
rootCmd.PersistentFlags().StringVar(&cfg.GRPCListenAddress, "grpc.addr", nodecfg.DefaultGRPCHost, "GRPC server listening interface")
rootCmd.PersistentFlags().IntVar(&cfg.GRPCPort, "grpc.port", nodecfg.DefaultGRPCPort, "GRPC server listening port")
rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check")
rootCmd.PersistentFlags().Float64Var(ðconfig.Defaults.RPCTxFeeCap, utils.RPCGlobalTxFeeCapFlag.Name, utils.RPCGlobalTxFeeCapFlag.Value, utils.RPCGlobalTxFeeCapFlag.Usage)
+ rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake for GRPC")
+ rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake for GRPC")
+ rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake for GRPC")
+
+ rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon"}, "API's offered over the RPC interface: eth,erigon,web3,net,debug,trace,txpool,db. Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon")
+
+ rootCmd.PersistentFlags().BoolVar(&cfg.HttpServerEnabled, "http.enabled", true, "enable http server")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP server listening interface")
+ rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", nodecfg.DefaultHTTPPort, "HTTP server listening port")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpURL, "http.url", "", "HTTP server listening url. will OVERRIDE http.addr and http.port. will NOT respect http paths. prefix supported are tcp, unix")
+ rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)")
+ rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", nodecfg.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.")
+ rootCmd.PersistentFlags().BoolVar(&cfg.HttpCompression, "http.compression", true, "Disable http compression")
+ rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets - Same port as HTTP[S]")
+ rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)")
- rootCmd.PersistentFlags().BoolVar(&cfg.TCPServerEnabled, "tcp", false, "Enable TCP server")
- rootCmd.PersistentFlags().StringVar(&cfg.TCPListenAddress, "tcp.addr", nodecfg.DefaultTCPHost, "TCP server listening interface")
- rootCmd.PersistentFlags().IntVar(&cfg.TCPPort, "tcp.port", nodecfg.DefaultTCPPort, "TCP server listening port")
+ rootCmd.PersistentFlags().BoolVar(&cfg.HttpsServerEnabled, "https.enabled", false, "enable http server")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpsListenAddress, "https.addr", nodecfg.DefaultHTTPHost, "rpc HTTPS server listening interface")
+ rootCmd.PersistentFlags().IntVar(&cfg.HttpsPort, "https.port", 0, "rpc HTTPS server listening port. default to http+363 if not set")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpsURL, "https.url", "", "rpc HTTPS server listening url. will OVERRIDE https.addr and https.port. will NOT respect paths. prefix supported are tcp, unix")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpsCertfile, "https.cert", "", "certificate for rpc HTTPS server")
+ rootCmd.PersistentFlags().StringVar(&cfg.HttpsKeyFile, "https.key", "", "key file for rpc HTTPS server")
+
+ rootCmd.PersistentFlags().BoolVar(&cfg.SocketServerEnabled, "socket.enabled", false, "Enable IPC server")
+ rootCmd.PersistentFlags().StringVar(&cfg.SocketListenUrl, "socket.url", "unix:///var/run/erigon.sock", "IPC server listening url. prefix supported are tcp, unix")
rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level")
rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.ReadTimeout, "http.timeouts.read", rpccfg.DefaultHTTPTimeouts.ReadTimeout, "Maximum duration for reading the entire request, including the body.")
@@ -118,8 +139,10 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) {
rootCmd.PersistentFlags().DurationVar(&cfg.EvmCallTimeout, "rpc.evmtimeout", rpccfg.DefaultEvmCallTimeout, "Maximum amount of time to wait for the answer from EVM call.")
rootCmd.PersistentFlags().IntVar(&cfg.BatchLimit, utils.RpcBatchLimit.Name, utils.RpcBatchLimit.Value, utils.RpcBatchLimit.Usage)
rootCmd.PersistentFlags().IntVar(&cfg.ReturnDataLimit, utils.RpcReturnDataLimit.Name, utils.RpcReturnDataLimit.Value, utils.RpcReturnDataLimit.Usage)
-
+ rootCmd.PersistentFlags().BoolVar(&cfg.AllowUnprotectedTxs, utils.AllowUnprotectedTxs.Name, utils.AllowUnprotectedTxs.Value, utils.AllowUnprotectedTxs.Usage)
+ rootCmd.PersistentFlags().IntVar(&cfg.MaxGetProofRewindBlockCount, utils.RpcMaxGetProofRewindBlockCount.Name, utils.RpcMaxGetProofRewindBlockCount.Value, utils.RpcMaxGetProofRewindBlockCount.Usage)
rootCmd.PersistentFlags().Uint64Var(&cfg.OtsMaxPageSize, utils.OtsSearchMaxCapFlag.Name, utils.OtsSearchMaxCapFlag.Value, utils.OtsSearchMaxCapFlag.Usage)
+ rootCmd.PersistentFlags().DurationVar(&cfg.RPCSlowLogThreshold, utils.RPCSlowFlag.Name, utils.RPCSlowFlag.Value, utils.RPCSlowFlag.Usage)
if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil {
panic(err)
@@ -145,7 +168,9 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) {
if cfg.DataDir == "" {
cfg.DataDir = paths.DefaultDataDir()
}
- cfg.Dirs = datadir.New(cfg.DataDir)
+ var dataDir flags.DirectoryString
+ dataDir.Set(cfg.DataDir)
+ cfg.Dirs = datadir.New(string(dataDir))
}
if cfg.TxPoolApiAddr == "" {
cfg.TxPoolApiAddr = cfg.PrivateApiAddr
@@ -269,10 +294,9 @@ func EmbeddedServices(ctx context.Context,
// RemoteServices - use when RPCDaemon run as independent process. Still it can use --datadir flag to enable
// `cfg.WithDatadir` (mode when it on 1 machine with Erigon)
-func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) (
- db kv.RoDB, borDb kv.RoDB,
- eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient,
- stateCache kvcache.Cache, blockReader services.FullBlockReader,
+func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) (
+ db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient,
+ stateCache kvcache.Cache, blockReader services.FullBlockReader, engine consensus.EngineReader,
ff *rpchelper.Filters, agg *libstate.AggregatorV3, err error) {
if !cfg.WithDatadir && cfg.PrivateApiAddr == "" {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("either remote db or local db must be specified")
@@ -295,13 +319,26 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
// Configure DB first
var allSnapshots *freezeblocks.RoSnapshots
+ var allBorSnapshots *freezeblocks.BorRoSnapshots
onNewSnapshot := func() {}
+
+ var cc *chain.Config
+
if cfg.WithDatadir {
+ // Opening all databases in Accede and non-Readonly modes. Here is the motivation:
+ // Rpcdaemon must provide 2 features:
+ // 1. ability to start even if Erigon is down (to prevent cascade outage).
+ // 2. don't create databases by itself - because it doesn't know right parameters (Erigon may have cli flags: pagesize, etc...)
+ // Some databases (consensus, txpool, downloader) are woring in SafeNoSync mode - in this mode
+ // power-off may leave db in recoverable-non-consistent state. Such db can be recovered only if open in non-Readonly mode.
+ // Accede mode preventing db-creation:
+ // at first start RpcDaemon may start earlier than Erigon
+ // Accede mode will check db existence (may wait with retries). It's ok to fail in this case - some supervisor will restart us.
var rwKv kv.RwDB
dir.MustExist(cfg.Dirs.SnapHistory)
- logger.Trace("Creating chain db", "path", cfg.Dirs.Chaindata)
+ logger.Warn("Opening chain db", "path", cfg.Dirs.Chaindata)
limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency))
- rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open()
+ rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Accede().Open(ctx)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
}
@@ -310,7 +347,6 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}
db = rwKv
- var cc *chain.Config
if err := db.View(context.Background(), func(tx kv.Tx) error {
genesisHash, err := rawdb.ReadCanonicalHash(tx, 0)
if err != nil {
@@ -338,10 +374,13 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
// Configure sapshots
allSnapshots = freezeblocks.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger)
+ allBorSnapshots = freezeblocks.NewBorRoSnapshots(cfg.Snap, cfg.Dirs.Snap, logger)
// To povide good UX - immediatly can read snapshots after RPCDaemon start, even if Erigon is down
// Erigon does store list of snapshots in db: means RPCDaemon can read this list now, but read by `remoteKvClient.Snapshots` after establish grpc connection
allSnapshots.OptimisticReopenWithDB(db)
+ allBorSnapshots.OptimisticalyReopenWithDB(db)
allSnapshots.LogStat()
+ allBorSnapshots.LogStat()
if agg, err = libstate.NewAggregatorV3(ctx, cfg.Dirs.SnapHistory, cfg.Dirs.Tmp, ethconfig.HistoryV3AggregationStep, db, logger); err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("create aggregator: %w", err)
@@ -367,6 +406,11 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
} else {
allSnapshots.LogStat()
}
+ if err := allBorSnapshots.ReopenList(reply.BlocksFiles, true); err != nil {
+ logger.Error("[bor snapshots] reopen", "err", err)
+ } else {
+ allSnapshots.LogStat()
+ }
_ = reply.HistoryFiles
@@ -384,7 +428,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}()
}
onNewSnapshot()
- blockReader = freezeblocks.NewBlockReader(allSnapshots)
+ blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
var histV3Enabled bool
_ = db.View(ctx, func(tx kv.Tx) error {
@@ -404,26 +448,8 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
if db == nil {
db = remoteKv
}
- if cfg.WithDatadir {
- // bor (consensus) specific db
- var borKv kv.RoDB
- borDbPath := filepath.Join(cfg.DataDir, "bor")
- {
- // ensure db exist
- tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open()
- if err != nil {
- return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
- }
- tmpDb.Close()
- }
- logger.Trace("Creating consensus db", "path", borDbPath)
- borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open()
- if err != nil {
- return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
- }
- // Skip the compatibility check, until we have a schema in erigon-lib
- borDb = borKv
- } else {
+
+ if !cfg.WithDatadir {
if cfg.StateCache.CacheSize > 0 {
stateCache = kvcache.New(cfg.StateCache)
} else {
@@ -455,6 +481,40 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
blockReader = remoteEth
eth = remoteEth
+ var remoteCE *remoteConsensusEngine
+
+ if cfg.WithDatadir {
+ switch {
+ case cc != nil:
+ switch {
+ case cc.Bor != nil:
+ var borKv kv.RoDB
+
+ // bor (consensus) specific db
+ borDbPath := filepath.Join(cfg.DataDir, "bor")
+ logger.Warn("[rpc] Opening Bor db", "path", borDbPath)
+ borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Accede().Open(ctx)
+ if err != nil {
+ return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
+ }
+ // Skip the compatibility check, until we have a schema in erigon-lib
+
+ engine = bor.NewRo(cc, borKv, blockReader,
+ span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger),
+ contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger)
+
+ default:
+ engine = ethash.NewFaker()
+ }
+
+ default:
+ engine = ethash.NewFaker()
+ }
+ } else {
+ remoteCE = &remoteConsensusEngine{}
+ engine = remoteCE
+ }
+
go func() {
if !remoteKv.EnsureVersionCompatibility() {
rootCancel()
@@ -468,13 +528,18 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
if !txPoolService.EnsureVersionCompatibility() {
rootCancel()
}
+ if remoteCE != nil {
+ if !remoteCE.init(db, blockReader, remoteKvClient, logger) {
+ rootCancel()
+ }
+ }
}()
ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot, logger)
- return db, borDb, eth, txPool, mining, stateCache, blockReader, ff, agg, err
+ return db, eth, txPool, mining, stateCache, blockReader, engine, ff, agg, err
}
-func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
+func StartRpcServer(ctx context.Context, cfg *httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
if cfg.Enabled {
return startRegularRpcServer(ctx, cfg, rpcAPI, logger)
}
@@ -482,7 +547,7 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API,
return nil
}
-func StartRpcServerWithJwtAuthentication(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
+func StartRpcServerWithJwtAuthentication(ctx context.Context, cfg *httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
if len(rpcAPI) == 0 {
return nil
}
@@ -494,11 +559,9 @@ func StartRpcServerWithJwtAuthentication(ctx context.Context, cfg httpcfg.HttpCf
return nil
}
-func startRegularRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
+func startRegularRpcServer(ctx context.Context, cfg *httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) error {
// register apis and create handler stack
- httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort)
-
- srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable, logger)
+ srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable, logger, cfg.RPCSlowLogThreshold)
allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath)
if err != nil {
@@ -508,6 +571,8 @@ func startRegularRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rp
srv.SetBatchLimit(cfg.BatchLimit)
+ defer srv.Stop()
+
var defaultAPIList []rpc.API
for _, api := range rpcAPI {
@@ -527,43 +592,111 @@ func startRegularRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rp
return fmt.Errorf("could not start register RPC apis: %w", err)
}
+ info := []interface{}{
+ "ws", cfg.WebsocketEnabled,
+ "ws.compression", cfg.WebsocketCompression, "grpc", cfg.GRPCServerEnabled,
+ }
+
+ if cfg.SocketServerEnabled {
+ socketUrl, err := url.Parse(cfg.SocketListenUrl)
+ if err != nil {
+ return fmt.Errorf("malformatted socket url %s: %w", cfg.SocketListenUrl, err)
+ }
+ tcpListener, err := net.Listen(socketUrl.Scheme, socketUrl.Host+socketUrl.EscapedPath())
+ if err != nil {
+ return fmt.Errorf("could not start Socket Listener: %w", err)
+ }
+ defer tcpListener.Close()
+ go func() {
+ err := srv.ServeListener(tcpListener)
+ if err != nil {
+ if !errors.Is(err, net.ErrClosed) {
+ logger.Error("Socket Listener Fatal Error", "err", err)
+ }
+ }
+ }()
+ info = append(info, "socket.url", socketUrl)
+ logger.Info("Socket Endpoint opened", "url", socketUrl)
+ }
+
httpHandler := node.NewHTTPHandlerStack(srv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression)
var wsHandler http.Handler
if cfg.WebsocketEnabled {
wsHandler = srv.WebsocketHandler([]string{"*"}, nil, cfg.WebsocketCompression, logger)
}
-
graphQLHandler := graphql.CreateHandler(defaultAPIList)
-
apiHandler, err := createHandler(cfg, defaultAPIList, httpHandler, wsHandler, graphQLHandler, nil)
if err != nil {
return err
}
- listener, httpAddr, err := node.StartHTTPEndpoint(httpEndpoint, cfg.HTTPTimeouts, apiHandler)
- if err != nil {
- return fmt.Errorf("could not start RPC api: %w", err)
+ // Separate Websocket handler if websocket port flag specified
+ if cfg.WebsocketEnabled && cfg.WebsocketPort != cfg.HttpPort {
+ wsEndpoint := fmt.Sprintf("tcp://%s:%d", cfg.HttpListenAddress, cfg.WebsocketPort)
+ wsApiHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if isWebsocket(r) {
+ wsHandler.ServeHTTP(w, r)
+ }
+ })
+ wsListener, wsAddr, err := node.StartHTTPEndpoint(wsEndpoint, &node.HttpEndpointConfig{Timeouts: cfg.HTTPTimeouts}, wsApiHandler)
+ if err != nil {
+ return fmt.Errorf("could not start separate Websocket RPC api at port %d: %w", cfg.WebsocketPort, err)
+ }
+ info = append(info, "websocket.url", wsAddr)
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _ = wsListener.Shutdown(shutdownCtx)
+ logger.Info("HTTP endpoint closed", "url", wsAddr)
+ }()
}
- if cfg.TCPServerEnabled {
- tcpEndpoint := fmt.Sprintf("%s:%d", cfg.TCPListenAddress, cfg.TCPPort)
- tcpListener, err := net.Listen("tcp", tcpEndpoint)
- if err != nil {
- return fmt.Errorf("could not start TCP Listener: %w", err)
+ if cfg.HttpServerEnabled {
+ httpEndpoint := fmt.Sprintf("tcp://%s:%d", cfg.HttpListenAddress, cfg.HttpPort)
+ if cfg.HttpURL != "" {
+ httpEndpoint = cfg.HttpURL
}
- go func() {
- defer tcpListener.Close()
- err := srv.ServeListener(tcpListener)
- if err != nil {
- logger.Error("TCP Listener Fatal Error", "err", err)
- }
+ listener, httpAddr, err := node.StartHTTPEndpoint(httpEndpoint, &node.HttpEndpointConfig{
+ Timeouts: cfg.HTTPTimeouts,
+ }, apiHandler)
+ if err != nil {
+ return fmt.Errorf("could not start RPC api: %w", err)
+ }
+ info = append(info, "http.url", httpAddr)
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _ = listener.Shutdown(shutdownCtx)
+ logger.Info("HTTP endpoint closed", "url", httpAddr)
}()
- logger.Info("TCP Endpoint opened", "url", tcpEndpoint)
}
-
- info := []interface{}{
- "url", httpAddr, "ws", cfg.WebsocketEnabled,
- "ws.compression", cfg.WebsocketCompression, "grpc", cfg.GRPCServerEnabled,
+ if cfg.HttpsURL != "" {
+ cfg.HttpsServerEnabled = true
+ }
+ if cfg.HttpsServerEnabled {
+ if cfg.HttpsPort == 0 {
+ cfg.HttpsPort = cfg.HttpPort + 363
+ }
+ httpsEndpoint := fmt.Sprintf("tcp://%s:%d", cfg.HttpsListenAddress, cfg.HttpsPort)
+ if cfg.HttpsURL != "" {
+ httpsEndpoint = cfg.HttpsURL
+ }
+ listener, httpAddr, err := node.StartHTTPEndpoint(httpsEndpoint, &node.HttpEndpointConfig{
+ Timeouts: cfg.HTTPTimeouts,
+ HTTPS: true,
+ CertFile: cfg.HttpsCertfile,
+ KeyFile: cfg.HttpsKeyFile,
+ }, apiHandler)
+ if err != nil {
+ return fmt.Errorf("could not start RPC api: %w", err)
+ }
+ info = append(info, "https.url", httpAddr)
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _ = listener.Shutdown(shutdownCtx)
+ logger.Info("HTTPS endpoint closed", "url", httpAddr)
+ }()
}
var (
@@ -584,26 +717,20 @@ func startRegularRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rp
}
go grpcServer.Serve(grpcListener)
info = append(info, "grpc.port", cfg.GRPCPort)
- }
-
- logger.Info("HTTP endpoint opened", info...)
-
- defer func() {
- srv.Stop()
- shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- _ = listener.Shutdown(shutdownCtx)
- logger.Info("HTTP endpoint closed", "url", httpAddr)
- if cfg.GRPCServerEnabled {
- if cfg.GRPCHealthCheckEnabled {
- healthServer.Shutdown()
+ defer func() {
+ if cfg.GRPCServerEnabled {
+ if cfg.GRPCHealthCheckEnabled {
+ healthServer.Shutdown()
+ }
+ grpcServer.GracefulStop()
+ _ = grpcListener.Close()
+ logger.Info("GRPC endpoint closed", "url", grpcEndpoint)
}
- grpcServer.GracefulStop()
- _ = grpcListener.Close()
- logger.Info("GRPC endpoint closed", "url", grpcEndpoint)
- }
- }()
+ }()
+ }
+
+ logger.Info("JsonRpc endpoint opened", info...)
<-ctx.Done()
logger.Info("Exiting...")
return nil
@@ -616,8 +743,8 @@ type engineInfo struct {
EngineHttpEndpoint string
}
-func startAuthenticatedRpcServer(cfg httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) (*engineInfo, error) {
- srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable, logger)
+func startAuthenticatedRpcServer(cfg *httpcfg.HttpCfg, rpcAPI []rpc.API, logger log.Logger) (*engineInfo, error) {
+ srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable, logger, cfg.RPCSlowLogThreshold)
engineListener, engineSrv, engineHttpEndpoint, err := createEngineListener(cfg, rpcAPI, logger)
if err != nil {
@@ -653,7 +780,7 @@ func isWebsocket(r *http.Request) bool {
// obtainJWTSecret loads the jwt-secret, either from the provided config,
// or from the default location. If neither of those are present, it generates
// a new secret and stores to the default location.
-func obtainJWTSecret(cfg httpcfg.HttpCfg, logger log.Logger) ([]byte, error) {
+func obtainJWTSecret(cfg *httpcfg.HttpCfg, logger log.Logger) ([]byte, error) {
// try reading from file
logger.Info("Reading JWT secret", "path", cfg.JWTSecretPath)
// If we run the rpcdaemon and datadir is not specified we just use jwt.hex in current directory.
@@ -679,7 +806,7 @@ func obtainJWTSecret(cfg httpcfg.HttpCfg, logger log.Logger) ([]byte, error) {
return jwtSecret, nil
}
-func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Handler, wsHandler http.Handler, graphQLHandler http.Handler, jwtSecret []byte) (http.Handler, error) {
+func createHandler(cfg *httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Handler, wsHandler http.Handler, graphQLHandler http.Handler, jwtSecret []byte) (http.Handler, error) {
var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if cfg.GraphQLEnabled && graphql.ProcessGraphQLcheckIfNeeded(graphQLHandler, w, r) {
return
@@ -704,10 +831,10 @@ func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand
return handler, nil
}
-func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API, logger log.Logger) (*http.Server, *rpc.Server, string, error) {
- engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.AuthRpcHTTPListenAddress, cfg.AuthRpcPort)
+func createEngineListener(cfg *httpcfg.HttpCfg, engineApi []rpc.API, logger log.Logger) (*http.Server, *rpc.Server, string, error) {
+ engineHttpEndpoint := fmt.Sprintf("tcp://%s:%d", cfg.AuthRpcHTTPListenAddress, cfg.AuthRpcPort)
- engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true, logger)
+ engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true, logger, cfg.RPCSlowLogThreshold)
if err := node.RegisterApisFromWhitelist(engineApi, nil, engineSrv, true, logger); err != nil {
return nil, nil, "", fmt.Errorf("could not start register RPC engine api: %w", err)
@@ -729,7 +856,9 @@ func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API, logger log.L
return nil, nil, "", err
}
- engineListener, engineAddr, err := node.StartHTTPEndpoint(engineHttpEndpoint, cfg.AuthRpcTimeouts, engineApiHandler)
+ engineListener, engineAddr, err := node.StartHTTPEndpoint(engineHttpEndpoint, &node.HttpEndpointConfig{
+ Timeouts: cfg.AuthRpcTimeouts,
+ }, engineApiHandler)
if err != nil {
return nil, nil, "", fmt.Errorf("could not start RPC api: %w", err)
}
@@ -739,3 +868,91 @@ func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API, logger log.L
return engineListener, engineSrv, engineAddr.String(), nil
}
+
+type remoteConsensusEngine struct {
+ engine consensus.EngineReader
+}
+
+func (e *remoteConsensusEngine) HasEngine() bool {
+ return e.engine != nil
+}
+
+func (e *remoteConsensusEngine) Engine() consensus.EngineReader {
+ return e.engine
+}
+
+func (e *remoteConsensusEngine) init(db kv.RoDB, blockReader services.FullBlockReader, remoteKV remote.KVClient, logger log.Logger) bool {
+ var cc *chain.Config
+
+ if err := db.View(context.Background(), func(tx kv.Tx) error {
+ genesisHash, err := rawdb.ReadCanonicalHash(tx, 0)
+ if err != nil {
+ return err
+ }
+ cc, err = rawdb.ReadChainConfig(tx, genesisHash)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return false
+ }
+
+ if cc.Bor != nil {
+ borKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger, remoteKV).
+ WithBucketsConfig(kv.BorTablesCfg).
+ Open()
+
+ if err != nil {
+ return false
+ }
+
+ e.engine = bor.NewRo(cc, borKv, blockReader,
+ span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger),
+ contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger)
+ } else {
+ e.engine = ethash.NewFaker()
+ }
+
+ return true
+}
+
+func (e *remoteConsensusEngine) Author(header *types.Header) (libcommon.Address, error) {
+ if e.engine != nil {
+ return e.engine.Author(header)
+ }
+
+ return libcommon.Address{}, fmt.Errorf("remote consensus engine not iinitialized")
+}
+
+func (e *remoteConsensusEngine) IsServiceTransaction(sender libcommon.Address, syscall consensus.SystemCall) bool {
+ if e.engine != nil {
+ return e.engine.IsServiceTransaction(sender, syscall)
+ }
+
+ return false
+}
+
+func (e *remoteConsensusEngine) Type() chain.ConsensusName {
+ if e.engine != nil {
+ return e.engine.Type()
+ }
+
+ return ""
+}
+
+func (e *remoteConsensusEngine) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall) ([]consensus.Reward, error) {
+ if e.engine != nil {
+ return e.engine.CalculateRewards(config, header, uncles, syscall)
+ }
+
+ return nil, fmt.Errorf("remote consensus engine not iinitialized")
+}
+
+func (e *remoteConsensusEngine) Close() error {
+ if e.engine != nil {
+ return e.engine.Close()
+ }
+
+ return nil
+}
diff --git a/cmd/rpcdaemon/cli/config_test.go b/cmd/rpcdaemon/cli/config_test.go
new file mode 100644
index 00000000000..c14c9cf9456
--- /dev/null
+++ b/cmd/rpcdaemon/cli/config_test.go
@@ -0,0 +1,21 @@
+package cli
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseSocketUrl(t *testing.T) {
+ t.Run("sock", func(t *testing.T) {
+ socketUrl, err := url.Parse("unix:///some/file/path.sock")
+ require.NoError(t, err)
+ require.EqualValues(t, "/some/file/path.sock", socketUrl.Host+socketUrl.EscapedPath())
+ })
+ t.Run("sock", func(t *testing.T) {
+ socketUrl, err := url.Parse("tcp://localhost:1234")
+ require.NoError(t, err)
+ require.EqualValues(t, "localhost:1234", socketUrl.Host+socketUrl.EscapedPath())
+ })
+}
diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
index a41f562342b..7a0930750f2 100644
--- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
+++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
@@ -10,37 +10,51 @@ import (
)
type HttpCfg struct {
- Enabled bool
- PrivateApiAddr string
+ Enabled bool
+
GraphQLEnabled bool
WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process.
DataDir string
Dirs datadir.Dirs
- HttpListenAddress string
AuthRpcHTTPListenAddress string
TLSCertfile string
TLSCACert string
TLSKeyFile string
- HttpPort int
- AuthRpcPort int
- HttpCORSDomain []string
- HttpVirtualHost []string
- AuthRpcVirtualHost []string
- HttpCompression bool
- API []string
- Gascap uint64
- MaxTraces uint64
- WebsocketEnabled bool
- WebsocketCompression bool
- RpcAllowListFilePath string
- RpcBatchConcurrency uint
- RpcStreamingDisable bool
- DBReadConcurrency int
- TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum
- TxPoolApiAddr string
- StateCache kvcache.CoherentConfig
- Snap ethconfig.BlocksFreezing
- Sync ethconfig.Sync
+
+ HttpServerEnabled bool
+ HttpURL string
+ HttpListenAddress string
+ HttpPort int
+ HttpCORSDomain []string
+ HttpVirtualHost []string
+ AuthRpcVirtualHost []string
+ HttpCompression bool
+
+ HttpsServerEnabled bool
+ HttpsURL string
+ HttpsListenAddress string
+ HttpsPort int
+ HttpsCertfile string
+ HttpsKeyFile string
+
+ AuthRpcPort int
+ PrivateApiAddr string
+
+ API []string
+ Gascap uint64
+ MaxTraces uint64
+ WebsocketPort int
+ WebsocketEnabled bool
+ WebsocketCompression bool
+ RpcAllowListFilePath string
+ RpcBatchConcurrency uint
+ RpcStreamingDisable bool
+ DBReadConcurrency int
+ TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum
+ TxPoolApiAddr string
+ StateCache kvcache.CoherentConfig
+ Snap ethconfig.BlocksFreezing
+ Sync ethconfig.Sync
// GRPC server
GRPCServerEnabled bool
@@ -48,10 +62,9 @@ type HttpCfg struct {
GRPCPort int
GRPCHealthCheckEnabled bool
- // Raw TCP Server
- TCPServerEnabled bool
- TCPListenAddress string
- TCPPort int
+ // Socket Server
+ SocketServerEnabled bool
+ SocketListenUrl string
JWTSecretPath string // Engine API Authentication
TraceRequests bool // Always trace requests in INFO level
@@ -61,9 +74,12 @@ type HttpCfg struct {
LogDirVerbosity string
LogDirPath string
- BatchLimit int // Maximum number of requests in a batch
- ReturnDataLimit int // Maximum number of bytes returned from calls (like eth_call)
-
+ BatchLimit int // Maximum number of requests in a batch
+ ReturnDataLimit int // Maximum number of bytes returned from calls (like eth_call)
+ AllowUnprotectedTxs bool // Whether to allow non EIP-155 protected transactions txs over RPC
+ MaxGetProofRewindBlockCount int //Max GetProof rewind block count
// Ots API
OtsMaxPageSize uint64
+
+ RPCSlowLogThreshold time.Duration
}
diff --git a/cmd/rpcdaemon/graphql/graph/generated.go b/cmd/rpcdaemon/graphql/graph/generated.go
index 2fe792cabcb..d1cfe32e753 100644
--- a/cmd/rpcdaemon/graphql/graph/generated.go
+++ b/cmd/rpcdaemon/graphql/graph/generated.go
@@ -24,6 +24,7 @@ import (
// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
return &executableSchema{
+ schema: cfg.Schema,
resolvers: cfg.Resolvers,
directives: cfg.Directives,
complexity: cfg.Complexity,
@@ -31,6 +32,7 @@ func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
}
type Config struct {
+ Schema *ast.Schema
Resolvers ResolverRoot
Directives DirectiveRoot
Complexity ComplexityRoot
@@ -182,12 +184,16 @@ type QueryResolver interface {
}
type executableSchema struct {
+ schema *ast.Schema
resolvers ResolverRoot
directives DirectiveRoot
complexity ComplexityRoot
}
func (e *executableSchema) Schema() *ast.Schema {
+ if e.schema != nil {
+ return e.schema
+ }
return parsedSchema
}
@@ -1023,14 +1029,14 @@ func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
- return introspection.WrapSchema(parsedSchema), nil
+ return introspection.WrapSchema(ec.Schema()), nil
}
func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
if ec.DisableIntrospection {
return nil, errors.New("introspection disabled")
}
- return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil
+ return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil
}
//go:embed "schema.graphqls"
@@ -1738,7 +1744,7 @@ func (ec *executionContext) fieldContext_Account_storage(ctx context.Context, fi
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Account_storage_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -2215,7 +2221,7 @@ func (ec *executionContext) fieldContext_Block_miner(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_miner_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -2911,7 +2917,7 @@ func (ec *executionContext) fieldContext_Block_ommerAt(ctx context.Context, fiel
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_ommerAt_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3156,7 +3162,7 @@ func (ec *executionContext) fieldContext_Block_transactionAt(ctx context.Context
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_transactionAt_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3223,7 +3229,7 @@ func (ec *executionContext) fieldContext_Block_logs(ctx context.Context, field g
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_logs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3290,7 +3296,7 @@ func (ec *executionContext) fieldContext_Block_account(ctx context.Context, fiel
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_account_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3350,7 +3356,7 @@ func (ec *executionContext) fieldContext_Block_call(ctx context.Context, field g
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_call_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3405,7 +3411,7 @@ func (ec *executionContext) fieldContext_Block_estimateGas(ctx context.Context,
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Block_estimateGas_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3736,7 +3742,7 @@ func (ec *executionContext) fieldContext_Log_account(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Log_account_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -3977,7 +3983,7 @@ func (ec *executionContext) fieldContext_Mutation_sendRawTransaction(ctx context
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Mutation_sendRawTransaction_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4183,7 +4189,7 @@ func (ec *executionContext) fieldContext_Pending_account(ctx context.Context, fi
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Pending_account_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4243,7 +4249,7 @@ func (ec *executionContext) fieldContext_Pending_call(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Pending_call_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4298,7 +4304,7 @@ func (ec *executionContext) fieldContext_Pending_estimateGas(ctx context.Context
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Pending_estimateGas_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4414,7 +4420,7 @@ func (ec *executionContext) fieldContext_Query_block(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query_block_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4533,7 +4539,7 @@ func (ec *executionContext) fieldContext_Query_blocks(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query_blocks_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4695,7 +4701,7 @@ func (ec *executionContext) fieldContext_Query_transaction(ctx context.Context,
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query_transaction_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -4762,7 +4768,7 @@ func (ec *executionContext) fieldContext_Query_logs(ctx context.Context, field g
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query_logs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -5017,7 +5023,7 @@ func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -5400,7 +5406,7 @@ func (ec *executionContext) fieldContext_Transaction_from(ctx context.Context, f
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Transaction_from_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -5464,7 +5470,7 @@ func (ec *executionContext) fieldContext_Transaction_to(ctx context.Context, fie
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Transaction_to_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -6096,7 +6102,7 @@ func (ec *executionContext) fieldContext_Transaction_createdContract(ctx context
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field_Transaction_createdContract_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -7883,7 +7889,7 @@ func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, fiel
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
@@ -8071,7 +8077,7 @@ func (ec *executionContext) fieldContext___Type_enumValues(ctx context.Context,
ctx = graphql.WithFieldContext(ctx, fc)
if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
ec.Error(ctx, err)
- return
+ return fc, err
}
return fc, nil
}
diff --git a/cmd/rpcdaemon/graphql/graph/helpers.go b/cmd/rpcdaemon/graphql/graph/helpers.go
index 4bc6c7da4e6..77b90c38607 100644
--- a/cmd/rpcdaemon/graphql/graph/helpers.go
+++ b/cmd/rpcdaemon/graphql/graph/helpers.go
@@ -3,6 +3,7 @@ package graph
import (
"encoding/hex"
"fmt"
+ hexutil2 "github.com/ledgerwatch/erigon-lib/common/hexutil"
"reflect"
"strconv"
@@ -11,7 +12,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
)
@@ -21,16 +21,16 @@ func convertDataToStringP(abstractMap map[string]interface{}, field string) *str
switch v := abstractMap[field].(type) {
case int64:
result = strconv.FormatInt(v, 10)
- case *hexutil.Big:
+ case *hexutil2.Big:
if reflect.ValueOf(abstractMap[field]).IsZero() {
return nil
}
result = v.String()
case hexutility.Bytes:
result = v.String()
- case hexutil.Uint:
+ case hexutil2.Uint:
result = v.String()
- case hexutil.Uint64:
+ case hexutil2.Uint64:
result = v.String()
case *libcommon.Address:
if reflect.ValueOf(abstractMap[field]).IsZero() {
@@ -66,15 +66,15 @@ func convertDataToIntP(abstractMap map[string]interface{}, field string) *int {
var result int
switch v := abstractMap[field].(type) {
- case hexutil.Uint64:
- resultUint, err := hexutil.DecodeUint64(v.String())
+ case hexutil2.Uint64:
+ resultUint, err := hexutil2.DecodeUint64(v.String())
if err != nil {
result = 0
} else {
result = int(resultUint)
}
- case hexutil.Uint:
- resultUint, err := hexutil.DecodeUint64(v.String())
+ case hexutil2.Uint:
+ resultUint, err := hexutil2.DecodeUint64(v.String())
if err != nil {
result = 0
} else {
@@ -94,21 +94,21 @@ func convertDataToUint64P(abstractMap map[string]interface{}, field string) *uin
var result uint64
switch v := abstractMap[field].(type) {
- case hexutil.Uint64:
- resultUint, err := hexutil.DecodeUint64(v.String())
+ case hexutil2.Uint64:
+ resultUint, err := hexutil2.DecodeUint64(v.String())
if err != nil {
result = 0
} else {
result = resultUint
}
- case hexutil.Uint:
- resultUint, err := hexutil.DecodeUint64(v.String())
+ case hexutil2.Uint:
+ resultUint, err := hexutil2.DecodeUint64(v.String())
if err != nil {
result = 0
} else {
result = resultUint
}
- case *hexutil.Big:
+ case *hexutil2.Big:
result = v.ToInt().Uint64()
case int:
result = abstractMap[field].(uint64)
@@ -125,7 +125,7 @@ func convertDataToUint64P(abstractMap map[string]interface{}, field string) *uin
func convertStrHexToDec(hexString *string) *string {
var result string
- resUInt64, err := hexutil.DecodeUint64(*hexString)
+ resUInt64, err := hexutil2.DecodeUint64(*hexString)
if err != nil {
fmt.Println(err)
result = "0"
diff --git a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go
index 14947154688..0bf234f9c0c 100644
--- a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go
+++ b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go
@@ -2,7 +2,7 @@ package graph
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
-// Code generated by github.com/99designs/gqlgen version v0.17.33
+// Code generated by github.com/99designs/gqlgen version v0.17.40
import (
"context"
@@ -11,8 +11,8 @@ import (
"strconv"
"strings"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/graphql/graph/model"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/rpc"
)
@@ -161,7 +161,26 @@ func (r *queryResolver) Block(ctx context.Context, number *string, hash *string)
// Blocks is the resolver for the blocks field.
func (r *queryResolver) Blocks(ctx context.Context, from *uint64, to *uint64) ([]*model.Block, error) {
- panic(fmt.Errorf("not implemented: Blocks - blocks"))
+
+ var blocks []*model.Block
+
+ const maxBlocks = 25
+
+ fromBlockNumber := *from
+ toBlockNumber := *to
+
+ if toBlockNumber >= fromBlockNumber && (toBlockNumber-fromBlockNumber+1) < maxBlocks {
+
+ for i := fromBlockNumber; i <= toBlockNumber; i++ {
+ blockNumberStr := strconv.FormatUint(i, 10)
+ block, _ := r.Block(ctx, &blockNumberStr, nil)
+ if block != nil {
+ blocks = append(blocks, block)
+ }
+ }
+ }
+
+ return blocks, ctx.Err()
}
// Pending is the resolver for the pending field.
diff --git a/cmd/rpcdaemon/health/check_block.go b/cmd/rpcdaemon/health/check_block.go
index 8978b6ffc4e..93e8d71fc7e 100644
--- a/cmd/rpcdaemon/health/check_block.go
+++ b/cmd/rpcdaemon/health/check_block.go
@@ -16,7 +16,7 @@ func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error {
return err
}
if len(data) == 0 { // block not found
- return fmt.Errorf("no known block with number %v (%x hex)", blockNumber, blockNumber)
+ return fmt.Errorf("no known block with number %v (%x hex)", blockNumber.Uint64(), blockNumber.Uint64())
}
return nil
diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go
index 39c71c9cad1..a843923edfe 100644
--- a/cmd/rpcdaemon/health/health_test.go
+++ b/cmd/rpcdaemon/health/health_test.go
@@ -11,7 +11,8 @@ import (
"testing"
"time"
- "github.com/ledgerwatch/erigon/common/hexutil"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
"github.com/ledgerwatch/erigon/rpc"
)
diff --git a/cmd/rpcdaemon/health/interfaces.go b/cmd/rpcdaemon/health/interfaces.go
index c8bdca5dbf8..2fabf8d5de4 100644
--- a/cmd/rpcdaemon/health/interfaces.go
+++ b/cmd/rpcdaemon/health/interfaces.go
@@ -2,8 +2,8 @@ package health
import (
"context"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/rpc"
)
diff --git a/cmd/rpcdaemon/main.go b/cmd/rpcdaemon/main.go
index e811b2775f2..8b90a3b5650 100644
--- a/cmd/rpcdaemon/main.go
+++ b/cmd/rpcdaemon/main.go
@@ -1,12 +1,14 @@
package main
import (
+ "context"
+ "errors"
"fmt"
"os"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli"
- "github.com/ledgerwatch/erigon/consensus/ethash"
+ "github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/erigon/turbo/debug"
"github.com/ledgerwatch/erigon/turbo/jsonrpc"
"github.com/spf13/cobra"
@@ -18,20 +20,19 @@ func main() {
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
logger := debug.SetupCobra(cmd, "sentry")
- db, borDb, backend, txPool, mining, stateCache, blockReader, ff, agg, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel)
+ db, backend, txPool, mining, stateCache, blockReader, engine, ff, agg, err := cli.RemoteServices(ctx, cfg, logger, rootCancel)
if err != nil {
- logger.Error("Could not connect to DB", "err", err)
+ if !errors.Is(err, context.Canceled) {
+ logger.Error("Could not connect to DB", "err", err)
+ }
return nil
}
defer db.Close()
- if borDb != nil {
- defer borDb.Close()
- }
+ defer engine.Close()
- // TODO: Replace with correct consensus Engine
- engine := ethash.NewFaker()
- apiList := jsonrpc.APIList(db, borDb, backend, txPool, mining, ff, stateCache, blockReader, agg, *cfg, engine, logger)
- if err := cli.StartRpcServer(ctx, *cfg, apiList, logger); err != nil {
+ apiList := jsonrpc.APIList(db, backend, txPool, mining, ff, stateCache, blockReader, agg, cfg, engine, logger)
+ rpc.PreAllocateRPCMetricLabels(apiList)
+ if err := cli.StartRpcServer(ctx, cfg, apiList, logger); err != nil {
logger.Error(err.Error())
return nil
}
diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go
index 8dfe4af3f94..ecff7012c16 100644
--- a/cmd/rpcdaemon/rpcdaemontest/test_util.go
+++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go
@@ -102,10 +102,10 @@ func CreateTestSentry(t *testing.T) (*mock.MockSentry, *core.ChainPack, []*core.
t.Fatal(err)
}
- if err = m.InsertChain(orphanedChain, nil); err != nil {
+ if err = m.InsertChain(orphanedChain); err != nil {
t.Fatal(err)
}
- if err = m.InsertChain(chain, nil); err != nil {
+ if err = m.InsertChain(chain); err != nil {
t.Fatal(err)
}
@@ -428,7 +428,7 @@ func CreateTestSentryForTraces(t *testing.T) *mock.MockSentry {
t.Fatalf("generate blocks: %v", err)
}
- if err := m.InsertChain(chain, nil); err != nil {
+ if err := m.InsertChain(chain); err != nil {
t.Fatalf("failed to insert into chain: %v", err)
}
return m
@@ -534,7 +534,7 @@ func CreateTestSentryForTracesCollision(t *testing.T) *mock.MockSentry {
t.Fatalf("generate blocks: %v", err)
}
// Import the canonical chain
- if err := m.InsertChain(chain, nil); err != nil {
+ if err := m.InsertChain(chain); err != nil {
t.Fatalf("failed to insert into chain: %v", err)
}
diff --git a/cmd/rpcdaemon/rpcservices/eth_backend.go b/cmd/rpcdaemon/rpcservices/eth_backend.go
index 32ed4a7f1ed..aa4f8192ee0 100644
--- a/cmd/rpcdaemon/rpcservices/eth_backend.go
+++ b/cmd/rpcdaemon/rpcservices/eth_backend.go
@@ -79,10 +79,12 @@ func (back *RemoteBackend) BlockByHash(ctx context.Context, db kv.Tx, hash commo
block, _, err := back.BlockWithSenders(ctx, db, hash, *number)
return block, err
}
-func (back *RemoteBackend) TxsV3Enabled() bool { panic("not implemented") }
-func (back *RemoteBackend) Snapshots() services.BlockSnapshots { panic("not implemented") }
-func (back *RemoteBackend) FrozenBlocks() uint64 { return back.blockReader.FrozenBlocks() }
-func (back *RemoteBackend) FrozenFiles() (list []string) { return back.blockReader.FrozenFiles() }
+func (back *RemoteBackend) TxsV3Enabled() bool { panic("not implemented") }
+func (back *RemoteBackend) Snapshots() services.BlockSnapshots { panic("not implemented") }
+func (back *RemoteBackend) BorSnapshots() services.BlockSnapshots { panic("not implemented") }
+func (back *RemoteBackend) FrozenBlocks() uint64 { return back.blockReader.FrozenBlocks() }
+func (back *RemoteBackend) FrozenBorBlocks() uint64 { return back.blockReader.FrozenBorBlocks() }
+func (back *RemoteBackend) FrozenFiles() (list []string) { return back.blockReader.FrozenFiles() }
func (back *RemoteBackend) FreezingCfg() ethconfig.BlocksFreezing {
return back.blockReader.FreezingCfg()
}
@@ -263,6 +265,15 @@ func (back *RemoteBackend) CanonicalHash(ctx context.Context, tx kv.Getter, bloc
func (back *RemoteBackend) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (types.Transaction, error) {
return back.blockReader.TxnByIdxInBlock(ctx, tx, blockNum, i)
}
+func (back *RemoteBackend) EventLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) {
+ return back.blockReader.EventLookup(ctx, tx, txnHash)
+}
+func (back *RemoteBackend) EventsByBlock(ctx context.Context, tx kv.Tx, hash common.Hash, blockNum uint64) ([]rlp.RawValue, error) {
+ return back.blockReader.EventsByBlock(ctx, tx, hash, blockNum)
+}
+func (back *RemoteBackend) Span(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) {
+ return back.blockReader.Span(ctx, tx, spanId)
+}
func (back *RemoteBackend) NodeInfo(ctx context.Context, limit uint32) ([]p2p.NodeInfo, error) {
nodes, err := back.remoteEthBackend.NodeInfo(ctx, &remote.NodesInfoRequest{Limit: limit})
@@ -307,6 +318,14 @@ func (back *RemoteBackend) NodeInfo(ctx context.Context, limit uint32) ([]p2p.No
return ret, nil
}
+func (back *RemoteBackend) AddPeer(ctx context.Context, request *remote.AddPeerRequest) (*remote.AddPeerReply, error) {
+ result, err := back.remoteEthBackend.AddPeer(ctx, request)
+ if err != nil {
+ return nil, fmt.Errorf("ETHBACKENDClient.AddPeer() error: %w", err)
+ }
+ return result, nil
+}
+
func (back *RemoteBackend) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) {
rpcPeers, err := back.remoteEthBackend.Peers(ctx, &emptypb.Empty{})
if err != nil {
diff --git a/cmd/rpctest/main.go b/cmd/rpctest/main.go
index cbb2060b632..7f5c8a8847d 100644
--- a/cmd/rpctest/main.go
+++ b/cmd/rpctest/main.go
@@ -8,23 +8,26 @@ import (
"syscall"
"github.com/ledgerwatch/erigon/cmd/rpctest/rpctest"
+ "github.com/ledgerwatch/erigon/turbo/logging"
"github.com/ledgerwatch/log/v3"
"github.com/spf13/cobra"
)
func main() {
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+ logger := logging.SetupLogger("rpctest")
var (
- needCompare bool
- fullTest bool
- gethURL string
- erigonURL string
- blockFrom uint64
- blockTo uint64
- latest bool
- recordFile string
- errorFile string
+ needCompare bool
+ fullTest bool
+ gethURL string
+ erigonURL string
+ blockFrom uint64
+ blockTo uint64
+ latest bool
+ recordFile string
+ errorFile string
+ visitAllPages bool
)
withErigonUrl := func(cmd *cobra.Command) {
cmd.Flags().StringVar(&erigonURL, "erigonUrl", "http://localhost:8545", "Erigon rpcdaemon url")
@@ -48,6 +51,9 @@ func main() {
withErrorFile := func(cmd *cobra.Command) {
cmd.Flags().StringVar(&errorFile, "errorFile", "", "File where to record errors (when responses do not match)")
}
+ withVisitAllPages := func(cmd *cobra.Command) {
+ cmd.Flags().BoolVar(&visitAllPages, "visitAllPages", false, "Visit all pages")
+ }
with := func(cmd *cobra.Command, opts ...func(*cobra.Command)) {
for i := range opts {
opts[i](cmd)
@@ -59,17 +65,75 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchEthCall(erigonURL, gethURL, needCompare, latest, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchEthCall(erigonURL, gethURL, needCompare, latest, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchEthCallCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest)
+ var benchEthGetBlockByHash = &cobra.Command{
+ Use: "benchEthGetBlockByHash",
+ Short: "",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := rpctest.BenchEthGetBlockByHash(erigonURL, gethURL, needCompare, latest, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
+ },
+ }
+ with(benchEthGetBlockByHash, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest)
+
+ var benchEthGetBlockByNumber2Cmd = &cobra.Command{
+ Use: "benchEthGetBlockByNumber2",
+ Short: "",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := rpctest.BenchEthGetBlockByNumber2(erigonURL, gethURL, needCompare, latest, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
+ },
+ }
+ with(benchEthGetBlockByNumber2Cmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest)
+
+ var benchEthGetTransactionByHashCmd = &cobra.Command{
+ Use: "benchEthGetTransactionByHash",
+ Short: "",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := rpctest.BenchEthGetTransactionByHash(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
+ },
+ }
+ with(benchEthGetTransactionByHashCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest)
+
+ var benchOtsGetBlockTransactions = &cobra.Command{
+ Use: "benchOtsGetBlockTransactions",
+ Short: "",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := rpctest.BenchOtsGetBlockTransactions(erigonURL, gethURL, needCompare, visitAllPages, latest, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
+ },
+ }
+ with(benchOtsGetBlockTransactions, withErigonUrl, withGethUrl, withNeedCompare, withVisitAllPages, withBlockNum, withRecord, withErrorFile, withLatest)
+
var bench1Cmd = &cobra.Command{
Use: "bench1",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench1(erigonURL, gethURL, needCompare, fullTest, blockFrom, blockTo, recordFile)
+ err := rpctest.Bench1(erigonURL, gethURL, needCompare, fullTest, blockFrom, blockTo, recordFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench1Cmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord)
@@ -80,7 +144,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench2(erigonURL)
+ err := rpctest.Bench2(erigonURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
var bench3Cmd = &cobra.Command{
@@ -88,7 +155,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench3(erigonURL, gethURL)
+ err := rpctest.Bench3(erigonURL, gethURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench3Cmd, withErigonUrl, withGethUrl)
@@ -98,7 +168,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench4(erigonURL)
+ err := rpctest.Bench4(erigonURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench4Cmd, withErigonUrl)
@@ -108,7 +181,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench5(erigonURL)
+ err := rpctest.Bench5(erigonURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench5Cmd, withErigonUrl)
@@ -117,7 +193,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench6(erigonURL)
+ err := rpctest.Bench6(erigonURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench6Cmd, withErigonUrl)
@@ -127,7 +206,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench7(erigonURL, gethURL)
+ err := rpctest.Bench7(erigonURL, gethURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench7Cmd, withErigonUrl, withGethUrl)
@@ -137,7 +219,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchEthGetLogs(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchEthGetLogs(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchEthGetLogsCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -147,57 +232,91 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.Bench9(erigonURL, gethURL, needCompare)
+ err := rpctest.Bench9(erigonURL, gethURL, needCompare)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(bench9Cmd, withErigonUrl, withGethUrl, withNeedCompare)
- var benchTraceBlockByHashCmd = &cobra.Command{
- Use: "benchTraceBlockByHash",
+ var benchTraceCallCmd = &cobra.Command{
+ Use: "benchTraceCall",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceBlockByHash(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTraceCall(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
- with(benchTraceBlockByHashCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
+ with(benchTraceCallCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
- var benchTraceTransactionCmd = &cobra.Command{
- Use: "benchTraceTransaction",
+ // debug_trace* APIs
+ var benchDebugTraceBlockByNumberCmd = &cobra.Command{
+ Use: "benchDebugTraceBlockByNumber",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceTransaction(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchDebugTraceBlockByNumber(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
- with(benchTraceTransactionCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
+ with(benchDebugTraceBlockByNumberCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile, withLatest)
- var benchTraceCallCmd = &cobra.Command{
- Use: "benchTraceCall",
+ var benchDebugTraceBlockByHashCmd = &cobra.Command{
+ Use: "benchDebugTraceBlockByHash",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceCall(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchDebugTraceBlockByHash(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
- with(benchTraceCallCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
+ with(benchDebugTraceBlockByHashCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
+
+ var benchDebugTraceTransactionCmd = &cobra.Command{
+ Use: "benchDebugTraceTransaction",
+ Short: "",
+ Long: ``,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := rpctest.BenchDebugTraceTransaction(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
+ },
+ }
+ with(benchDebugTraceTransactionCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
var benchDebugTraceCallCmd = &cobra.Command{
Use: "benchDebugTraceCall",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchDebugTraceCall(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchDebugTraceCall(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchDebugTraceCallCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
+ // debug_trace* APIs END
+
var benchTraceCallManyCmd = &cobra.Command{
Use: "benchTraceCallMany",
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceCallMany(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTraceCallMany(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchTraceCallManyCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -207,7 +326,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceBlock(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTraceBlock(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchTraceBlockCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -217,7 +339,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceFilter(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTraceFilter(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchTraceFilterCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -227,7 +352,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTxReceipt(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTxReceipt(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchTxReceiptCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -237,7 +365,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchTraceReplayTransaction(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ err := rpctest.BenchTraceReplayTransaction(erigonURL, gethURL, needCompare, blockFrom, blockTo, recordFile, errorFile)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchTraceReplayTransactionCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile)
@@ -247,7 +378,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchEthGetBlockByNumber(erigonURL)
+ err := rpctest.BenchEthGetBlockByNumber(erigonURL)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchEthBlockByNumberCmd, withErigonUrl)
@@ -257,7 +391,10 @@ func main() {
Short: "",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
- rpctest.BenchEthGetBalance(erigonURL, gethURL, needCompare, blockFrom, blockTo)
+ err := rpctest.BenchEthGetBalance(erigonURL, gethURL, needCompare, blockFrom, blockTo)
+ if err != nil {
+ logger.Error(err.Error())
+ }
},
}
with(benchEthGetBalanceCmd, withErigonUrl, withGethUrl, withNeedCompare, withBlockNum)
@@ -294,7 +431,10 @@ func main() {
rootCmd.Flags().Uint64Var(&blockTo, "blockTo", 2101000, "Block number to end test generation at")
rootCmd.AddCommand(
+ benchEthGetBlockByNumber2Cmd,
+ benchEthGetBlockByHash,
benchEthCallCmd,
+ benchEthGetTransactionByHashCmd,
bench1Cmd,
bench2Cmd,
bench3Cmd,
@@ -304,17 +444,20 @@ func main() {
bench7Cmd,
benchEthGetLogsCmd,
bench9Cmd,
- benchTraceTransactionCmd,
benchTraceCallCmd,
- benchDebugTraceCallCmd,
benchTraceCallManyCmd,
benchTraceBlockCmd,
benchTraceFilterCmd,
+ benchDebugTraceBlockByNumberCmd,
+ benchDebugTraceBlockByHashCmd,
+ benchDebugTraceTransactionCmd,
+ benchDebugTraceCallCmd,
benchTxReceiptCmd,
compareAccountRange,
benchTraceReplayTransactionCmd,
benchEthBlockByNumberCmd,
benchEthGetBalanceCmd,
+ benchOtsGetBlockTransactions,
replayCmd,
)
if err := rootCmd.ExecuteContext(rootContext()); err != nil {
diff --git a/cmd/rpctest/rpctest/account_range_verify.go b/cmd/rpctest/rpctest/account_range_verify.go
index dcb89c619ae..ed1bef30ef6 100644
--- a/cmd/rpctest/rpctest/account_range_verify.go
+++ b/cmd/rpctest/rpctest/account_range_verify.go
@@ -122,11 +122,13 @@ func CompareAccountRange(logger log.Logger, erigonURL, gethURL, tmpDataDir, geth
log.Error(err.Error())
return
}
+ defer tgTx.Rollback()
gethTx, err := gethKV.BeginRo(context.Background())
if err != nil {
log.Error(err.Error())
return
}
+ defer gethTx.Rollback()
tgCursor, err := tgTx.Cursor(kv.E2AccountsHistory)
if err != nil {
log.Error(err.Error())
diff --git a/cmd/rpctest/rpctest/bench1.go b/cmd/rpctest/rpctest/bench1.go
index 400e64c19de..0577a4e84b5 100644
--- a/cmd/rpctest/rpctest/bench1.go
+++ b/cmd/rpctest/rpctest/bench1.go
@@ -24,7 +24,7 @@ var routes map[string]string
// use false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
//
// fullTest - if false - then call only methods which RPCDaemon currently supports
-func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFrom uint64, blockTo uint64, recordFile string) {
+func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFrom uint64, blockTo uint64, recordFileName string) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -44,12 +44,10 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
accounts := make(map[libcommon.Address]struct{})
@@ -61,8 +59,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
@@ -73,16 +70,13 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
var bg EthBlockByNumber
res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
}
if bg.Error != nil {
- fmt.Printf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
}
if !compareBlocks(&b, &bg) {
- fmt.Printf("Block difference for %d\n", bn)
- return
+ return fmt.Errorf("Block difference for %d\n", bn)
}
}
@@ -108,12 +102,10 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("debug_storageRangeAt", reqGen.storageRangeAt(b.Result.Hash, i, tx.To, *nextKey), &sr)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get storageRange (Erigon): %s: %v\n", tx.Hash, res.Err)
- return
+ return fmt.Errorf("Could not get storageRange (Erigon): %s: %v\n", tx.Hash, res.Err)
}
if sr.Error != nil {
- fmt.Printf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
- return
+ return fmt.Errorf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
}
for k, v := range sr.Result.Storage {
@@ -132,8 +124,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Geth("debug_storageRangeAt", reqGen.storageRangeAt(b.Result.Hash, i, tx.To, *nextKeyG), &srGeth)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get storageRange (geth): %s: %v\n", tx.Hash, res.Err)
- return
+ return fmt.Errorf("Could not get storageRange (geth): %s: %v\n", tx.Hash, res.Err)
}
if srGeth.Error != nil {
fmt.Printf("Error getting storageRange (geth): %d %s\n", srGeth.Error.Code, srGeth.Error.Message)
@@ -154,7 +145,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
printStorageRange(sm)
fmt.Printf("================smg\n")
printStorageRange(smg)
- return
+ return fmt.Errorf("Storage range different\n")
}
}
}
@@ -167,11 +158,11 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
reqGen.reqID++
var trace EthTxTrace
- res = reqGen.Erigon("debug_traceTransaction", reqGen.traceTransaction(tx.Hash), &trace)
+ res = reqGen.Erigon("debug_traceTransaction", reqGen.debugTraceTransaction(tx.Hash), &trace)
resultsCh <- res
if res.Err != nil {
fmt.Printf("Could not trace transaction (Erigon) %s: %v\n", tx.Hash, res.Err)
- print(client, routes[Erigon], reqGen.traceTransaction(tx.Hash))
+ print(client, routes[Erigon], reqGen.debugTraceTransaction(tx.Hash))
}
if trace.Error != nil {
@@ -180,21 +171,18 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
if needCompare {
var traceg EthTxTrace
- res = reqGen.Geth("debug_traceTransaction", reqGen.traceTransaction(tx.Hash), &traceg)
+ res = reqGen.Geth("debug_traceTransaction", reqGen.debugTraceTransaction(tx.Hash), &traceg)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not trace transaction (geth) %s: %v\n", tx.Hash, res.Err)
- print(client, routes[Geth], reqGen.traceTransaction(tx.Hash))
- return
+ print(client, routes[Geth], reqGen.debugTraceTransaction(tx.Hash))
+ return fmt.Errorf("Could not trace transaction (geth) %s: %v\n", tx.Hash, res.Err)
}
if traceg.Error != nil {
- fmt.Printf("Error tracing transaction (geth): %d %s\n", traceg.Error.Code, traceg.Error.Message)
- return
+ return fmt.Errorf("Error tracing transaction (geth): %d %s\n", traceg.Error.Code, traceg.Error.Message)
}
if res.Err == nil && trace.Error == nil {
if !compareTraces(&trace, &traceg) {
- fmt.Printf("Different traces block %d, tx %s\n", bn, tx.Hash)
- return
+ return fmt.Errorf("Different traces block %d, tx %s\n", bn, tx.Hash)
}
}
}
@@ -204,32 +192,28 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("eth_getTransactionReceipt", reqGen.getTransactionReceipt(tx.Hash), &receipt)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Count not get receipt (Erigon): %s: %v\n", tx.Hash, res.Err)
print(client, routes[Erigon], reqGen.getTransactionReceipt(tx.Hash))
- return
+ return fmt.Errorf("Count not get receipt (Erigon): %s: %v\n", tx.Hash, res.Err)
}
if receipt.Error != nil {
- fmt.Printf("Error getting receipt (Erigon): %d %s\n", receipt.Error.Code, receipt.Error.Message)
- return
+ return fmt.Errorf("Error getting receipt (Erigon): %d %s\n", receipt.Error.Code, receipt.Error.Message)
}
if needCompare {
var receiptg EthReceipt
res = reqGen.Geth("eth_getTransactionReceipt", reqGen.getTransactionReceipt(tx.Hash), &receiptg)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Count not get receipt (geth): %s: %v\n", tx.Hash, res.Err)
print(client, routes[Geth], reqGen.getTransactionReceipt(tx.Hash))
- return
+ return fmt.Errorf("Count not get receipt (geth): %s: %v\n", tx.Hash, res.Err)
}
if receiptg.Error != nil {
- fmt.Printf("Error getting receipt (geth): %d %s\n", receiptg.Error.Code, receiptg.Error.Message)
- return
+ return fmt.Errorf("Error getting receipt (geth): %d %s\n", receiptg.Error.Code, receiptg.Error.Message)
}
if !compareReceipts(&receipt, &receiptg) {
fmt.Printf("Different receipts block %d, tx %s\n", bn, tx.Hash)
print(client, routes[Geth], reqGen.getTransactionReceipt(tx.Hash))
print(client, routes[Erigon], reqGen.getTransactionReceipt(tx.Hash))
- return
+ return fmt.Errorf("Receipts are different\n")
}
}
}
@@ -242,28 +226,23 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("eth_getBalance", reqGen.getBalance(b.Result.Miner, bn), &balance)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get account balance (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get account balance (Erigon): %v\n", res.Err)
}
if balance.Error != nil {
- fmt.Printf("Error getting account balance (Erigon): %d %s", balance.Error.Code, balance.Error.Message)
- return
+ return fmt.Errorf("Error getting account balance (Erigon): %d %s", balance.Error.Code, balance.Error.Message)
}
if needCompare {
var balanceg EthBalance
res = reqGen.Geth("eth_getBalance", reqGen.getBalance(b.Result.Miner, bn), &balanceg)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get account balance (geth): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get account balance (geth): %v\n", res.Err)
}
if balanceg.Error != nil {
- fmt.Printf("Error getting account balance (geth): %d %s\n", balanceg.Error.Code, balanceg.Error.Message)
- return
+ return fmt.Errorf("Error getting account balance (geth): %d %s\n", balanceg.Error.Code, balanceg.Error.Message)
}
if !compareBalances(&balance, &balanceg) {
- fmt.Printf("Miner %x balance difference for block %d\n", b.Result.Miner, bn)
- return
+ return fmt.Errorf("Miner %x balance difference for block %d\n", b.Result.Miner, bn)
}
}
@@ -274,12 +253,10 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Erigon("debug_getModifiedAccountsByNumber", reqGen.getModifiedAccountsByNumber(prevBn, bn), &mag)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get modified accounts (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get modified accounts (Erigon): %v\n", res.Err)
}
if mag.Error != nil {
- fmt.Printf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
- return
+ return fmt.Errorf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
}
fmt.Printf("Done blocks %d-%d, modified accounts: %d\n", prevBn, bn, len(mag.Result))
@@ -298,8 +275,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get accountRange (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get accountRange (Erigon): %v\n", res.Err)
}
if sr.Error != nil {
@@ -316,8 +292,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
res = reqGen.Geth("debug_accountRange", reqGen.accountRange(bn, pageGeth, 256), &srGeth)
resultsCh <- res
if res.Err != nil {
- fmt.Printf("Could not get accountRange geth: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get accountRange geth: %v\n", res.Err)
}
if srGeth.Error != nil {
fmt.Printf("Error getting accountRange geth: %d %s\n", srGeth.Error.Code, srGeth.Error.Message)
@@ -332,14 +307,14 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro
fmt.Printf("Different next page keys: %x geth %x", page, pageGeth)
}
if !compareAccountRanges(accRangeErigon, accRangeGeth) {
- fmt.Printf("Different in account ranges tx\n")
- return
+ return fmt.Errorf("Different in account ranges tx\n")
}
}
}
prevBn = bn
}
}
+ return nil
}
// vegetaWrite (to be run as a goroutine) writing results of server calls into several files:
diff --git a/cmd/rpctest/rpctest/bench2.go b/cmd/rpctest/rpctest/bench2.go
index 74165c14220..7dd73402800 100644
--- a/cmd/rpctest/rpctest/bench2.go
+++ b/cmd/rpctest/rpctest/bench2.go
@@ -10,7 +10,7 @@ import (
"github.com/ledgerwatch/erigon/crypto"
)
-func Bench2(erigon_url string) {
+func Bench2(erigon_url string) error {
var client = &http.Client{
Timeout: time.Second * 600,
}
@@ -19,12 +19,10 @@ func Bench2(erigon_url string) {
blockNumTemplate := `{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":%d}`
var blockNumber EthBlockNumber
if err := post(client, erigon_url, fmt.Sprintf(blockNumTemplate, req_id), &blockNumber); err != nil {
- fmt.Printf("Could not get block number: %v\n", err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
lastBlock := blockNumber.Number
fmt.Printf("Last block: %d\n", lastBlock)
@@ -35,8 +33,7 @@ func Bench2(erigon_url string) {
blockByNumTemplate := `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x",true],"id":%d}` //nolint
var b EthBlockByNumber
if err := post(client, erigon_url, fmt.Sprintf(blockByNumTemplate, bn, req_id), &b); err != nil {
- fmt.Printf("Could not retrieve block %d: %v\n", bn, err)
- return
+ return fmt.Errorf("Could not retrieve block %d: %v\n", bn, err)
}
if b.Error != nil {
fmt.Printf("Error retrieving block: %d %s\n", b.Error.Code, b.Error.Message)
@@ -53,8 +50,7 @@ func Bench2(erigon_url string) {
for nextKey != nil {
var sr DebugStorageRange
if err := post(client, erigon_url, fmt.Sprintf(storageRangeTemplate, b.Result.Hash, i, tx.To, *nextKey, 1024, req_id), &sr); err != nil {
- fmt.Printf("Could not get storageRange: %x: %v\n", tx.Hash, err)
- return
+ return fmt.Errorf("Could not get storageRange: %x: %v\n", tx.Hash, err)
}
if sr.Error != nil {
fmt.Printf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -83,15 +79,14 @@ func Bench2(erigon_url string) {
accountRangeTemplate := `{"jsonrpc":"2.0","method":"debug_getModifiedAccountsByNumber","params":[%d, %d],"id":%d}` //nolint
var ma DebugModifiedAccounts
if err := post(client, erigon_url, fmt.Sprintf(accountRangeTemplate, prevBn, bn, req_id), &ma); err != nil {
- fmt.Printf("Could not get modified accounts: %v\n", err)
- return
+ return fmt.Errorf("Could not get modified accounts: %v\n", err)
}
if ma.Error != nil {
- fmt.Printf("Error getting modified accounts: %d %s\n", ma.Error.Code, ma.Error.Message)
- return
+ return fmt.Errorf("Error getting modified accounts: %d %s\n", ma.Error.Code, ma.Error.Message)
}
fmt.Printf("Done blocks %d-%d, modified accounts: %d\n", prevBn, bn, len(ma.Result))
prevBn = bn
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench3.go b/cmd/rpctest/rpctest/bench3.go
index 317700750a5..5d4c959b809 100644
--- a/cmd/rpctest/rpctest/bench3.go
+++ b/cmd/rpctest/rpctest/bench3.go
@@ -11,7 +11,7 @@ import (
"github.com/ledgerwatch/erigon/core/state"
)
-func Bench3(erigon_url, geth_url string) {
+func Bench3(erigon_url, geth_url string) error {
var client = &http.Client{
Timeout: time.Second * 600,
}
@@ -30,8 +30,7 @@ func Bench3(erigon_url, geth_url string) {
encodedKey := base64.StdEncoding.EncodeToString(page)
var sr DebugAccountRange
if err := post(client, erigon_url, fmt.Sprintf(template, encodedKey, pageSize, req_id), &sr); err != nil {
- fmt.Printf("Could not get accountRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get accountRange: %v\n", err)
}
if sr.Error != nil {
fmt.Printf("Error getting accountRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -51,9 +50,7 @@ func Bench3(erigon_url, geth_url string) {
encodedKey := base64.StdEncoding.EncodeToString(page)
var sr DebugAccountRange
if err := post(client, geth_url, fmt.Sprintf(template, encodedKey, pageSize, req_id), &sr); err != nil {
-
- fmt.Printf("Could not get accountRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get accountRange: %v\n", err)
}
if sr.Error != nil {
fmt.Printf("Error getting accountRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -67,16 +64,14 @@ func Bench3(erigon_url, geth_url string) {
}
if !compareAccountRanges(accRangeTG, accRangeGeth) {
- fmt.Printf("Different in account ranges tx\n")
- return
+ return fmt.Errorf("Different in account ranges tx\n")
}
fmt.Println("debug_accountRanges... OK!")
template = `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x",true],"id":%d}`
var b EthBlockByNumber
if err := post(client, erigon_url, fmt.Sprintf(template, 1720000, req_id), &b); err != nil {
- fmt.Printf("Could not retrieve block %d: %v\n", 1720000, err)
- return
+ return fmt.Errorf("Could not retrieve block %d: %v\n", 1720000, err)
}
if b.Error != nil {
fmt.Printf("Error retrieving block: %d %s\n", b.Error.Code, b.Error.Message)
@@ -89,27 +84,23 @@ func Bench3(erigon_url, geth_url string) {
`
var trace EthTxTrace
if err := post(client, erigon_url, fmt.Sprintf(template, txhash, req_id), &trace); err != nil {
- fmt.Printf("Could not trace transaction %s: %v\n", txhash, err)
print(client, erigon_url, fmt.Sprintf(template, txhash, req_id))
- return
+ return fmt.Errorf("Could not trace transaction %s: %v\n", txhash, err)
}
if trace.Error != nil {
fmt.Printf("Error tracing transaction: %d %s\n", trace.Error.Code, trace.Error.Message)
}
var traceg EthTxTrace
if err := post(client, geth_url, fmt.Sprintf(template, txhash, req_id), &traceg); err != nil {
- fmt.Printf("Could not trace transaction g %s: %v\n", txhash, err)
print(client, geth_url, fmt.Sprintf(template, txhash, req_id))
- return
+ return fmt.Errorf("Could not trace transaction g %s: %v\n", txhash, err)
}
if traceg.Error != nil {
- fmt.Printf("Error tracing transaction g: %d %s\n", traceg.Error.Code, traceg.Error.Message)
- return
+ return fmt.Errorf("Error tracing transaction g: %d %s\n", traceg.Error.Code, traceg.Error.Message)
}
//print(client, erigon_url, fmt.Sprintf(template, txhash, req_id))
if !compareTraces(&trace, &traceg) {
- fmt.Printf("Different traces block %d, tx %s\n", 1720000, txhash)
- return
+ return fmt.Errorf("Different traces block %d, tx %s\n", 1720000, txhash)
}
}
to := libcommon.HexToAddress("0xbb9bc244d798123fde783fcc1c72d3bb8c189413")
@@ -125,8 +116,7 @@ func Bench3(erigon_url, geth_url string) {
for nextKey != nil {
var sr DebugStorageRange
if err := post(client, erigon_url, fmt.Sprintf(template, blockhash, i, to, *nextKey, 1024, req_id), &sr); err != nil {
- fmt.Printf("Could not get storageRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get storageRange: %v\n", err)
}
if sr.Error != nil {
fmt.Printf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -144,8 +134,7 @@ func Bench3(erigon_url, geth_url string) {
for nextKey != nil {
var srg DebugStorageRange
if err := post(client, geth_url, fmt.Sprintf(template, blockhash, i, to, *nextKey, 1024, req_id), &srg); err != nil {
- fmt.Printf("Could not get storageRange g: %v\n", err)
- return
+ return fmt.Errorf("Could not get storageRange g: %v\n", err)
}
if srg.Error != nil {
fmt.Printf("Error getting storageRange g: %d %s\n", srg.Error.Code, srg.Error.Message)
@@ -159,8 +148,9 @@ func Bench3(erigon_url, geth_url string) {
}
fmt.Printf("storageRange g: %d\n", len(smg))
if !compareStorageRanges(sm, smg) {
- fmt.Printf("Different in storage ranges tx\n")
- return
+ return fmt.Errorf("Different in storage ranges tx\n")
}
+ return nil
+
}
diff --git a/cmd/rpctest/rpctest/bench4.go b/cmd/rpctest/rpctest/bench4.go
index 1d403049185..347490ce5a5 100644
--- a/cmd/rpctest/rpctest/bench4.go
+++ b/cmd/rpctest/rpctest/bench4.go
@@ -8,7 +8,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
)
-func Bench4(erigon_url string) {
+func Bench4(erigon_url string) error {
var client = &http.Client{
Timeout: time.Second * 600,
}
@@ -18,8 +18,7 @@ func Bench4(erigon_url string) {
template := `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x",true],"id":%d}`
var b EthBlockByNumber
if err := post(client, erigon_url, fmt.Sprintf(template, 1720000, req_id), &b); err != nil {
- fmt.Printf("Could not retrieve block %d: %v\n", 1720000, err)
- return
+ return fmt.Errorf("Could not retrieve block %d: %v\n", 1720000, err)
}
if b.Error != nil {
fmt.Printf("Error retrieving block: %d %s\n", b.Error.Code, b.Error.Message)
@@ -30,9 +29,8 @@ func Bench4(erigon_url string) {
template = `{"jsonrpc":"2.0","method":"debug_traceTransaction","params":["%s"],"id":%d}`
var trace EthTxTrace
if err := post(client, erigon_url, fmt.Sprintf(template, txhash, req_id), &trace); err != nil {
- fmt.Printf("Could not trace transaction %s: %v\n", txhash, err)
print(client, erigon_url, fmt.Sprintf(template, txhash, req_id))
- return
+ return fmt.Errorf("Could not trace transaction %s: %v\n", txhash, err)
}
if trace.Error != nil {
fmt.Printf("Error tracing transaction: %d %s\n", trace.Error.Code, trace.Error.Message)
@@ -50,8 +48,7 @@ func Bench4(erigon_url string) {
for nextKey != nil {
var sr DebugStorageRange
if err := post(client, erigon_url, fmt.Sprintf(template, blockhash, i, to, *nextKey, 1024, req_id), &sr); err != nil {
- fmt.Printf("Could not get storageRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get storageRange: %v\n", err)
}
if sr.Error != nil {
fmt.Printf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -64,4 +61,5 @@ func Bench4(erigon_url string) {
}
}
fmt.Printf("storageRange: %d\n", len(sm))
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench5.go b/cmd/rpctest/rpctest/bench5.go
index 2104bc9d6d2..4066df310dc 100644
--- a/cmd/rpctest/rpctest/bench5.go
+++ b/cmd/rpctest/rpctest/bench5.go
@@ -8,7 +8,7 @@ import (
"time"
)
-func Bench5(erigonURL string) {
+func Bench5(erigonURL string) error {
var client = &http.Client{
Timeout: time.Second * 600,
}
@@ -24,16 +24,15 @@ func Bench5(erigonURL string) {
for scanner.Scan() {
req_id++
if err = post(client, erigonURL, fmt.Sprintf(template, scanner.Text(), req_id), &receipt); err != nil {
- fmt.Printf("Count not get receipt: %s: %v\n", scanner.Text(), err)
- return
+ return fmt.Errorf("Count not get receipt: %s: %v\n", scanner.Text(), err)
}
if receipt.Error != nil {
- fmt.Printf("Error getting receipt: %d %s\n", receipt.Error.Code, receipt.Error.Message)
- return
+ return fmt.Errorf("Error getting receipt: %d %s\n", receipt.Error.Code, receipt.Error.Message)
}
}
err = scanner.Err()
if err != nil {
panic(err)
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench6.go b/cmd/rpctest/rpctest/bench6.go
index 825c8b5ebbc..f9fcc3c57ae 100644
--- a/cmd/rpctest/rpctest/bench6.go
+++ b/cmd/rpctest/rpctest/bench6.go
@@ -8,7 +8,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
)
-func Bench6(erigon_url string) {
+func Bench6(erigon_url string) error {
var client = &http.Client{
Timeout: time.Second * 600,
}
@@ -20,12 +20,10 @@ func Bench6(erigon_url string) {
`
var blockNumber EthBlockNumber
if err := post(client, erigon_url, fmt.Sprintf(template, req_id), &blockNumber); err != nil {
- fmt.Printf("Could not get block number: %v\n", err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
lastBlock := blockNumber.Number
fmt.Printf("Last block: %d\n", lastBlock)
@@ -38,8 +36,7 @@ func Bench6(erigon_url string) {
`
var b EthBlockByNumber
if err := post(client, erigon_url, fmt.Sprintf(template, bn, req_id), &b); err != nil {
- fmt.Printf("Could not retrieve block %d: %v\n", bn, err)
- return
+ return fmt.Errorf("Could not retrieve block %d: %v\n", bn, err)
}
if b.Error != nil {
fmt.Printf("Error retrieving block: %d %s\n", b.Error.Code, b.Error.Message)
@@ -56,14 +53,13 @@ func Bench6(erigon_url string) {
`
var receipt EthReceipt
if err := post(client, erigon_url, fmt.Sprintf(template, tx.Hash, req_id), &receipt); err != nil {
- fmt.Printf("Count not get receipt: %s: %v\n", tx.Hash, err)
print(client, erigon_url, fmt.Sprintf(template, tx.Hash, req_id))
- return
+ return fmt.Errorf("Count not get receipt: %s: %v\n", tx.Hash, err)
}
if receipt.Error != nil {
- fmt.Printf("Error getting receipt: %d %s\n", receipt.Error.Code, receipt.Error.Message)
- return
+ return fmt.Errorf("Error getting receipt: %d %s\n", receipt.Error.Code, receipt.Error.Message)
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench7.go b/cmd/rpctest/rpctest/bench7.go
index 29c5e49a7e8..2a6dd6b49d5 100644
--- a/cmd/rpctest/rpctest/bench7.go
+++ b/cmd/rpctest/rpctest/bench7.go
@@ -8,7 +8,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
)
-func Bench7(erigonURL, gethURL string) {
+func Bench7(erigonURL, gethURL string) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -34,8 +34,7 @@ func Bench7(erigonURL, gethURL string) {
for nextKey != nil {
var sr DebugStorageRange
if err := post(client, erigonURL, fmt.Sprintf(template, blockhash, i, to, *nextKey, 1024, reqID), &sr); err != nil {
- fmt.Printf("Could not get storageRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get storageRange: %v\n", err)
}
if sr.Error != nil {
fmt.Printf("Error getting storageRange: %d %s\n", sr.Error.Code, sr.Error.Message)
@@ -54,8 +53,7 @@ func Bench7(erigonURL, gethURL string) {
for nextKeyG != nil {
var srg DebugStorageRange
if err := post(client, gethURL, fmt.Sprintf(template, blockhash, i, to, *nextKeyG, 1024, reqID), &srg); err != nil {
- fmt.Printf("Could not get storageRange: %v\n", err)
- return
+ return fmt.Errorf("Could not get storageRange: %v\n", err)
}
if srg.Error != nil {
fmt.Printf("Error getting storageRange: %d %s\n", srg.Error.Code, srg.Error.Message)
@@ -76,7 +74,8 @@ func Bench7(erigonURL, gethURL string) {
printStorageRange(sm)
fmt.Printf("================smg\n")
printStorageRange(smg)
- return
+ return fmt.Errorf("storage are different")
}
fmt.Printf("storageRanges: %d\n", len(sm))
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench9.go b/cmd/rpctest/rpctest/bench9.go
index 7b2a4266ee4..9c06fef9697 100644
--- a/cmd/rpctest/rpctest/bench9.go
+++ b/cmd/rpctest/rpctest/bench9.go
@@ -11,7 +11,7 @@ import (
)
// bench9 tests eth_getProof
-func Bench9(erigonURL, gethURL string, needCompare bool) {
+func Bench9(erigonURL, gethURL string, needCompare bool) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -26,12 +26,10 @@ func Bench9(erigonURL, gethURL string, needCompare bool) {
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
lastBlock := blockNumber.Number
fmt.Printf("Last block: %d\n", lastBlock)
@@ -46,8 +44,7 @@ func Bench9(erigonURL, gethURL string, needCompare bool) {
res = reqGen.Erigon("debug_accountRange", reqGen.accountRange(bn, page, 256), &sr)
if res.Err != nil {
- fmt.Printf("Could not get accountRange (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get accountRange (Erigon): %v\n", res.Err)
}
if sr.Error != nil {
@@ -74,8 +71,7 @@ func Bench9(erigonURL, gethURL string, needCompare bool) {
}
res = reqGen.Erigon("eth_getProof", reqGen.getProof(bn, address, storageList), &proof)
if res.Err != nil {
- fmt.Printf("Could not get getProof (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get getProof (Erigon): %v\n", res.Err)
}
if proof.Error != nil {
fmt.Printf("Error getting getProof (Erigon): %d %s\n", proof.Error.Code, proof.Error.Message)
@@ -86,8 +82,7 @@ func Bench9(erigonURL, gethURL string, needCompare bool) {
reqGen.reqID++
res = reqGen.Geth("eth_getProof", reqGen.getProof(bn, address, storageList), &gethProof)
if res.Err != nil {
- fmt.Printf("Could not get getProof (geth): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get getProof (geth): %v\n", res.Err)
}
if gethProof.Error != nil {
fmt.Printf("Error getting getProof (geth): %d %s\n", gethProof.Error.Code, gethProof.Error.Message)
@@ -100,4 +95,5 @@ func Bench9(erigonURL, gethURL string, needCompare bool) {
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_blockbynumber.go b/cmd/rpctest/rpctest/bench_blockbynumber.go
index 9ba4a06177f..9f016a1410c 100644
--- a/cmd/rpctest/rpctest/bench_blockbynumber.go
+++ b/cmd/rpctest/rpctest/bench_blockbynumber.go
@@ -7,7 +7,7 @@ import (
)
// BenchEthGetBlockByNumber generates lots of requests for eth_getBlockByNumber to attempt to reproduce issue where empty results are being returned
-func BenchEthGetBlockByNumber(erigonURL string) {
+func BenchEthGetBlockByNumber(erigonURL string) error {
setRoutes(erigonURL, erigonURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -20,44 +20,37 @@ func BenchEthGetBlockByNumber(erigonURL string) {
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
for bn := uint64(0); bn <= uint64(blockNumber.Number)/2; bn++ {
reqGen.reqID++
res = reqGen.Erigon2("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, false /* withTxs */))
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if errVal := res.Result.Get("error"); errVal != nil {
- fmt.Printf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message"))
- return
+ return fmt.Errorf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message"))
}
if res.Result.Get("result") == nil || res.Result.Get("result").Get("number") == nil {
- fmt.Printf("empty result: %s\n", res.Response)
- return
+ return fmt.Errorf("empty result: %s\n", res.Response)
}
reqGen.reqID++
bn1 := uint64(blockNumber.Number) - bn
res = reqGen.Erigon2("eth_getBlockByNumber", reqGen.getBlockByNumber(bn1, false /* withTxs */))
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn1, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn1, res.Err)
}
if errVal := res.Result.Get("error"); errVal != nil {
- fmt.Printf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message"))
- return
+ return fmt.Errorf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message"))
}
if res.Result.Get("result") == nil || res.Result.Get("result").Get("number") == nil {
- fmt.Printf("empty result: %s\n", res.Response)
- return
+ return fmt.Errorf("empty result: %s\n", res.Response)
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_debugTrace.go b/cmd/rpctest/rpctest/bench_debugTrace.go
new file mode 100644
index 00000000000..e73202eb535
--- /dev/null
+++ b/cmd/rpctest/rpctest/bench_debugTrace.go
@@ -0,0 +1,319 @@
+package rpctest
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+)
+
+func BenchDebugTraceBlockByNumber(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonUrl, gethUrl)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ if recordFileName != "" {
+ f, err := os.Create(recordFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFileName, err)
+ }
+ defer f.Close()
+ rec = bufio.NewWriter(f)
+ defer rec.Flush()
+ }
+ var errs *bufio.Writer
+ if errorFileName != "" {
+ ferr, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFileName, err)
+ }
+ defer ferr.Close()
+ errs = bufio.NewWriter(ferr)
+ defer errs.Flush()
+ }
+
+ var resultsCh chan CallResult = nil
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"debug_traceBlockByNumber"}, resultsCh)
+ }
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ var nBlocks = 0
+ for bn := blockFrom; bn < blockTo; bn++ {
+ nBlocks++
+ reqGen.reqID++
+ request := reqGen.debugTraceBlockByNumber(bn)
+ errCtx := fmt.Sprintf("block %d", bn)
+ if err := requestAndCompare(request, "debug_traceBlockByNumber", errCtx, reqGen, needCompare, rec, errs, resultsCh /* insertOnlyIfSuccess */, false); err != nil {
+ return err
+ }
+ }
+ fmt.Println("\nProcessed Blocks: ", nBlocks)
+
+ return nil
+}
+
+func BenchDebugTraceBlockByHash(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
+ setRoutes(erigonUrl, gethUrl)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ if recordFile != "" {
+ f, err := os.Create(recordFile)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
+ }
+ defer f.Close()
+ rec = bufio.NewWriter(f)
+ defer rec.Flush()
+ }
+ var errs *bufio.Writer
+ if errorFile != "" {
+ ferr, err := os.Create(errorFile)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
+ }
+ defer ferr.Close()
+ errs = bufio.NewWriter(ferr)
+ defer errs.Flush()
+ }
+
+ var resultsCh chan CallResult = nil
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"debug_traceBlockByHash"}, resultsCh)
+ }
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ reqGen.reqID++
+
+ var res CallResult
+ var nBlocks = 0
+ for bn := blockFrom; bn < blockTo; bn++ {
+ var b EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+ if res.Err != nil {
+ return fmt.Errorf("retrieve block (Erigon) %d: %v", blockFrom, res.Err)
+ }
+ if b.Error != nil {
+ return fmt.Errorf("retrieving block (Erigon): %d %s", b.Error.Code, b.Error.Message)
+ }
+
+ nBlocks++
+ reqGen.reqID++
+ request := reqGen.traceBlockByHash(b.Result.Hash.Hex())
+ errCtx := fmt.Sprintf("block %d, tx %s", bn, b.Result.Hash.Hex())
+ if err := requestAndCompare(request, "debug_traceBlockByHash", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ fmt.Println(err)
+ return err
+ }
+ }
+ fmt.Println("\nProcessed Blocks: ", nBlocks)
+
+ return nil
+}
+
+func BenchDebugTraceTransaction(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonUrl, gethUrl)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ if recordFileName != "" {
+ f, err := os.Create(recordFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFileName, err)
+ }
+ defer f.Close()
+ rec = bufio.NewWriter(f)
+ defer rec.Flush()
+ }
+ var errs *bufio.Writer
+ if errorFileName != "" {
+ ferr, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFileName, err)
+ }
+ defer ferr.Close()
+ errs = bufio.NewWriter(ferr)
+ defer errs.Flush()
+ }
+
+ var resultsCh chan CallResult = nil
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"debug_traceTransaction"}, resultsCh)
+ }
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ var res CallResult
+ var nBlocks = 0
+ var nTransactions = 0
+ for bn := blockFrom; bn < blockTo; bn++ {
+ nBlocks++
+ reqGen.reqID++
+ var erigonBlock EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &erigonBlock)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if erigonBlock.Error != nil {
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", erigonBlock.Error.Code, erigonBlock.Error.Message)
+ }
+
+ if needCompare {
+ var otherBlock EthBlockByNumber
+ res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &otherBlock)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if otherBlock.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", otherBlock.Error.Code, otherBlock.Error.Message)
+ }
+ if !compareBlocks(&erigonBlock, &otherBlock) {
+ if rec != nil {
+ fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+ rec.Flush()
+ continue
+ } else {
+ return fmt.Errorf("block %d has different fields\n", bn)
+ }
+ }
+ }
+
+ for _, tx := range erigonBlock.Result.Transactions {
+ reqGen.reqID++
+ nTransactions++
+
+ var request string
+ request = reqGen.debugTraceTransaction(tx.Hash)
+ errCtx := fmt.Sprintf("bn=%d hash=%s", bn, tx.Hash)
+
+ if err := requestAndCompare(request, "debug_traceTransaction", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ return err
+ }
+ }
+ }
+ fmt.Println("\nProcessed Blocks: ", nBlocks, ", Transactions", nTransactions)
+
+ return nil
+}
+
+func BenchDebugTraceCall(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
+ setRoutes(erigonURL, gethURL)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+ var rec *bufio.Writer
+ if recordFile != "" {
+ f, err := os.Create(recordFile)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
+ }
+ defer f.Close()
+ rec = bufio.NewWriter(f)
+ defer rec.Flush()
+ }
+ var errs *bufio.Writer
+ if errorFile != "" {
+ ferr, err := os.Create(errorFile)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
+ }
+ defer ferr.Close()
+ errs = bufio.NewWriter(ferr)
+ defer errs.Flush()
+ }
+
+ var resultsCh chan CallResult = nil
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"debug_traceCall"}, resultsCh)
+ }
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ var res CallResult
+
+ reqGen.reqID++
+ var blockNumber EthBlockNumber
+ res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
+ if res.Err != nil {
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
+ }
+ if blockNumber.Error != nil {
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
+ }
+ fmt.Printf("Last block: %d\n", blockNumber.Number)
+
+ var nBlocks = 0
+ var nTransactions = 0
+ for bn := blockFrom; bn <= blockTo; bn++ {
+ reqGen.reqID++
+ var b EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if b.Error != nil {
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ }
+
+ if needCompare {
+ var bg EthBlockByNumber
+ res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if bg.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+ }
+ if !compareBlocks(&b, &bg) {
+ return fmt.Errorf("Block difference for %d\n", bn)
+ }
+ }
+ nBlocks++
+
+ for _, tx := range b.Result.Transactions {
+ nTransactions++
+ reqGen.reqID++
+
+ request := reqGen.debugTraceCall(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input, bn-1)
+ errCtx := fmt.Sprintf("block %d tx %s", bn, tx.Hash)
+ if err := requestAndCompare(request, "debug_traceCall", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess*/ false); err != nil {
+ fmt.Println(err)
+ return err
+ }
+ }
+ }
+ fmt.Println("\nProcessed Blocks: ", nBlocks, ", Transactions", nTransactions)
+
+ return nil
+}
diff --git a/cmd/rpctest/rpctest/bench_debugtracecall.go b/cmd/rpctest/rpctest/bench_debugtracecall.go
deleted file mode 100644
index 4492da33f7b..00000000000
--- a/cmd/rpctest/rpctest/bench_debugtracecall.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package rpctest
-
-import (
- "bufio"
- "fmt"
- "net/http"
- "os"
- "time"
-)
-
-// bench12 compares response of Erigon with Geth
-// but also can be used for comparing RPCDaemon with Geth
-// parameters:
-// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchDebugTraceCall(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
- setRoutes(erigonURL, gethURL)
- var client = &http.Client{
- Timeout: time.Second * 600,
- }
- var rec *bufio.Writer
- if recordFile != "" {
- f, err := os.Create(recordFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
- }
- defer f.Close()
- rec = bufio.NewWriter(f)
- defer rec.Flush()
- }
- var errs *bufio.Writer
- if errorFile != "" {
- ferr, err := os.Create(errorFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
- }
- defer ferr.Close()
- errs = bufio.NewWriter(ferr)
- defer errs.Flush()
- }
-
- var res CallResult
- reqGen := &RequestGenerator{
- client: client,
- }
-
- reqGen.reqID++
- var blockNumber EthBlockNumber
- res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
- if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
- }
- if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
- }
- fmt.Printf("Last block: %d\n", blockNumber.Number)
- for bn := blockFrom; bn <= blockTo; bn++ {
- reqGen.reqID++
- var b EthBlockByNumber
- res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
- if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
- }
-
- if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
- }
-
- if needCompare {
- var bg EthBlockByNumber
- res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
- if res.Err != nil {
- fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
- return
- }
- if bg.Error != nil {
- fmt.Printf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
- return
- }
- if !compareBlocks(&b, &bg) {
- fmt.Printf("Block difference for %d\n", bn)
- return
- }
- }
-
- for _, tx := range b.Result.Transactions {
- reqGen.reqID++
-
- request := reqGen.debugTraceCall(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input, bn-1)
- errCtx := fmt.Sprintf("block %d tx %s", bn, tx.Hash)
- if err := requestAndCompare(request, "debug_traceCall", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
- fmt.Println(err)
- return
- }
- }
- }
-}
diff --git a/cmd/rpctest/rpctest/bench_ethcall.go b/cmd/rpctest/rpctest/bench_ethcall.go
index 6d2e0354515..206792a4bd3 100644
--- a/cmd/rpctest/rpctest/bench_ethcall.go
+++ b/cmd/rpctest/rpctest/bench_ethcall.go
@@ -16,7 +16,7 @@ import (
// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
// recordFile stores all eth_call returned with success
// errorFile stores information when erigon and geth doesn't return same data
-func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFile string, errorFile string) {
+func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -27,22 +27,20 @@ func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom
var resultsCh chan CallResult = nil
var nTransactions = 0
- if errorFile != "" {
- f, err := os.Create(errorFile)
+ if errorFileName != "" {
+ f, err := os.Create(errorFileName)
if err != nil {
- fmt.Printf("Cannot create file %s for errorFile: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", errorFileName, err)
}
defer f.Close()
errs = bufio.NewWriter(f)
defer errs.Flush()
}
- if recordFile != "" {
- frec, errRec := os.Create(recordFile)
+ if recordFileName != "" {
+ frec, errRec := os.Create(recordFileName)
if errRec != nil {
- fmt.Printf("Cannot create file %s for errorFile: %v\n", recordFile, errRec)
- return
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", recordFileName, errRec)
}
defer frec.Close()
rec = bufio.NewWriter(frec)
@@ -67,34 +65,29 @@ func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
if needCompare {
var bg EthBlockByNumber
res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
}
if bg.Error != nil {
- fmt.Printf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
}
if !compareBlocks(&b, &bg) {
- fmt.Printf("Block difference for %d\n", bn)
if rec != nil {
fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
rec.Flush()
continue
} else {
- return
+ return fmt.Errorf("Block one or more fields areis different for block %d\n", bn)
}
}
}
@@ -105,19 +98,23 @@ func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom
nTransactions = nTransactions + 1
var request string
+ var insertedOnlyIfSuccess bool
if latest {
request = reqGen.ethCallLatest(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input)
+ insertedOnlyIfSuccess = true
} else {
request = reqGen.ethCall(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input, bn-1)
+ insertedOnlyIfSuccess = false
}
errCtx := fmt.Sprintf(" bn=%d hash=%s", bn, tx.Hash)
- if err := requestAndCompare(request, "eth_call", errCtx, reqGen, needCompare, rec, errs, resultsCh); err != nil {
- fmt.Println(err)
- return
+ if err := requestAndCompare(request, "eth_call", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ insertedOnlyIfSuccess); err != nil {
+ return err
}
}
fmt.Println("\nProcessed Transactions: ", nTransactions)
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_ethgetBalance.go b/cmd/rpctest/rpctest/bench_ethgetBalance.go
index f3f625b2e72..19a1d4b310f 100644
--- a/cmd/rpctest/rpctest/bench_ethgetBalance.go
+++ b/cmd/rpctest/rpctest/bench_ethgetBalance.go
@@ -12,7 +12,7 @@ import (
// needCompare - if false - doesn't call Erigon and doesn't compare responses
//
// use false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
-func BenchEthGetBalance(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64) {
+func BenchEthGetBalance(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -34,40 +34,34 @@ func BenchEthGetBalance(erigonURL, gethURL string, needCompare bool, blockFrom u
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
for bn := blockFrom; bn <= blockTo; bn++ {
reqGen.reqID++
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
if needCompare {
var bg EthBlockByNumber
res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
}
if bg.Error != nil {
- fmt.Printf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
}
if !compareBlocks(&b, &bg) {
- fmt.Printf("Block difference for %d\n", bn)
- return
+ return fmt.Errorf("Block difference for %d\n", bn)
}
}
@@ -82,29 +76,25 @@ func BenchEthGetBalance(erigonURL, gethURL string, needCompare bool, blockFrom u
resultsCh <- res
}
if res.Err != nil {
- fmt.Printf("Could not get account balance (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get account balance (Erigon): %v\n", res.Err)
}
if balance.Error != nil {
- fmt.Printf("Error getting account balance (Erigon): %d %s", balance.Error.Code, balance.Error.Message)
- return
+ return fmt.Errorf("Error getting account balance (Erigon): %d %s", balance.Error.Code, balance.Error.Message)
}
if needCompare {
var balanceg EthBalance
res = reqGen.Geth("eth_getBalance", reqGen.getBalance(account, bn), &balanceg)
if res.Err != nil {
- fmt.Printf("Could not get account balance (geth): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get account balance (geth): %v\n", res.Err)
}
if balanceg.Error != nil {
- fmt.Printf("Error getting account balance (geth): %d %s\n", balanceg.Error.Code, balanceg.Error.Message)
- return
+ return fmt.Errorf("Error getting account balance (geth): %d %s\n", balanceg.Error.Code, balanceg.Error.Message)
}
if !compareBalances(&balance, &balanceg) {
- fmt.Printf("Account %x balance difference for block %d\n", account, bn)
- return
+ return fmt.Errorf("Account %x balance difference for block %d\n", account, bn)
}
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_ethgetblockbyhash.go b/cmd/rpctest/rpctest/bench_ethgetblockbyhash.go
new file mode 100644
index 00000000000..f6a6d2af90a
--- /dev/null
+++ b/cmd/rpctest/rpctest/bench_ethgetblockbyhash.go
@@ -0,0 +1,110 @@
+package rpctest
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+)
+
+// BenchEthGetBlockByHash compares response of Erigon with Geth
+// but also can be used for comparing RPCDaemon with Geth or infura
+// parameters:
+// needCompare - if false - doesn't call Erigon and doesn't compare responses
+//
+// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
+// recordFileName stores all eth_call returned wite success
+// errorFileName stores information when erigon and geth doesn't return same data
+func BenchEthGetBlockByHash(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonURL, gethURL)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ var errs *bufio.Writer
+ var resultsCh chan CallResult = nil
+ var nBlocks = 0
+
+ if errorFileName != "" {
+ f, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFileName: %v\n", errorFileName, err)
+ }
+ defer f.Sync()
+ defer f.Close()
+ errs = bufio.NewWriter(f)
+ defer errs.Flush()
+ }
+
+ if recordFileName != "" {
+ frec, err := os.Create(recordFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", recordFileName, err)
+ }
+ defer frec.Close()
+ rec = bufio.NewWriter(frec)
+ defer rec.Flush()
+ }
+
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"eth_getBlockByHash"}, resultsCh)
+ }
+ var res CallResult
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ reqGen.reqID++
+
+ for bn := blockFrom; bn <= blockTo; bn++ {
+ reqGen.reqID++
+ var b EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if b.Error != nil {
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ }
+
+ if needCompare {
+ var bg EthBlockByNumber
+ res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if bg.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+ }
+ if !compareBlocks(&b, &bg) {
+ if rec != nil {
+ fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+ rec.Flush()
+ continue
+ } else {
+ return fmt.Errorf("block %d has different fields\n", bn)
+ }
+ }
+ }
+
+ reqGen.reqID++
+ nBlocks++
+ var request string
+ request = reqGen.getBlockByHash(b.Result.Hash, true)
+ errCtx := fmt.Sprintf(" bn=%d hash=%s", bn, b.Result.Hash)
+
+ if err := requestAndCompare(request, "eth_getBlockByHash", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ return err
+ }
+
+ fmt.Println("\nProcessed Blocks: ", nBlocks)
+ }
+ return nil
+}
diff --git a/cmd/rpctest/rpctest/bench_ethgetblockbynumber.go b/cmd/rpctest/rpctest/bench_ethgetblockbynumber.go
new file mode 100644
index 00000000000..8279c19f325
--- /dev/null
+++ b/cmd/rpctest/rpctest/bench_ethgetblockbynumber.go
@@ -0,0 +1,109 @@
+package rpctest
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+)
+
+// BenchEthGetBlockByNumber compares response of Erigon with Geth
+// but also can be used for comparing RPCDaemon with Geth or infura
+// parameters:
+// needCompare - if false - doesn't call Erigon and doesn't compare responses
+//
+// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
+// recordFile stores all eth_call returned with success
+// errorFile stores information when erigon and geth doesn't return same data
+func BenchEthGetBlockByNumber2(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonURL, gethURL)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ var errs *bufio.Writer
+ var resultsCh chan CallResult = nil
+ var nBlocks = 0
+
+ if errorFileName != "" {
+ f, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", errorFileName, err)
+ }
+ defer f.Close()
+ errs = bufio.NewWriter(f)
+ defer errs.Flush()
+ }
+
+ if recordFileName != "" {
+ frec, errRec := os.Create(recordFileName)
+ if errRec != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", recordFileName, errRec)
+ }
+ defer frec.Close()
+ rec = bufio.NewWriter(frec)
+ defer rec.Flush()
+ }
+
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"eth_getBlockByNumber"}, resultsCh)
+ }
+ var res CallResult
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ reqGen.reqID++
+
+ for bn := blockFrom; bn <= blockTo; bn++ {
+ reqGen.reqID++
+ var b EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if b.Error != nil {
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ }
+
+ if needCompare {
+ var bg EthBlockByNumber
+ res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if bg.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+ }
+ if !compareBlocks(&b, &bg) {
+ if rec != nil {
+ fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+ rec.Flush()
+ continue
+ } else {
+ return fmt.Errorf("Block difference for %d\n", bn)
+ }
+ }
+ }
+
+ reqGen.reqID++
+ nBlocks++
+ var request string
+ request = reqGen.getBlockByNumber(bn, true)
+ errCtx := fmt.Sprintf(" bn=%d ", bn)
+
+ if err := requestAndCompare(request, "eth_getBlockByNumber", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ return err
+ }
+
+ fmt.Println("\nProcessed Blocks: ", nBlocks)
+ }
+ return nil
+}
diff --git a/cmd/rpctest/rpctest/bench_ethgetlogs.go b/cmd/rpctest/rpctest/bench_ethgetlogs.go
index 61c921bf36f..d70b868c806 100644
--- a/cmd/rpctest/rpctest/bench_ethgetlogs.go
+++ b/cmd/rpctest/rpctest/bench_ethgetlogs.go
@@ -17,7 +17,7 @@ import (
// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
// recordFile stores all eth_getlogs returned with success
// errorFile stores information when erigon and geth doesn't return same data
-func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -27,8 +27,7 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -38,8 +37,7 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -62,12 +60,10 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
@@ -80,12 +76,10 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
var mag DebugModifiedAccounts
res = reqGen.Erigon("debug_getModifiedAccountsByNumber", reqGen.getModifiedAccountsByNumber(prevBn, bn), &mag)
if res.Err != nil {
- fmt.Printf("Could not get modified accounts (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get modified accounts (Erigon): %v\n", res.Err)
}
if mag.Error != nil {
- fmt.Printf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
- return
+ return fmt.Errorf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
}
if res.Err == nil && mag.Error == nil {
accountSet := extractAccountMap(&mag)
@@ -93,9 +87,10 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
reqGen.reqID++
request := reqGen.getLogs(prevBn, bn, account)
errCtx := fmt.Sprintf("account %x blocks %d-%d", account, prevBn, bn)
- if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh); err != nil {
+ if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
fmt.Println(err)
- return
+ return err
}
topics := getTopics(res.Result)
// All combination of account and one topic
@@ -103,9 +98,10 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
reqGen.reqID++
request = reqGen.getLogs1(prevBn, bn+10000, account, topic)
errCtx := fmt.Sprintf("account %x topic %x blocks %d-%d", account, topic, prevBn, bn)
- if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh); err != nil {
+ if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
// Random combinations of two topics
@@ -118,9 +114,10 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
reqGen.reqID++
request = reqGen.getLogs2(prevBn, bn+100000, account, topics[idx1], topics[idx2])
errCtx := fmt.Sprintf("account %x topic1 %x topic2 %x blocks %d-%d", account, topics[idx1], topics[idx2], prevBn, bn)
- if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh); err != nil {
+ if err := requestAndCompare(request, "eth_getLogs", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
}
@@ -128,4 +125,5 @@ func BenchEthGetLogs(erigonURL, gethURL string, needCompare bool, blockFrom uint
fmt.Printf("Done blocks %d-%d, modified accounts: %d\n", prevBn, bn, len(mag.Result))
prevBn = bn
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_gettransactionbyhash.go b/cmd/rpctest/rpctest/bench_gettransactionbyhash.go
new file mode 100644
index 00000000000..b33ba9dfd18
--- /dev/null
+++ b/cmd/rpctest/rpctest/bench_gettransactionbyhash.go
@@ -0,0 +1,115 @@
+package rpctest
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+)
+
+// BenchEthGetTransactionByHash compares response of Erigon with Geth
+// but also can be used for comparing RPCDaemon with Geth or infura
+// parameters:
+// needCompare - if false - doesn't call Erigon and doesn't compare responses
+//
+// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
+//
+// recordFile stores all eth_GetTransactionByHash returned with success
+//
+// errorFile stores information when erigon and geth doesn't return same data
+func BenchEthGetTransactionByHash(erigonURL, gethURL string, needCompare bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonURL, gethURL)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ var errs *bufio.Writer
+ var resultsCh chan CallResult = nil
+ var nTransactions = 0
+
+ if errorFileName != "" {
+ f, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", errorFileName, err)
+ }
+ defer f.Close()
+ errs = bufio.NewWriter(f)
+ defer errs.Flush()
+ }
+
+ if recordFileName != "" {
+ frec, errRec := os.Create(recordFileName)
+ if errRec != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", recordFileName, errRec)
+ }
+ defer frec.Close()
+ rec = bufio.NewWriter(frec)
+ defer rec.Flush()
+ }
+
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"eth_getTransactionByHash"}, resultsCh)
+ }
+ var res CallResult
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ reqGen.reqID++
+
+ for bn := blockFrom; bn <= blockTo; bn++ {
+ reqGen.reqID++
+ var b EthBlockByNumber
+ res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if b.Error != nil {
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ }
+
+ if needCompare {
+ var bg EthBlockByNumber
+ res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if bg.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+ }
+ if !compareBlocks(&b, &bg) {
+ if rec != nil {
+ fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+ rec.Flush()
+ continue
+ } else {
+ return fmt.Errorf("Block one or more fields areis different for block %d\n", bn)
+ }
+ }
+ }
+
+ for _, tx := range b.Result.Transactions {
+
+ reqGen.reqID++
+ nTransactions = nTransactions + 1
+
+ var request string
+ request = reqGen.getTransactionByHash(tx.Hash)
+ errCtx := fmt.Sprintf(" bn=%d hash=%s", bn, tx.Hash)
+
+ if err := requestAndCompare(request, "eth_getTransactionByHash", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ return err
+ }
+ }
+
+ fmt.Println("\nProcessed Transactions: ", nTransactions)
+ }
+ return nil
+}
diff --git a/cmd/rpctest/rpctest/bench_otsgetblockTransactions.go b/cmd/rpctest/rpctest/bench_otsgetblockTransactions.go
new file mode 100644
index 00000000000..ac08b131a3b
--- /dev/null
+++ b/cmd/rpctest/rpctest/bench_otsgetblockTransactions.go
@@ -0,0 +1,113 @@
+package rpctest
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+)
+
+// BenchOtsGetBlockTransactions compares response of Erigon with Geth
+// but also can be used for comparing RPCDaemon with Geth
+// parameters:
+// needCompare - if false - doesn't call Erigon and doesn't compare responses
+//
+// use false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon
+func BenchOtsGetBlockTransactions(erigonURL, gethURL string, needCompare, visitAllPages bool, latest bool, blockFrom, blockTo uint64, recordFileName string, errorFileName string) error {
+ setRoutes(erigonURL, gethURL)
+ var client = &http.Client{
+ Timeout: time.Second * 600,
+ }
+
+ var rec *bufio.Writer
+ var errs *bufio.Writer
+ var resultsCh chan CallResult = nil
+
+ if errorFileName != "" {
+ f, err := os.Create(errorFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFileName: %v\n", errorFileName, err)
+ }
+ defer f.Sync()
+ defer f.Close()
+ errs = bufio.NewWriter(f)
+ defer errs.Flush()
+ }
+
+ if recordFileName != "" {
+ frec, err := os.Create(recordFileName)
+ if err != nil {
+ return fmt.Errorf("Cannot create file %s for errorFile: %v\n", recordFileName, err)
+ }
+ defer frec.Close()
+ rec = bufio.NewWriter(frec)
+ defer rec.Flush()
+ }
+
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"ots_getBlockTransactions"}, resultsCh)
+ }
+
+ var res CallResult
+
+ reqGen := &RequestGenerator{
+ client: client,
+ }
+
+ reqGen.reqID++
+
+ for bn := blockFrom; bn <= blockTo; bn++ {
+
+ var pageCount uint64 = 0
+ pageEnded := false
+
+ for !pageEnded {
+ reqGen.reqID++
+ var b OtsBlockTransactions
+ res = reqGen.Erigon("ots_getBlockTransactions", reqGen.otsGetBlockTransactions(bn, pageCount, 10), &b)
+
+ if len(b.Result.FullBlock.Transactions) < 1 || !visitAllPages {
+ pageEnded = true
+ }
+
+ if !needCompare {
+ resultsCh <- res
+ }
+
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve transactions of block (Erigon) %d: %v\n", bn, res.Err)
+ }
+
+ if b.Error != nil {
+ return fmt.Errorf("Error retrieving transactions of block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
+ }
+
+ if needCompare {
+ var bg OtsBlockTransactions
+ res = reqGen.Geth("ots_getBlockTransactions", reqGen.otsGetBlockTransactions(bn, pageCount, 10), &bg)
+ if res.Err != nil {
+ return fmt.Errorf("Could not retrieve block (geth) %d: %v\n", bn, res.Err)
+ }
+ if bg.Error != nil {
+ return fmt.Errorf("Error retrieving block (geth): %d %s\n", bg.Error.Code, bg.Error.Message)
+ }
+ if !compareBlockTransactions(&b, &bg) {
+ if rec != nil {
+ fmt.Fprintf(rec, "Block difference for block=%d\n", bn)
+ rec.Flush()
+ continue
+ } else {
+ return fmt.Errorf("block %d has different fields\n", bn)
+ }
+ }
+ }
+ pageCount++
+ }
+
+ }
+
+ return nil
+}
diff --git a/cmd/rpctest/rpctest/bench_traceblock.go b/cmd/rpctest/rpctest/bench_traceblock.go
index 6e6105be1db..9b93add8568 100644
--- a/cmd/rpctest/rpctest/bench_traceblock.go
+++ b/cmd/rpctest/rpctest/bench_traceblock.go
@@ -12,7 +12,7 @@ import (
// but also can be used for comparing RPCDaemon with OpenEthereum
// parameters:
// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonURL, oeURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -21,8 +21,7 @@ func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -32,8 +31,7 @@ func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -49,12 +47,10 @@ func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
for bn := blockFrom; bn <= blockTo; bn++ {
@@ -62,20 +58,19 @@ func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
reqGen.reqID++
request := reqGen.traceBlock(bn)
errCtx := fmt.Sprintf("block %d", bn)
- if err := requestAndCompare(request, "trace_block", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_block", errCtx, reqGen, needCompare, rec, errs, nil /* insertOnlyIfSuccess */, false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_tracecall.go b/cmd/rpctest/rpctest/bench_tracecall.go
index 1a1202b9873..6902e37b7d0 100644
--- a/cmd/rpctest/rpctest/bench_tracecall.go
+++ b/cmd/rpctest/rpctest/bench_tracecall.go
@@ -12,7 +12,7 @@ import (
// but also can be used for comparing RPCDaemon with Geth
// parameters:
// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonURL, oeURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -21,8 +21,7 @@ func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64,
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -32,8 +31,7 @@ func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64,
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -49,12 +47,10 @@ func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64,
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
for bn := blockFrom; bn <= blockTo; bn++ {
@@ -62,22 +58,21 @@ func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64,
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
for _, tx := range b.Result.Transactions {
reqGen.reqID++
request := reqGen.traceCall(tx.From, tx.To, &tx.Gas, &tx.GasPrice, &tx.Value, tx.Input, bn-1)
errCtx := fmt.Sprintf("block %d, tx %s", bn, tx.Hash)
- if err := requestAndCompare(request, "trace_call", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
- return
+ if err := requestAndCompare(request, "trace_call", errCtx, reqGen, needCompare, rec, errs, nil /* insertOnlyIfSuccess */, false); err != nil {
+ return err
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_tracecallmany.go b/cmd/rpctest/rpctest/bench_tracecallmany.go
index a3599af7c0c..7c1b1953769 100644
--- a/cmd/rpctest/rpctest/bench_tracecallmany.go
+++ b/cmd/rpctest/rpctest/bench_tracecallmany.go
@@ -7,17 +7,17 @@ import (
"os"
"time"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// BenchTraceCallMany compares response of Erigon with Geth
// but also can be used for comparing RPCDaemon with Geth
// parameters:
// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonURL, oeURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -26,8 +26,7 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -37,8 +36,7 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -54,12 +52,10 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
for bn := blockFrom; bn <= blockTo; bn++ {
@@ -67,12 +63,10 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
n := len(b.Result.Transactions)
@@ -96,9 +90,10 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin
request := reqGen.traceCallMany(from, to, gas, gasPrice, value, data, bn-1)
errCtx := fmt.Sprintf("block %d", bn)
- if err := requestAndCompare(request, "trace_callMany", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_callMany", errCtx, reqGen, needCompare, rec, errs, nil /* insertOnlyIfSuccess */, false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_tracefilter.go b/cmd/rpctest/rpctest/bench_tracefilter.go
index 9d907c2c559..927c93e5004 100644
--- a/cmd/rpctest/rpctest/bench_tracefilter.go
+++ b/cmd/rpctest/rpctest/bench_tracefilter.go
@@ -15,7 +15,7 @@ import (
// but also can be used for comparing RPCDaemon with Geth
// parameters:
// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonURL, oeURL)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -24,8 +24,7 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -35,8 +34,7 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -52,12 +50,10 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
rnd := rand.New(rand.NewSource(42)) // nolint:gosec
@@ -68,12 +64,10 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
var mag DebugModifiedAccounts
res = reqGen.Erigon("debug_getModifiedAccountsByNumber", reqGen.getModifiedAccountsByNumber(prevBn, bn), &mag)
if res.Err != nil {
- fmt.Printf("Could not get modified accounts (Erigon): %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get modified accounts (Erigon): %v\n", res.Err)
}
if mag.Error != nil {
- fmt.Printf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
- return
+ return fmt.Errorf("Error getting modified accounts (Erigon): %d %s\n", mag.Error.Code, mag.Error.Message)
}
if res.Err == nil && mag.Error == nil {
accountSet := extractAccountMap(&mag)
@@ -95,16 +89,16 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
reqGen.reqID++
request := reqGen.traceFilterFrom(prevBn, bn, account)
errCtx := fmt.Sprintf("traceFilterFrom fromBlock %d, toBlock %d, fromAddress %x", prevBn, bn, account)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil /* insertOnlyIfSuccess */, false); err != nil {
fmt.Println(err)
- return
+ return err
}
reqGen.reqID++
request = reqGen.traceFilterTo(prevBn, bn, account)
errCtx = fmt.Sprintf("traceFilterTo fromBlock %d, toBlock %d, fromAddress %x", prevBn, bn, account)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil /* insertOnlyIfSuccess */, false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
/*
@@ -114,30 +108,30 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
reqGen.reqID++
request := reqGen.traceFilterUnion(prevBn, bn, from, to)
errCtx := fmt.Sprintf("traceFilterUnion fromBlock %d, toBlock %d, fromAddress %x, toAddress %x", prevBn, bn, from, to)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil, false); err != nil {
fmt.Println(err)
- return
+ return err
}
reqGen.reqID++
request = reqGen.traceFilterAfter(prevBn, bn, 1)
errCtx = fmt.Sprintf("traceFilterAfter fromBlock %d, toBlock %d, after %x", prevBn, bn, 1)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil, false); err != nil {
fmt.Println(err)
- return
+ return err
}
reqGen.reqID++
request = reqGen.traceFilterCount(prevBn, bn, 1)
errCtx = fmt.Sprintf("traceFilterCount fromBlock %d, toBlock %d, count %x", prevBn, bn, 1)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil, false); err != nil {
fmt.Println(err)
- return
+ return err
}
reqGen.reqID++
request = reqGen.traceFilterCountAfter(prevBn, bn, 1, 1)
errCtx = fmt.Sprintf("traceFilterCountAfter fromBlock %d, toBlock %d, count %x, after %x", prevBn, bn, 1, 1)
- if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil, false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
*/
@@ -145,4 +139,5 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6
fmt.Printf("Done blocks %d-%d, modified accounts: %d\n", prevBn, bn, len(mag.Result))
prevBn = bn
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_tracereplaytransaction.go b/cmd/rpctest/rpctest/bench_tracereplaytransaction.go
index 82e8854f5de..0124d27182e 100644
--- a/cmd/rpctest/rpctest/bench_tracereplaytransaction.go
+++ b/cmd/rpctest/rpctest/bench_tracereplaytransaction.go
@@ -8,7 +8,7 @@ import (
"time"
)
-func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) error {
setRoutes(erigonUrl, gethUrl)
var client = &http.Client{
Timeout: time.Second * 600,
@@ -18,8 +18,7 @@ func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, bl
if recordFile != "" {
f, err := os.Create(recordFile)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFile, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
@@ -29,8 +28,7 @@ func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, bl
if errorFile != "" {
ferr, err := os.Create(errorFile)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFile, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
@@ -48,21 +46,21 @@ func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, bl
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err)
- return
+ return fmt.Errorf("retrieve block (Erigon) %d: %v", blockFrom, res.Err)
}
if b.Error != nil {
- fmt.Printf("retrieving block (Erigon): %d %s", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("retrieving block (Erigon): %d %s", b.Error.Code, b.Error.Message)
}
for _, tx := range b.Result.Transactions {
reqGen.reqID++
request := reqGen.traceReplayTransaction(tx.Hash)
errCtx := fmt.Sprintf("block %d, tx %s", bn, tx.Hash)
- if err := requestAndCompare(request, "trace_replayTransaction", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
+ if err := requestAndCompare(request, "trace_replayTransaction", errCtx, reqGen, needCompare, rec, errs, nil,
+ /* insertOnlyIfSuccess */ false); err != nil {
fmt.Println(err)
- return
+ return err
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/bench_tracetransaction.go b/cmd/rpctest/rpctest/bench_tracetransaction.go
deleted file mode 100644
index 36c4d50092f..00000000000
--- a/cmd/rpctest/rpctest/bench_tracetransaction.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package rpctest
-
-import (
- "bufio"
- "fmt"
- "net/http"
- "os"
- "time"
-)
-
-func BenchTraceBlockByHash(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
- setRoutes(erigonUrl, gethUrl)
- var client = &http.Client{
- Timeout: time.Second * 600,
- }
-
- var rec *bufio.Writer
- if recordFile != "" {
- f, err := os.Create(recordFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
- }
- defer f.Close()
- rec = bufio.NewWriter(f)
- defer rec.Flush()
- }
- var errs *bufio.Writer
- if errorFile != "" {
- ferr, err := os.Create(errorFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
- }
- defer ferr.Close()
- errs = bufio.NewWriter(ferr)
- defer errs.Flush()
- }
-
- var res CallResult
- reqGen := &RequestGenerator{
- client: client,
- }
-
- reqGen.reqID++
-
- for bn := blockFrom; bn < blockTo; bn++ {
- var b EthBlockByNumber
- res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
- if res.Err != nil {
- fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err)
- return
- }
- if b.Error != nil {
- fmt.Printf("retrieving block (Erigon): %d %s", b.Error.Code, b.Error.Message)
- return
- }
- reqGen.reqID++
- request := reqGen.traceBlockByHash(b.Result.Hash.Hex())
- errCtx := fmt.Sprintf("block %d, tx %s", bn, b.Result.Hash.Hex())
- if err := requestAndCompare(request, "debug_traceBlockByHash", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
- fmt.Println(err)
- return
- }
- }
-}
-
-func BenchTraceTransaction(erigonUrl, gethUrl string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
- setRoutes(erigonUrl, gethUrl)
- var client = &http.Client{
- Timeout: time.Second * 600,
- }
-
- var rec *bufio.Writer
- if recordFile != "" {
- f, err := os.Create(recordFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
- }
- defer f.Close()
- rec = bufio.NewWriter(f)
- defer rec.Flush()
- }
- var errs *bufio.Writer
- if errorFile != "" {
- ferr, err := os.Create(errorFile)
- if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
- }
- defer ferr.Close()
- errs = bufio.NewWriter(ferr)
- defer errs.Flush()
- }
-
- var res CallResult
- reqGen := &RequestGenerator{
- client: client,
- }
-
- reqGen.reqID++
-
- for bn := blockFrom; bn < blockTo; bn++ {
- var b EthBlockByNumber
- res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
- if res.Err != nil {
- fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err)
- return
- }
- if b.Error != nil {
- fmt.Printf("retrieving block (Erigon): %d %s", b.Error.Code, b.Error.Message)
- return
- }
- for _, tx := range b.Result.Transactions {
- reqGen.reqID++
- request := reqGen.traceTransaction(tx.Hash)
- errCtx := fmt.Sprintf("block %d, tx %s", bn, tx.Hash)
- if err := requestAndCompare(request, "debug_traceTransaction", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
- fmt.Println(err)
- return
- }
- }
- }
-}
diff --git a/cmd/rpctest/rpctest/bench_txreceipts.go b/cmd/rpctest/rpctest/bench_txreceipts.go
index 7fae08b2fde..8b677976a44 100644
--- a/cmd/rpctest/rpctest/bench_txreceipts.go
+++ b/cmd/rpctest/rpctest/bench_txreceipts.go
@@ -12,35 +12,41 @@ import (
// but also can be used for comparing RPCDaemon with Geth
// parameters:
// needCompare - if false - doesn't call Erigon and doesn't compare responses
-func BenchTxReceipt(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) {
+func BenchTxReceipt(erigonURL, gethURL string, needCompare bool, blockFrom uint64, blockTo uint64, recordFileName string, errorFileName string) error {
setRoutes(erigonURL, gethURL)
var client = &http.Client{
Timeout: time.Second * 600,
}
var rec *bufio.Writer
- if recordFile != "" {
- f, err := os.Create(recordFile)
+ var resultsCh chan CallResult = nil
+
+ if recordFileName != "" {
+ f, err := os.Create(recordFileName)
if err != nil {
- fmt.Printf("Cannot create file %s for recording: %v\n", recordFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for recording: %v\n", recordFileName, err)
}
defer f.Close()
rec = bufio.NewWriter(f)
defer rec.Flush()
}
var errs *bufio.Writer
- if errorFile != "" {
- ferr, err := os.Create(errorFile)
+ if errorFileName != "" {
+ ferr, err := os.Create(errorFileName)
if err != nil {
- fmt.Printf("Cannot create file %s for error output: %v\n", errorFile, err)
- return
+ return fmt.Errorf("Cannot create file %s for error output: %v\n", errorFileName, err)
}
defer ferr.Close()
errs = bufio.NewWriter(ferr)
defer errs.Flush()
}
+ if !needCompare {
+ resultsCh = make(chan CallResult, 1000)
+ defer close(resultsCh)
+ go vegetaWrite(true, []string{"eth_getTransactionReceipt"}, resultsCh)
+ }
+
var res CallResult
reqGen := &RequestGenerator{
client: client,
@@ -50,12 +56,10 @@ func BenchTxReceipt(erigonURL, gethURL string, needCompare bool, blockFrom uint6
var blockNumber EthBlockNumber
res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber)
if res.Err != nil {
- fmt.Printf("Could not get block number: %v\n", res.Err)
- return
+ return fmt.Errorf("Could not get block number: %v\n", res.Err)
}
if blockNumber.Error != nil {
- fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
- return
+ return fmt.Errorf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message)
}
fmt.Printf("Last block: %d\n", blockNumber.Number)
for bn := blockFrom; bn <= blockTo; bn++ {
@@ -63,22 +67,21 @@ func BenchTxReceipt(erigonURL, gethURL string, needCompare bool, blockFrom uint6
var b EthBlockByNumber
res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b)
if res.Err != nil {
- fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
- return
+ return fmt.Errorf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err)
}
if b.Error != nil {
- fmt.Printf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
- return
+ return fmt.Errorf("Error retrieving block (Erigon): %d %s\n", b.Error.Code, b.Error.Message)
}
for _, tx := range b.Result.Transactions {
reqGen.reqID++
request := reqGen.getTransactionReceipt(tx.Hash)
errCtx := fmt.Sprintf("block %d, tx %s", bn, tx.Hash)
- if err := requestAndCompare(request, "eth_getTransactionReceipt", errCtx, reqGen, needCompare, rec, errs, nil); err != nil {
- fmt.Println(err)
- return
+ if err := requestAndCompare(request, "eth_getTransactionReceipt", errCtx, reqGen, needCompare, rec, errs, resultsCh,
+ /* insertOnlyIfSuccess */ false); err != nil {
+ return err
}
}
}
+ return nil
}
diff --git a/cmd/rpctest/rpctest/request_generator.go b/cmd/rpctest/rpctest/request_generator.go
index b5936b8980a..dfb75763005 100644
--- a/cmd/rpctest/rpctest/request_generator.go
+++ b/cmd/rpctest/rpctest/request_generator.go
@@ -7,12 +7,12 @@ import (
"strings"
"time"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
"github.com/valyala/fastjson"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
type CallResult struct {
@@ -39,6 +39,16 @@ func (g *RequestGenerator) getBlockByNumber(blockNum uint64, withTxs bool) strin
return fmt.Sprintf(template, blockNum, withTxs, g.reqID)
}
+func (g *RequestGenerator) getBlockByHash(hash libcommon.Hash, withTxs bool) string {
+ const template = `{"jsonrpc":"2.0","method":"eth_getBlockByHash","params":["0x%x",%t],"id":%d}`
+ return fmt.Sprintf(template, hash, withTxs, g.reqID)
+}
+
+func (g *RequestGenerator) getTransactionByHash(hash string) string {
+ const template = `{"jsonrpc":"2.0","method":"eth_getTransactionByHash","params":["%s"],"id":%d}`
+ return fmt.Sprintf(template, hash, g.reqID)
+}
+
func (g *RequestGenerator) storageRangeAt(hash libcommon.Hash, i int, to *libcommon.Address, nextKey libcommon.Hash) string {
const template = `{"jsonrpc":"2.0","method":"debug_storageRangeAt","params":["0x%x", %d,"0x%x","0x%x",%d],"id":%d}`
return fmt.Sprintf(template, hash, i, to, nextKey, 1024, g.reqID)
@@ -49,7 +59,12 @@ func (g *RequestGenerator) traceBlockByHash(hash string) string {
return fmt.Sprintf(template, hash, g.reqID)
}
-func (g *RequestGenerator) traceTransaction(hash string) string {
+func (g *RequestGenerator) debugTraceBlockByNumber(blockNum uint64) string {
+ const template = `{"jsonrpc":"2.0","method":"debug_traceBlockByNumber","params":[%d],"id":%d}`
+ return fmt.Sprintf(template, blockNum, g.reqID)
+}
+
+func (g *RequestGenerator) debugTraceTransaction(hash string) string {
const template = `{"jsonrpc":"2.0","method":"debug_traceTransaction","params":["%s"],"id":%d}`
return fmt.Sprintf(template, hash, g.reqID)
}
@@ -243,6 +258,11 @@ func (g *RequestGenerator) ethCallLatest(from libcommon.Address, to *libcommon.A
return sb.String()
}
+func (g *RequestGenerator) otsGetBlockTransactions(block_number uint64, page_number uint64, page_size uint64) string {
+ const template = `{"id":1,"jsonrpc":"2.0","method":"ots_getBlockTransactions","params":[%d, %d, %d]}`
+ return fmt.Sprintf(template, block_number, page_number, page_size)
+}
+
func (g *RequestGenerator) call(target string, method, body string, response interface{}) CallResult {
start := time.Now()
err := post(g.client, routes[target], body, response)
diff --git a/cmd/rpctest/rpctest/request_generator_test.go b/cmd/rpctest/rpctest/request_generator_test.go
index 2dfecf6cba1..99173249a85 100644
--- a/cmd/rpctest/rpctest/request_generator_test.go
+++ b/cmd/rpctest/rpctest/request_generator_test.go
@@ -168,7 +168,7 @@ func TestRequestGenerator_traceTransaction(t *testing.T) {
for _, testCase := range testCases {
reqGen := MockRequestGenerator(testCase.reqId)
- got := reqGen.traceTransaction(testCase.hash)
+ got := reqGen.debugTraceTransaction(testCase.hash)
require.EqualValues(t, testCase.expected, got)
}
}
diff --git a/cmd/rpctest/rpctest/type.go b/cmd/rpctest/rpctest/type.go
index 134bbbd1b7b..268fc2c2b16 100644
--- a/cmd/rpctest/rpctest/type.go
+++ b/cmd/rpctest/rpctest/type.go
@@ -3,10 +3,11 @@ package rpctest
import (
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/state"
)
@@ -241,11 +242,6 @@ type EthGetLogs struct {
Result []Log `json:"result"`
}
-type EthGetTransactionCount struct {
- CommonResponse
- Result hexutil.Uint64 `json:"result"`
-}
-
// AccountResult is the result struct for GetProof
type AccountResult struct {
Address libcommon.Address `json:"address"`
@@ -266,3 +262,67 @@ type ParityListStorageKeysResult struct {
CommonResponse
Result []hexutility.Bytes `json:"result"`
}
+
+type OtsTransaction struct {
+ BlockHash libcommon.Hash `json:"blockHash"`
+ BlockNumber hexutil.Uint64 `json:"blockNumber"`
+ From libcommon.Address `json:"from"`
+ Gas hexutil.Big `json:"gas"`
+ GasPrice hexutil.Big `json:"gasPrice"`
+ Hash string `json:"hash"`
+ Input hexutility.Bytes `json:"input"`
+ To *libcommon.Address `json:"to"` // Pointer because it might be missing
+ TransactionIndex hexutil.Uint64 `json:"transactionIndex"`
+ Value hexutil.Big `json:"value"`
+ Type hexutil.Big `json:"type"` // To check
+ ChainId hexutil.Big `json:"chainId"` // To check
+}
+
+type OtsReceipt struct {
+ BlockHash libcommon.Hash `json:"blockHash"`
+ BlockNumber hexutil.Uint64 `json:"blockNumber"`
+ ContractAddress string `json:"contractAddress"`
+ CumulativeGasUsed hexutil.Big `json:"cumulativeGasUsed"`
+ EffectiveGasPrice hexutil.Big `json:"effectiveGasPrice"`
+ From libcommon.Address `json:"from"`
+ GasUsed hexutil.Big `json:"gasUsed"`
+ To *libcommon.Address `json:"to"` // Pointer because it might be missing
+ TransactionHash string `json:"hash"`
+ TransactionIndex hexutil.Uint64 `json:"transactionIndex"`
+}
+
+type OtsFullBlock struct {
+ Difficulty hexutil.Big `json:"difficulty"`
+ ExtraData string `json:"extraData"`
+ GasLimit hexutil.Big `json:"gasLimit"`
+ GasUsed hexutil.Big `json:"gasUsed"`
+ Hash libcommon.Hash `json:"hash"`
+ Bloom string `json:"logsBloom" gencodec:"required"`
+ Miner libcommon.Address `json:"miner"`
+ MixHash string `json:"mixHash"`
+ Nonce string `json:"nonce"`
+ Number hexutil.Big `json:"number"`
+
+ ParentHash string `json:"parentHash"`
+ ReceiptsRoot string `json:"receiptsRoot"`
+ Sha3Uncles string `json:"sha3Uncles"`
+ Size hexutil.Big `json:"size"`
+ StateRoot string `json:"stateRoot"`
+ Timestamp string `json:"timestamp"`
+
+ TotalDifficulty hexutil.Big `json:"totalDifficulty"`
+ TransactionCount uint64 `json:"transactionCount"`
+ Transactions []OtsTransaction `json:"transactions"`
+ TxRoot libcommon.Hash `json:"transactionsRoot"`
+ Uncles []string `json:"uncles"`
+}
+
+type OtsBlockTransactionsResult struct {
+ FullBlock *OtsFullBlock `json:"fullblock"`
+ Receipts []OtsReceipt `json:"receipts"`
+}
+
+type OtsBlockTransactions struct {
+ CommonResponse
+ Result *OtsBlockTransactionsResult `json:"result"`
+}
diff --git a/cmd/rpctest/rpctest/utils.go b/cmd/rpctest/rpctest/utils.go
index 95d21dbb8a7..e78ffce8579 100644
--- a/cmd/rpctest/rpctest/utils.go
+++ b/cmd/rpctest/rpctest/utils.go
@@ -231,7 +231,7 @@ func compareErrors(errVal *fastjson.Value, errValg *fastjson.Value, methodName s
return nil
}
-func requestAndCompare(request string, methodName string, errCtx string, reqGen *RequestGenerator, needCompare bool, rec *bufio.Writer, errs *bufio.Writer, channel chan CallResult) error {
+func requestAndCompare(request string, methodName string, errCtx string, reqGen *RequestGenerator, needCompare bool, rec *bufio.Writer, errs *bufio.Writer, channel chan CallResult, insertOnlyIfSuccess bool) error {
recording := rec != nil
res := reqGen.Erigon2(methodName, request)
if res.Err != nil {
@@ -243,9 +243,6 @@ func requestAndCompare(request string, methodName string, errCtx string, reqGen
return fmt.Errorf("error invoking %s (Erigon): %d %s", methodName, errVal.GetInt("code"), errVal.GetStringBytes("message"))
}
}
- if channel != nil {
- channel <- res
- }
if needCompare {
resg := reqGen.Geth2(methodName, request)
if resg.Err != nil {
@@ -279,7 +276,14 @@ func requestAndCompare(request string, methodName string, errCtx string, reqGen
} else {
return compareErrors(errVal, errValg, methodName, errCtx, errs)
}
+ } else {
+ if channel != nil {
+ if insertOnlyIfSuccess == false || (insertOnlyIfSuccess && errVal == nil) {
+ channel <- res
+ }
+ }
}
+
if recording {
fmt.Fprintf(rec, "%s\n%s\n\n", request, res.Response)
}
@@ -652,3 +656,185 @@ func setRoutes(erigonUrl, gethURL string) {
routes[Erigon] = erigonUrl
routes[Geth] = gethURL
}
+
+func compareBlockTransactions(b, bg *OtsBlockTransactions) bool {
+ r := b.Result
+ rg := bg.Result
+ if r.FullBlock.Difficulty.ToInt().Cmp(rg.FullBlock.Difficulty.ToInt()) != 0 {
+ fmt.Printf("Different Difficulty %d %d\n", r.FullBlock.Difficulty.ToInt(), rg.FullBlock.Difficulty.ToInt())
+ return false
+ }
+ if r.FullBlock.ExtraData != rg.FullBlock.ExtraData {
+ fmt.Printf("Different ExtraData %s %s\n", r.FullBlock.ExtraData, rg.FullBlock.ExtraData)
+ return false
+ }
+ if r.FullBlock.GasLimit.ToInt().Cmp(rg.FullBlock.GasLimit.ToInt()) != 0 {
+ fmt.Printf("Different GasLimit %d %d\n", r.FullBlock.GasLimit.ToInt(), rg.FullBlock.GasLimit.ToInt())
+ return false
+ }
+ if r.FullBlock.GasUsed.ToInt().Cmp(rg.FullBlock.GasUsed.ToInt()) != 0 {
+ fmt.Printf("Different GasUsed %d %d\n", r.FullBlock.GasUsed.ToInt(), rg.FullBlock.GasUsed.ToInt())
+ return false
+ }
+ if r.FullBlock.Hash.String() != rg.FullBlock.Hash.String() {
+ fmt.Printf("Different Hash %s %s\n", r.FullBlock.Hash.String(), rg.FullBlock.Hash.String())
+ return false
+ }
+ if r.FullBlock.Bloom != rg.FullBlock.Bloom {
+ fmt.Printf("Different Bloom %s %s\n", r.FullBlock.Bloom, rg.FullBlock.Bloom)
+ return false
+ }
+ if r.FullBlock.Miner != rg.FullBlock.Miner {
+ fmt.Printf("Different Miner %x %x\n", r.FullBlock.Miner, rg.FullBlock.Miner)
+ return false
+ }
+ if r.FullBlock.MixHash != rg.FullBlock.MixHash {
+ fmt.Printf("Different MixHash %s %s\n", r.FullBlock.MixHash, rg.FullBlock.MixHash)
+ return false
+ }
+ if r.FullBlock.Nonce != rg.FullBlock.Nonce {
+ fmt.Printf("Different Nonce %s %s\n", r.FullBlock.Nonce, rg.FullBlock.Nonce)
+ return false
+ }
+ if r.FullBlock.Number.ToInt().Cmp(rg.FullBlock.Number.ToInt()) != 0 {
+ fmt.Printf("Different Number %d %d\n", r.FullBlock.Number.ToInt(), rg.FullBlock.Number.ToInt())
+ return false
+ }
+ if r.FullBlock.ParentHash != rg.FullBlock.ParentHash {
+ fmt.Printf("Different ParentHash %s %s\n", r.FullBlock.ParentHash, rg.FullBlock.ParentHash)
+ return false
+ }
+ if r.FullBlock.ReceiptsRoot != rg.FullBlock.ReceiptsRoot {
+ fmt.Printf("Different ReceiptsRoot %s %s\n", r.FullBlock.ReceiptsRoot, rg.FullBlock.ReceiptsRoot)
+ return false
+ }
+ if r.FullBlock.Sha3Uncles != rg.FullBlock.Sha3Uncles {
+ fmt.Printf("Different Sha3Uncles %s %s\n", r.FullBlock.Sha3Uncles, rg.FullBlock.Sha3Uncles)
+ return false
+ }
+ if r.FullBlock.Size.ToInt().Cmp(rg.FullBlock.Size.ToInt()) != 0 {
+ fmt.Printf("Different Size %d %d\n", r.FullBlock.Size.ToInt(), rg.FullBlock.Size.ToInt())
+ return false
+ }
+ if r.FullBlock.StateRoot != rg.FullBlock.StateRoot {
+ fmt.Printf("Different StateRoot %s %s\n", r.FullBlock.StateRoot, rg.FullBlock.StateRoot)
+ return false
+ }
+ if r.FullBlock.Timestamp != rg.FullBlock.Timestamp {
+ fmt.Printf("Different Timestamp %s %s\n", r.FullBlock.Timestamp, rg.FullBlock.Timestamp)
+ return false
+ }
+ if len(r.FullBlock.Transactions) != len(rg.FullBlock.Transactions) {
+ fmt.Printf("Num of txs different: %d %d\n", len(r.FullBlock.Transactions), len(rg.FullBlock.Transactions))
+ return false
+ }
+ for i, uncle := range r.FullBlock.Uncles {
+ if uncle != rg.FullBlock.Uncles[i] {
+ fmt.Printf("Uncles %d different: %x %x\n", i, uncle, rg.FullBlock.Uncles[i])
+ return false
+ }
+ }
+ for i, tx := range r.FullBlock.Transactions {
+ txg := rg.FullBlock.Transactions[i]
+ if tx.From != txg.From {
+ fmt.Printf("Tx %d different From: %x %x\n", i, tx.From, txg.From)
+ return false
+ }
+ if (tx.To == nil && txg.To != nil) || (tx.To != nil && txg.To == nil) {
+ fmt.Printf("Tx %d different To nilness: %t %t\n", i, tx.To == nil, txg.To == nil)
+ return false
+ }
+ if tx.To != nil && txg.To != nil && *tx.To != *txg.To {
+ fmt.Printf("Tx %d different To: %x %x\n", i, *tx.To, *txg.To)
+ return false
+ }
+ if tx.Hash != txg.Hash {
+ fmt.Printf("Tx %d different Hash: %s %s\n", i, tx.Hash, txg.Hash)
+ return false
+ }
+ if tx.BlockHash.String() != txg.BlockHash.String() {
+ fmt.Printf("Tx %d different BlockHash: %s %s\n", i, tx.BlockHash.String(), txg.BlockHash.String())
+ return false
+ }
+ if tx.BlockNumber.String() != txg.BlockNumber.String() {
+ fmt.Printf("Tx %d different TransactionHash: %s %s\n", i, tx.BlockNumber.String(), txg.BlockNumber.String())
+ return false
+ }
+ if tx.Gas.ToInt().Cmp(txg.Gas.ToInt()) != 0 {
+ fmt.Printf("Tx %d different Gas: %d %d\n", i, tx.Gas.ToInt(), txg.Gas.ToInt())
+ return false
+ }
+ if tx.GasPrice.ToInt().Cmp(txg.GasPrice.ToInt()) != 0 {
+ fmt.Printf("Tx %d different GasPrice: %d %d\n", i, tx.GasPrice.ToInt(), txg.GasPrice.ToInt())
+ return false
+ }
+ if tx.Input.String() != txg.Input.String() {
+ fmt.Printf("Tx %d different Input: %s %s\n", i, tx.Input.String(), txg.Input.String())
+ return false
+ }
+ if tx.TransactionIndex.String() != txg.TransactionIndex.String() {
+ fmt.Printf("Tx %d different TransactionIndex: %s %s\n", i, tx.TransactionIndex.String(), txg.TransactionIndex.String())
+ return false
+ }
+ if tx.Value.ToInt().Cmp(txg.Value.ToInt()) != 0 {
+ fmt.Printf("Tx %d different Value: %d %d\n", i, tx.Value.ToInt(), txg.Value.ToInt())
+ return false
+ }
+ if tx.Type.ToInt().Cmp(txg.Type.ToInt()) != 0 {
+ fmt.Printf("Tx %d different Type: %d %d\n", i, tx.Type.ToInt(), txg.Type.ToInt())
+ return false
+ }
+ if tx.ChainId.ToInt().Cmp(txg.ChainId.ToInt()) != 0 {
+ fmt.Printf("Tx %d different ChainId: %d %d\n", i, tx.ChainId.ToInt(), txg.ChainId.ToInt())
+ return false
+ }
+ }
+ for i, rcp := range r.Receipts {
+ rcpg := rg.Receipts[i]
+ if rcp.From != rcpg.From {
+ fmt.Printf("Receipt %d different From: %x %x\n", i, rcp.From, rcpg.From)
+ return false
+ }
+ if (rcp.To == nil && rcpg.To != nil) || (rcp.To != nil && rcpg.To == nil) {
+ fmt.Printf("Receipt %d different To nilness: %t %t\n", i, rcp.To == nil, rcpg.To == nil)
+ return false
+ }
+ if rcp.To != nil && rcpg.To != nil && *rcp.To != *rcpg.To {
+ fmt.Printf("Receipt %d different To: %x %x\n", i, *rcp.To, *rcpg.To)
+ return false
+ }
+ if rcp.BlockHash != rcpg.BlockHash {
+ fmt.Printf("Receipt %d different Hash: %s %s\n", i, rcp.BlockHash, rcpg.BlockHash)
+ return false
+ }
+ if rcp.ContractAddress != rcpg.ContractAddress {
+ fmt.Printf("Receipt %d different ContractAddress: %s %s\n", i, rcp.ContractAddress, rcpg.ContractAddress)
+ return false
+ }
+ if rcp.CumulativeGasUsed.ToInt().Cmp(rcpg.CumulativeGasUsed.ToInt()) != 0 {
+ fmt.Printf("Receipt %d different CumulativeGasUsed: %d %d\n", i, rcp.CumulativeGasUsed.ToInt(), rcpg.CumulativeGasUsed.ToInt())
+ return false
+ }
+ if rcp.EffectiveGasPrice.ToInt().Cmp(rcpg.EffectiveGasPrice.ToInt()) != 0 {
+ fmt.Printf("Receipt %d different EffectiveGasPrice: %d %d\n", i, rcp.EffectiveGasPrice.ToInt(), rcpg.EffectiveGasPrice.ToInt())
+ return false
+ }
+ if rcp.GasUsed.ToInt().Cmp(rcpg.GasUsed.ToInt()) != 0 {
+ fmt.Printf("Receipt %d different GasUsed: %d %d\n", i, rcp.GasUsed.ToInt(), rcpg.GasUsed.ToInt())
+ return false
+ }
+ if rcp.TransactionHash != rcpg.TransactionHash {
+ fmt.Printf("Receipt %d different TransactionHash: %s %s\n", i, rcp.TransactionHash, rcpg.TransactionHash)
+ return false
+ }
+ if rcp.BlockHash.String() != rcpg.BlockHash.String() {
+ fmt.Printf("Receipt %d different TransactionHash: %s %s\n", i, rcp.BlockHash.String(), rcpg.BlockHash.String())
+ return false
+ }
+ if rcp.BlockNumber.String() != rcpg.BlockNumber.String() {
+ fmt.Printf("Receipt %d different TransactionHash: %s %s\n", i, rcp.BlockNumber.String(), rcpg.BlockNumber.String())
+ return false
+ }
+ }
+ return true
+}
diff --git a/cmd/sentinel/cli/cliSettings.go b/cmd/sentinel/cli/cliSettings.go
deleted file mode 100644
index a10c50f16c7..00000000000
--- a/cmd/sentinel/cli/cliSettings.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package cli
-
-import (
- "errors"
- "fmt"
- "os"
- "strings"
- "time"
-
- "github.com/ledgerwatch/erigon/cl/phase1/core/rawdb"
- "github.com/ledgerwatch/erigon/cl/phase1/core/state"
- "github.com/ledgerwatch/erigon/common"
-
- "github.com/ledgerwatch/erigon/cmd/utils"
- "github.com/urfave/cli/v2"
-
- "github.com/ledgerwatch/erigon/cl/clparams"
- "github.com/ledgerwatch/erigon/cmd/sentinel/cli/flags"
- "github.com/ledgerwatch/erigon/turbo/logging"
-
- "github.com/ledgerwatch/log/v3"
-)
-
-type ConsensusClientCliCfg struct {
- GenesisCfg *clparams.GenesisConfig
- BeaconCfg *clparams.BeaconChainConfig
- NetworkCfg *clparams.NetworkConfig
- BeaconDataCfg *rawdb.BeaconDataConfig
- Port uint `json:"port"`
- Addr string `json:"address"`
- ServerAddr string `json:"serverAddr"`
- ServerProtocol string `json:"serverProtocol"`
- ServerTcpPort uint `json:"serverTcpPort"`
- LogLvl uint `json:"logLevel"`
- NoDiscovery bool `json:"noDiscovery"`
- LocalDiscovery bool `json:"localDiscovery"`
- CheckpointUri string `json:"checkpointUri"`
- Chaindata string `json:"chaindata"`
- ErigonPrivateApi string `json:"erigonPrivateApi"`
- TransitionChain bool `json:"transitionChain"`
- NetworkType clparams.NetworkType
- InitialSync bool `json:"initialSync"`
- NoBeaconApi bool `json:"noBeaconApi"`
- BeaconApiReadTimeout time.Duration `json:"beaconApiReadTimeout"`
- BeaconApiWriteTimeout time.Duration `json:"beaconApiWriteTimeout"`
- BeaconAddr string `json:"beaconAddr"`
- BeaconProtocol string `json:"beaconProtocol"`
- RecordMode bool `json:"recordMode"`
- RecordDir string `json:"recordDir"`
- RunEngineAPI bool `json:"run_engine_api"`
- EngineAPIAddr string `json:"engine_api_addr"`
- EngineAPIPort int `json:"engine_api_port"`
- JwtSecret []byte
-
- InitalState *state.CachingBeaconState
-}
-
-func SetupConsensusClientCfg(ctx *cli.Context) (*ConsensusClientCliCfg, error) {
- cfg := &ConsensusClientCliCfg{}
- chainName := ctx.String(flags.Chain.Name)
- var err error
- cfg.GenesisCfg, cfg.NetworkCfg, cfg.BeaconCfg, cfg.NetworkType, err = clparams.GetConfigsByNetworkName(chainName)
- if err != nil {
- return nil, err
- }
- cfg.ErigonPrivateApi = ctx.String(flags.ErigonPrivateApiFlag.Name)
- if ctx.String(flags.BeaconConfigFlag.Name) != "" {
- cfg.BeaconCfg = new(clparams.BeaconChainConfig)
- if *cfg.BeaconCfg, err = clparams.CustomConfig(ctx.String(flags.BeaconConfigFlag.Name)); err != nil {
- return nil, err
- }
- if ctx.String(flags.GenesisSSZFlag.Name) == "" {
- return nil, fmt.Errorf("no genesis file provided")
- }
- cfg.GenesisCfg = new(clparams.GenesisConfig)
- var stateByte []byte
- // Now parse genesis time and genesis fork
- if *cfg.GenesisCfg, stateByte, err = clparams.ParseGenesisSSZToGenesisConfig(
- ctx.String(flags.GenesisSSZFlag.Name),
- cfg.BeaconCfg.GetCurrentStateVersion(0)); err != nil {
- return nil, err
- }
- cfg.InitalState = state.New(cfg.BeaconCfg)
- if cfg.InitalState.DecodeSSZ(stateByte, int(cfg.BeaconCfg.GetCurrentStateVersion(0))); err != nil {
- return nil, err
- }
- }
- cfg.ServerAddr = fmt.Sprintf("%s:%d", ctx.String(flags.SentinelServerAddr.Name), ctx.Int(flags.SentinelServerPort.Name))
- cfg.ServerProtocol = "tcp"
-
- cfg.NoBeaconApi = ctx.Bool(flags.NoBeaconApi.Name)
- cfg.BeaconApiReadTimeout = time.Duration(ctx.Uint64(flags.BeaconApiReadTimeout.Name)) * time.Second
- cfg.BeaconApiWriteTimeout = time.Duration(ctx.Uint(flags.BeaconApiWriteTimeout.Name)) * time.Second
- cfg.BeaconAddr = fmt.Sprintf("%s:%d", ctx.String(flags.BeaconApiAddr.Name), ctx.Int(flags.BeaconApiPort.Name))
- cfg.BeaconProtocol = "tcp"
- cfg.RecordMode = ctx.Bool(flags.RecordModeFlag.Name)
- cfg.RecordDir = ctx.String(flags.RecordModeDir.Name)
-
- cfg.RunEngineAPI = ctx.Bool(flags.RunEngineAPI.Name)
- cfg.EngineAPIAddr = ctx.String(flags.EngineApiHostFlag.Name)
- cfg.EngineAPIPort = ctx.Int(flags.EngineApiPortFlag.Name)
- if cfg.RunEngineAPI {
- secret, err := ObtainJwtSecret(ctx)
- if err != nil {
- log.Error("Failed to obtain jwt secret", "err", err)
- cfg.RunEngineAPI = false
- } else {
- cfg.JwtSecret = secret
- }
- }
-
- cfg.Port = uint(ctx.Int(flags.SentinelDiscoveryPort.Name))
- cfg.Addr = ctx.String(flags.SentinelDiscoveryAddr.Name)
-
- cfg.LogLvl = ctx.Uint(logging.LogVerbosityFlag.Name)
- fmt.Println(cfg.LogLvl)
- if cfg.LogLvl == uint(log.LvlInfo) || cfg.LogLvl == 0 {
- cfg.LogLvl = uint(log.LvlDebug)
- }
- cfg.NoDiscovery = ctx.Bool(flags.NoDiscovery.Name)
- cfg.LocalDiscovery = ctx.Bool(flags.LocalDiscovery.Name)
- if ctx.String(flags.CheckpointSyncUrlFlag.Name) != "" {
- cfg.CheckpointUri = ctx.String(flags.CheckpointSyncUrlFlag.Name)
- } else {
- cfg.CheckpointUri = clparams.GetCheckpointSyncEndpoint(cfg.NetworkType)
- fmt.Println(cfg.CheckpointUri)
- }
- cfg.Chaindata = ctx.String(flags.ChaindataFlag.Name)
- cfg.BeaconDataCfg = rawdb.BeaconDataConfigurations[ctx.String(flags.BeaconDBModeFlag.Name)]
- // Process bootnodes
- if ctx.String(flags.BootnodesFlag.Name) != "" {
- cfg.NetworkCfg.BootNodes = utils.SplitAndTrim(ctx.String(flags.BootnodesFlag.Name))
- }
- if ctx.String(flags.SentinelStaticPeersFlag.Name) != "" {
- cfg.NetworkCfg.StaticPeers = utils.SplitAndTrim(ctx.String(flags.SentinelStaticPeersFlag.Name))
- fmt.Println(cfg.NetworkCfg.StaticPeers)
- }
- cfg.TransitionChain = ctx.Bool(flags.TransitionChainFlag.Name)
- cfg.InitialSync = ctx.Bool(flags.InitSyncFlag.Name)
- return cfg, nil
-}
-
-func ObtainJwtSecret(ctx *cli.Context) ([]byte, error) {
- path := ctx.String(flags.JwtSecret.Name)
- if len(strings.TrimSpace(path)) == 0 {
- return nil, errors.New("Missing jwt secret path")
- }
-
- data, err := os.ReadFile(path)
- if err != nil {
- return nil, err
- }
- jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
- if len(jwtSecret) == 32 {
- return jwtSecret, nil
- }
-
- return nil, fmt.Errorf("Invalid JWT secret at %s, invalid size", path)
-}
diff --git a/cmd/sentinel/cli/flags/defaultFlags.go b/cmd/sentinel/cli/flags/defaultFlags.go
deleted file mode 100644
index 2c20f457c47..00000000000
--- a/cmd/sentinel/cli/flags/defaultFlags.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package flags
-
-import "github.com/urfave/cli/v2"
-
-var CLDefaultFlags = []cli.Flag{
- &SentinelDiscoveryPort,
- &SentinelDiscoveryAddr,
- &SentinelServerPort,
- &SentinelServerAddr,
- &NoBeaconApi,
- &BeaconApiReadTimeout,
- &BeaconApiWriteTimeout,
- &BeaconApiPort,
- &BeaconApiAddr,
- &Chain,
- &SentinelTcpPort,
- &NoDiscovery,
- &ChaindataFlag,
- &BeaconDBModeFlag,
- &BootnodesFlag,
- &BeaconConfigFlag,
- &GenesisSSZFlag,
- &CheckpointSyncUrlFlag,
- &SentinelStaticPeersFlag,
- &TransitionChainFlag,
- &InitSyncFlag,
- &RecordModeDir,
- &RecordModeFlag,
- &RunEngineAPI,
- &EngineApiHostFlag,
- &EngineApiPortFlag,
- &JwtSecret,
-}
diff --git a/cmd/sentinel/main.go b/cmd/sentinel/main.go
index ec604b71124..3208d9c3e36 100644
--- a/cmd/sentinel/main.go
+++ b/cmd/sentinel/main.go
@@ -1,15 +1,13 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2022 Erigon-Lightclient contributors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package main
@@ -18,18 +16,19 @@ import (
"fmt"
"os"
+ "github.com/ledgerwatch/erigon/cl/sentinel"
+ "github.com/ledgerwatch/erigon/cl/sentinel/service"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelcli"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelflags"
+
"github.com/ledgerwatch/log/v3"
"github.com/urfave/cli/v2"
- lcCli "github.com/ledgerwatch/erigon/cmd/sentinel/cli"
- "github.com/ledgerwatch/erigon/cmd/sentinel/cli/flags"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel"
- "github.com/ledgerwatch/erigon/cmd/sentinel/sentinel/service"
sentinelapp "github.com/ledgerwatch/erigon/turbo/app"
)
func main() {
- app := sentinelapp.MakeApp("sentinel", runSentinelNode, flags.CLDefaultFlags)
+ app := sentinelapp.MakeApp("sentinel", runSentinelNode, sentinelflags.CliFlags)
if err := app.Run(os.Args); err != nil {
_, printErr := fmt.Fprintln(os.Stderr, err)
if printErr != nil {
@@ -40,11 +39,13 @@ func main() {
}
func runSentinelNode(cliCtx *cli.Context) error {
- cfg, _ := lcCli.SetupConsensusClientCfg(cliCtx)
-
+ cfg, err := sentinelcli.SetupSentinelCli(cliCtx)
+ if err != nil {
+ return err
+ }
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(cfg.LogLvl), log.StderrHandler))
log.Info("[Sentinel] running sentinel with configuration", "cfg", cfg)
- _, err := service.StartSentinelService(&sentinel.SentinelConfig{
+ _, err = service.StartSentinelService(&sentinel.SentinelConfig{
IpAddr: cfg.Addr,
Port: int(cfg.Port),
TCPPort: cfg.ServerTcpPort,
diff --git a/cmd/sentinel/sentinel/communication/send_request.go b/cmd/sentinel/sentinel/communication/send_request.go
deleted file mode 100644
index fec0bff29f2..00000000000
--- a/cmd/sentinel/sentinel/communication/send_request.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package communication
-
-import (
- "context"
- "fmt"
- "io"
- "time"
-
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/protocol"
-)
-
-var NoRequestHandlers = map[string]bool{
- MetadataProtocolV1: true,
- MetadataProtocolV2: true,
-}
-
-type response struct {
- data []byte
- code byte
- err error
-}
-
-func SendRequestRawToPeer(ctx context.Context, host host.Host, data []byte, topic string, peerId peer.ID) ([]byte, byte, error) {
-
- nctx, cn := context.WithTimeout(ctx, 5*time.Second)
- defer cn()
- stream, err := writeRequestRaw(host, nctx, data, peerId, topic)
- if err != nil {
- return nil, 189, err
- }
- defer stream.Close()
-
- ch := make(chan response)
- go func() {
- res := verifyResponse(stream, peerId)
- select {
- case <-ctx.Done():
- return
- default:
- }
- ch <- res
- }()
- select {
- case <-ctx.Done():
- stream.Reset()
- return nil, 189, ctx.Err()
- case ans := <-ch:
- if ans.err != nil {
- ans.code = 189
- }
- return ans.data, ans.code, ans.err
- }
-}
-
-func writeRequestRaw(host host.Host, ctx context.Context, data []byte, peerId peer.ID, topic string) (network.Stream, error) {
- stream, err := host.NewStream(ctx, peerId, protocol.ID(topic))
- if err != nil {
- return nil, fmt.Errorf("failed to begin stream, err=%s", err)
- }
-
- if _, ok := NoRequestHandlers[topic]; !ok {
- if _, err := stream.Write(data); err != nil {
- return nil, err
- }
- }
-
- return stream, stream.CloseWrite()
-}
-
-func verifyResponse(stream network.Stream, peerId peer.ID) (resp response) {
- code := make([]byte, 1)
- _, resp.err = stream.Read(code)
- if resp.err != nil {
- return
- }
- resp.code = code[0]
- resp.data, resp.err = io.ReadAll(stream)
- if resp.err != nil {
- return
- }
- return
-}
diff --git a/cmd/sentinel/sentinel/gossip.go b/cmd/sentinel/sentinel/gossip.go
deleted file mode 100644
index 2f38d3b7836..00000000000
--- a/cmd/sentinel/sentinel/gossip.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- Copyright 2022 Erigon-Lightclient contributors
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package sentinel
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- "github.com/ledgerwatch/log/v3"
- pubsub "github.com/libp2p/go-libp2p-pubsub"
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-// GossipSubscription abstracts a gossip subscription to write decoded structs.
-type GossipSubscription struct {
- gossip_topic GossipTopic
- host peer.ID
- ch chan *pubsub.Message
- ctx context.Context
-
- topic *pubsub.Topic
- sub *pubsub.Subscription
-
- cf context.CancelFunc
- rf pubsub.RelayCancelFunc
-
- setup sync.Once
- stopCh chan struct{}
-}
-
-func (sub *GossipSubscription) Listen() (err error) {
- sub.setup.Do(func() {
- sub.stopCh = make(chan struct{}, 3)
- sub.sub, err = sub.topic.Subscribe()
- if err != nil {
- err = fmt.Errorf("failed to begin topic %s subscription, err=%w", sub.topic.String(), err)
- return
- }
- var sctx context.Context
- sctx, sub.cf = context.WithCancel(sub.ctx)
- go sub.run(sctx, sub.sub, sub.sub.Topic())
- })
- return nil
-}
-
-// calls the cancel func for the subscriber and closes the topic and sub
-func (s *GossipSubscription) Close() {
- s.stopCh <- struct{}{}
- if s.cf != nil {
- s.cf()
- }
- if s.rf != nil {
- s.rf()
- }
- if s.sub != nil {
- s.sub.Cancel()
- s.sub = nil
- }
- if s.topic != nil {
- s.topic.Close()
- s.topic = nil
- }
-}
-
-// this is a helper to begin running the gossip subscription.
-// function should not be used outside of the constructor for gossip subscription
-func (s *GossipSubscription) run(ctx context.Context, sub *pubsub.Subscription, topic string) {
- defer func() {
- if r := recover(); r != nil {
- log.Error("[Sentinel Gossip] Message Handler Crashed", "err", r)
- }
- }()
- for {
- select {
- case <-ctx.Done():
- return
- case <-s.stopCh:
- return
- default:
- msg, err := sub.Next(ctx)
- if err != nil {
- if errors.Is(err, context.Canceled) {
- return
- }
- log.Warn("[Sentinel] fail to decode gossip packet", "err", err, "topic", topic)
- return
- }
- if msg.GetFrom() == s.host {
- continue
- }
- s.ch <- msg
- }
- }
-}
-
-func (g *GossipSubscription) Publish(data []byte) error {
- return g.topic.Publish(g.ctx, data)
-}
diff --git a/cmd/sentinel/sentinel/peers/manager.go b/cmd/sentinel/sentinel/peers/manager.go
deleted file mode 100644
index 51840e54fee..00000000000
--- a/cmd/sentinel/sentinel/peers/manager.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package peers
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
- "github.com/ledgerwatch/erigon/metrics/methelp"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-const (
- maxBadPeers = 50000
- maxPeerRecordSize = 1000
- DefaultMaxPeers = 33
- MaxBadResponses = 50
-)
-
-func newPeer() *Peer {
- return &Peer{
- lastTouched: time.Now(),
- working: make(chan struct{}),
- }
-}
-
-type Manager struct {
- host host.Host
- peers *lru.Cache[peer.ID, *Peer]
- peerTimeout time.Duration
-
- mu sync.Mutex
-}
-
-func NewManager(ctx context.Context, host host.Host) *Manager {
- c, err := lru.New[peer.ID, *Peer]("beacon_peer_manager", 500)
- if err != nil {
- panic(err)
- }
- m := &Manager{
- peerTimeout: 8 * time.Hour,
- peers: c,
- host: host,
- }
- go m.run(ctx)
- return m
-}
-
-func (m *Manager) getPeer(id peer.ID) (peer *Peer) {
- m.mu.Lock()
- p, ok := m.peers.Get(id)
- if !ok {
- p = &Peer{
- pid: id,
- working: make(chan struct{}, 1),
- m: m,
- Penalties: 0,
- Banned: false,
- }
- m.peers.Add(id, p)
- }
- p.lastTouched = time.Now()
- m.mu.Unlock()
- return p
-}
-func (m *Manager) CtxPeer(ctx context.Context, id peer.ID, fn func(peer *Peer)) error {
- p := m.getPeer(id)
- select {
- case p.working <- struct{}{}:
- case <-ctx.Done():
- return ctx.Err()
- }
- fn(p)
- <-p.working
- return nil
-}
-
-func (m *Manager) TryPeer(id peer.ID, fn func(peer *Peer, ok bool)) {
- p := m.getPeer(id)
- select {
- case p.working <- struct{}{}:
- default:
- fn(nil, false)
- return
- }
- fn(p, true)
- <-p.working
-}
-
-// WithPeer will get the peer with id and run your lambda with it. it will update the last queried time
-// It will do all synchronization and so you can use the peer thread safe inside
-func (m *Manager) WithPeer(id peer.ID, fn func(peer *Peer)) {
- if fn == nil {
- return
- }
- p := m.getPeer(id)
- p.working <- struct{}{}
- defer func() {
- <-p.working
- }()
- fn(p)
-}
-
-func (m *Manager) run(ctx context.Context) {
- m1 := time.NewTicker(1 * time.Hour)
- for {
- select {
- case <-m1.C:
- m.gc()
- case <-ctx.Done():
- m1.Stop()
- return
- }
- }
-}
-
-// any extra GC policies that the lru does not suffice.
-// maybe we dont need
-func (m *Manager) gc() {
- m.mu.Lock()
- defer m.mu.Unlock()
- t := methelp.NewHistTimer("beacon_peer_manager_gc_time")
- defer t.PutSince()
- deleted := 0
- saw := 0
- n := time.Now()
- for _, k := range m.peers.Keys() {
- v, ok := m.peers.Get(k)
- if !ok {
- continue
- }
- saw = saw + 1
- if n.Sub(v.lastTouched) > m.peerTimeout {
- deleted = deleted + 1
- m.peers.Remove(k)
- }
- }
-}
diff --git a/cmd/sentinel/sentinel/peers/peer.go b/cmd/sentinel/sentinel/peers/peer.go
deleted file mode 100644
index d6ec928b40d..00000000000
--- a/cmd/sentinel/sentinel/peers/peer.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package peers
-
-import (
- "strings"
- "time"
-
- "github.com/ledgerwatch/log/v3"
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-// Record Peer data.
-type Peer struct {
- Penalties int
- Banned bool
- InRequest bool
-
- // request info
- lastRequest time.Time
- successCount int
- useCount int
- // gc data
- lastTouched time.Time
- // acts as the mutex. channel used to avoid use of TryLock
- working chan struct{}
- // peer id
- pid peer.ID
- // backref to the manager that owns this peer
- m *Manager
-}
-
-func (p *Peer) ID() peer.ID {
- return p.pid
-}
-func (p *Peer) Penalize() {
- log.Debug("[Sentinel Peers] peer penalized", "peer-id", p.pid)
- p.Penalties++
-}
-
-func (p *Peer) Forgive() {
- log.Debug("[Sentinel Peers] peer forgiven", "peer-id", p.pid)
- if p.Penalties > 0 {
- p.Penalties--
- }
-}
-
-func (p *Peer) MarkUsed() {
- p.useCount++
- log.Trace("[Sentinel Peers] peer used", "peer-id", p.pid, "uses", p.useCount)
- p.lastRequest = time.Now()
-}
-
-func (p *Peer) MarkReplied() {
- p.successCount++
- log.Debug("[Sentinel Peers] peer replied", "peer-id", p.pid, "uses", p.useCount, "success", p.successCount)
-}
-
-func (p *Peer) IsAvailable() (available bool) {
- if p.Banned {
- return false
- }
- if p.Penalties > MaxBadResponses {
- return false
- }
- if time.Now().Sub(p.lastRequest) > 0*time.Second {
- return true
- }
- return false
-}
-
-func (p *Peer) IsBad() (bad bool) {
- if p.Banned {
- bad = true
- return
- }
- bad = p.Penalties > MaxBadResponses
- return
-}
-
-var skipReasons = []string{
- "bad handshake",
- "context",
- "security protocol",
- "connect:",
- "dial backoff",
-}
-
-func anySetInString(set []string, in string) bool {
- for _, v := range skipReasons {
- if strings.Contains(in, v) {
- return true
- }
- }
- return false
-}
-
-func (p *Peer) Disconnect(reason ...string) {
- rzn := strings.Join(reason, " ")
- if !anySetInString(skipReasons, rzn) {
- log.Debug("[Sentinel Peers] disconnecting from peer", "peer-id", p.pid, "reason", strings.Join(reason, " "))
- }
- p.m.host.Peerstore().RemovePeer(p.pid)
- p.m.host.Network().ClosePeer(p.pid)
- p.Penalties = 0
-}
-func (p *Peer) Ban(reason ...string) {
- log.Debug("[Sentinel Peers] bad peers has been banned", "peer-id", p.pid, "reason", strings.Join(reason, " "))
- p.Banned = true
- p.Disconnect(reason...)
- return
-}
diff --git a/cmd/sentinel/sentinelcli/cliSettings.go b/cmd/sentinel/sentinelcli/cliSettings.go
new file mode 100644
index 00000000000..ce2e0490986
--- /dev/null
+++ b/cmd/sentinel/sentinelcli/cliSettings.go
@@ -0,0 +1,74 @@
+package sentinelcli
+
+import (
+ "fmt"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cmd/sentinel/sentinelflags"
+ "github.com/ledgerwatch/erigon/cmd/utils"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/ledgerwatch/erigon/turbo/logging"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+type SentinelCliCfg struct {
+ GenesisCfg *clparams.GenesisConfig
+ BeaconCfg *clparams.BeaconChainConfig
+ NetworkCfg *clparams.NetworkConfig
+ NetworkType clparams.NetworkType
+ Port uint `json:"port"`
+ Addr string `json:"address"`
+ ServerAddr string `json:"server_addr"`
+ ServerProtocol string `json:"server_protocol"`
+ ServerTcpPort uint `json:"server_tcp_port"`
+ LogLvl uint `json:"log_level"`
+ NoDiscovery bool `json:"no_discovery"`
+ LocalDiscovery bool `json:"local_discovery"`
+}
+
+func SetupSentinelCli(ctx *cli.Context) (*SentinelCliCfg, error) {
+ cfg := &SentinelCliCfg{}
+ chainName := ctx.String(utils.ChainFlag.Name)
+ var err error
+ cfg.GenesisCfg, cfg.NetworkCfg, cfg.BeaconCfg, cfg.NetworkType, err = clparams.GetConfigsByNetworkName(chainName)
+ if err != nil {
+ return nil, err
+ }
+ if ctx.String(sentinelflags.BeaconConfigFlag.Name) != "" {
+ cfg.BeaconCfg = new(clparams.BeaconChainConfig)
+ if *cfg.BeaconCfg, err = clparams.CustomConfig(ctx.String(sentinelflags.BeaconConfigFlag.Name)); err != nil {
+ return nil, err
+ }
+ if ctx.String(sentinelflags.GenesisSSZFlag.Name) == "" {
+ return nil, fmt.Errorf("no genesis file provided")
+ }
+ cfg.GenesisCfg = new(clparams.GenesisConfig)
+
+ }
+ cfg.ServerAddr = fmt.Sprintf("%s:%d", ctx.String(sentinelflags.SentinelServerAddr.Name), ctx.Int(sentinelflags.SentinelServerPort.Name))
+ cfg.ServerProtocol = "tcp"
+
+ cfg.Port = uint(ctx.Int(sentinelflags.SentinelDiscoveryPort.Name))
+ cfg.Addr = ctx.String(sentinelflags.SentinelDiscoveryAddr.Name)
+
+ cfg.LogLvl = ctx.Uint(logging.LogVerbosityFlag.Name)
+ if cfg.LogLvl == uint(log.LvlInfo) || cfg.LogLvl == 0 {
+ cfg.LogLvl = uint(log.LvlDebug)
+ }
+ cfg.NoDiscovery = ctx.Bool(sentinelflags.NoDiscovery.Name)
+ cfg.LocalDiscovery = ctx.Bool(sentinelflags.LocalDiscovery.Name)
+
+ // Process bootnodes
+ if ctx.String(sentinelflags.BootnodesFlag.Name) != "" {
+ cfg.NetworkCfg.BootNodes = common.CliString2Array(ctx.String(sentinelflags.BootnodesFlag.Name))
+ }
+ if ctx.String(sentinelflags.SentinelStaticPeersFlag.Name) != "" {
+ cfg.NetworkCfg.StaticPeers = common.CliString2Array(ctx.String(sentinelflags.SentinelStaticPeersFlag.Name))
+ }
+ return cfg, nil
+}
diff --git a/cmd/sentinel/sentinelcli/flags/defaultFlags.go b/cmd/sentinel/sentinelcli/flags/defaultFlags.go
new file mode 100644
index 00000000000..0cfcbd53fdd
--- /dev/null
+++ b/cmd/sentinel/sentinelcli/flags/defaultFlags.go
@@ -0,0 +1 @@
+package flags
diff --git a/cmd/sentinel/sentinelcli/flags/flags.go b/cmd/sentinel/sentinelcli/flags/flags.go
new file mode 100644
index 00000000000..0cfcbd53fdd
--- /dev/null
+++ b/cmd/sentinel/sentinelcli/flags/flags.go
@@ -0,0 +1 @@
+package flags
diff --git a/cmd/sentinel/sentinelflags/flags.go b/cmd/sentinel/sentinelflags/flags.go
new file mode 100644
index 00000000000..4a59050d270
--- /dev/null
+++ b/cmd/sentinel/sentinelflags/flags.go
@@ -0,0 +1,79 @@
+package sentinelflags
+
+import (
+ "github.com/ledgerwatch/erigon/cmd/utils"
+ "github.com/urfave/cli/v2"
+)
+
+var CliFlags = []cli.Flag{
+ &utils.ChainFlag,
+
+ &SentinelDiscoveryPort,
+ &SentinelDiscoveryAddr,
+ &SentinelServerPort,
+ &SentinelServerAddr,
+ &SentinelTcpPort,
+ &NoDiscovery,
+ &BootnodesFlag,
+ &BeaconConfigFlag,
+ &GenesisSSZFlag,
+ &SentinelStaticPeersFlag,
+}
+
+var (
+ SentinelDiscoveryPort = cli.IntFlag{
+ Name: "discovery.port",
+ Usage: "sets the lightclient port",
+ Value: 4000,
+ }
+ SentinelDiscoveryAddr = cli.StringFlag{
+ Name: "discovery.addr",
+ Usage: "sets the lightclient discovery addr",
+ Value: "127.0.0.1",
+ }
+ SentinelTcpPort = cli.UintFlag{
+ Name: "sentinel.tcp.port",
+ Usage: "sets lightclient tcp port",
+ Value: 4001,
+ }
+ SentinelServerPort = cli.IntFlag{
+ Name: "sentinel.port",
+ Usage: "sets the lightclient server port",
+ Value: 7777,
+ }
+ SentinelServerAddr = cli.StringFlag{
+ Name: "sentinel.addr",
+ Usage: "sets the lightclient server host addr",
+ Value: "localhost",
+ }
+ NoDiscovery = cli.BoolFlag{
+ Name: "no-discovery",
+ Usage: "turn off or on the lightclient finding peers",
+ Value: false,
+ }
+ LocalDiscovery = cli.BoolFlag{
+ Name: "local-discovery",
+ Usage: "enable to also attempt to find peers over private ips. turning this on may cause issues with hosts such as hetzner",
+ Value: false,
+ }
+ BootnodesFlag = cli.StringFlag{
+ Name: "sentinel.bootnodes",
+ Usage: "Comma separated enode URLs for P2P discovery bootstrap",
+ Value: "",
+ }
+ BeaconConfigFlag = cli.StringFlag{
+ Name: "beacon-config",
+ Usage: "Path to beacon config",
+ Value: "",
+ }
+ GenesisSSZFlag = cli.StringFlag{
+ Name: "genesis-ssz",
+ Usage: "Path to genesis ssz",
+ Value: "",
+ }
+ SentinelStaticPeersFlag = cli.StringFlag{
+ Name: "sentinel.staticpeers",
+ Usage: "connect to comma-separated Consensus static peers",
+ Value: "",
+ }
+)
diff --git a/cmd/sentry/main.go b/cmd/sentry/main.go
index 146ee737baf..abd86dca1a9 100644
--- a/cmd/sentry/main.go
+++ b/cmd/sentry/main.go
@@ -8,9 +8,9 @@ import (
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/spf13/cobra"
- "github.com/ledgerwatch/erigon/cmd/sentry/sentry"
"github.com/ledgerwatch/erigon/cmd/utils"
"github.com/ledgerwatch/erigon/common/paths"
+ "github.com/ledgerwatch/erigon/p2p/sentry"
"github.com/ledgerwatch/erigon/turbo/debug"
"github.com/ledgerwatch/erigon/turbo/logging"
node2 "github.com/ledgerwatch/erigon/turbo/node"
diff --git a/cmd/sentry/sentry/broadcast.go b/cmd/sentry/sentry/broadcast.go
deleted file mode 100644
index 3ec9d82459a..00000000000
--- a/cmd/sentry/sentry/broadcast.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package sentry
-
-import (
- "context"
- "errors"
- "math"
- "math/big"
- "strings"
- "syscall"
-
- proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
- "github.com/ledgerwatch/log/v3"
- "google.golang.org/grpc"
-
- "github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/eth/protocols/eth"
- "github.com/ledgerwatch/erigon/p2p"
- "github.com/ledgerwatch/erigon/rlp"
- "github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
-)
-
-// Methods of sentry called by Core
-
-const (
- // This is the target size for the packs of transactions or announcements. A
- // pack can get larger than this if a single transactions exceeds this size.
- maxTxPacketSize = 100 * 1024
-)
-
-func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces []headerdownload.Announce) {
- cs.lock.RLock()
- defer cs.lock.RUnlock()
- typedRequest := make(eth.NewBlockHashesPacket, len(announces))
- for i := range announces {
- typedRequest[i].Hash = announces[i].Hash
- typedRequest[i].Number = announces[i].Number
- }
- data, err := rlp.EncodeToBytes(&typedRequest)
- if err != nil {
- log.Error("propagateNewBlockHashes", "err", err)
- return
- }
- var req66 *proto_sentry.OutboundMessageData
- // Send the block to a subset of our peers
- sendToAmount := int(math.Sqrt(float64(len(cs.sentries))))
- for i, sentry := range cs.sentries {
- if !sentry.Ready() {
- continue
- }
- if i > sendToAmount { //TODO: send to random sentries, not just to fi
- break
- }
-
- if req66 == nil {
- req66 = &proto_sentry.OutboundMessageData{
- Id: proto_sentry.MessageId_NEW_BLOCK_HASHES_66,
- Data: data,
- }
-
- _, err = sentry.SendMessageToAll(ctx, req66, &grpc.EmptyCallOption{})
- if err != nil {
- log.Error("propagateNewBlockHashes", "err", err)
- }
- }
- }
-}
-
-func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, header *types.Header, body *types.RawBody, td *big.Int) {
- cs.lock.RLock()
- defer cs.lock.RUnlock()
- txs := make([]types.Transaction, len(body.Transactions))
- for i, tx := range body.Transactions {
- var err error
- if txs[i], err = types.DecodeTransaction(tx); err != nil {
- log.Error("broadcastNewBlock", "err", err)
- return
- }
- }
- data, err := rlp.EncodeToBytes(ð.NewBlockPacket{
- Block: types.NewBlock(header, txs, body.Uncles, nil, body.Withdrawals),
- TD: td,
- })
- if err != nil {
- log.Error("broadcastNewBlock", "err", err)
- }
- var req66 *proto_sentry.SendMessageToRandomPeersRequest
- // Send the block to a subset of our peers
- sendToAmount := int(math.Sqrt(float64(len(cs.sentries))))
- for i, sentry := range cs.sentries {
- if !sentry.Ready() {
- continue
- }
- if i > sendToAmount { //TODO: send to random sentries, not just to fi
- break
- }
-
- if req66 == nil {
- req66 = &proto_sentry.SendMessageToRandomPeersRequest{
- MaxPeers: 1024,
- Data: &proto_sentry.OutboundMessageData{
- Id: proto_sentry.MessageId_NEW_BLOCK_66,
- Data: data,
- },
- }
- }
- if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil {
- if isPeerNotFoundErr(err) || networkTemporaryErr(err) {
- log.Debug("broadcastNewBlock", "err", err)
- continue
- }
- log.Error("broadcastNewBlock", "err", err)
- }
- }
-}
-
-func networkTemporaryErr(err error) bool {
- return errors.Is(err, syscall.EPIPE) || errors.Is(err, p2p.ErrShuttingDown)
-}
-func isPeerNotFoundErr(err error) bool {
- return strings.Contains(err.Error(), "peer not found")
-}
diff --git a/cmd/silkworm_api/snapshot_idx.go b/cmd/silkworm_api/snapshot_idx.go
new file mode 100644
index 00000000000..4265ef19471
--- /dev/null
+++ b/cmd/silkworm_api/snapshot_idx.go
@@ -0,0 +1,128 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common/background"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb"
+ "github.com/ledgerwatch/erigon/turbo/debug"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/sync/errgroup"
+)
+
+// Build snapshot indexes for given snapshot files.
+// Sample usage:
+// build_idx --datadir erigon-1 --snapshot_path /snapshots/v1-000000-000500-headers.seg,/snapshots/v1-000500-001000-headers.seg
+
+func main() {
+
+ app := &cli.App{
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "datadir",
+ Value: "./dev",
+ Usage: "node data directory",
+ },
+ &cli.StringSliceFlag{
+ Name: "snapshot_path",
+ Usage: "pathname of the snapshot file",
+ },
+ },
+ Action: func(cCtx *cli.Context) error {
+ return buildIndex(cCtx, cCtx.String("datadir"), cCtx.StringSlice("snapshot_path"))
+ },
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ log.Crit(err.Error())
+ }
+}
+
+func FindIf(segments []snaptype.FileInfo, predicate func(snaptype.FileInfo) bool) (snaptype.FileInfo, bool) {
+ for _, segment := range segments {
+ if predicate(segment) {
+ return segment, true
+ }
+ }
+ return snaptype.FileInfo{}, false // Return zero value and false if not found
+}
+
+func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string) error {
+ logger, _, err := debug.Setup(cliCtx, true /* rootLogger */)
+ if err != nil {
+ return err
+ }
+ logLevel := log.LvlInfo
+
+ ps := background.NewProgressSet()
+
+ workers := 4
+ g, ctx := errgroup.WithContext(cliCtx.Context)
+ g.SetLimit(workers)
+
+ dirs := datadir.New(dataDir)
+
+ chainDB := mdbx.NewMDBX(logger).Path(dirs.Chaindata).MustOpen()
+ defer chainDB.Close()
+
+ chainConfig := fromdb.ChainConfig(chainDB)
+
+ segments, _, err := freezeblocks.Segments(dirs.Snap)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Building indexes:\n- dataDir: %s\n- snapshots: %s\n", dataDir, snapshotPaths)
+ start := time.Now()
+
+ for _, snapshotPath := range snapshotPaths {
+ segment, found := FindIf(segments, func(s snaptype.FileInfo) bool {
+ return s.Path == snapshotPath
+ })
+ if !found {
+ return fmt.Errorf("segment %s not found", snapshotPath)
+ }
+
+ switch segment.T {
+ case snaptype.Headers:
+ g.Go(func() error {
+ jobProgress := &background.Progress{}
+ ps.Add(jobProgress)
+ defer ps.Delete(jobProgress)
+ return freezeblocks.HeadersIdx(ctx, chainConfig, segment.Path, segment.From, dirs.Tmp, jobProgress, logLevel, logger)
+ })
+ case snaptype.Bodies:
+ g.Go(func() error {
+ jobProgress := &background.Progress{}
+ ps.Add(jobProgress)
+ defer ps.Delete(jobProgress)
+ return freezeblocks.BodiesIdx(ctx, segment.Path, segment.From, dirs.Tmp, jobProgress, logLevel, logger)
+ })
+ case snaptype.Transactions:
+ g.Go(func() error {
+ jobProgress := &background.Progress{}
+ ps.Add(jobProgress)
+ defer ps.Delete(jobProgress)
+ dir, _ := filepath.Split(segment.Path)
+ return freezeblocks.TransactionsIdx(ctx, chainConfig, segment.From, segment.To, dir, dirs.Tmp, jobProgress, logLevel, logger)
+ })
+ }
+ }
+
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ elapsed := time.Since(start)
+ fmt.Printf("Indexes for %d snapshots built in %d ms\n", len(snapshotPaths), elapsed.Milliseconds())
+
+ return nil
+}
diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go
index 1c9de463f1d..85308fdcd3a 100644
--- a/cmd/state/commands/check_change_sets.go
+++ b/cmd/state/commands/check_change_sets.go
@@ -11,22 +11,30 @@ import (
"syscall"
"time"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/spf13/cobra"
+
+ chain2 "github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
"github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
- "github.com/ledgerwatch/log/v3"
- "github.com/spf13/cobra"
+ "github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/systemcontracts"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
+ "github.com/ledgerwatch/erigon/eth/ethconsensusconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
+ "github.com/ledgerwatch/erigon/node/nodecfg"
+ "github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/turbo/debug"
+ "github.com/ledgerwatch/erigon/turbo/services"
)
var (
@@ -48,13 +56,13 @@ var checkChangeSetsCmd = &cobra.Command{
Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets",
RunE: func(cmd *cobra.Command, args []string) error {
logger := debug.SetupCobra(cmd, "check_change_sets")
- return CheckChangeSets(genesis, block, chaindata, historyfile, nocheck, logger)
+ return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger)
},
}
// CheckChangeSets re-executes historical transactions in read-only mode
// and checks that their outputs match the database ChangeSets.
-func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
+func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
if len(historyfile) == 0 {
historyfile = chaindata
}
@@ -69,7 +77,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
interruptCh <- true
}()
- db, err := kv2.NewMDBX(logger).Path(chaindata).Open()
+ db, err := kv2.NewMDBX(logger).Path(chaindata).Open(ctx)
if err != nil {
return err
}
@@ -78,7 +86,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
if err := allSnapshots.ReopenFolder(); err != nil {
return fmt.Errorf("reopen snapshot segments: %w", err)
}
- blockReader := freezeblocks.NewBlockReader(allSnapshots)
+ blockReader := freezeblocks.NewBlockReader(allSnapshots, nil /* BorSnapshots */)
chainDb := db
defer chainDb.Close()
@@ -86,7 +94,6 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
if chaindata != historyfile {
historyDb = kv2.MustOpen(historyfile)
}
- ctx := context.Background()
historyTx, err1 := historyDb.BeginRo(ctx)
if err1 != nil {
return err1
@@ -116,7 +123,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
commitEvery := time.NewTicker(30 * time.Second)
defer commitEvery.Stop()
- engine := initConsensusEngine(chainConfig, allSnapshots, logger)
+ engine := initConsensusEngine(ctx, chainConfig, allSnapshots, blockReader, logger)
for !interrupt {
@@ -269,3 +276,20 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
logger.Info("Checked", "blocks", blockNum, "next time specify --block", blockNum, "duration", time.Since(startTime))
return nil
}
+
+func initConsensusEngine(ctx context.Context, cc *chain2.Config, snapshots *freezeblocks.RoSnapshots, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine) {
+ config := ethconfig.Defaults
+
+ var consensusConfig interface{}
+
+ if cc.Clique != nil {
+ consensusConfig = params.CliqueSnapshot
+ } else if cc.Aura != nil {
+ consensusConfig = &config.Aura
+ } else if cc.Bor != nil {
+ consensusConfig = &config.Bor
+ } else {
+ consensusConfig = &config.Ethash
+ }
+ return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, nil /* heimdallClient */, config.WithoutHeimdall, blockReader, true /* readonly */, logger)
+}
diff --git a/cmd/state/commands/erigon4.go b/cmd/state/commands/erigon4.go
deleted file mode 100644
index cb53c2436c6..00000000000
--- a/cmd/state/commands/erigon4.go
+++ /dev/null
@@ -1,619 +0,0 @@
-package commands
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "os"
- "os/signal"
- "path"
- "path/filepath"
- "runtime"
- "syscall"
- "time"
-
- "github.com/VictoriaMetrics/metrics"
- "github.com/holiman/uint256"
- "github.com/ledgerwatch/log/v3"
- "github.com/spf13/cobra"
-
- chain2 "github.com/ledgerwatch/erigon-lib/chain"
- "github.com/ledgerwatch/erigon-lib/commitment"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/datadir"
- "github.com/ledgerwatch/erigon-lib/common/dbg"
- "github.com/ledgerwatch/erigon-lib/common/fixedgas"
- "github.com/ledgerwatch/erigon-lib/kv"
- kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
- libstate "github.com/ledgerwatch/erigon-lib/state"
-
- "github.com/ledgerwatch/erigon/cmd/state/exec3"
- "github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/misc"
- "github.com/ledgerwatch/erigon/core"
- "github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/core/types/accounts"
- "github.com/ledgerwatch/erigon/core/vm"
- "github.com/ledgerwatch/erigon/eth/ethconfig"
- "github.com/ledgerwatch/erigon/eth/ethconsensusconfig"
- "github.com/ledgerwatch/erigon/node/nodecfg"
- "github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/turbo/debug"
- "github.com/ledgerwatch/erigon/turbo/services"
- "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
-)
-
-var (
- blockTo int
- traceBlock int
-)
-
-func init() {
- withBlock(erigon4Cmd)
- withDataDir(erigon4Cmd)
- withChain(erigon4Cmd)
-
- erigon4Cmd.Flags().IntVar(&commitmentFrequency, "commfreq", 125000, "how many blocks to skip between calculating commitment")
- erigon4Cmd.Flags().BoolVar(&commitments, "commitments", false, "set to true to calculate commitments")
- erigon4Cmd.Flags().StringVar(&commitmentMode, "commitments.mode", "direct", "defines the way to calculate commitments: 'direct' mode reads from state directly, 'update' accumulate updates before commitment")
- erigon4Cmd.Flags().Uint64Var(&startTxNumFrom, "tx", 0, "tx number to start from")
- erigon4Cmd.Flags().StringVar(&commitmentTrie, "commitments.trie", "hex", "hex - use Hex Patricia Hashed Trie for commitments, bin - use of binary patricia trie")
- erigon4Cmd.Flags().IntVar(&height, "height", 32, "amount of steps in biggest file")
- erigon4Cmd.Flags().Uint64Var(&stepSize, "step-size", ethconfig.HistoryV3AggregationStep, "amount of tx in one step")
-
- rootCmd.AddCommand(erigon4Cmd)
-}
-
-var (
- startTxNumFrom uint64 // flag --tx
- commitmentMode string // flag --commitments.mode [direct|update]
- logInterval = 30 * time.Second // time period to print aggregation stat to log
- dirtySpaceThreshold = uint64(2 * 1024 * 1024 * 1024) /* threshold of dirty space in MDBX transaction that triggers a commit */
- commitmentFrequency int // How many blocks to skip between calculating commitment
- commitments bool
- commitmentTrie string
-
- height int
- stepSize uint64
-
- blockExecutionTimer = metrics.GetOrCreateSummary("chain_execution_seconds")
- blockRootMismatchExpected bool // if trie variant is not hex, we could not have another rootHash with to verify it
-)
-
-var erigon4Cmd = &cobra.Command{
- Use: "erigon4",
- Short: "Experimental command to re-execute blocks from beginning using erigon2 state representation and history/domain",
- RunE: func(cmd *cobra.Command, args []string) error {
- logger := debug.SetupCobra(cmd, "erigon4")
- return Erigon4(genesis, chainConfig, logger)
- },
-}
-
-func Erigon4(genesis *types.Genesis, chainConfig *chain2.Config, logger log.Logger) error {
- sigs := make(chan os.Signal, 1)
- interruptCh := make(chan bool, 1)
- signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
-
- go func() {
- <-sigs
- interruptCh <- true
- }()
-
- historyDb, err := kv2.NewMDBX(logger).Path(path.Join(datadirCli, "chaindata")).Open()
- if err != nil {
- return fmt.Errorf("opening chaindata as read only: %v", err)
- }
- defer historyDb.Close()
-
- ctx := context.Background()
- historyTx, err1 := historyDb.BeginRo(ctx)
- if err1 != nil {
- return err1
- }
- defer historyTx.Rollback()
-
- stateDbPath := path.Join(datadirCli, "db4")
- if _, err = os.Stat(stateDbPath); err != nil {
- if !errors.Is(err, os.ErrNotExist) {
- return err
- }
- }
- db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).WriteMap().Open()
- if err2 != nil {
- return err2
- }
- defer db.Close()
-
- dirs := datadir.New(datadirCli)
- aggPath := filepath.Join(datadirCli, "erigon4")
-
- var rwTx kv.RwTx
- defer func() {
- if rwTx != nil {
- rwTx.Rollback()
- }
- }()
- if rwTx, err = db.BeginRw(ctx); err != nil {
- return err
- }
-
- trieVariant := commitment.ParseTrieVariant(commitmentTrie)
- if trieVariant != commitment.VariantHexPatriciaTrie {
- blockRootMismatchExpected = true
- }
- mode := libstate.ParseCommitmentMode(commitmentMode)
- logger.Info("aggregator commitment trie", "variant", trieVariant, "mode", mode.String())
-
- agg, err3 := libstate.NewAggregator(aggPath, dirs.Tmp, stepSize, mode, trieVariant, logger)
- if err3 != nil {
- return fmt.Errorf("create aggregator: %w", err3)
- }
- if err := agg.ReopenFolder(); err != nil {
- return err
- }
-
- defer agg.Close()
-
- startTxNum := agg.EndTxNumMinimax()
- fmt.Printf("Max txNum in files: %d\n", startTxNum)
-
- agg.SetTx(rwTx)
- agg.StartWrites()
- defer agg.FinishWrites()
-
- latestBlock, latestTx, err := agg.SeekCommitment()
- if err != nil && startTxNum != 0 {
- return fmt.Errorf("failed to seek commitment to tx %d: %w", startTxNum, err)
- }
- if latestTx > startTxNum {
- fmt.Printf("Max txNum in DB: %d\n", latestTx)
- startTxNum = latestTx
- }
- if startTxNumFrom != 0 {
- startTxNum = startTxNumFrom
- }
-
- interrupt := false
- if startTxNum == 0 {
- genBlock, genesisIbs, err := core.GenesisToBlock(genesis, "")
- if err != nil {
- return err
- }
- agg.SetTxNum(0)
- if err = genesisIbs.CommitBlock(&chain2.Rules{}, &StateWriterV4{w: agg}); err != nil {
- return fmt.Errorf("cannot write state: %w", err)
- }
-
- blockRootHash, err := agg.ComputeCommitment(true, false)
- if err != nil {
- return err
- }
- if err = agg.FinishTx(); err != nil {
- return err
- }
-
- genesisRootHash := genBlock.Root()
- if !bytes.Equal(blockRootHash, genesisRootHash[:]) {
- return fmt.Errorf("genesis root hash mismatch: expected %x got %x", genesisRootHash, blockRootHash)
- }
- }
-
- logger.Info("Initialised chain configuration", "startTxNum", startTxNum, "block", latestBlock, "config", chainConfig)
-
- var (
- blockNum uint64
- trace bool
- vmConfig vm.Config
- txNum uint64 = 2 // Consider that each block contains at least first system tx and enclosing transactions, except for Clique consensus engine
- started = time.Now()
- )
-
- if startTxNum != 0 {
- txNum = startTxNum
- blockNum = latestBlock
- }
-
- logEvery := time.NewTicker(logInterval)
- defer logEvery.Stop()
-
- statx := &stat23{
- prevBlock: blockNum,
- prevTime: time.Now(),
- }
-
- go func() {
- for range logEvery.C {
- aStats := agg.Stats()
- statx.delta(aStats, blockNum, txNum).print(aStats, logger)
- }
- }()
-
- var blockReader services.FullBlockReader
- var allSnapshots = freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"), logger)
- defer allSnapshots.Close()
- if err := allSnapshots.ReopenFolder(); err != nil {
- return fmt.Errorf("reopen snapshot segments: %w", err)
- }
- blockReader = freezeblocks.NewBlockReader(allSnapshots)
- engine := initConsensusEngine(chainConfig, allSnapshots, logger)
-
- getHeader := func(hash libcommon.Hash, number uint64) *types.Header {
- h, err := blockReader.Header(ctx, historyTx, hash, number)
- if err != nil {
- panic(err)
- }
- return h
- }
- readWrapper := &StateReaderV4{ac: agg.MakeContext(), roTx: rwTx}
- writeWrapper := &StateWriterV4{w: agg}
-
- commitFn := func(txn uint64) error {
- if db == nil || rwTx == nil {
- return fmt.Errorf("commit failed due to invalid db/rwTx")
- }
- var spaceDirty uint64
- if spaceDirty, _, err = rwTx.(*kv2.MdbxTx).SpaceDirty(); err != nil {
- return fmt.Errorf("retrieving spaceDirty: %w", err)
- }
- if spaceDirty >= dirtySpaceThreshold {
- logger.Info("Initiated tx commit", "block", blockNum, "space dirty", libcommon.ByteCount(spaceDirty))
- }
- logger.Info("database commitment", "block", blockNum, "txNum", txn, "uptime", time.Since(started))
- if err := agg.Flush(ctx); err != nil {
- return err
- }
- if err = rwTx.Commit(); err != nil {
- return err
- }
- if interrupt {
- return nil
- }
-
- if rwTx, err = db.BeginRw(ctx); err != nil {
- return err
- }
-
- readWrapper.ac.Close()
- agg.SetTx(rwTx)
- readWrapper.roTx = rwTx
- readWrapper.ac = agg.MakeContext()
- return nil
- }
-
- mergedRoots := agg.AggregatedRoots()
- for !interrupt {
- blockNum++
- trace = traceBlock > 0 && blockNum == uint64(traceBlock)
- blockHash, err := blockReader.CanonicalHash(ctx, historyTx, blockNum)
- if err != nil {
- return err
- }
-
- b, _, err := blockReader.BlockWithSenders(ctx, historyTx, blockHash, blockNum)
- if err != nil {
- return err
- }
- if b == nil {
- logger.Info("history: block is nil", "block", blockNum)
- break
- }
- agg.SetTx(rwTx)
- agg.SetTxNum(txNum)
- agg.SetBlockNum(blockNum)
-
- if txNum, _, err = processBlock23(startTxNum, trace, txNum, readWrapper, writeWrapper, chainConfig, engine, getHeader, b, vmConfig, logger); err != nil {
- logger.Error("processing error", "block", blockNum, "err", err)
- return fmt.Errorf("processing block %d: %w", blockNum, err)
- }
-
- // Check for interrupts
- select {
- case interrupt = <-interruptCh:
- // Commit transaction only when interrupted or just before computing commitment (so it can be re-done)
- if err := agg.Flush(ctx); err != nil {
- logger.Error("aggregator flush", "err", err)
- }
-
- logger.Info(fmt.Sprintf("interrupted, please wait for cleanup, next time start with --tx %d", agg.Stats().TxCount))
- if err := commitFn(txNum); err != nil {
- logger.Error("db commit", "err", err)
- }
- case <-mergedRoots:
- if err := commitFn(txNum); err != nil {
- logger.Error("db commit on merge", "err", err)
- }
- default:
- }
- }
-
- return nil
-}
-
-type stat23 struct {
- blockNum uint64
- hits uint64
- misses uint64
- prevBlock uint64
- hitMissRatio float64
- blockSpeed float64
- txSpeed float64
- txNum uint64
- prevTxNum uint64
- prevTime time.Time
- mem runtime.MemStats
-}
-
-func (s *stat23) print(aStats libstate.FilesStats, logger log.Logger) {
- totalFiles := aStats.FilesCount
- totalDatSize := aStats.DataSize
- totalIdxSize := aStats.IdxSize
-
- logger.Info("Progress", "block", s.blockNum, "blk/s", s.blockSpeed, "tx", s.txNum, "txn/s", s.txSpeed, "state files", totalFiles,
- "total dat", libcommon.ByteCount(totalDatSize), "total idx", libcommon.ByteCount(totalIdxSize),
- "hit ratio", s.hitMissRatio, "hits+misses", s.hits+s.misses,
- "alloc", libcommon.ByteCount(s.mem.Alloc), "sys", libcommon.ByteCount(s.mem.Sys),
- )
-}
-
-func (s *stat23) delta(aStats libstate.FilesStats, blockNum, txNum uint64) *stat23 {
- currentTime := time.Now()
- dbg.ReadMemStats(&s.mem)
-
- interval := currentTime.Sub(s.prevTime).Seconds()
- s.blockNum = blockNum
- s.blockSpeed = float64(s.blockNum-s.prevBlock) / interval
- s.txNum = txNum
- s.txSpeed = float64(s.txNum-s.prevTxNum) / interval
- s.prevBlock = blockNum
- s.prevTxNum = txNum
- s.prevTime = currentTime
-
- total := s.hits + s.misses
- if total > 0 {
- s.hitMissRatio = float64(s.hits) / float64(total)
- }
- return s
-}
-
-func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *StateReaderV4, ww *StateWriterV4, chainConfig *chain2.Config,
- engine consensus.Engine, getHeader func(hash libcommon.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config,
- logger log.Logger,
-) (uint64, types.Receipts, error) {
- defer blockExecutionTimer.UpdateDuration(time.Now())
-
- header := block.Header()
- vmConfig.Debug = true
- gp := new(core.GasPool).AddGas(block.GasLimit()).AddBlobGas(fixedgas.MaxBlobGasPerBlock)
- usedGas := new(uint64)
- usedBlobGas := new(uint64)
- var receipts types.Receipts
- rules := chainConfig.Rules(block.NumberU64(), block.Time())
- txNum := txNumStart
- ww.w.SetTxNum(txNum)
-
- rw.blockNum = block.NumberU64()
-
- daoFork := txNum >= startTxNum && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0
- if daoFork {
- ibs := state.New(rw)
- // TODO Actually add tracing to the DAO related accounts
- misc.ApplyDAOHardFork(ibs)
- if err := ibs.FinalizeTx(rules, ww); err != nil {
- return 0, nil, err
- }
- if err := ww.w.FinishTx(); err != nil {
- return 0, nil, fmt.Errorf("finish daoFork failed: %w", err)
- }
- }
-
- txNum++ // Pre-block transaction
- ww.w.SetTxNum(txNum)
- if err := ww.w.FinishTx(); err != nil {
- return 0, nil, fmt.Errorf("finish pre-block tx %d (block %d) has failed: %w", txNum, block.NumberU64(), err)
- }
-
- getHashFn := core.GetHashFn(header, getHeader)
- for i, tx := range block.Transactions() {
- if txNum >= startTxNum {
- ibs := state.New(rw)
- ibs.SetTxContext(tx.Hash(), block.Hash(), i)
- ct := exec3.NewCallTracer()
- vmConfig.Tracer = ct
- receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, usedBlobGas, vmConfig)
- if err != nil {
- return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err)
- }
- for from := range ct.Froms() {
- if err := ww.w.AddTraceFrom(from[:]); err != nil {
- return 0, nil, err
- }
- }
- for to := range ct.Tos() {
- if err := ww.w.AddTraceTo(to[:]); err != nil {
- return 0, nil, err
- }
- }
- receipts = append(receipts, receipt)
- for _, log := range receipt.Logs {
- if err = ww.w.AddLogAddr(log.Address[:]); err != nil {
- return 0, nil, fmt.Errorf("adding event log for addr %x: %w", log.Address, err)
- }
- for _, topic := range log.Topics {
- if err = ww.w.AddLogTopic(topic[:]); err != nil {
- return 0, nil, fmt.Errorf("adding event log for topic %x: %w", topic, err)
- }
- }
- }
- if err = ww.w.FinishTx(); err != nil {
- return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err)
- }
- if trace {
- fmt.Printf("FinishTx called for blockNum=%d, txIndex=%d, txNum=%d txHash=[%x]\n", block.NumberU64(), i, txNum, tx.Hash())
- }
- }
- txNum++
- ww.w.SetTxNum(txNum)
- }
-
- if txNum >= startTxNum {
- if chainConfig.IsByzantium(block.NumberU64()) {
- receiptSha := types.DeriveSha(receipts)
- if receiptSha != block.ReceiptHash() {
- fmt.Printf("mismatched receipt headers for block %d\n", block.NumberU64())
- for j, receipt := range receipts {
- fmt.Printf("tx %d, used gas: %d\n", j, receipt.GasUsed)
- }
- }
- }
- ibs := state.New(rw)
- if err := ww.w.AddTraceTo(block.Coinbase().Bytes()); err != nil {
- return 0, nil, fmt.Errorf("adding coinbase trace: %w", err)
- }
- for _, uncle := range block.Uncles() {
- if err := ww.w.AddTraceTo(uncle.Coinbase.Bytes()); err != nil {
- return 0, nil, fmt.Errorf("adding uncle trace: %w", err)
- }
- }
-
- // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
- if _, _, err := engine.Finalize(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, block.Withdrawals(), nil, nil, logger); err != nil {
- return 0, nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err)
- }
-
- if err := ibs.CommitBlock(rules, ww); err != nil {
- return 0, nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err)
- }
-
- if err := ww.w.FinishTx(); err != nil {
- return 0, nil, fmt.Errorf("failed to finish tx: %w", err)
- }
- if trace {
- fmt.Printf("FinishTx called for %d block %d\n", txNum, block.NumberU64())
- }
- }
-
- txNum++ // Post-block transaction
- ww.w.SetTxNum(txNum)
- if txNum >= startTxNum {
- if commitments && commitmentFrequency > 0 && block.Number().Uint64()%uint64(commitmentFrequency) == 0 {
- rootHash, err := ww.w.ComputeCommitment(true, trace)
- if err != nil {
- return 0, nil, err
- }
- if !bytes.Equal(rootHash, header.Root[:]) {
- return 0, nil, fmt.Errorf("invalid root hash for block %d: expected %x got %x", block.NumberU64(), header.Root, rootHash)
- }
- }
-
- if err := ww.w.FinishTx(); err != nil {
- return 0, nil, fmt.Errorf("finish after-block tx %d (block %d) has failed: %w", txNum, block.NumberU64(), err)
- }
- }
-
- return txNum, receipts, nil
-}
-
-// Implements StateReader and StateWriter
-type StateReaderV4 struct {
- roTx kv.Tx
- ac *libstate.AggregatorContext
- blockNum uint64
-}
-
-type StateWriterV4 struct {
- w *libstate.Aggregator
-}
-
-func (rw *StateReaderV4) ReadAccountData(address libcommon.Address) (*accounts.Account, error) {
- enc, err := rw.ac.ReadAccountData(address.Bytes(), rw.roTx)
- if err != nil {
- return nil, err
- }
- if len(enc) == 0 {
- return nil, nil
- }
- var a accounts.Account
- if err := accounts.DeserialiseV3(&a, enc); err != nil {
- return nil, err
- }
- return &a, nil
-}
-
-func (rw *StateReaderV4) ReadAccountStorage(address libcommon.Address, incarnation uint64, key *libcommon.Hash) ([]byte, error) {
- enc, err := rw.ac.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx)
- if err != nil {
- return nil, err
- }
- if enc == nil {
- return nil, nil
- }
- if len(enc) == 1 && enc[0] == 0 {
- return nil, nil
- }
- return enc, nil
-}
-
-func (rw *StateReaderV4) ReadAccountCode(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash) ([]byte, error) {
- return rw.ac.ReadAccountCode(address.Bytes(), rw.roTx)
-}
-
-func (rw *StateReaderV4) ReadAccountCodeSize(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash) (int, error) {
- return rw.ac.ReadAccountCodeSize(address.Bytes(), rw.roTx)
-}
-
-func (rw *StateReaderV4) ReadAccountIncarnation(address libcommon.Address) (uint64, error) {
- return 0, nil
-}
-
-func (ww *StateWriterV4) UpdateAccountData(address libcommon.Address, original, account *accounts.Account) error {
- value := accounts.SerialiseV3(account)
- if err := ww.w.UpdateAccountData(address.Bytes(), value); err != nil {
- return err
- }
- return nil
-}
-
-func (ww *StateWriterV4) UpdateAccountCode(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash, code []byte) error {
- if err := ww.w.UpdateAccountCode(address.Bytes(), code); err != nil {
- return err
- }
- return nil
-}
-
-func (ww *StateWriterV4) DeleteAccount(address libcommon.Address, original *accounts.Account) error {
- if err := ww.w.DeleteAccount(address.Bytes()); err != nil {
- return err
- }
- return nil
-}
-
-func (ww *StateWriterV4) WriteAccountStorage(address libcommon.Address, incarnation uint64, key *libcommon.Hash, original, value *uint256.Int) error {
- if err := ww.w.WriteAccountStorage(address.Bytes(), key.Bytes(), value.Bytes()); err != nil {
- return err
- }
- return nil
-}
-
-func (ww *StateWriterV4) CreateContract(address libcommon.Address) error {
- return nil
-}
-
-func initConsensusEngine(cc *chain2.Config, snapshots *freezeblocks.RoSnapshots, logger log.Logger) (engine consensus.Engine) {
- config := ethconfig.Defaults
-
- var consensusConfig interface{}
-
- if cc.Clique != nil {
- consensusConfig = params.CliqueSnapshot
- } else if cc.Aura != nil {
- consensusConfig = &config.Aura
- } else if cc.Bor != nil {
- consensusConfig = &config.Bor
- } else {
- consensusConfig = &config.Ethash
- }
- return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallgRPCAddress,
- config.HeimdallURL, config.WithoutHeimdall, true /* readonly */, logger)
-}
diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go
index f8d95e7faff..c9cebb45e03 100644
--- a/cmd/state/commands/opcode_tracer.go
+++ b/cmd/state/commands/opcode_tracer.go
@@ -18,7 +18,6 @@ import (
chain2 "github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/fixedgas"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
@@ -26,11 +25,9 @@ import (
"github.com/ledgerwatch/erigon/common/debug"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/ethash"
- "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/systemcontracts"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
@@ -116,7 +113,7 @@ type opcodeTracer struct {
saveBblocks bool
blockNumber uint64
depth int
- env vm.VMInterface
+ env *vm.EVM
}
func NewOpcodeTracer(blockNum uint64, saveOpcodes bool, saveBblocks bool) *opcodeTracer {
@@ -197,7 +194,7 @@ func (ot *opcodeTracer) captureStartOrEnter(from, to libcommon.Address, create b
ot.stack = append(ot.stack, &newTx)
}
-func (ot *opcodeTracer) CaptureStart(env vm.VMInterface, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+func (ot *opcodeTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
ot.env = env
ot.depth = 0
ot.captureStartOrEnter(from, to, create, input)
@@ -259,7 +256,7 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64,
}
pc16 := uint16(pc)
- currentTxHash := ot.env.TxContext().TxHash
+ currentTxHash := ot.env.TxHash
currentTxDepth := opDepth - 1
ls := len(ot.stack)
@@ -431,7 +428,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num
}
return nil
})
- blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+ blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
chainConfig := genesis.Config
vmConfig := vm.Config{Tracer: ot, Debug: true}
@@ -711,14 +708,11 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta
chainConfig *chain2.Config, getHeader func(hash libcommon.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config, trace bool, logger log.Logger) (types.Receipts, error) {
header := block.Header()
vmConfig.TraceJumpDest = true
- gp := new(core.GasPool).AddGas(block.GasLimit()).AddBlobGas(fixedgas.MaxBlobGasPerBlock)
+ gp := new(core.GasPool).AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock())
usedGas := new(uint64)
usedBlobGas := new(uint64)
var receipts types.Receipts
- if chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 {
- misc.ApplyDAOHardFork(ibs)
- }
- systemcontracts.UpgradeBuildInSystemContract(chainConfig, header.Number, ibs, logger)
+ core.InitializeBlockExecution(engine, nil, header, chainConfig, ibs, logger)
rules := chainConfig.Rules(block.NumberU64(), block.Time())
for i, tx := range block.Transactions() {
ibs.SetTxContext(tx.Hash(), block.Hash(), i)
diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go
index a147e79cb36..8945289cff3 100644
--- a/cmd/state/commands/state_root.go
+++ b/cmd/state/commands/state_root.go
@@ -43,7 +43,7 @@ var stateRootCmd = &cobra.Command{
Short: "Exerimental command to re-execute blocks from beginning and compute state root",
RunE: func(cmd *cobra.Command, args []string) error {
logger := debug.SetupCobra(cmd, "stateroot")
- return StateRoot(genesis, block, datadirCli, logger)
+ return StateRoot(cmd.Context(), genesis, block, datadirCli, logger)
},
}
@@ -55,12 +55,12 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
}); err != nil {
panic(err)
}
- br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+ br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
bw := blockio.NewBlockWriter(histV3)
return br, bw
}
-func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error {
+func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error {
sigs := make(chan os.Signal, 1)
interruptCh := make(chan bool, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
@@ -70,12 +70,11 @@ func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger l
interruptCh <- true
}()
dirs := datadir2.New(datadir)
- historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open()
+ historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open(ctx)
if err != nil {
return err
}
defer historyDb.Close()
- ctx := context.Background()
historyTx, err1 := historyDb.BeginRo(ctx)
if err1 != nil {
return err1
@@ -89,7 +88,7 @@ func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger l
} else if err = os.RemoveAll(stateDbPath); err != nil {
return err
}
- db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).Open()
+ db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).Open(ctx)
if err2 != nil {
return err2
}
diff --git a/cmd/state/exec3/calltracer_v3.go b/cmd/state/exec3/calltracer_v3.go
index 31e25fa0007..951e114dfa8 100644
--- a/cmd/state/exec3/calltracer_v3.go
+++ b/cmd/state/exec3/calltracer_v3.go
@@ -22,7 +22,7 @@ func (ct *CallTracer) Tos() map[libcommon.Address]struct{} { return ct.tos }
func (ct *CallTracer) CaptureTxStart(gasLimit uint64) {}
func (ct *CallTracer) CaptureTxEnd(restGas uint64) {}
-func (ct *CallTracer) CaptureStart(env vm.VMInterface, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+func (ct *CallTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
if ct.froms == nil {
ct.froms = map[libcommon.Address]struct{}{}
ct.tos = map[libcommon.Address]struct{}{}
diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go
index 331255b0c7c..4e3297219d0 100644
--- a/cmd/state/exec3/state.go
+++ b/cmd/state/exec3/state.go
@@ -14,13 +14,13 @@ import (
"github.com/ledgerwatch/erigon/cmd/state/exec22"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/core/vm/evmtypes"
+ "github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/services"
)
@@ -133,17 +133,12 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) {
//ibs.SetTrace(true)
rules := txTask.Rules
- daoForkTx := rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1
var err error
header := txTask.Header
var logger = log.New("worker-tx")
switch {
- case daoForkTx:
- //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum)
- misc.ApplyDAOHardFork(ibs)
- ibs.SoftFinalise()
case txTask.TxIndex == -1:
if txTask.BlockNum == 0 {
// Genesis block
@@ -161,7 +156,8 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) {
syscall := func(contract libcommon.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) {
return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, constCall /* constCall */)
}
- rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, txTask.Txs, txTask.Uncles, syscall)
+ rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, syscall, logger)
+ txTask.Error = ibs.FinalizeTx(rules, noop)
case txTask.Final:
if txTask.BlockNum == 0 {
break
@@ -210,7 +206,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) {
} else {
txTask.UsedGas = applyRes.UsedGas
// Update the state with pending changes
- ibs.SoftFinalise()
+ txTask.Error = ibs.FinalizeTx(rules, noop)
txTask.Logs = ibs.GetLogs(txHash)
txTask.TraceFroms = rw.callTracer.Froms()
txTask.TraceTos = rw.callTracer.Tos()
@@ -281,6 +277,16 @@ func (cr ChainReader) GetTd(hash libcommon.Hash, number uint64) *big.Int {
func (cr ChainReader) FrozenBlocks() uint64 {
return cr.blockReader.FrozenBlocks()
}
+func (cr ChainReader) GetBlock(hash libcommon.Hash, number uint64) *types.Block {
+ panic("")
+}
+func (cr ChainReader) HasBlock(hash libcommon.Hash, number uint64) bool {
+ panic("")
+}
+func (cr ChainReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue {
+ panic("")
+}
+func (cr ChainReader) BorSpan(spanId uint64) []byte { panic("") }
func NewWorkersPool(lock sync.Locker, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.StateV3, in *exec22.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, engine consensus.Engine, workerCount int) (reconWorkers []*Worker, applyWorker *Worker, rws *exec22.ResultsQueue, clear func(), wait func()) {
reconWorkers = make([]*Worker, workerCount)
diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go
index 6ed8cfb5336..0172f9a3653 100644
--- a/cmd/state/exec3/state_recon.go
+++ b/cmd/state/exec3/state_recon.go
@@ -17,9 +17,7 @@ import (
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon/cmd/state/exec22"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
@@ -151,7 +149,7 @@ func (fw *FillWorker) FillCode(codeCollector, plainContractCollector *etl.Collec
copy(compositeKey, key)
if len(val) > 0 {
- codeHash, err := common.HashData(val)
+ codeHash, err := libcommon.HashData(val)
if err != nil {
return err
}
@@ -292,7 +290,6 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error {
rw.ibs.Reset()
ibs := rw.ibs
rules := txTask.Rules
- daoForkTx := rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1
var err error
var logger = log.New("recon-tx")
@@ -306,10 +303,6 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error {
}
// For Genesis, rules should be empty, so that empty accounts can be included
rules = &chain.Rules{}
- } else if daoForkTx {
- //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum)
- misc.ApplyDAOHardFork(ibs)
- ibs.SoftFinalise()
} else if txTask.Final {
if txTask.BlockNum > 0 {
//fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum)
@@ -329,9 +322,14 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error {
return core.SysCallContract(contract, data, rw.chainConfig, ibState, header, rw.engine, constCall /* constCall */)
}
- rw.engine.Initialize(rw.chainConfig, rw.chain, txTask.Header, ibs, txTask.Txs, txTask.Uncles, syscall)
+ rw.engine.Initialize(rw.chainConfig, rw.chain, txTask.Header, ibs, syscall, logger)
+ if err = ibs.FinalizeTx(rules, noop); err != nil {
+ if _, readError := rw.stateReader.ReadError(); !readError {
+ return err
+ }
+ }
} else {
- gp := new(core.GasPool).AddGas(txTask.Tx.GetGas())
+ gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()).AddBlobGas(txTask.Tx.GetBlobGas())
vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: txTask.SkipAnalysis}
ibs.SetTxContext(txTask.Tx.Hash(), txTask.BlockHash, txTask.TxIndex)
msg := txTask.TxAsMessage
diff --git a/cmd/state/verify/check_indexes.go b/cmd/state/verify/check_indexes.go
index c3966ffd365..bc13606f8d4 100644
--- a/cmd/state/verify/check_indexes.go
+++ b/cmd/state/verify/check_indexes.go
@@ -3,13 +3,13 @@ package verify
import (
"context"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"time"
"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
)
func CheckIndex(ctx context.Context, chaindata string, changeSetBucket string, indexBucket string) error {
diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go
index d4a9e2415e3..3a7351d11b8 100644
--- a/cmd/state/verify/verify_txlookup.go
+++ b/cmd/state/verify/verify_txlookup.go
@@ -28,7 +28,7 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
}); err != nil {
panic(err)
}
- br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()))
+ br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", log.New()), nil /* BorSnapshots */)
bw := blockio.NewBlockWriter(histV3)
return br, bw
}
diff --git a/cmd/tooling/README.md b/cmd/tooling/README.md
new file mode 100644
index 00000000000..222adcade50
--- /dev/null
+++ b/cmd/tooling/README.md
@@ -0,0 +1,3 @@
+# Tooling
+
+this are a bunch of tools for our scripting necessities
\ No newline at end of file
diff --git a/cmd/tooling/cli.go b/cmd/tooling/cli.go
new file mode 100644
index 00000000000..622bc9e6ca6
--- /dev/null
+++ b/cmd/tooling/cli.go
@@ -0,0 +1,174 @@
+package main
+
+import (
+ "fmt"
+ "math"
+ "os/exec"
+ "time"
+
+ "github.com/ledgerwatch/erigon/cl/clparams"
+ "github.com/ledgerwatch/erigon/cmd/caplin/caplin1"
+ "github.com/ledgerwatch/erigon/eth/ethconfig"
+ "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks"
+ "golang.org/x/net/context"
+
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/erigon/cl/persistence"
+ "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies"
+ "github.com/ledgerwatch/erigon/cl/persistence/db_config"
+ "github.com/ledgerwatch/erigon/cl/utils"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+var CLI struct {
+ BucketCaplinAutomation BucketCaplinAutomation `cmd:"" help:"migrate from one state to another"`
+}
+
+type chainCfg struct {
+ Chain string `help:"chain" default:"mainnet"`
+}
+
+// func (c *chainCfg) configs() (beaconConfig *clparams.BeaconChainConfig, genesisConfig *clparams.GenesisConfig, err error) {
+// genesisConfig, _, beaconConfig, _, err = clparams.GetConfigsByNetworkName(c.Chain)
+// return
+// }
+
+type withDatadir struct {
+ Datadir string `help:"datadir" default:"~/.local/share/erigon" type:"existingdir"`
+}
+
+// func (w *withPPROF) withProfile() {
+// if w.Pprof {
+// debug.StartPProf("localhost:6060", metrics.Setup("localhost:6060", log.Root()))
+// }
+// }
+
+// func (w *withSentinel) connectSentinel() (sentinel.SentinelClient, error) {
+// // YOLO message size
+// gconn, err := grpc.Dial(w.Sentinel, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt)))
+// if err != nil {
+// return nil, err
+// }
+// return sentinel.NewSentinelClient(gconn), nil
+// }
+
+// func openFs(fsName string, path string) (afero.Fs, error) {
+// return afero.NewBasePathFs(afero.NewBasePathFs(afero.NewOsFs(), fsName), path), nil
+// }
+
+type BucketCaplinAutomation struct {
+ withDatadir
+ chainCfg
+
+ UploadPeriod time.Duration `help:"upload period" default:"1440h"`
+ Bucket string `help:"r2 address" default:"http://localhost:8080"`
+}
+
+func (c *BucketCaplinAutomation) Run(ctx *Context) error {
+ _, _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain)
+ if err != nil {
+ return err
+ }
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler))
+ log.Info("Started the automation tool for automatic snapshot sanity check and R2 uploading (caplin only)", "chain", c.Chain)
+ dirs := datadir.New(c.Datadir)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+ tickerTriggerer := time.NewTicker(c.UploadPeriod)
+ defer tickerTriggerer.Stop()
+ // do the checking at first run
+ if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil {
+ return err
+ }
+ log.Info("Uploading snapshots to R2 bucket")
+ // next upload to R2
+ command := "rclone"
+ args := []string{"sync", dirs.Snap, c.Bucket, "--include", "*beaconblocks*"}
+ if err := exec.Command(command, args...).Run(); err != nil {
+ return fmt.Errorf("rclone failed, make sure rclone is installed and is properly configured: %s", err)
+ }
+ log.Info("Finished snapshots to R2 bucket")
+ for {
+ select {
+ case <-tickerTriggerer.C:
+ log.Info("Checking snapshots")
+ if err := checkSnapshots(ctx, beaconConfig, dirs); err != nil {
+ return err
+ }
+ log.Info("Finishing snapshots")
+ // next upload to R2
+ command := "rclone"
+ args := []string{"sync", dirs.Snap, c.Bucket, "--include", "*beaconblocks*"}
+ log.Info("Uploading snapshots to R2 bucket")
+ if err := exec.Command(command, args...).Run(); err != nil {
+ return fmt.Errorf("rclone failed, make sure rclone is installed and is properly configured: %s", err)
+ }
+ log.Info("Finished snapshots to R2 bucket")
+ case <-ctx.Done():
+ return nil
+ }
+ }
+}
+
+func checkSnapshots(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, dirs datadir.Dirs) error {
+ rawDB, _ := persistence.AferoRawBeaconBlockChainFromOsPath(beaconConfig, dirs.CaplinHistory)
+ _, db, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, rawDB, dirs.CaplinIndexing, nil, false)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ var to uint64
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ to, err = beacon_indicies.ReadHighestFinalized(tx)
+ if err != nil {
+ return err
+ }
+
+ to = (to / snaptype.Erigon2RecentMergeLimit) * snaptype.Erigon2RecentMergeLimit
+
+ csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, log.Root())
+ if err := csn.ReopenFolder(); err != nil {
+ return err
+ }
+
+ genesisHeader, _, _, err := csn.ReadHeader(0)
+ if err != nil {
+ return err
+ }
+ previousBlockRoot, err := genesisHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ previousBlockSlot := genesisHeader.Header.Slot
+ for i := uint64(1); i < to; i++ {
+ if utils.Min64(0, i-320) > previousBlockSlot {
+ return fmt.Errorf("snapshot %d has invalid slot", i)
+ }
+ // Checking of snapshots is a chain contiguity problem
+ currentHeader, _, _, err := csn.ReadHeader(i)
+ if err != nil {
+ return err
+ }
+ if currentHeader == nil {
+ continue
+ }
+ if currentHeader.Header.ParentRoot != previousBlockRoot {
+ return fmt.Errorf("snapshot %d has invalid parent root", i)
+ }
+ previousBlockRoot, err = currentHeader.Header.HashSSZ()
+ if err != nil {
+ return err
+ }
+ previousBlockSlot = currentHeader.Header.Slot
+ if i%20000 == 0 {
+ log.Info("Successfully checked", "slot", i)
+ }
+ }
+ return nil
+}
diff --git a/cmd/sentinel/sentinel/request.go b/cmd/tooling/main.go
similarity index 64%
rename from cmd/sentinel/sentinel/request.go
rename to cmd/tooling/main.go
index d1e8e8cebf5..d2d04e6dcd2 100644
--- a/cmd/sentinel/sentinel/request.go
+++ b/cmd/tooling/main.go
@@ -11,22 +11,25 @@
limitations under the License.
*/
-package sentinel
+package main
import (
- "fmt"
+ "context"
- "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/alecthomas/kong"
)
-func (s *Sentinel) RandomPeer(topic string) (peer.ID, error) {
- var (
- pid peer.ID
- err error
- )
- pid, err = connectToRandomPeer(s, string(BeaconBlockTopic))
- if err != nil {
- return peer.ID(""), fmt.Errorf("failed to connect to a random peer err=%s", err)
- }
- return pid, nil
+type Context struct {
+ context.Context
+ kctx *kong.Context
+}
+
+func main() {
+ ctx := kong.Parse(&CLI)
+ // Call the Run() method of the selected parsed command.
+ err := ctx.Run(&Context{
+ kctx: ctx,
+ Context: context.TODO(),
+ })
+ ctx.FatalIfErrorf(err)
}
diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go
index 8439503284c..d915a18b32b 100644
--- a/cmd/txpool/main.go
+++ b/cmd/txpool/main.go
@@ -49,9 +49,13 @@ var (
baseFeePoolLimit int
queuedPoolLimit int
- priceLimit uint64
- accountSlots uint64
- priceBump uint64
+ priceLimit uint64
+ accountSlots uint64
+ blobSlots uint64
+ priceBump uint64
+ blobPriceBump uint64
+
+ noTxGossip bool
commitEvery time.Duration
)
@@ -74,8 +78,11 @@ func init() {
rootCmd.PersistentFlags().IntVar(&queuedPoolLimit, "txpool.globalqueue", txpoolcfg.DefaultConfig.QueuedSubPoolLimit, "Maximum number of non-executable transaction slots for all accounts")
rootCmd.PersistentFlags().Uint64Var(&priceLimit, "txpool.pricelimit", txpoolcfg.DefaultConfig.MinFeeCap, "Minimum gas price (fee cap) limit to enforce for acceptance into the pool")
rootCmd.PersistentFlags().Uint64Var(&accountSlots, "txpool.accountslots", txpoolcfg.DefaultConfig.AccountSlots, "Minimum number of executable transaction slots guaranteed per account")
+ rootCmd.PersistentFlags().Uint64Var(&blobSlots, "txpool.blobslots", txpoolcfg.DefaultConfig.BlobSlots, "Max allowed total number of blobs (within type-3 txs) per account")
rootCmd.PersistentFlags().Uint64Var(&priceBump, "txpool.pricebump", txpoolcfg.DefaultConfig.PriceBump, "Price bump percentage to replace an already existing transaction")
+ rootCmd.PersistentFlags().Uint64Var(&blobPriceBump, "txpool.blobpricebump", txpoolcfg.DefaultConfig.BlobPriceBump, "Price bump percentage to replace an existing blob (type-3) transaction")
rootCmd.PersistentFlags().DurationVar(&commitEvery, utils.TxPoolCommitEveryFlag.Name, utils.TxPoolCommitEveryFlag.Value, utils.TxPoolCommitEveryFlag.Usage)
+ rootCmd.PersistentFlags().BoolVar(&noTxGossip, utils.TxPoolGossipDisableFlag.Name, utils.TxPoolGossipDisableFlag.Value, utils.TxPoolGossipDisableFlag.Usage)
rootCmd.Flags().StringSliceVar(&traceSenders, utils.TxPoolTraceSendersFlag.Name, []string{}, utils.TxPoolTraceSendersFlag.Usage)
}
@@ -139,7 +146,10 @@ func doTxpool(ctx context.Context, logger log.Logger) error {
cfg.QueuedSubPoolLimit = queuedPoolLimit
cfg.MinFeeCap = priceLimit
cfg.AccountSlots = accountSlots
+ cfg.BlobSlots = blobSlots
cfg.PriceBump = priceBump
+ cfg.BlobPriceBump = blobPriceBump
+ cfg.NoGossip = noTxGossip
cacheConfig := kvcache.DefaultCoherentConfig
cacheConfig.MetricsLabel = "txpool"
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 122d110aa72..4d30daa13ce 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -25,8 +25,16 @@ import (
"runtime"
"strconv"
"strings"
+ "time"
"github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/urfave/cli/v2"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon-lib/chain/snapcfg"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/cmp"
"github.com/ledgerwatch/erigon-lib/common/datadir"
@@ -35,10 +43,6 @@ import (
"github.com/ledgerwatch/erigon-lib/direct"
downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
- "github.com/ledgerwatch/log/v3"
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
- "github.com/urfave/cli/v2"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cmd/downloader/downloadernat"
@@ -56,7 +60,7 @@ import (
"github.com/ledgerwatch/erigon/p2p/nat"
"github.com/ledgerwatch/erigon/p2p/netutil"
"github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/params/networkname"
+ "github.com/ledgerwatch/erigon/rpc/rpccfg"
)
// These are all the command line flags we support.
@@ -93,7 +97,7 @@ var (
}
ChainFlag = cli.StringFlag{
Name: "chain",
- Usage: "Name of the testnet to join",
+ Usage: "name of the network to join",
Value: networkname.MainnetChainName,
}
IdentityFlag = cli.StringFlag{
@@ -138,12 +142,17 @@ var (
}
InternalConsensusFlag = cli.BoolFlag{
Name: "internalcl",
- Usage: "enables internal consensus",
+ Usage: "Enables internal consensus",
}
// Transaction pool settings
TxPoolDisableFlag = cli.BoolFlag{
Name: "txpool.disable",
- Usage: "experimental external pool and block producer, see ./cmd/txpool/readme.md for more info. Disabling internal txpool and block producer.",
+ Usage: "Experimental external pool and block producer, see ./cmd/txpool/readme.md for more info. Disabling internal txpool and block producer.",
+ }
+ TxPoolGossipDisableFlag = cli.BoolFlag{
+ Name: "txpool.gossip.disable",
+ Usage: "Disabling p2p gossip of txs. Any txs received by p2p - will be dropped. Some networks like 'Optimism execution engine'/'Optimistic Rollup' - using it to protect against MEV attacks",
+ Value: txpoolcfg.DefaultConfig.NoGossip,
}
TxPoolLocalsFlag = cli.StringFlag{
Name: "txpool.locals",
@@ -163,11 +172,21 @@ var (
Usage: "Price bump percentage to replace an already existing transaction",
Value: txpoolcfg.DefaultConfig.PriceBump,
}
+ TxPoolBlobPriceBumpFlag = cli.Uint64Flag{
+ Name: "txpool.blobpricebump",
+ Usage: "Price bump percentage to replace existing (type-3) blob transaction",
+ Value: txpoolcfg.DefaultConfig.BlobPriceBump,
+ }
TxPoolAccountSlotsFlag = cli.Uint64Flag{
Name: "txpool.accountslots",
Usage: "Minimum number of executable transaction slots guaranteed per account",
Value: ethconfig.Defaults.DeprecatedTxPool.AccountSlots,
}
+ TxPoolBlobSlotsFlag = cli.Uint64Flag{
+ Name: "txpool.blobslots",
+ Usage: "Max allowed total number of blobs (within type-3 txs) per account",
+ Value: txpoolcfg.DefaultConfig.BlobSlots,
+ }
TxPoolGlobalSlotsFlag = cli.Uint64Flag{
Name: "txpool.globalslots",
Usage: "Maximum number of executable transaction slots for all accounts",
@@ -195,7 +214,7 @@ var (
}
TxPoolTraceSendersFlag = cli.StringFlag{
Name: "txpool.trace.senders",
- Usage: "Comma separared list of addresses, whoes transactions will traced in transaction pool with debug printing",
+ Usage: "Comma separated list of addresses, whose transactions will traced in transaction pool with debug printing",
Value: "",
}
TxPoolCommitEveryFlag = cli.DurationFlag{
@@ -293,7 +312,12 @@ var (
}
HTTPEnabledFlag = cli.BoolFlag{
Name: "http",
- Usage: "HTTP-RPC server (enabled by default). Use --http=false to disable it",
+ Usage: "JSON-RPC server (enabled by default). Use --http=false to disable it",
+ Value: true,
+ }
+ HTTPServerEnabledFlag = cli.BoolFlag{
+ Name: "http.enabled",
+ Usage: "JSON-RPC HTTP server (enabled by default). Use --http.enabled=false to disable it",
Value: true,
}
HTTPListenAddrFlag = cli.StringFlag{
@@ -358,7 +382,7 @@ var (
}
RpcStreamingDisableFlag = cli.BoolFlag{
Name: "rpc.streaming.disable",
- Usage: "Erigon has enalbed json streaming for some heavy endpoints (like trace_*). It's treadoff: greatly reduce amount of RAM (in some cases from 30GB to 30mb), but it produce invalid json format if error happened in the middle of streaming (because json is not streaming-friendly format)",
+ Usage: "Erigon has enabled json streaming for some heavy endpoints (like trace_*). It's a trade-off: greatly reduce amount of RAM (in some cases from 30GB to 30mb), but it produce invalid json format if error happened in the middle of streaming (because json is not streaming-friendly format)",
}
RpcBatchLimit = cli.IntFlag{
Name: "rpc.batch.limit",
@@ -377,7 +401,7 @@ var (
DBReadConcurrencyFlag = cli.IntFlag{
Name: "db.read.concurrency",
Usage: "Does limit amount of parallel db reads. Default: equal to GOMAXPROCS (or number of CPU)",
- Value: cmp.Max(10, runtime.GOMAXPROCS(-1)*8),
+ Value: cmp.Min(cmp.Max(10, runtime.GOMAXPROCS(-1)*64), 9_000),
}
RpcAccessListFlag = cli.StringFlag{
Name: "rpc.accessList",
@@ -396,7 +420,7 @@ var (
TxpoolApiAddrFlag = cli.StringFlag{
Name: "txpool.api.addr",
- Usage: "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)",
+ Usage: "TxPool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)",
}
TraceMaxtracesFlag = cli.UintFlag{
@@ -468,7 +492,16 @@ var (
}
AllowUnprotectedTxs = cli.BoolFlag{
Name: "rpc.allow-unprotected-txs",
- Usage: "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC",
+ Usage: "Allow for unprotected (non-EIP155 signed) transactions to be submitted via RPC",
+ }
+ // Careful! Because we must rewind the hash state
+ // and re-compute the state trie, the further back in time the request, the more
+ // computationally intensive the operation becomes.
+ // The current default has been chosen arbitrarily as 'useful' without likely being overly computationally intense.
+ RpcMaxGetProofRewindBlockCount = cli.IntFlag{
+ Name: "rpc.maxgetproofrewindblockcount.limit",
+ Usage: "Max GetProof rewind block count",
+ Value: 100_000,
}
StateCacheFlag = cli.StringFlag{
Name: "state.cache",
@@ -504,17 +537,12 @@ var (
}
SentryAddrFlag = cli.StringFlag{
Name: "sentry.api.addr",
- Usage: "comma separated sentry addresses ':,:'",
+ Usage: "Comma separated sentry addresses ':,:'",
}
SentryLogPeerInfoFlag = cli.BoolFlag{
Name: "sentry.log-peer-info",
Usage: "Log detailed peer info when a peer connects or disconnects. Enable to integrate with observer.",
}
- SentryDropUselessPeers = cli.BoolFlag{
- Name: "sentry.drop-useless-peers",
- Usage: "Drop useless peers, those returning empty body or header responses",
- Value: false,
- }
DownloaderAddrFlag = cli.StringFlag{
Name: "downloader.api.addr",
Usage: "downloader address ':'",
@@ -545,14 +573,14 @@ var (
NATFlag = cli.StringFlag{
Name: "nat",
Usage: `NAT port mapping mechanism (any|none|upnp|pmp|stun|extip:)
- "" or "none" default - do not nat
- "extip:77.12.33.4" will assume the local machine is reachable on the given IP
- "any" uses the first auto-detected mechanism
- "upnp" uses the Universal Plug and Play protocol
- "pmp" uses NAT-PMP with an auto-detected gateway address
- "pmp:192.168.0.1" uses NAT-PMP with the given gateway address
- "stun" uses STUN to detect an external IP using a default server
- "stun:" uses STUN to detect an external IP using the given server (host:port)
+ "" or "none" Default - do not nat
+ "extip:77.12.33.4" Will assume the local machine is reachable on the given IP
+ "any" Uses the first auto-detected mechanism
+ "upnp" Uses the Universal Plug and Play protocol
+ "pmp" Uses NAT-PMP with an auto-detected gateway address
+ "pmp:192.168.0.1" Uses NAT-PMP with the given gateway address
+ "stun" Uses STUN to detect an external IP using a default server
+ "stun:" Uses STUN to detect an external IP using the given server (host:port)
`,
Value: "",
}
@@ -619,27 +647,27 @@ var (
}
HistoryV3Flag = cli.BoolFlag{
Name: "experimental.history.v3",
- Usage: "(also known as Erigon3) Not recommended yet: Can't change this flag after node creation. New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.",
+ Usage: "(Also known as Erigon3) Not recommended yet: Can't change this flag after node creation. New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.",
}
CliqueSnapshotCheckpointIntervalFlag = cli.UintFlag{
Name: "clique.checkpoint",
- Usage: "number of blocks after which to save the vote snapshot to the database",
+ Usage: "Number of blocks after which to save the vote snapshot to the database",
Value: 10,
}
CliqueSnapshotInmemorySnapshotsFlag = cli.IntFlag{
Name: "clique.snapshots",
- Usage: "number of recent vote snapshots to keep in memory",
+ Usage: "Number of recent vote snapshots to keep in memory",
Value: 1024,
}
CliqueSnapshotInmemorySignaturesFlag = cli.IntFlag{
Name: "clique.signatures",
- Usage: "number of recent block signatures to keep in memory",
+ Usage: "Number of recent block signatures to keep in memory",
Value: 16384,
}
CliqueDataDirFlag = flags.DirectoryFlag{
Name: "clique.datadir",
- Usage: "a path to clique db folder",
+ Usage: "Path to clique db folder",
Value: "",
}
@@ -654,22 +682,22 @@ var (
TorrentVerbosityFlag = cli.IntFlag{
Name: "torrent.verbosity",
Value: 2,
- Usage: "0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (must set --verbosity to equal or higher level and has defeault: 3)",
+ Usage: "0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (must set --verbosity to equal or higher level and has default: 2)",
}
TorrentDownloadRateFlag = cli.StringFlag{
Name: "torrent.download.rate",
Value: "16mb",
- Usage: "bytes per second, example: 32mb",
+ Usage: "Bytes per second, example: 32mb",
}
TorrentUploadRateFlag = cli.StringFlag{
Name: "torrent.upload.rate",
Value: "4mb",
- Usage: "bytes per second, example: 32mb",
+ Usage: "Bytes per second, example: 32mb",
}
TorrentDownloadSlotsFlag = cli.IntFlag{
Name: "torrent.download.slots",
Value: 3,
- Usage: "amount of files to download in parallel. If network has enough seeders 1-3 slot enough, if network has lack of seeders increase to 5-7 (too big value will slow down everything).",
+ Usage: "Amount of files to download in parallel. If network has enough seeders 1-3 slot enough, if network has lack of seeders increase to 5-7 (too big value will slow down everything).",
}
TorrentStaticPeersFlag = cli.StringFlag{
Name: "torrent.staticpeers",
@@ -678,37 +706,37 @@ var (
}
NoDownloaderFlag = cli.BoolFlag{
Name: "no-downloader",
- Usage: "to disable downloader component",
+ Usage: "Disables downloader component",
}
DownloaderVerifyFlag = cli.BoolFlag{
Name: "downloader.verify",
- Usage: "verify snapshots on startup. it will not report founded problems but just re-download broken pieces",
+ Usage: "Verify snapshots on startup. It will not report problems found, but re-download broken pieces.",
}
DisableIPV6 = cli.BoolFlag{
Name: "downloader.disable.ipv6",
- Usage: "Turns off ipv6 for the downlaoder",
+ Usage: "Turns off ipv6 for the downloader",
Value: false,
}
DisableIPV4 = cli.BoolFlag{
Name: "downloader.disable.ipv4",
- Usage: "Turn off ipv4 for the downloader",
+ Usage: "Turns off ipv4 for the downloader",
Value: false,
}
TorrentPortFlag = cli.IntFlag{
Name: "torrent.port",
Value: 42069,
- Usage: "port to listen and serve BitTorrent protocol",
+ Usage: "Port to listen and serve BitTorrent protocol",
}
TorrentMaxPeersFlag = cli.IntFlag{
Name: "torrent.maxpeers",
Value: 100,
- Usage: "unused parameter (reserved for future use)",
+ Usage: "Unused parameter (reserved for future use)",
}
TorrentConnsPerFileFlag = cli.IntFlag{
Name: "torrent.conns.perfile",
Value: 10,
- Usage: "connections per file",
+ Usage: "Number of connections per file",
}
DbPageSizeFlag = cli.StringFlag{
Name: "db.pagesize",
@@ -717,8 +745,13 @@ var (
}
DbSizeLimitFlag = cli.StringFlag{
Name: "db.size.limit",
- Usage: "runtime limit of chandata db size. you can change value of this flag at any time",
- Value: (3 * datasize.TB).String(),
+ Usage: "Runtime limit of chaindata db size. You can change value of this flag at any time.",
+ Value: (12 * datasize.TB).String(),
+ }
+ ForcePartialCommitFlag = cli.BoolFlag{
+ Name: "force.partial.commit",
+ Usage: "Force data commit after each stage (or even do multiple commits per 1 stage - to save it's progress). Don't use this flag if node is synced. Meaning: readers (users of RPC) would like to see 'fully consistent' data (block is executed and all indices are updated). Erigon guarantee this level of data-consistency. But 1 downside: after restore node from backup - it can't save partial progress (non-committed progress will be lost at restart). This flag will be removed in future if we can find automatic way to detect corner-cases.",
+ Value: false,
}
HealthCheckFlag = cli.BoolFlag{
@@ -732,6 +765,12 @@ var (
Value: "http://localhost:1317",
}
+ WebSeedsFlag = cli.StringFlag{
+ Name: "webseed",
+ Usage: "Comma-separated URL's, holding metadata about network-support infrastructure (like S3 buckets with snapshots, bootnodes, etc...)",
+ Value: "",
+ }
+
// WithoutHeimdallFlag no heimdall (for testing purpose)
WithoutHeimdallFlag = cli.BoolFlag{
Name: "bor.withoutheimdall",
@@ -748,6 +787,12 @@ var (
Usage: "Ignore the bor block period and wait for 'blocksize' transactions (for testing purposes)",
}
+ WithHeimdallMilestones = cli.BoolFlag{
+ Name: "bor.milestone",
+ Usage: "Enabling bor milestone processing",
+ Value: true,
+ }
+
// HeimdallgRPCAddressFlag flag for heimdall gRPC address
HeimdallgRPCAddressFlag = cli.StringFlag{
Name: "bor.heimdallgRPC",
@@ -791,10 +836,91 @@ var (
Usage: "Max allowed page size for search methods",
Value: 25,
}
+
+ DiagnosticsURLFlag = cli.StringFlag{
+ Name: "diagnostics.addr",
+ Usage: "Address of the diagnostics system provided by the support team",
+ }
+
+ DiagnosticsInsecureFlag = cli.BoolFlag{
+ Name: "diagnostics.insecure",
+ Usage: "Allows communication with diagnostics system using self-signed TLS certificates",
+ }
+
+ DiagnosticsSessionsFlag = cli.StringSliceFlag{
+ Name: "diagnostics.ids",
+ Usage: "Comma separated list of support session ids to connect to",
+ }
+
+ SilkwormExecutionFlag = cli.BoolFlag{
+ Name: "silkworm.exec",
+ Usage: "Enable Silkworm block execution",
+ }
+ SilkwormRpcDaemonFlag = cli.BoolFlag{
+ Name: "silkworm.rpcd",
+ Usage: "Enable embedded Silkworm RPC daemon",
+ }
+ SilkwormSentryFlag = cli.BoolFlag{
+ Name: "silkworm.sentry",
+ Usage: "Enable embedded Silkworm Sentry service",
+ }
+
+ BeaconAPIFlag = cli.BoolFlag{
+ Name: "beacon.api",
+ Usage: "Enable beacon API",
+ Value: false,
+ }
+ BeaconApiProtocolFlag = cli.StringFlag{
+ Name: "beacon.api.protocol",
+ Usage: "Protocol for beacon API",
+ Value: "tcp",
+ }
+ BeaconApiReadTimeoutFlag = cli.Uint64Flag{
+ Name: "beacon.api.read.timeout",
+ Usage: "Sets the seconds for a read time out in the beacon api",
+ Value: 5,
+ }
+ BeaconApiWriteTimeoutFlag = cli.Uint64Flag{
+ Name: "beacon.api.write.timeout",
+ Usage: "Sets the seconds for a write time out in the beacon api",
+ Value: 5,
+ }
+ BeaconApiIdleTimeoutFlag = cli.Uint64Flag{
+ Name: "beacon.api.ide.timeout",
+ Usage: "Sets the seconds for a write time out in the beacon api",
+ Value: 25,
+ }
+ BeaconApiAddrFlag = cli.StringFlag{
+ Name: "beacon.api.addr",
+ Usage: "sets the host to listen for beacon api requests",
+ Value: "localhost",
+ }
+ BeaconApiPortFlag = cli.UintFlag{
+ Name: "beacon.api.port",
+ Usage: "sets the port to listen for beacon api requests",
+ Value: 5555,
+ }
+ RPCSlowFlag = cli.DurationFlag{
+ Name: "rpc.slow",
+ Usage: "Print in logs RPC requests slower than given threshold: 100ms, 1s, 1m. Exluded methods: " + strings.Join(rpccfg.SlowLogBlackList, ","),
+ Value: 0,
+ }
+ CaplinBackfillingFlag = cli.BoolFlag{
+ Name: "caplin.backfilling",
+ Usage: "sets whether backfilling is enabled for caplin",
+ Value: false,
+ }
+ CaplinArchiveFlag = cli.BoolFlag{
+ Name: "caplin.archive",
+ Usage: "enables archival node in caplin (Experimental, does not work)",
+ Value: false,
+ }
)
var MetricFlags = []cli.Flag{&MetricsEnabledFlag, &MetricsHTTPFlag, &MetricsPortFlag}
+var DiagnosticsFlags = []cli.Flag{&DiagnosticsURLFlag, &DiagnosticsURLFlag, &DiagnosticsSessionsFlag}
+
// setNodeKey loads a node key from command line flags if provided,
// otherwise it tries to load it from datadir,
// otherwise it generates a new key in datadir.
@@ -856,7 +982,7 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
func GetBootnodesFromFlags(urlsStr, chain string) ([]*enode.Node, error) {
var urls []string
if urlsStr != "" {
- urls = SplitAndTrim(urlsStr)
+ urls = libcommon.CliString2Array(urlsStr)
} else {
urls = params.BootnodeURLsOfChain(chain)
}
@@ -866,7 +992,7 @@ func GetBootnodesFromFlags(urlsStr, chain string) ([]*enode.Node, error) {
func setStaticPeers(ctx *cli.Context, cfg *p2p.Config) {
var urls []string
if ctx.IsSet(StaticPeersFlag.Name) {
- urls = SplitAndTrim(ctx.String(StaticPeersFlag.Name))
+ urls = libcommon.CliString2Array(ctx.String(StaticPeersFlag.Name))
} else {
chain := ctx.String(ChainFlag.Name)
urls = params.StaticPeerURLsOfChain(chain)
@@ -885,7 +1011,7 @@ func setTrustedPeers(ctx *cli.Context, cfg *p2p.Config) {
return
}
- urls := SplitAndTrim(ctx.String(TrustedPeersFlag.Name))
+ urls := libcommon.CliString2Array(ctx.String(TrustedPeersFlag.Name))
trustedNodes, err := ParseNodesFromURLs(urls)
if err != nil {
Fatalf("Option %s: %v", TrustedPeersFlag.Name, err)
@@ -979,6 +1105,7 @@ func NewP2PConfig(
return nil, fmt.Errorf("invalid nat option %s: %w", natSetting, err)
}
cfg.NAT = natif
+ cfg.NATSpec = natSetting
return cfg, nil
}
@@ -1000,7 +1127,7 @@ func setListenAddress(ctx *cli.Context, cfg *p2p.Config) {
cfg.ProtocolVersion = ctx.UintSlice(P2pProtocolVersionFlag.Name)
}
if ctx.IsSet(SentryAddrFlag.Name) {
- cfg.SentryAddr = SplitAndTrim(ctx.String(SentryAddrFlag.Name))
+ cfg.SentryAddr = libcommon.CliString2Array(ctx.String(SentryAddrFlag.Name))
}
// TODO cli lib doesn't store defaults for UintSlice properly so we have to get value directly
cfg.AllowedPorts = P2pProtocolAllowedPorts.Value.Value()
@@ -1027,26 +1154,16 @@ func setListenAddress(ctx *cli.Context, cfg *p2p.Config) {
// setNAT creates a port mapper from command line flags.
func setNAT(ctx *cli.Context, cfg *p2p.Config) {
if ctx.IsSet(NATFlag.Name) {
- natif, err := nat.Parse(ctx.String(NATFlag.Name))
+ natSetting := ctx.String(NATFlag.Name)
+ natif, err := nat.Parse(natSetting)
if err != nil {
Fatalf("Option %s: %v", NATFlag.Name, err)
}
cfg.NAT = natif
+ cfg.NATSpec = natSetting
}
}
-// SplitAndTrim splits input separated by a comma
-// and trims excessive white space from the substrings.
-func SplitAndTrim(input string) (ret []string) {
- l := strings.Split(input, ",")
- for _, r := range l {
- if r = strings.TrimSpace(r); r != "" {
- ret = append(ret, r)
- }
- }
- return ret
-}
-
// setEtherbase retrieves the etherbase from the directly specified
// command line flags.
func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
@@ -1121,7 +1238,6 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config, nodeName, datadir string, l
}
ethPeers := cfg.MaxPeers
- cfg.Name = nodeName
logger.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
@@ -1162,12 +1278,10 @@ func SetNodeConfigCobra(cmd *cobra.Command, cfg *nodecfg.Config) {
func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) {
if ctx.IsSet(DataDirFlag.Name) {
- cfg.Dirs.DataDir = ctx.String(DataDirFlag.Name)
+ cfg.Dirs = datadir.New(ctx.String(DataDirFlag.Name))
} else {
- cfg.Dirs.DataDir = paths.DataDirForNetwork(cfg.Dirs.DataDir, ctx.String(ChainFlag.Name))
+ cfg.Dirs = datadir.New(paths.DataDirForNetwork(paths.DefaultDataDir(), ctx.String(ChainFlag.Name)))
}
- cfg.Dirs = datadir.New(cfg.Dirs.DataDir)
-
cfg.MdbxPageSize = flags.DBPageSizeFlagUnmarshal(ctx, DbPageSizeFlag.Name, DbPageSizeFlag.Usage)
if err := cfg.MdbxDBSizeLimit.UnmarshalText([]byte(ctx.String(DbSizeLimitFlag.Name))); err != nil {
panic(err)
@@ -1188,13 +1302,10 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *nodecfg.Config) {
panic(err)
}
if dirname != "" {
- cfg.Dirs.DataDir = dirname
+ cfg.Dirs = datadir.New(dirname)
} else {
- cfg.Dirs.DataDir = paths.DataDirForNetwork(cfg.Dirs.DataDir, chain)
+ cfg.Dirs = datadir.New(paths.DataDirForNetwork(paths.DefaultDataDir(), chain))
}
-
- cfg.Dirs.DataDir = paths.DataDirForNetwork(cfg.Dirs.DataDir, chain)
- cfg.Dirs = datadir.New(cfg.Dirs.DataDir)
}
func setGPO(ctx *cli.Context, cfg *gaspricecfg.Config) {
@@ -1222,12 +1333,13 @@ func setGPOCobra(f *pflag.FlagSet, cfg *gaspricecfg.Config) {
}
}
-func setTxPool(ctx *cli.Context, cfg *ethconfig.DeprecatedTxPoolConfig) {
+func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) {
+ cfg := &fullCfg.DeprecatedTxPool
if ctx.IsSet(TxPoolDisableFlag.Name) {
cfg.Disable = true
}
if ctx.IsSet(TxPoolLocalsFlag.Name) {
- locals := SplitAndTrim(ctx.String(TxPoolLocalsFlag.Name))
+ locals := libcommon.CliString2Array(ctx.String(TxPoolLocalsFlag.Name))
for _, account := range locals {
if !libcommon.IsHexAddress(account) {
Fatalf("Invalid account in --txpool.locals: %s", account)
@@ -1245,9 +1357,15 @@ func setTxPool(ctx *cli.Context, cfg *ethconfig.DeprecatedTxPoolConfig) {
if ctx.IsSet(TxPoolPriceBumpFlag.Name) {
cfg.PriceBump = ctx.Uint64(TxPoolPriceBumpFlag.Name)
}
+ if ctx.IsSet(TxPoolBlobPriceBumpFlag.Name) {
+ fullCfg.TxPool.BlobPriceBump = ctx.Uint64(TxPoolBlobPriceBumpFlag.Name)
+ }
if ctx.IsSet(TxPoolAccountSlotsFlag.Name) {
cfg.AccountSlots = ctx.Uint64(TxPoolAccountSlotsFlag.Name)
}
+ if ctx.IsSet(TxPoolBlobSlotsFlag.Name) {
+ fullCfg.TxPool.BlobSlots = ctx.Uint64(TxPoolBlobSlotsFlag.Name)
+ }
if ctx.IsSet(TxPoolGlobalSlotsFlag.Name) {
cfg.GlobalSlots = ctx.Uint64(TxPoolGlobalSlotsFlag.Name)
}
@@ -1265,14 +1383,16 @@ func setTxPool(ctx *cli.Context, cfg *ethconfig.DeprecatedTxPoolConfig) {
}
if ctx.IsSet(TxPoolTraceSendersFlag.Name) {
// Parse the command separated flag
- senderHexes := SplitAndTrim(ctx.String(TxPoolTraceSendersFlag.Name))
+ senderHexes := libcommon.CliString2Array(ctx.String(TxPoolTraceSendersFlag.Name))
cfg.TracedSenders = make([]string, len(senderHexes))
for i, senderHex := range senderHexes {
sender := libcommon.HexToAddress(senderHex)
cfg.TracedSenders[i] = string(sender[:])
}
}
-
+ if ctx.IsSet(TxPoolBlobPriceBumpFlag.Name) {
+ fullCfg.TxPool.BlobPriceBump = ctx.Uint64(TxPoolBlobPriceBumpFlag.Name)
+ }
cfg.CommitEvery = common2.RandomizeDuration(ctx.Duration(TxPoolCommitEveryFlag.Name))
}
@@ -1362,6 +1482,7 @@ func setBorConfig(ctx *cli.Context, cfg *ethconfig.Config) {
cfg.HeimdallURL = ctx.String(HeimdallURLFlag.Name)
cfg.WithoutHeimdall = ctx.Bool(WithoutHeimdallFlag.Name)
cfg.HeimdallgRPCAddress = ctx.String(HeimdallgRPCAddressFlag.Name)
+ cfg.WithHeimdallMilestones = ctx.Bool(WithHeimdallMilestones.Name)
}
func setMiner(ctx *cli.Context, cfg *params.MiningConfig) {
@@ -1372,7 +1493,7 @@ func setMiner(ctx *cli.Context, cfg *params.MiningConfig) {
panic(fmt.Sprintf("Erigon supports only remote miners. Flag --%s or --%s is required", MinerNotifyFlag.Name, MinerSigningKeyFileFlag.Name))
}
if ctx.IsSet(MinerNotifyFlag.Name) {
- cfg.Notify = SplitAndTrim(ctx.String(MinerNotifyFlag.Name))
+ cfg.Notify = libcommon.CliString2Array(ctx.String(MinerNotifyFlag.Name))
}
if ctx.IsSet(MinerExtraDataFlag.Name) {
cfg.ExtraData = []byte(ctx.String(MinerExtraDataFlag.Name))
@@ -1397,7 +1518,7 @@ func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
return
}
cfg.Whitelist = make(map[uint64]libcommon.Hash)
- for _, entry := range SplitAndTrim(whitelist) {
+ for _, entry := range libcommon.CliString2Array(whitelist) {
parts := strings.Split(entry, "=")
if len(parts) != 2 {
Fatalf("Invalid whitelist entry: %s", entry)
@@ -1414,6 +1535,26 @@ func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
}
}
+func setBeaconAPI(ctx *cli.Context, cfg *ethconfig.Config) {
+ cfg.BeaconRouter.Active = ctx.Bool(BeaconAPIFlag.Name)
+ cfg.BeaconRouter.Protocol = ctx.String(BeaconApiProtocolFlag.Name)
+ cfg.BeaconRouter.Address = fmt.Sprintf("%s:%d", ctx.String(BeaconApiAddrFlag.Name), ctx.Int(BeaconApiPortFlag.Name))
+ cfg.BeaconRouter.ReadTimeTimeout = time.Duration(ctx.Uint64(BeaconApiReadTimeoutFlag.Name)) * time.Second
+ cfg.BeaconRouter.WriteTimeout = time.Duration(ctx.Uint64(BeaconApiWriteTimeoutFlag.Name)) * time.Second
+ cfg.BeaconRouter.IdleTimeout = time.Duration(ctx.Uint64(BeaconApiIdleTimeoutFlag.Name)) * time.Second
+}
+
+func setCaplin(ctx *cli.Context, cfg *ethconfig.Config) {
+ cfg.CaplinConfig.Backfilling = ctx.Bool(CaplinBackfillingFlag.Name) || ctx.Bool(CaplinArchiveFlag.Name)
+ cfg.CaplinConfig.Archive = ctx.Bool(CaplinArchiveFlag.Name)
+}
+
+func setSilkworm(ctx *cli.Context, cfg *ethconfig.Config) {
+ cfg.SilkwormExecution = ctx.Bool(SilkwormExecutionFlag.Name)
+ cfg.SilkwormRpcDaemon = ctx.Bool(SilkwormRpcDaemonFlag.Name)
+ cfg.SilkwormSentry = ctx.Bool(SilkwormSentryFlag.Name)
+}
+
// CheckExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
@@ -1462,6 +1603,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
cfg.LightClientDiscoveryTCPPort = ctx.Uint64(LightClientDiscoveryTCPPortFlag.Name)
cfg.SentinelAddr = ctx.String(SentinelAddrFlag.Name)
cfg.SentinelPort = ctx.Uint64(SentinelPortFlag.Name)
+ cfg.ForcePartialCommit = ctx.Bool(ForcePartialCommitFlag.Name)
cfg.Sync.UseSnapshots = ethconfig.UseSnapshotsByChainName(ctx.String(ChainFlag.Name))
if ctx.IsSet(SnapshotFlag.Name) { //force override default by cli
@@ -1490,11 +1632,16 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
}
logger.Info("torrent verbosity", "level", lvl.LogString())
version := "erigon: " + params.VersionWithCommit(params.GitCommit)
- cfg.Downloader, err = downloadercfg2.New(cfg.Dirs.Snap, version, lvl, downloadRate, uploadRate, ctx.Int(TorrentPortFlag.Name), ctx.Int(TorrentConnsPerFileFlag.Name), ctx.Int(TorrentDownloadSlotsFlag.Name), ctx.StringSlice(TorrentDownloadSlotsFlag.Name))
+ chain := ctx.String(ChainFlag.Name)
+ webseedsList := libcommon.CliString2Array(ctx.String(WebSeedsFlag.Name))
+ if known, ok := snapcfg.KnownWebseeds[chain]; ok {
+ webseedsList = append(webseedsList, known...)
+ }
+ cfg.Downloader, err = downloadercfg2.New(cfg.Dirs, version, lvl, downloadRate, uploadRate, ctx.Int(TorrentPortFlag.Name), ctx.Int(TorrentConnsPerFileFlag.Name), ctx.Int(TorrentDownloadSlotsFlag.Name), ctx.StringSlice(TorrentDownloadSlotsFlag.Name), webseedsList, chain)
if err != nil {
panic(err)
}
- downloadernat.DoNat(nodeConfig.P2P.NAT, cfg.Downloader, logger)
+ downloadernat.DoNat(nodeConfig.P2P.NAT, cfg.Downloader.ClientConfig, logger)
}
nodeConfig.Http.Snap = cfg.Snapshot
@@ -1506,8 +1653,8 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
setEtherbase(ctx, cfg)
setGPO(ctx, &cfg.GPO)
- setTxPool(ctx, &cfg.DeprecatedTxPool)
- cfg.TxPool = ethconfig.DefaultTxPool2Config(cfg.DeprecatedTxPool)
+ setTxPool(ctx, cfg)
+ cfg.TxPool = ethconfig.DefaultTxPool2Config(cfg)
cfg.TxPool.DBDir = nodeConfig.Dirs.TxPool
setEthash(ctx, nodeConfig.Dirs.DataDir, cfg)
@@ -1515,9 +1662,11 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
setMiner(ctx, &cfg.Miner)
setWhitelist(ctx, cfg)
setBorConfig(ctx, cfg)
+ setSilkworm(ctx, cfg)
+ setBeaconAPI(ctx, cfg)
+ setCaplin(ctx, cfg)
cfg.Ethstats = ctx.String(EthStatsURLFlag.Name)
- cfg.P2PEnabled = len(nodeConfig.P2P.SentryAddr) == 0
cfg.HistoryV3 = ctx.Bool(HistoryV3Flag.Name)
if ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkID = ctx.Uint64(NetworkIdFlag.Name)
@@ -1541,7 +1690,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
if urls == "" {
cfg.EthDiscoveryURLs = []string{}
} else {
- cfg.EthDiscoveryURLs = SplitAndTrim(urls)
+ cfg.EthDiscoveryURLs = libcommon.CliString2Array(urls)
}
}
// Override any default configs for hard coded networks.
@@ -1566,7 +1715,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
}
case networkname.DevChainName:
if !ctx.IsSet(NetworkIdFlag.Name) {
- cfg.NetworkID = 1337
+ cfg.NetworkID = params.NetworkIDByChainName(chain)
}
// Create new developer account or reuse existing one
@@ -1589,17 +1738,17 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
cfg.TxPool.OverrideCancunTime = cfg.OverrideCancunTime
}
- if ctx.IsSet(InternalConsensusFlag.Name) && clparams.EmbeddedEnabledByDefault(cfg.NetworkID) {
+ if ctx.IsSet(InternalConsensusFlag.Name) && clparams.EmbeddedSupported(cfg.NetworkID) {
cfg.InternalCL = ctx.Bool(InternalConsensusFlag.Name)
}
- if ctx.IsSet(SentryDropUselessPeers.Name) {
- cfg.DropUselessPeers = ctx.Bool(SentryDropUselessPeers.Name)
- }
-
if ctx.IsSet(TrustedSetupFile.Name) {
libkzg.SetTrustedSetupFilePath(ctx.String(TrustedSetupFile.Name))
}
+
+ if ctx.IsSet(TxPoolGossipDisableFlag.Name) {
+ cfg.DisableTxPoolGossip = ctx.Bool(TxPoolGossipDisableFlag.Name)
+ }
}
// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
@@ -1615,7 +1764,7 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis libcommon.Hash) {
}
func SplitTagsFlag(tagsFlag string) map[string]string {
- tags := SplitAndTrim(tagsFlag)
+ tags := libcommon.CliString2Array(tagsFlag)
tagsMap := map[string]string{}
for _, t := range tags {
@@ -1631,17 +1780,6 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
return tagsMap
}
-// MakeConsolePreloads retrieves the absolute paths for the console JavaScript
-// scripts to preload before starting.
-func MakeConsolePreloads(ctx *cli.Context) []string {
- // Skip preloading if there's nothing to preload
- if ctx.String(PreloadJSFlag.Name) == "" {
- return nil
- }
- // Otherwise resolve absolute paths and return them
- return SplitAndTrim(ctx.String(PreloadJSFlag.Name))
-}
-
func CobraFlags(cmd *cobra.Command, urfaveCliFlagsLists ...[]cli.Flag) {
flags := cmd.PersistentFlags()
for _, urfaveCliFlags := range urfaveCliFlagsLists {
diff --git a/cmd/utils/flags/flags.go b/cmd/utils/flags/flags.go
index 58887f9f8e3..be0a8a396b7 100644
--- a/cmd/utils/flags/flags.go
+++ b/cmd/utils/flags/flags.go
@@ -305,6 +305,10 @@ func (b *bigValue) Set(s string) error {
return nil
}
+func (b *bigValue) Get() any {
+ return b.String()
+}
+
// GlobalBig returns the value of a BigFlag from the global flag set.
func GlobalBig(ctx *cli.Context, name string) *big.Int {
val := ctx.Generic(name)
diff --git a/cmd/verkle/main.go b/cmd/verkle/main.go
index 1a9bc20b4f4..f6d349168d9 100644
--- a/cmd/verkle/main.go
+++ b/cmd/verkle/main.go
@@ -8,6 +8,8 @@ import (
"os"
"time"
+ "github.com/ledgerwatch/erigon/cl/utils"
+
"github.com/c2h5oh/datasize"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/etl"
@@ -16,7 +18,6 @@ import (
"github.com/ledgerwatch/log/v3"
"go.uber.org/zap/buffer"
- "github.com/ledgerwatch/erigon/cl/utils"
"github.com/ledgerwatch/erigon/cmd/verkle/verkletrie"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/types/accounts"
@@ -34,17 +35,17 @@ type optionsCfg struct {
const DumpSize = uint64(20000000000)
-func IncrementVerkleTree(cfg optionsCfg, logger log.Logger) error {
+func IncrementVerkleTree(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
start := time.Now()
- db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
+ db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
if err != nil {
logger.Error("Error while opening database", "err", err.Error())
return err
}
defer db.Close()
- vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
+ vDb, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
if err != nil {
logger.Error("Error while opening db transaction", "err", err.Error())
return err
@@ -74,10 +75,10 @@ func IncrementVerkleTree(cfg optionsCfg, logger log.Logger) error {
}
verkleWriter := verkletrie.NewVerkleTreeWriter(vTx, cfg.tmpdir, logger)
defer verkleWriter.Close()
- if err := verkletrie.IncrementAccount(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to); err != nil {
+ if err := verkletrie.IncrementAccount(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to, cfg.tmpdir); err != nil {
return err
}
- if _, err := verkletrie.IncrementStorage(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to); err != nil {
+ if _, err := verkletrie.IncrementStorage(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to, cfg.tmpdir); err != nil {
return err
}
if err := stages.SaveStageProgress(vTx, stages.VerkleTrie, to); err != nil {
@@ -88,15 +89,15 @@ func IncrementVerkleTree(cfg optionsCfg, logger log.Logger) error {
return vTx.Commit()
}
-func RegeneratePedersenHashstate(cfg optionsCfg, logger log.Logger) error {
- db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
+func RegeneratePedersenHashstate(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
+ db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
if err != nil {
logger.Error("Error while opening database", "err", err.Error())
return err
}
defer db.Close()
- vDb, err := mdbx.Open(cfg.stateDb, log.Root(), false)
+ vDb, err := openDB(ctx, cfg.stateDb, log.Root(), false)
if err != nil {
logger.Error("Error while opening db transaction", "err", err.Error())
return err
@@ -130,16 +131,16 @@ func RegeneratePedersenHashstate(cfg optionsCfg, logger log.Logger) error {
return vTx.Commit()
}
-func GenerateVerkleTree(cfg optionsCfg, logger log.Logger) error {
+func GenerateVerkleTree(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
start := time.Now()
- db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
+ db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
if err != nil {
logger.Error("Error while opening database", "err", err.Error())
return err
}
defer db.Close()
- vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
+ vDb, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
if err != nil {
logger.Error("Error while opening db transaction", "err", err.Error())
return err
@@ -191,8 +192,8 @@ func GenerateVerkleTree(cfg optionsCfg, logger log.Logger) error {
return vTx.Commit()
}
-func analyseOut(cfg optionsCfg, logger log.Logger) error {
- db, err := mdbx.Open(cfg.verkleDb, logger, false)
+func analyseOut(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
+ db, err := openDB(ctx, cfg.verkleDb, logger, false)
if err != nil {
return err
}
@@ -218,8 +219,8 @@ func analyseOut(cfg optionsCfg, logger log.Logger) error {
return nil
}
-func dump(cfg optionsCfg) error {
- db, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
+func dump(ctx context.Context, cfg optionsCfg) error {
+ db, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
if err != nil {
return err
}
@@ -285,8 +286,8 @@ func dump(cfg optionsCfg) error {
return nil
}
-func dump_acc_preimages(cfg optionsCfg) error {
- db, err := mdbx.Open(cfg.stateDb, log.Root(), false)
+func dump_acc_preimages(ctx context.Context, cfg optionsCfg) error {
+ db, err := openDB(ctx, cfg.stateDb, log.Root(), false)
if err != nil {
return err
}
@@ -323,7 +324,7 @@ func dump_acc_preimages(cfg optionsCfg) error {
if _, err := file.Write(k); err != nil {
return err
}
- addressHash := utils.Keccak256(k)
+ addressHash := utils.Sha256(k)
if _, err := file.Write(addressHash[:]); err != nil {
return err
@@ -339,8 +340,8 @@ func dump_acc_preimages(cfg optionsCfg) error {
return nil
}
-func dump_storage_preimages(cfg optionsCfg, logger log.Logger) error {
- db, err := mdbx.Open(cfg.stateDb, logger, false)
+func dump_storage_preimages(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
+ db, err := openDB(ctx, cfg.stateDb, logger, false)
if err != nil {
return err
}
@@ -389,7 +390,7 @@ func dump_storage_preimages(cfg optionsCfg, logger log.Logger) error {
}
currentAddress = libcommon.BytesToAddress(k)
currentIncarnation = acc.Incarnation
- addressHash = utils.Keccak256(currentAddress[:])
+ addressHash = utils.Sha256(currentAddress[:])
} else {
address := libcommon.BytesToAddress(k[:20])
if address != currentAddress {
@@ -398,7 +399,7 @@ func dump_storage_preimages(cfg optionsCfg, logger log.Logger) error {
if binary.BigEndian.Uint64(k[20:]) != currentIncarnation {
continue
}
- storageHash := utils.Keccak256(k[28:])
+ storageHash := utils.Sha256(k[28:])
buf.Write(k[28:])
buf.Write(storageHash[:])
}
@@ -443,35 +444,50 @@ func main() {
}
switch *action {
case "hashstate":
- if err := RegeneratePedersenHashstate(opt, logger); err != nil {
+ if err := RegeneratePedersenHashstate(ctx, opt, logger); err != nil {
logger.Error("Error", "err", err.Error())
}
case "bucketsizes":
- if err := analyseOut(opt, logger); err != nil {
+ if err := analyseOut(ctx, opt, logger); err != nil {
logger.Error("Error", "err", err.Error())
}
case "verkle":
- if err := GenerateVerkleTree(opt, logger); err != nil {
+ if err := GenerateVerkleTree(ctx, opt, logger); err != nil {
logger.Error("Error", "err", err.Error())
}
case "incremental":
- if err := IncrementVerkleTree(opt, logger); err != nil {
+ if err := IncrementVerkleTree(ctx, opt, logger); err != nil {
logger.Error("Error", "err", err.Error())
}
case "dump":
log.Info("Dumping in dump.txt")
- if err := dump(opt); err != nil {
+ if err := dump(ctx, opt); err != nil {
log.Error("Error", "err", err.Error())
}
case "acc_preimages":
- if err := dump_acc_preimages(opt); err != nil {
+ if err := dump_acc_preimages(ctx, opt); err != nil {
logger.Error("Error", "err", err.Error())
}
case "storage_preimages":
- if err := dump_storage_preimages(opt, logger); err != nil {
+ if err := dump_storage_preimages(ctx, opt, logger); err != nil {
logger.Error("Error", "err", err.Error())
}
default:
log.Warn("No valid --action specified, aborting")
}
}
+
+func openDB(ctx context.Context, path string, logger log.Logger, accede bool) (kv.RwDB, error) {
+ var db kv.RwDB
+ var err error
+ opts := mdbx.NewMDBX(logger).Path(path)
+ if accede {
+ opts = opts.Accede()
+ }
+ db, err = opts.Open(ctx)
+
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/cmd/verkle/verkletrie/incrementAccount.go b/cmd/verkle/verkletrie/incrementAccount.go
index b626e7cb91e..e8555217e0a 100644
--- a/cmd/verkle/verkletrie/incrementAccount.go
+++ b/cmd/verkle/verkletrie/incrementAccount.go
@@ -16,7 +16,7 @@ import (
"github.com/ledgerwatch/erigon/core/types/accounts"
)
-func IncrementAccount(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64) error {
+func IncrementAccount(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64, tmpdir string) error {
logInterval := time.NewTicker(30 * time.Second)
logPrefix := "IncrementVerkleAccount"
@@ -59,7 +59,7 @@ func IncrementAccount(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *Verkl
}
}
}()
- marker := NewVerkleMarker()
+ marker := NewVerkleMarker(tmpdir)
defer marker.Rollback()
for k, v, err := accountCursor.Seek(hexutility.EncodeTs(from)); k != nil; k, v, err = accountCursor.Next() {
diff --git a/cmd/verkle/verkletrie/incrementStorage.go b/cmd/verkle/verkletrie/incrementStorage.go
index 14773c81938..72ee83177c9 100644
--- a/cmd/verkle/verkletrie/incrementStorage.go
+++ b/cmd/verkle/verkletrie/incrementStorage.go
@@ -17,7 +17,7 @@ import (
"github.com/ledgerwatch/erigon/core/rawdb"
)
-func IncrementStorage(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64) (libcommon.Hash, error) {
+func IncrementStorage(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64, tmpdir string) (libcommon.Hash, error) {
logInterval := time.NewTicker(30 * time.Second)
logPrefix := "IncrementVerkleStorage"
@@ -50,7 +50,7 @@ func IncrementStorage(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *Verkl
}
}
}()
- marker := NewVerkleMarker()
+ marker := NewVerkleMarker(tmpdir)
defer marker.Rollback()
for k, v, err := storageCursor.Seek(hexutility.EncodeTs(from)); k != nil; k, v, err = storageCursor.Next() {
diff --git a/cmd/verkle/verkletrie/pedersen_hashstate.go b/cmd/verkle/verkletrie/pedersen_hashstate.go
index f04561e4017..a56a15ac75b 100644
--- a/cmd/verkle/verkletrie/pedersen_hashstate.go
+++ b/cmd/verkle/verkletrie/pedersen_hashstate.go
@@ -234,7 +234,7 @@ func RegeneratePedersenCode(outTx kv.RwTx, readTx kv.Tx, workers uint64, verkleW
jobs <- ®eneratePedersenCodeJob{
address: libcommon.BytesToAddress(k),
- code: common.CopyBytes(code),
+ code: libcommon.CopyBytes(code),
}
select {
case <-logInterval.C:
diff --git a/cmd/verkle/verkletrie/verkle_marker.go b/cmd/verkle/verkletrie/verkle_marker.go
index 86c6d4afc6d..a979623fefc 100644
--- a/cmd/verkle/verkletrie/verkle_marker.go
+++ b/cmd/verkle/verkletrie/verkle_marker.go
@@ -13,8 +13,8 @@ type VerkleMarker struct {
}
//nolint:gocritic
-func NewVerkleMarker() *VerkleMarker {
- markedSlotsDb, err := mdbx.NewTemporaryMdbx()
+func NewVerkleMarker(tempdir string) *VerkleMarker {
+ markedSlotsDb, err := mdbx.NewTemporaryMdbx(context.TODO(), tempdir)
if err != nil {
panic(err)
}
diff --git a/cmd/verkle/verkletrie/verkle_tree_writer.go b/cmd/verkle/verkletrie/verkle_tree_writer.go
index 08f1acaa282..cd7cde8024a 100644
--- a/cmd/verkle/verkletrie/verkle_tree_writer.go
+++ b/cmd/verkle/verkletrie/verkle_tree_writer.go
@@ -198,7 +198,7 @@ func (v *VerkleTreeWriter) CommitVerkleTreeFromScratch() (libcommon.Hash, error)
if len(val) == 0 {
return next(k, nil, nil)
}
- if err := root.InsertOrdered(common.CopyBytes(k), common.CopyBytes(val), func(node verkle.VerkleNode) {
+ if err := root.InsertOrdered(libcommon.CopyBytes(k), libcommon.CopyBytes(val), func(node verkle.VerkleNode) {
rootHash := node.Commitment().Bytes()
encodedNode, err := node.Serialize()
if err != nil {
@@ -256,7 +256,7 @@ func (v *VerkleTreeWriter) CommitVerkleTree(root libcommon.Hash) (libcommon.Hash
logInterval := time.NewTicker(30 * time.Second)
if err := v.collector.Load(v.db, kv.VerkleTrie, func(key []byte, value []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error {
if len(value) > 0 {
- if err := rootNode.Insert(common.CopyBytes(key), common.CopyBytes(value), resolverFunc); err != nil {
+ if err := rootNode.Insert(libcommon.CopyBytes(key), libcommon.CopyBytes(value), resolverFunc); err != nil {
return err
}
insertions++
diff --git a/cmd/verkle/verkletrie/workers.go b/cmd/verkle/verkletrie/workers.go
index 5b3695ba6d5..6be189ae1f6 100644
--- a/cmd/verkle/verkletrie/workers.go
+++ b/cmd/verkle/verkletrie/workers.go
@@ -6,7 +6,6 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/turbo/trie/vtree"
)
@@ -149,16 +148,16 @@ func pedersenCodeWorker(ctx context.Context, logPrefix string, in chan *regenera
currentKey := vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(0))
// Write code chunks
for i := 0; i < len(chunkedCode); i += 32 {
- chunks = append(chunks, common.CopyBytes(chunkedCode[i:i+32]))
+ chunks = append(chunks, libcommon.CopyBytes(chunkedCode[i:i+32]))
if currentKey[31]+offset < currentKey[31] || offsetOverflow {
currentKey = vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(uint64(i)/32))
- chunkKeys = append(chunkKeys, common.CopyBytes(currentKey))
+ chunkKeys = append(chunkKeys, libcommon.CopyBytes(currentKey))
offset = 1
offsetOverflow = false
} else {
- codeKey := common.CopyBytes(currentKey)
+ codeKey := libcommon.CopyBytes(currentKey)
codeKey[31] += offset
- chunkKeys = append(chunkKeys, common.CopyBytes(codeKey))
+ chunkKeys = append(chunkKeys, libcommon.CopyBytes(codeKey))
offset += 1
// If offset overflows, handle it.
offsetOverflow = offset == 0
@@ -207,8 +206,8 @@ func incrementalAccountWorker(ctx context.Context, logPrefix string, in chan *re
currentKey := vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(0))
// Write code chunks
for i := 0; i < len(chunkedCode); i += 32 {
- chunks = append(chunks, common.CopyBytes(chunkedCode[i:i+32]))
- codeKey := common.CopyBytes(currentKey)
+ chunks = append(chunks, libcommon.CopyBytes(chunkedCode[i:i+32]))
+ codeKey := libcommon.CopyBytes(currentKey)
if currentKey[31]+offset < currentKey[31] || offsetOverflow {
currentKey = vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(uint64(i)/32))
chunkKeys = append(chunkKeys, codeKey)
diff --git a/common/bitutil/compress_test.go b/common/bitutil/compress_test.go
index d797498ca3a..ab4bd7ebb74 100644
--- a/common/bitutil/compress_test.go
+++ b/common/bitutil/compress_test.go
@@ -18,12 +18,11 @@ package bitutil
import (
"bytes"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/rand"
"testing"
"github.com/ledgerwatch/log/v3"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// Tests that data bitset encoding and decoding works and is bijective.
diff --git a/common/bytes.go b/common/bytes.go
index 1c7662af796..728ccd0fe23 100644
--- a/common/bytes.go
+++ b/common/bytes.go
@@ -20,6 +20,7 @@ package common
import (
"bytes"
"encoding/hex"
+ "github.com/ledgerwatch/erigon-lib/common"
)
// FromHex returns the bytes represented by the hexadecimal string s.
@@ -31,18 +32,7 @@ func FromHex(s string) []byte {
if len(s)%2 == 1 {
s = "0" + s
}
- return Hex2Bytes(s)
-}
-
-// CopyBytes returns an exact copy of the provided bytes.
-func CopyBytes(b []byte) (copiedBytes []byte) {
- if b == nil {
- return nil
- }
- copiedBytes = make([]byte, len(b))
- copy(copiedBytes, b)
-
- return
+ return common.Hex2Bytes(s)
}
// has0xPrefix validates str begins with '0x' or '0X'.
@@ -85,12 +75,6 @@ func RightPadBytes(slice []byte, l int) []byte {
return padded
}
-// Hex2Bytes returns the bytes represented by the hexadecimal string str.
-func Hex2Bytes(str string) []byte {
- h, _ := hex.DecodeString(str)
- return h
-}
-
// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
diff --git a/common/bytes_test.go b/common/bytes_test.go
index 0e3ec974ee4..723f23fa79c 100644
--- a/common/bytes_test.go
+++ b/common/bytes_test.go
@@ -18,13 +18,14 @@ package common
import (
"bytes"
+ "github.com/ledgerwatch/erigon-lib/common"
"testing"
)
func TestCopyBytes(t *testing.T) {
input := []byte{1, 2, 3, 4}
- v := CopyBytes(input)
+ v := common.CopyBytes(input)
if !bytes.Equal(v, []byte{1, 2, 3, 4}) {
t.Fatal("not equal after copy")
}
diff --git a/common/changeset/storage_changeset_test.go b/common/changeset/storage_changeset_test.go
index 02a5966a1eb..bed671ebbc5 100644
--- a/common/changeset/storage_changeset_test.go
+++ b/common/changeset/storage_changeset_test.go
@@ -3,6 +3,7 @@ package changeset
import (
"bytes"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"math/rand"
"reflect"
"strconv"
@@ -16,7 +17,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
)
const (
@@ -30,7 +30,7 @@ func getDefaultIncarnation() uint64 { return defaultIncarnation }
func getRandomIncarnation() uint64 { return rand.Uint64() }
func hashValueGenerator(j int) []byte {
- val, _ := common.HashData([]byte("val" + strconv.Itoa(j)))
+ val, _ := libcommon.HashData([]byte("val" + strconv.Itoa(j)))
return val.Bytes()
}
@@ -40,7 +40,7 @@ func emptyValueGenerator(j int) []byte {
func getTestDataAtIndex(i, j int, inc uint64) []byte {
address := libcommon.HexToAddress(fmt.Sprintf("0xBe828AD8B538D1D691891F6c725dEdc5989abBc%d", i))
- key, _ := common.HashData([]byte("key" + strconv.Itoa(j)))
+ key, _ := libcommon.HashData([]byte("key" + strconv.Itoa(j)))
return dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), inc, key.Bytes())
}
@@ -317,8 +317,8 @@ func BenchmarkDecodeNewStorage(t *testing.B) {
var err error
for i := 0; i < numOfElements; i++ {
address := []byte("0xa4e69cebbf4f8f3a1c6e493a6983d8a5879d22057a7c73b00e105d7c7e21ef" + strconv.Itoa(i))
- key, _ := common.HashData([]byte("key" + strconv.Itoa(i)))
- val, _ := common.HashData([]byte("val" + strconv.Itoa(i)))
+ key, _ := libcommon.HashData([]byte("key" + strconv.Itoa(i)))
+ val, _ := libcommon.HashData([]byte("val" + strconv.Itoa(i)))
err = ch.Add(dbutils.PlainGenerateCompositeStorageKey(address, rand.Uint64(), key[:]), val.Bytes())
if err != nil {
t.Fatal(err)
@@ -347,8 +347,8 @@ func BenchmarkEncodeNewStorage(t *testing.B) {
var err error
for i := 0; i < numOfElements; i++ {
address := []byte("0xa4e69cebbf4f8f3a1c6e493a6983d8a5879d22057a7c73b00e105d7c7e21ef" + strconv.Itoa(i))
- key, _ := common.HashData([]byte("key" + strconv.Itoa(i)))
- val, _ := common.HashData([]byte("val" + strconv.Itoa(i)))
+ key, _ := libcommon.HashData([]byte("key" + strconv.Itoa(i)))
+ val, _ := libcommon.HashData([]byte("val" + strconv.Itoa(i)))
err = ch.Add(dbutils.PlainGenerateCompositeStorageKey(address, rand.Uint64(), key[:]), val.Bytes())
if err != nil {
t.Fatal(err)
diff --git a/common/hexutil/json_example_test.go b/common/hexutil/json_example_test.go
deleted file mode 100644
index c256b412e39..00000000000
--- a/common/hexutil/json_example_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package hexutil_test
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/ledgerwatch/erigon-lib/common/hexutility"
-)
-
-type MyType [5]byte
-
-func (v *MyType) UnmarshalText(input []byte) error {
- return hexutility.UnmarshalFixedText("MyType", input, v[:])
-}
-
-func (v MyType) String() string {
- return hexutility.Bytes(v[:]).String()
-}
-
-func ExampleUnmarshalFixedText() {
- var v1, v2 MyType
- fmt.Println("v1 error:", json.Unmarshal([]byte(`"0x01"`), &v1))
- fmt.Println("v2 error:", json.Unmarshal([]byte(`"0x0101010101"`), &v2))
- fmt.Println("v2:", v2)
- // Output:
- // v1 error: hex string has length 2, want 10 for MyType
- // v2 error:
- // v2: 0x0101010101
-}
diff --git a/common/math/big_test.go b/common/math/big_test.go
index c01863024ab..5ef9e13d3ba 100644
--- a/common/math/big_test.go
+++ b/common/math/big_test.go
@@ -19,10 +19,9 @@ package math
import (
"bytes"
"encoding/hex"
+ "github.com/ledgerwatch/erigon-lib/common"
"math/big"
"testing"
-
- "github.com/ledgerwatch/erigon/common"
)
func TestHexOrDecimal256(t *testing.T) {
diff --git a/common/paths/paths.go b/common/paths/paths.go
index 67a4810fcf0..5b2bd1f69be 100644
--- a/common/paths/paths.go
+++ b/common/paths/paths.go
@@ -7,7 +7,7 @@ import (
"runtime"
"strings"
- "github.com/ledgerwatch/erigon/params/networkname"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
"github.com/ledgerwatch/log/v3"
)
@@ -82,10 +82,14 @@ func DataDirForNetwork(datadir string, network string) string {
switch network {
case networkname.DevChainName:
return "" // unless explicitly requested, use memory databases
+ case networkname.HoleskyChainName:
+ return networkDataDirCheckingLegacy(datadir, "holesky")
case networkname.GoerliChainName:
return networkDataDirCheckingLegacy(datadir, "goerli")
case networkname.MumbaiChainName:
return networkDataDirCheckingLegacy(datadir, "mumbai")
+ case networkname.AmoyChainName:
+ return networkDataDirCheckingLegacy(datadir, "amoy")
case networkname.BorMainnetChainName:
return networkDataDirCheckingLegacy(datadir, "bor-mainnet")
case networkname.BorDevnetChainName:
diff --git a/common/sorted.go b/common/sorted.go
deleted file mode 100644
index fb2e665971f..00000000000
--- a/common/sorted.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package common
-
-import (
- "golang.org/x/exp/constraints"
- "golang.org/x/exp/slices"
-)
-
-func SortedKeys[K constraints.Ordered, V any](m map[K]V) []K {
- keys := make([]K, len(m))
- i := 0
- for k := range m {
- keys[i] = k
- i++
- }
- slices.Sort(keys)
- return keys
-}
-
-func RemoveDuplicatesFromSorted[T constraints.Ordered](slice []T) []T {
- for i := 1; i < len(slice); i++ {
- if slice[i] == slice[i-1] {
- slice = append(slice[:i], slice[i+1:]...)
- i--
- }
- }
- return slice
-}
diff --git a/common/types.go b/common/types.go
index e296ed887de..21d58434ccd 100644
--- a/common/types.go
+++ b/common/types.go
@@ -22,14 +22,13 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"reflect"
"strings"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/common/length"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// Lengths of hashes and addresses in bytes.
diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go
index 3f298eb8e31..e4dd33313d8 100644
--- a/consensus/aura/aura.go
+++ b/consensus/aura/aura.go
@@ -31,7 +31,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/clique"
"github.com/ledgerwatch/erigon/consensus/ethash"
@@ -267,7 +266,7 @@ func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) {
StepDuration: auraParams.StepDurations[0],
}
durations = append(durations, durInfo)
- times := common.SortedKeys(auraParams.StepDurations)
+ times := libcommon.SortedKeys(auraParams.StepDurations)
for i := 1; i < len(auraParams.StepDurations); i++ { // skip first
time := times[i]
dur := auraParams.StepDurations[time]
@@ -631,7 +630,9 @@ func (c *AuRa) Prepare(chain consensus.ChainHeaderReader, header *types.Header,
//return nil
}
-func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscallCustom consensus.SysCallCustom) {
+func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
+ state *state.IntraBlockState, syscallCustom consensus.SysCallCustom, logger log.Logger,
+) {
blockNum := header.Number.Uint64()
//Check block gas limit from smart contract, if applicable
@@ -669,7 +670,7 @@ func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReade
epoch, err := c.e.GetEpoch(header.ParentHash, blockNum-1)
if err != nil {
- log.Warn("[aura] initialize block: on epoch begin", "err", err)
+ logger.Warn("[aura] initialize block: on epoch begin", "err", err)
return
}
isEpochBegin := epoch != nil
@@ -678,7 +679,7 @@ func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReade
}
err = c.cfg.Validators.onEpochBegin(isEpochBegin, header, syscall)
if err != nil {
- log.Warn("[aura] initialize block: on epoch begin", "err", err)
+ logger.Warn("[aura] initialize block: on epoch begin", "err", err)
return
}
// check_and_lock_block -> check_epoch_end_signal END (before enact)
@@ -699,7 +700,7 @@ func (c *AuRa) applyRewards(header *types.Header, state *state.IntraBlockState,
// word `signal epoch` == word `pending epoch`
func (c *AuRa) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions,
uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
if err := c.applyRewards(header, state, syscall); err != nil {
return nil, nil, err
@@ -731,7 +732,7 @@ func (c *AuRa) Finalize(config *chain.Config, header *types.Header, state *state
}
if epochEndProof != nil {
c.EpochManager.noteNewEpoch()
- log.Info("[aura] epoch transition", "block_num", header.Number.Uint64())
+ logger.Info("[aura] epoch transition", "block_num", header.Number.Uint64())
if err := c.e.PutEpoch(header.Hash(), header.Number.Uint64(), epochEndProof); err != nil {
return nil, nil, err
}
@@ -837,7 +838,7 @@ func allHeadersUntil(chain consensus.ChainHeaderReader, from *types.Header, to l
//}
// FinalizeAndAssemble implements consensus.Engine
-func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) {
+func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) {
outTxs, outReceipts, err := c.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger)
if err != nil {
return nil, nil, nil, err
diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go
index 537f36a3d07..1772905f68a 100644
--- a/consensus/aura/aura_test.go
+++ b/consensus/aura/aura_test.go
@@ -58,7 +58,7 @@ func TestEmptyBlock(t *testing.T) {
blocks[0] = block
chain := &core.ChainPack{Headers: headers, Blocks: blocks, Receipts: receipts, TopBlock: block}
- err = m.InsertChain(chain, nil)
+ err = m.InsertChain(chain)
require.NoError(err)
}
@@ -105,18 +105,18 @@ func TestAuRaSkipGasLimit(t *testing.T) {
return fakeVal, err
}
require.NotPanics(func() {
- m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, validPreMergeHeader, nil, nil, nil, syscallCustom)
+ m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, validPreMergeHeader, nil, syscallCustom, nil)
})
invalidPreMergeHeader := validPreMergeHeader
invalidPreMergeHeader.GasLimit = 12_123456 //a different, wrong gasLimit
require.Panics(func() {
- m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPreMergeHeader, nil, nil, nil, syscallCustom)
+ m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPreMergeHeader, nil, syscallCustom, nil)
})
invalidPostMergeHeader := invalidPreMergeHeader
invalidPostMergeHeader.Difficulty = big.NewInt(0) //zero difficulty detected as PoS
require.NotPanics(func() {
- m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPostMergeHeader, nil, nil, nil, syscallCustom)
+ m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPostMergeHeader, nil, syscallCustom, nil)
})
}
diff --git a/consensus/aura/contract_abi.go b/consensus/aura/contract_abi.go
index 7f806f515d8..4fff77a2df3 100644
--- a/consensus/aura/contract_abi.go
+++ b/consensus/aura/contract_abi.go
@@ -9,7 +9,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/accounts/abi"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/aura/contracts"
)
@@ -112,7 +111,7 @@ func withdrawalAbi() abi.ABI {
}
func getCertifier(registrar libcommon.Address, syscall consensus.SystemCall) *libcommon.Address {
- hashedKey, err := common.HashData([]byte("service_transaction_checker"))
+ hashedKey, err := libcommon.HashData([]byte("service_transaction_checker"))
if err != nil {
panic(err)
}
diff --git a/consensus/bor/api.go b/consensus/bor/api.go
deleted file mode 100644
index bb8c933c12a..00000000000
--- a/consensus/bor/api.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package bor
-
-import (
- "encoding/hex"
- "math"
- "math/big"
- "sort"
- "strconv"
- "sync"
-
- "github.com/xsleonard/go-merkle"
- "golang.org/x/crypto/sha3"
-
- lru "github.com/hashicorp/golang-lru/arc/v2"
- "github.com/ledgerwatch/erigon-lib/common"
-
- "github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/bor/valset"
- "github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/crypto"
- "github.com/ledgerwatch/erigon/rpc"
-)
-
-var (
- // MaxCheckpointLength is the maximum number of blocks that can be requested for constructing a checkpoint root hash
- MaxCheckpointLength = uint64(math.Pow(2, 15))
-)
-
-// API is a user facing RPC API to allow controlling the signer and voting
-// mechanisms of the proof-of-authority scheme.
-type API struct {
- chain consensus.ChainHeaderReader
- bor *Bor
- rootHashCache *lru.ARCCache[string, string]
-}
-
-// GetSnapshot retrieves the state snapshot at a given block.
-func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) {
- // Retrieve the requested block number (or current if none requested)
- var header *types.Header
- if number == nil || *number == rpc.LatestBlockNumber {
- header = api.chain.CurrentHeader()
- } else {
- header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
- }
- // Ensure we have an actually valid block and return its snapshot
- if header == nil {
- return nil, errUnknownBlock
- }
-
- return api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-}
-
-type BlockSigners struct {
- Signers []difficultiesKV
- Diff int
- Author common.Address
-}
-
-type difficultiesKV struct {
- Signer common.Address
- Difficulty uint64
-}
-
-func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV {
- ss := make([]difficultiesKV, 0, len(values))
- for k, v := range values {
- ss = append(ss, difficultiesKV{k, v})
- }
-
- sort.Slice(ss, func(i, j int) bool {
- return ss[i].Difficulty > ss[j].Difficulty
- })
-
- return ss
-}
-
-// GetSnapshotProposerSequence retrieves the in-turn signers of all sprints in a span
-func (api *API) GetSnapshotProposerSequence(number *rpc.BlockNumber) (BlockSigners, error) {
- snapNumber := *number - 1
-
- var difficulties = make(map[common.Address]uint64)
-
- snap, err := api.GetSnapshot(&snapNumber)
-
- if err != nil {
- return BlockSigners{}, err
- }
-
- proposer := snap.ValidatorSet.GetProposer().Address
- proposerIndex, _ := snap.ValidatorSet.GetByAddress(proposer)
-
- signers := snap.signers()
- for i := 0; i < len(signers); i++ {
- tempIndex := i
- if tempIndex < proposerIndex {
- tempIndex = tempIndex + len(signers)
- }
-
- difficulties[signers[i]] = uint64(len(signers) - (tempIndex - proposerIndex))
- }
-
- rankedDifficulties := rankMapDifficulties(difficulties)
-
- author, err := api.GetAuthor(number)
- if err != nil {
- return BlockSigners{}, err
- }
-
- diff := int(difficulties[*author])
- blockSigners := &BlockSigners{
- Signers: rankedDifficulties,
- Diff: diff,
- Author: *author,
- }
-
- return *blockSigners, nil
-}
-
-// GetSnapshotProposer retrieves the in-turn signer at a given block.
-func (api *API) GetSnapshotProposer(number *rpc.BlockNumber) (common.Address, error) {
- *number -= 1
- snap, err := api.GetSnapshot(number)
-
- if err != nil {
- return common.Address{}, err
- }
-
- return snap.ValidatorSet.GetProposer().Address, nil
-}
-
-// GetAuthor retrieves the author a block.
-func (api *API) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) {
- // Retrieve the requested block number (or current if none requested)
- var header *types.Header
- if number == nil || *number == rpc.LatestBlockNumber {
- header = api.chain.CurrentHeader()
- } else {
- header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
- }
- // Ensure we have an actually valid block and return its snapshot
- if header == nil {
- return nil, errUnknownBlock
- }
-
- author, err := api.bor.Author(header)
-
- return &author, err
-}
-
-// GetSnapshotAtHash retrieves the state snapshot at a given block.
-func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) {
- header := api.chain.GetHeaderByHash(hash)
- if header == nil {
- return nil, errUnknownBlock
- }
-
- return api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-}
-
-// GetSigners retrieves the list of authorized signers at the specified block.
-func (api *API) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) {
- // Retrieve the requested block number (or current if none requested)
- var header *types.Header
- if number == nil || *number == rpc.LatestBlockNumber {
- header = api.chain.CurrentHeader()
- } else {
- header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
- }
- // Ensure we have an actually valid block and return the signers from its snapshot
- if header == nil {
- return nil, errUnknownBlock
- }
-
- snap, err := api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-
- if err != nil {
- return nil, err
- }
-
- return snap.signers(), nil
-}
-
-// GetSignersAtHash retrieves the list of authorized signers at the specified block.
-func (api *API) GetSignersAtHash(hash common.Hash) ([]common.Address, error) {
- header := api.chain.GetHeaderByHash(hash)
- if header == nil {
- return nil, errUnknownBlock
- }
-
- snap, err := api.bor.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil)
-
- if err != nil {
- return nil, err
- }
-
- return snap.signers(), nil
-}
-
-// GetCurrentProposer gets the current proposer
-func (api *API) GetCurrentProposer() (common.Address, error) {
- snap, err := api.GetSnapshot(nil)
- if err != nil {
- return common.Address{}, err
- }
-
- return snap.ValidatorSet.GetProposer().Address, nil
-}
-
-// GetCurrentValidators gets the current validators
-func (api *API) GetCurrentValidators() ([]*valset.Validator, error) {
- snap, err := api.GetSnapshot(nil)
- if err != nil {
- return make([]*valset.Validator, 0), err
- }
-
- return snap.ValidatorSet.Validators, nil
-}
-
-// GetRootHash returns the merkle root of the start to end block headers
-func (api *API) GetRootHash(start uint64, end uint64) (string, error) {
- if err := api.initializeRootHashCache(); err != nil {
- return "", err
- }
-
- key := getRootHashKey(start, end)
-
- if root, known := api.rootHashCache.Get(key); known {
- return root, nil
- }
-
- length := end - start + 1
-
- if length > MaxCheckpointLength {
- return "", &MaxCheckpointLengthExceededError{start, end}
- }
-
- currentHeaderNumber := api.chain.CurrentHeader().Number.Uint64()
-
- if start > end || end > currentHeaderNumber {
- return "", &valset.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber}
- }
-
- blockHeaders := make([]*types.Header, end-start+1)
- wg := new(sync.WaitGroup)
- concurrent := make(chan bool, 20)
-
- for i := start; i <= end; i++ {
- wg.Add(1)
- concurrent <- true
-
- go func(number uint64) {
- blockHeaders[number-start] = api.chain.GetHeaderByNumber(number)
-
- <-concurrent
- wg.Done()
- }(i)
- }
- wg.Wait()
- close(concurrent)
-
- headers := make([][32]byte, NextPowerOfTwo(length))
-
- for i := 0; i < len(blockHeaders); i++ {
- blockHeader := blockHeaders[i]
- header := crypto.Keccak256(AppendBytes32(
- blockHeader.Number.Bytes(),
- new(big.Int).SetUint64(blockHeader.Time).Bytes(),
- blockHeader.TxHash.Bytes(),
- blockHeader.ReceiptHash.Bytes(),
- ))
-
- var arr [32]byte
-
- copy(arr[:], header)
- headers[i] = arr
- }
-
- tree := merkle.NewTreeWithOpts(merkle.TreeOptions{EnableHashSorting: false, DisableHashLeaves: true})
- if err := tree.Generate(Convert(headers), sha3.NewLegacyKeccak256()); err != nil {
- return "", err
- }
-
- root := hex.EncodeToString(tree.Root().Hash)
- api.rootHashCache.Add(key, root)
-
- return root, nil
-}
-
-func (api *API) initializeRootHashCache() error {
- var err error
- if api.rootHashCache == nil {
- api.rootHashCache, err = lru.NewARC[string, string](10)
- }
-
- return err
-}
-
-func getRootHashKey(start uint64, end uint64) string {
- return strconv.FormatUint(start, 10) + "-" + strconv.FormatUint(end, 10)
-}
diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go
index ba5ad331fbf..fe913ed779e 100644
--- a/consensus/bor/bor.go
+++ b/consensus/bor/bor.go
@@ -3,10 +3,12 @@ package bor
import (
"bytes"
"context"
+ "encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
+ "math"
"math/big"
"sort"
"strconv"
@@ -14,9 +16,11 @@ import (
"sync/atomic"
"time"
- "github.com/google/btree"
lru "github.com/hashicorp/golang-lru/arc/v2"
"github.com/ledgerwatch/log/v3"
+ "github.com/xsleonard/go-merkle"
+ "golang.org/x/crypto/sha3"
+ "golang.org/x/sync/errgroup"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -25,18 +29,25 @@ import (
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/bor/clerk"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/flags"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall"
"github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
"github.com/ledgerwatch/erigon/consensus/bor/statefull"
"github.com/ledgerwatch/erigon/consensus/bor/valset"
"github.com/ledgerwatch/erigon/consensus/misc"
+ "github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/crypto/cryptopool"
+ "github.com/ledgerwatch/erigon/eth/ethconfig/estimate"
+ "github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/rpc"
+ "github.com/ledgerwatch/erigon/turbo/services"
)
const (
@@ -57,16 +68,15 @@ var (
"0": 64,
} // Default number of blocks after which to checkpoint and reset the pending votes
- extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
- extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
-
uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
// diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
// diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures
validatorHeaderBytesLength = length.Addr + 20 // address + power
- // systemAddress = libcommon.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE")
+
+ // MaxCheckpointLength is the maximum number of blocks that can be requested for constructing a checkpoint root hash
+ MaxCheckpointLength = uint64(math.Pow(2, 15))
)
// Various error messages to mark blocks invalid. These should be private to
@@ -104,7 +114,7 @@ var (
// errInvalidSpanValidators is returned if a block contains an
// invalid list of validators (i.e. non divisible by 40 bytes).
- errInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
+ ErrInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
errInvalidMixDigest = errors.New("non-zero mix digest")
@@ -125,6 +135,8 @@ var (
errUncleDetected = errors.New("uncles not allowed")
errUnknownValidators = errors.New("unknown validators")
+
+ errUnknownSnapshot = errors.New("unknown snapshot")
)
// SignerFn is a signer callback function to request a header to be signed by a
@@ -132,18 +144,18 @@ var (
type SignerFn func(signer libcommon.Address, mimeType string, message []byte) ([]byte, error)
// ecrecover extracts the Ethereum account address from a signed header.
-func ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *chain.BorConfig) (libcommon.Address, error) {
+func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *chain.BorConfig) (libcommon.Address, error) {
// If the signature's already cached, return that
hash := header.Hash()
if address, known := sigcache.Get(hash); known {
return address, nil
}
// Retrieve the signature from the header extra-data
- if len(header.Extra) < extraSeal {
+ if len(header.Extra) < types.ExtraSealLength {
return libcommon.Address{}, errMissingSignature
}
- signature := header.Extra[len(header.Extra)-extraSeal:]
+ signature := header.Extra[len(header.Extra)-types.ExtraSealLength:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(SealHash(header, c).Bytes(), signature)
@@ -234,9 +246,10 @@ type Bor struct {
chainConfig *chain.Config // Chain config
config *chain.BorConfig // Consensus engine configuration parameters for bor consensus
DB kv.RwDB // Database to store and retrieve snapshot checkpoints
+ blockReader services.FullBlockReader
- recents *lru.ARCCache[libcommon.Hash, *Snapshot] // Snapshots for recent block to speed up reorgs
- signatures *lru.ARCCache[libcommon.Hash, libcommon.Address] // Signatures of recent blocks to speed up mining
+ Recents *lru.ARCCache[libcommon.Hash, *Snapshot] // Snapshots for recent block to speed up reorgs
+ Signatures *lru.ARCCache[libcommon.Hash, libcommon.Address] // Signatures of recent blocks to speed up mining
authorizedSigner atomic.Pointer[signer] // Ethereum address and sign function of the signing key
@@ -244,15 +257,18 @@ type Bor struct {
spanner Spanner
GenesisContractsClient GenesisContract
- HeimdallClient IHeimdallClient
+ HeimdallClient heimdall.IHeimdallClient
// scope event.SubscriptionScope
// The fields below are for testing only
- fakeDiff bool // Skip difficulty verifications
- spanCache *btree.BTree
-
- closeOnce sync.Once
- logger log.Logger
+ fakeDiff bool // Skip difficulty verifications
+
+ closeOnce sync.Once
+ logger log.Logger
+ closeCh chan struct{} // Channel to signal the background processes to exit
+ frozenSnapshotsInit sync.Once
+ rootHashCache *lru.ARCCache[string, string]
+ headerProgress HeaderProgress
}
type signer struct {
@@ -353,8 +369,9 @@ func CalculateSprint(config *chain.BorConfig, number uint64) uint64 {
func New(
chainConfig *chain.Config,
db kv.RwDB,
+ blockReader services.FullBlockReader,
spanner Spanner,
- heimdallClient IHeimdallClient,
+ heimdallClient heimdall.IHeimdallClient,
genesisContracts GenesisContract,
logger log.Logger,
) *Bor {
@@ -369,18 +386,20 @@ func New(
// Allocate the snapshot caches and create the engine
recents, _ := lru.NewARC[libcommon.Hash, *Snapshot](inmemorySnapshots)
signatures, _ := lru.NewARC[libcommon.Hash, libcommon.Address](inmemorySignatures)
+
c := &Bor{
chainConfig: chainConfig,
config: borConfig,
DB: db,
- recents: recents,
- signatures: signatures,
+ blockReader: blockReader,
+ Recents: recents,
+ Signatures: signatures,
spanner: spanner,
GenesisContractsClient: genesisContracts,
HeimdallClient: heimdallClient,
- spanCache: btree.New(32),
execCtx: context.Background(),
logger: logger,
+ closeCh: make(chan struct{}),
}
c.authorizedSigner.Store(&signer{
@@ -393,7 +412,7 @@ func New(
// make sure we can decode all the GenesisAlloc in the BorConfig.
for key, genesisAlloc := range c.config.BlockAlloc {
- if _, err := decodeGenesisAlloc(genesisAlloc); err != nil {
+ if _, err := types.DecodeGenesisAlloc(genesisAlloc); err != nil {
panic(fmt.Sprintf("BUG: Block alloc '%s' in genesis is not correct: %v", key, err))
}
}
@@ -401,22 +420,76 @@ func New(
return c
}
+type rwWrapper struct {
+ kv.RoDB
+}
+
+func (w rwWrapper) Update(ctx context.Context, f func(tx kv.RwTx) error) error {
+ return fmt.Errorf("Update not implemented")
+}
+
+func (w rwWrapper) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) error {
+ return fmt.Errorf("UpdateNosync not implemented")
+}
+
+func (w rwWrapper) BeginRw(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("BeginRw not implemented")
+}
+
+func (w rwWrapper) BeginRwNosync(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("BeginRwNosync not implemented")
+}
+
+// This is used by the rpcdaemon and tests which need read only access to the provided data services
+func NewRo(chainConfig *chain.Config, db kv.RoDB, blockReader services.FullBlockReader, spanner Spanner,
+ genesisContracts GenesisContract, logger log.Logger) *Bor {
+ // get bor config
+ borConfig := chainConfig.Bor
+
+ // Set any missing consensus parameters to their defaults
+ if borConfig != nil && borConfig.CalculateSprint(0) == 0 {
+ borConfig.Sprint = defaultSprintLength
+ }
+
+ recents, _ := lru.NewARC[libcommon.Hash, *Snapshot](inmemorySnapshots)
+ signatures, _ := lru.NewARC[libcommon.Hash, libcommon.Address](inmemorySignatures)
+
+ return &Bor{
+ chainConfig: chainConfig,
+ config: borConfig,
+ DB: rwWrapper{db},
+ blockReader: blockReader,
+ logger: logger,
+ Recents: recents,
+ Signatures: signatures,
+ execCtx: context.Background(),
+ closeCh: make(chan struct{}),
+ }
+}
+
// Type returns underlying consensus engine
func (c *Bor) Type() chain.ConsensusName {
return chain.BorConsensus
}
+type HeaderProgress interface {
+ Progress() uint64
+}
+
+func (c *Bor) HeaderProgress(p HeaderProgress) {
+ c.headerProgress = p
+}
+
// Author implements consensus.Engine, returning the Ethereum address recovered
// from the signature in the header's extra-data section.
// This is thread-safe (only access the header and config (which is never updated),
// as well as signatures, which are lru.ARCCache, which is thread-safe)
func (c *Bor) Author(header *types.Header) (libcommon.Address, error) {
- return ecrecover(header, c.signatures, c.config)
+ return Ecrecover(header, c.Signatures, c.config)
}
// VerifyHeader checks whether a header conforms to the consensus rules.
func (c *Bor) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
-
return c.verifyHeader(chain, header, nil)
}
@@ -458,7 +531,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
return consensus.ErrFutureBlock
}
- if err := validateHeaderExtraField(header.Extra); err != nil {
+ if err := ValidateHeaderExtraField(header.Extra); err != nil {
return err
}
@@ -466,13 +539,13 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
isSprintEnd := isSprintStart(number+1, c.config.CalculateSprint(number))
// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
- signersBytes := len(header.Extra) - extraVanity - extraSeal
+ signersBytes := len(GetValidatorBytes(header, c.config))
if !isSprintEnd && signersBytes != 0 {
return errExtraValidators
}
if isSprintEnd && signersBytes%validatorHeaderBytesLength != 0 {
- return errInvalidSpanValidators
+ return ErrInvalidSpanValidators
}
// Ensure that the mix digest is zero as we don't have fork protection currently
@@ -493,24 +566,26 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head
}
// Verify that the gas limit is <= 2^63-1
- gasCap := uint64(0x7fffffffffffffff)
+ if header.GasLimit > params.MaxGasLimit {
+ return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
+ }
- if header.GasLimit > gasCap {
- return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, gasCap)
+ if header.WithdrawalsHash != nil {
+ return consensus.ErrUnexpectedWithdrawals
}
// All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents)
}
-// validateHeaderExtraField validates that the extra-data contains both the vanity and signature.
+// ValidateHeaderExtraField validates that the extra-data contains both the vanity and signature.
// header.Extra = header.Vanity + header.ProducerBytes (optional) + header.Seal
-func validateHeaderExtraField(extraBytes []byte) error {
- if len(extraBytes) < extraVanity {
+func ValidateHeaderExtraField(extraBytes []byte) error {
+ if len(extraBytes) < types.ExtraVanityLength {
return errMissingVanity
}
- if len(extraBytes) < extraVanity+extraSeal {
+ if len(extraBytes) < types.ExtraVanityLength+types.ExtraSealLength {
return errMissingSignature
}
@@ -560,74 +635,85 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
return err
}
- if header.WithdrawalsHash != nil {
- return consensus.ErrUnexpectedWithdrawals
- }
-
if parent.Time+c.config.CalculatePeriod(number) > header.Time {
return ErrInvalidTimestamp
}
+ return nil
+}
- sprintLength := c.config.CalculateSprint(number)
+func (c *Bor) initFrozenSnapshot(chain consensus.ChainHeaderReader, number uint64, logEvery *time.Ticker) (snap *Snapshot, err error) {
+ c.logger.Info("Initializing frozen snapshots to", "number", number)
+ defer func() {
+ c.logger.Info("Done initializing frozen snapshots to", "number", number, "err", err)
+ }()
- // Verify the validator list match the local contract
- //
- // Note: Here we fetch the data from span instead of contract
- // as done in bor client. The contract (validator set) returns
- // a fixed span for 0th span i.e. 0 - 255 blocks. Hence, the
- // contract data and span data won't match for that. Skip validating
- // for 0th span. TODO: Remove `number > zerothSpanEnd` check
- // once we start fetching validator data from contract.
- if number > zerothSpanEnd && isSprintStart(number+1, sprintLength) {
- producerSet, err := c.spanner.GetCurrentProducers(number+1, c.authorizedSigner.Load().signer, c.getSpanForBlock)
+ // Special handling of the headers in the snapshot
+ zeroHeader := chain.GetHeaderByNumber(0)
- if err != nil {
- return err
- }
+ if zeroHeader != nil {
+ // get checkpoint data
+ hash := zeroHeader.Hash()
- sort.Sort(valset.ValidatorsByAddress(producerSet))
+ // get validators and current span
+ var validators []*valset.Validator
- headerVals, err := valset.ParseValidators(header.Extra[extraVanity : len(header.Extra)-extraSeal])
+ validators, err = c.spanner.GetCurrentValidators(0, c.authorizedSigner.Load().signer, chain)
if err != nil {
- return err
+ return nil, err
}
- if len(producerSet) != len(headerVals) {
- return errInvalidSpanValidators
+ // new snap shot
+ snap = NewSnapshot(c.config, c.Signatures, 0, hash, validators, c.logger)
+
+ if err = snap.Store(c.DB); err != nil {
+ return nil, err
}
- for i, val := range producerSet {
- if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) {
- return errInvalidSpanValidators
+ c.logger.Info("Stored proposer snapshot to disk", "number", 0, "hash", hash)
+
+ g := errgroup.Group{}
+ g.SetLimit(estimate.AlmostAllCPUs())
+ defer g.Wait()
+
+ batchSize := 128 // must be < inmemorySignatures
+ initialHeaders := make([]*types.Header, 0, batchSize)
+
+ for i := uint64(1); i <= number; i++ {
+ header := chain.GetHeaderByNumber(i)
+ {
+ // `snap.apply` bottleneck - is recover of signer.
+ // to speedup: recover signer in background goroutines and save in `sigcache`
+ // `batchSize` < `inmemorySignatures`: means all current batch will fit in cache - and `snap.apply` will find it there.
+ snap := snap
+ g.Go(func() error {
+ _, _ = Ecrecover(header, snap.sigcache, snap.config)
+ return nil
+ })
}
- }
- }
- snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
- if err != nil {
- return err
- }
+ initialHeaders = append(initialHeaders, header)
+ if len(initialHeaders) == cap(initialHeaders) {
+ snap, err = snap.Apply(nil, initialHeaders, c.logger)
- // verify the validator list in the last sprint block
- if isSprintStart(number, sprintLength) {
- // Retrieve the snapshot needed to verify this header and cache it
- parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal]
- validatorsBytes := make([]byte, len(snap.ValidatorSet.Validators)*validatorHeaderBytesLength)
+ if err != nil {
+ return nil, err
+ }
- currentValidators := snap.ValidatorSet.Copy().Validators
- // sort validator by address
- sort.Sort(valset.ValidatorsByAddress(currentValidators))
- for i, validator := range currentValidators {
- copy(validatorsBytes[i*validatorHeaderBytesLength:], validator.HeaderBytes())
+ initialHeaders = initialHeaders[:0]
+ }
+ select {
+ case <-logEvery.C:
+ log.Info("Computing validator proposer prorities (forward)", "blockNum", i)
+ default:
+ }
}
- // len(header.Extra) >= extraVanity+extraSeal has already been validated in validateHeaderExtraField, so this won't result in a panic
- if !bytes.Equal(parentValidatorBytes, validatorsBytes) {
- return &MismatchingValidatorsError{number - 1, validatorsBytes, parentValidatorBytes}
+
+ if snap, err = snap.Apply(nil, initialHeaders, c.logger); err != nil {
+ return nil, err
}
}
- // All basic checks passed, verify the seal and return
- return c.verifySeal(chain, header, parents, snap)
+ return snap, nil
}
// snapshot retrieves the authorization snapshot at a given point in time.
@@ -642,19 +728,17 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash li
//nolint:govet
for snap == nil {
// If an in-memory snapshot was found, use that
- if s, ok := c.recents.Get(hash); ok {
+ if s, ok := c.Recents.Get(hash); ok {
snap = s
-
break
}
// If an on-disk snapshot can be found, use that
if number%snapshotPersistInterval == 0 {
- if s, err := loadSnapshot(c.config, c.signatures, c.DB, hash); err == nil {
+ if s, err := LoadSnapshot(c.config, c.Signatures, c.DB, hash); err == nil {
c.logger.Trace("Loaded snapshot from disk", "number", number, "hash", hash)
snap = s
-
break
}
}
@@ -671,7 +755,12 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash li
parents = parents[:len(parents)-1]
} else {
// No explicit parents (or no more left), reach out to the database
+ if chain == nil {
+ break
+ }
+
header = chain.GetHeader(hash, number)
+
if header == nil {
return nil, consensus.ErrUnknownAncestor
}
@@ -684,7 +773,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash li
headers = append(headers, header)
number, hash = number-1, header.ParentHash
- if number <= chain.FrozenBlocks() {
+ if chain != nil && number < chain.FrozenBlocks() {
break
}
@@ -694,50 +783,21 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash li
default:
}
}
- if snap == nil && number <= chain.FrozenBlocks() {
- // Special handling of the headers in the snapshot
- zeroHeader := chain.GetHeaderByNumber(0)
- if zeroHeader != nil {
- // get checkpoint data
- hash := zeroHeader.Hash()
- // get validators and current span
- validators, err := c.spanner.GetCurrentValidators(1, c.authorizedSigner.Load().signer, c.getSpanForBlock)
- if err != nil {
- return nil, err
- }
+ if snap == nil && chain != nil && number <= chain.FrozenBlocks() {
+ var err error
+ c.frozenSnapshotsInit.Do(func() {
+ snap, err = c.initFrozenSnapshot(chain, number, logEvery)
+ })
- // new snap shot
- snap = newSnapshot(c.config, c.signatures, 0, hash, validators, c.logger)
- if err := snap.store(c.DB); err != nil {
- return nil, err
- }
- c.logger.Info("Stored proposer snapshot to disk", "number", 0, "hash", hash)
- initialHeaders := make([]*types.Header, 0, 128)
- for i := uint64(1); i <= number; i++ {
- header := chain.GetHeaderByNumber(i)
- initialHeaders = append(initialHeaders, header)
- if len(initialHeaders) == cap(initialHeaders) {
- if snap, err = snap.apply(initialHeaders, c.logger); err != nil {
- return nil, err
- }
- initialHeaders = initialHeaders[:0]
- }
- select {
- case <-logEvery.C:
- log.Info("Computing validator proposer prorities (forward)", "blockNum", i)
- default:
- }
- }
- if snap, err = snap.apply(initialHeaders, c.logger); err != nil {
- return nil, err
- }
+ if err != nil {
+ return nil, err
}
}
// check if snapshot is nil
if snap == nil {
- return nil, fmt.Errorf("unknown error while retrieving snapshot at block number %v", number)
+ return nil, fmt.Errorf("%w at block number %v", errUnknownSnapshot, number)
}
// Previous snapshot found, apply any pending headers on top of it
@@ -746,19 +806,19 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash li
}
var err error
- if snap, err = snap.apply(headers, c.logger); err != nil {
+ if snap, err = snap.Apply(nil, headers, c.logger); err != nil {
return nil, err
}
- c.recents.Add(snap.Hash, snap)
+ c.Recents.Add(snap.Hash, snap)
// If we've generated a new persistent snapshot, save to disk
if snap.Number%snapshotPersistInterval == 0 && len(headers) > 0 {
- if err = snap.store(c.DB); err != nil {
+ if err = snap.Store(c.DB); err != nil {
return nil, err
}
- c.logger.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash)
+ c.logger.Trace("Stored proposer snapshot to disk", "number", snap.Number, "hash", snap.Hash)
}
return snap, err
@@ -795,7 +855,7 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header
return errUnknownBlock
}
// Resolve the authorization key and check against signers
- signer, err := ecrecover(header, c.signatures, c.config)
+ signer, err := Ecrecover(header, c.Signatures, c.config)
if err != nil {
return err
}
@@ -854,11 +914,11 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s
header.Difficulty = new(big.Int).SetUint64(snap.Difficulty(c.authorizedSigner.Load().signer))
// Ensure the extra data has all it's components
- if len(header.Extra) < extraVanity {
- header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
+ if len(header.Extra) < types.ExtraVanityLength {
+ header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, types.ExtraVanityLength-len(header.Extra))...)
}
- header.Extra = header.Extra[:extraVanity]
+ header.Extra = header.Extra[:types.ExtraVanityLength]
// get validator set if number
// Note: headers.Extra has producer set and not validator set. The bor
@@ -866,7 +926,11 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s
// where it fetches producers internally. As we fetch data from span
// in Erigon, use directly the `GetCurrentProducers` function.
if isSprintStart(number+1, c.config.CalculateSprint(number)) {
- newValidators, err := c.spanner.GetCurrentProducers(number+1, c.authorizedSigner.Load().signer, c.getSpanForBlock)
+ var spanID uint64
+ if number+1 > zerothSpanEnd {
+ spanID = 1 + (number+1-zerothSpanEnd-1)/spanLength
+ }
+ newValidators, err := c.spanner.GetCurrentProducers(spanID, c.authorizedSigner.Load().signer, chain)
if err != nil {
return errUnknownValidators
}
@@ -874,13 +938,47 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s
// sort validator by address
sort.Sort(valset.ValidatorsByAddress(newValidators))
- for _, validator := range newValidators {
- header.Extra = append(header.Extra, validator.HeaderBytes()...)
+ if c.config.IsParallelUniverse(header.Number.Uint64()) {
+ var tempValidatorBytes []byte
+
+ for _, validator := range newValidators {
+ tempValidatorBytes = append(tempValidatorBytes, validator.HeaderBytes()...)
+ }
+
+ blockExtraData := &BlockExtraData{
+ ValidatorBytes: tempValidatorBytes,
+ TxDependency: nil,
+ }
+
+ blockExtraDataBytes, err := rlp.EncodeToBytes(blockExtraData)
+ if err != nil {
+ log.Error("error while encoding block extra data: %v", err)
+ return fmt.Errorf("error while encoding block extra data: %v", err)
+ }
+
+ header.Extra = append(header.Extra, blockExtraDataBytes...)
+ } else {
+ for _, validator := range newValidators {
+ header.Extra = append(header.Extra, validator.HeaderBytes()...)
+ }
+ }
+ } else if c.config.IsParallelUniverse(header.Number.Uint64()) {
+ blockExtraData := &BlockExtraData{
+ ValidatorBytes: nil,
+ TxDependency: nil,
+ }
+
+ blockExtraDataBytes, err := rlp.EncodeToBytes(blockExtraData)
+ if err != nil {
+ log.Error("error while encoding block extra data: %v", err)
+ return fmt.Errorf("error while encoding block extra data: %v", err)
}
+
+ header.Extra = append(header.Extra, blockExtraDataBytes...)
}
// add extra seal space
- header.Extra = append(header.Extra, make([]byte, extraSeal)...)
+ header.Extra = append(header.Extra, make([]byte, types.ExtraSealLength)...)
// Mix digest is reserved for now, set to empty
header.MixDigest = libcommon.Hash{}
@@ -917,31 +1015,35 @@ func (c *Bor) CalculateRewards(config *chain.Config, header *types.Header, uncle
// rewards given.
func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
- var err error
-
headerNumber := header.Number.Uint64()
+ if withdrawals != nil || header.WithdrawalsHash != nil {
+ return nil, nil, consensus.ErrUnexpectedWithdrawals
+ }
+
if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) {
cx := statefull.ChainContext{Chain: chain, Bor: c}
- // check and commit span
- if err := c.checkAndCommitSpan(state, header, cx, syscall); err != nil {
- c.logger.Error("Error while committing span", "err", err)
- return nil, types.Receipts{}, err
- }
- if c.HeimdallClient != nil {
+ if c.blockReader != nil {
+ // check and commit span
+ if err := c.checkAndCommitSpan(state, header, cx, syscall); err != nil {
+ err := fmt.Errorf("Finalize.checkAndCommitSpan: %w", err)
+ c.logger.Error("[bor] committing span", "err", err)
+ return nil, types.Receipts{}, err
+ }
// commit states
- if err = c.CommitStates(state, header, cx, syscall); err != nil {
- c.logger.Error("Error while committing states", "err", err)
+ if err := c.CommitStates(state, header, cx, syscall); err != nil {
+ err := fmt.Errorf("Finalize.CommitStates: %w", err)
+ c.logger.Error("[bor] Error while committing states", "err", err)
return nil, types.Receipts{}, err
}
}
}
- if err = c.changeContractCodeIfNeeded(headerNumber, state); err != nil {
- c.logger.Error("Error changing contract code", "err", err)
+ if err := c.changeContractCodeIfNeeded(headerNumber, state); err != nil {
+ c.logger.Error("[bor] Error changing contract code", "err", err)
return nil, types.Receipts{}, err
}
@@ -955,31 +1057,16 @@ func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state.
return nil, types.Receipts{}, nil
}
-func decodeGenesisAlloc(i interface{}) (types.GenesisAlloc, error) {
- var alloc types.GenesisAlloc
-
- b, err := json.Marshal(i)
- if err != nil {
- return nil, err
- }
-
- if err := json.Unmarshal(b, &alloc); err != nil {
- return nil, err
- }
-
- return alloc, nil
-}
-
func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.IntraBlockState) error {
for blockNumber, genesisAlloc := range c.config.BlockAlloc {
if blockNumber == strconv.FormatUint(headerNumber, 10) {
- allocs, err := decodeGenesisAlloc(genesisAlloc)
+ allocs, err := types.DecodeGenesisAlloc(genesisAlloc)
if err != nil {
return fmt.Errorf("failed to decode genesis alloc: %v", err)
}
for addr, account := range allocs {
- c.logger.Trace("change contract code", "address", addr)
+ c.logger.Trace("[bor] change contract code", "address", addr)
state.SetCode(addr, account.Code)
}
}
@@ -992,32 +1079,37 @@ func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.Intra
// nor block rewards given, and returns the final block.
func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
) (*types.Block, types.Transactions, types.Receipts, error) {
// stateSyncData := []*types.StateSyncData{}
headerNumber := header.Number.Uint64()
+
+ if withdrawals != nil || header.WithdrawalsHash != nil {
+ return nil, nil, nil, consensus.ErrUnexpectedWithdrawals
+ }
+
if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) {
cx := statefull.ChainContext{Chain: chain, Bor: c}
- // check and commit span
- err := c.checkAndCommitSpan(state, header, cx, syscall)
- if err != nil {
- c.logger.Error("Error while committing span", "err", err)
- return nil, nil, types.Receipts{}, err
- }
-
- if c.HeimdallClient != nil {
+ if c.blockReader != nil {
+ // check and commit span
+ if err := c.checkAndCommitSpan(state, header, cx, syscall); err != nil {
+ err := fmt.Errorf("FinalizeAndAssemble.checkAndCommitSpan: %w", err)
+ c.logger.Error("[bor] committing span", "err", err)
+ return nil, nil, types.Receipts{}, err
+ }
// commit states
- if err = c.CommitStates(state, header, cx, syscall); err != nil {
- c.logger.Error("Error while committing states", "err", err)
+ if err := c.CommitStates(state, header, cx, syscall); err != nil {
+ err := fmt.Errorf("FinalizeAndAssemble.CommitStates: %w", err)
+ c.logger.Error("[bor] committing states", "err", err)
return nil, nil, types.Receipts{}, err
}
}
}
if err := c.changeContractCodeIfNeeded(headerNumber, state); err != nil {
- c.logger.Error("Error changing contract code", "err", err)
+ c.logger.Error("[bor] Error changing contract code", "err", err)
return nil, nil, types.Receipts{}, err
}
@@ -1041,7 +1133,7 @@ func (c *Bor) GenerateSeal(chain consensus.ChainHeaderReader, currnt, parent *ty
}
func (c *Bor) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
- state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) {
+ state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) {
}
// Authorize injects a private key into the consensus engine to mint new blocks
@@ -1066,7 +1158,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.CalculatePeriod(number) == 0 && len(block.Transactions()) == 0 {
- c.logger.Trace("Sealing paused, waiting for transactions")
+ c.logger.Trace("[bor] Sealing paused, waiting for transactions")
return nil
}
@@ -1091,7 +1183,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
}
// Sweet, the protocol permits us to sign the block, wait for our time
- delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple
+ delay := time.Until(time.Unix(int64(header.Time), 0))
// wiggle was already accounted for in header.Time, this is just for logging
wiggle := time.Duration(successionNumber) * time.Duration(c.config.CalculateBackupMultiplier(number)) * time.Second
@@ -1100,20 +1192,26 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
if err != nil {
return err
}
- copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
-
- // Wait until sealing is terminated or delay timeout.
- c.logger.Info("Waiting for slot to sign and propagate", "number", number, "hash", header.Hash, "delay", common.PrettyDuration(delay))
+ copy(header.Extra[len(header.Extra)-types.ExtraSealLength:], sighash)
go func() {
+ // Wait until sealing is terminated or delay timeout.
+ c.logger.Info("[bor] Waiting for slot to sign and propagate", "number", number, "hash", header.Hash, "delay", common.PrettyDuration(delay), "TxCount", block.Transactions().Len(), "Signer", signer)
+
select {
case <-stop:
- c.logger.Info("Discarding sealing operation for block", "number", number)
+ c.logger.Info("[bor] Stopped sealing operation for block", "number", number)
return
case <-time.After(delay):
+
+ if c.headerProgress != nil && c.headerProgress.Progress() >= number {
+ c.logger.Info("Discarding sealing operation for block", "number", number)
+ return
+ }
+
if wiggle > 0 {
c.logger.Info(
- "Sealed out-of-turn",
+ "[bor] Sealed out-of-turn",
"number", number,
"wiggle", common.PrettyDuration(wiggle),
"delay", delay,
@@ -1122,7 +1220,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
)
} else {
c.logger.Info(
- "Sealed in-turn",
+ "[bor] Sealed in-turn",
"number", number,
"delay", delay,
"headerDifficulty", header.Difficulty,
@@ -1139,16 +1237,38 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, block *types.Block, result
return nil
}
+// IsValidator returns true if this instance is the validator for this block
+func (c *Bor) IsValidator(header *types.Header) (bool, error) {
+ number := header.Number.Uint64()
+
+ if number == 0 {
+ return false, nil
+ }
+
+ snap, err := c.snapshot(nil, number-1, header.ParentHash, nil)
+
+ if err != nil {
+ if errors.Is(err, errUnknownSnapshot) {
+ return false, nil
+ }
+
+ return false, err
+ }
+
+ currentSigner := c.authorizedSigner.Load()
+
+ return snap.ValidatorSet.HasAddress(currentSigner.signer), nil
+}
+
// IsProposer returns true if this instance is the proposer for this block
-func (c *Bor) IsProposer(chain consensus.ChainHeaderReader, block *types.Block) (bool, error) {
- header := block.Header()
+func (c *Bor) IsProposer(header *types.Header) (bool, error) {
number := header.Number.Uint64()
if number == 0 {
return false, nil
}
- snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
+ snap, err := c.snapshot(nil, number-1, header.ParentHash, nil)
if err != nil {
return false, err
}
@@ -1185,23 +1305,49 @@ func (c *Bor) IsServiceTransaction(sender libcommon.Address, syscall consensus.S
return false
}
-// APIs implements consensus.Engine, returning the user facing RPC API to allow
-// controlling the signer voting.
+// Depricated: To get the API use jsonrpc.APIList
func (c *Bor) APIs(chain consensus.ChainHeaderReader) []rpc.API {
- return []rpc.API{{
- Namespace: "bor",
- Version: "1.0",
- Service: &API{chain: chain, bor: c},
- Public: false,
- }}
+ return []rpc.API{}
+}
+
+type FinalityAPI interface {
+ GetRootHash(start uint64, end uint64) (string, error)
+}
+
+type FinalityAPIFunc func(start uint64, end uint64) (string, error)
+
+func (f FinalityAPIFunc) GetRootHash(start uint64, end uint64) (string, error) {
+ return f(start, end)
+}
+
+func (c *Bor) Start(chainDB kv.RwDB) {
+ if flags.Milestone {
+ whitelist.RegisterService(c.DB)
+ finality.Whitelist(c.HeimdallClient, c.DB, chainDB, c.blockReader, c.logger,
+ FinalityAPIFunc(func(start uint64, end uint64) (string, error) {
+ ctx := context.Background()
+ tx, err := chainDB.BeginRo(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer tx.Rollback()
+
+ return c.GetRootHash(ctx, tx, start, end)
+ }), c.closeCh)
+ }
}
-// Close implements consensus.Engine. It's a noop for bor as there are no background threads.
func (c *Bor) Close() error {
c.closeOnce.Do(func() {
+ if c.DB != nil {
+ c.DB.Close()
+ }
+
if c.HeimdallClient != nil {
c.HeimdallClient.Close()
}
+ // Close all bg processes
+ close(c.closeCh)
})
return nil
@@ -1248,42 +1394,6 @@ func (c *Bor) needToCommitSpan(currentSpan *span.Span, headerNumber uint64) bool
return false
}
-func (c *Bor) getSpanForBlock(blockNum uint64) (*span.HeimdallSpan, error) {
- c.logger.Debug("Getting span", "for block", blockNum)
- var borSpan *span.HeimdallSpan
- c.spanCache.AscendGreaterOrEqual(&span.HeimdallSpan{Span: span.Span{EndBlock: blockNum}}, func(item btree.Item) bool {
- borSpan = item.(*span.HeimdallSpan)
- return false
- })
-
- if borSpan != nil && borSpan.StartBlock <= blockNum && borSpan.EndBlock >= blockNum {
- return borSpan, nil
- }
-
- // Span with given block block number is not loaded
- // As span has fixed set of blocks (except 0th span), we can
- // formulate it and get the exact ID we'd need to fetch.
- var spanID uint64
- if blockNum > zerothSpanEnd {
- spanID = 1 + (blockNum-zerothSpanEnd-1)/spanLength
- }
-
- c.logger.Info("Span with given block number is not loaded", "fetching span", spanID)
-
- response, err := c.HeimdallClient.Span(c.execCtx, spanID)
- if err != nil {
- return nil, err
- }
- borSpan = response
- c.spanCache.ReplaceOrInsert(borSpan)
-
- for c.spanCache.Len() > 128 {
- c.spanCache.DeleteMin()
- }
-
- return borSpan, nil
-}
-
func (c *Bor) fetchAndCommitSpan(
newSpanID uint64,
state *state.IntraBlockState,
@@ -1302,12 +1412,10 @@ func (c *Bor) fetchAndCommitSpan(
heimdallSpan = *s
} else {
- response, err := c.HeimdallClient.Span(c.execCtx, newSpanID)
- if err != nil {
+ spanJson := chain.Chain.BorSpan(newSpanID)
+ if err := json.Unmarshal(spanJson, &heimdallSpan); err != nil {
return err
}
-
- heimdallSpan = *response
}
// check if chain id matches with heimdall span
@@ -1322,103 +1430,92 @@ func (c *Bor) fetchAndCommitSpan(
return c.spanner.CommitSpan(heimdallSpan, syscall)
}
-// CommitStates commit states
-func (c *Bor) CommitStates(
- state *state.IntraBlockState,
- header *types.Header,
- chain statefull.ChainContext,
- syscall consensus.SystemCall,
-) error {
- fetchStart := time.Now()
- number := header.Number.Uint64()
-
- var (
- lastStateIDBig *big.Int
- from uint64
- to time.Time
- err error
- )
-
- // Explicit condition for Indore fork won't be needed for fetching this
- // as erigon already performs this call on the IBS (Intra block state) of
- // the incoming chain.
- lastStateIDBig, err = c.GenesisContractsClient.LastStateId(syscall)
- if err != nil {
- return err
- }
-
- if c.config.IsIndore(number) {
- stateSyncDelay := c.config.CalculateStateSyncDelay(number)
- to = time.Unix(int64(header.Time-stateSyncDelay), 0)
- } else {
- to = time.Unix(int64(chain.Chain.GetHeaderByNumber(number-c.config.CalculateSprint(number)).Time), 0)
+func (c *Bor) GetRootHash(ctx context.Context, tx kv.Tx, start, end uint64) (string, error) {
+ length := end - start + 1
+ if length > MaxCheckpointLength {
+ return "", &MaxCheckpointLengthExceededError{Start: start, End: end}
}
- lastStateID := lastStateIDBig.Uint64()
- from = lastStateID + 1
-
- c.logger.Info(
- "Fetching state updates from Heimdall",
- "fromID", from,
- "to", to.Format(time.RFC3339),
- )
+ cacheKey := strconv.FormatUint(start, 10) + "-" + strconv.FormatUint(end, 10)
- eventRecords, err := c.HeimdallClient.StateSyncEvents(c.execCtx, lastStateID+1, to.Unix())
- if err != nil {
- return err
+ if c.rootHashCache == nil {
+ c.rootHashCache, _ = lru.NewARC[string, string](100)
}
- if c.config.OverrideStateSyncRecords != nil {
- if val, ok := c.config.OverrideStateSyncRecords[strconv.FormatUint(number, 10)]; ok {
- eventRecords = eventRecords[0:val]
- }
+ if root, known := c.rootHashCache.Get(cacheKey); known {
+ return root, nil
}
- fetchTime := time.Since(fetchStart)
- processStart := time.Now()
- chainID := c.chainConfig.ChainID.String()
-
- for _, eventRecord := range eventRecords {
- if eventRecord.ID <= lastStateID {
- continue
- }
-
- if err := validateEventRecord(eventRecord, number, to, lastStateID, chainID); err != nil {
- c.logger.Error("while validating event record", "block", number, "to", to, "stateID", lastStateID+1, "error", err.Error())
- break
- }
+ header := rawdb.ReadCurrentHeader(tx)
+ var currentHeaderNumber uint64 = 0
+ if header == nil {
+ return "", &valset.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber}
+ }
+ currentHeaderNumber = header.Number.Uint64()
+ if start > end || end > currentHeaderNumber {
+ return "", &valset.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber}
+ }
+ blockHeaders := make([]*types.Header, end-start+1)
+ for number := start; number <= end; number++ {
+ blockHeaders[number-start], _ = c.getHeaderByNumber(ctx, tx, number)
+ }
- if err := c.GenesisContractsClient.CommitState(eventRecord, syscall); err != nil {
- return err
- }
+ headers := make([][32]byte, NextPowerOfTwo(length))
+ for i := 0; i < len(blockHeaders); i++ {
+ blockHeader := blockHeaders[i]
+ header := crypto.Keccak256(AppendBytes32(
+ blockHeader.Number.Bytes(),
+ new(big.Int).SetUint64(blockHeader.Time).Bytes(),
+ blockHeader.TxHash.Bytes(),
+ blockHeader.ReceiptHash.Bytes(),
+ ))
- lastStateID++
+ var arr [32]byte
+ copy(arr[:], header)
+ headers[i] = arr
}
+ tree := merkle.NewTreeWithOpts(merkle.TreeOptions{EnableHashSorting: false, DisableHashLeaves: true})
+ if err := tree.Generate(Convert(headers), sha3.NewLegacyKeccak256()); err != nil {
+ return "", err
+ }
+ root := hex.EncodeToString(tree.Root().Hash)
- processTime := time.Since(processStart)
-
- c.logger.Info("StateSyncData", "number", number, "lastStateID", lastStateID, "total records", len(eventRecords), "fetch time", fetchTime, "process time", processTime)
+ c.rootHashCache.Add(cacheKey, root)
- return nil
+ return root, nil
}
-func validateEventRecord(eventRecord *clerk.EventRecordWithTime, number uint64, to time.Time, lastStateID uint64, chainID string) error {
- // event id should be sequential and event.Time should lie in the range [from, to)
- if lastStateID+1 != eventRecord.ID || eventRecord.ChainID != chainID || !eventRecord.Time.Before(to) {
- return &InvalidStateReceivedError{number, lastStateID, &to, eventRecord}
+func (c *Bor) getHeaderByNumber(ctx context.Context, tx kv.Tx, number uint64) (*types.Header, error) {
+ header, err := c.blockReader.HeaderByNumber(ctx, tx, number)
+ if err != nil {
+ return nil, err
}
+ if header == nil {
+ return nil, fmt.Errorf("[bor] header not found: %d", number)
+ }
+ return header, nil
+}
+// CommitStates commit states
+func (c *Bor) CommitStates(
+ state *state.IntraBlockState,
+ header *types.Header,
+ chain statefull.ChainContext,
+ syscall consensus.SystemCall,
+) error {
+ events := chain.Chain.BorEventsByBlock(header.Hash(), header.Number.Uint64())
+ for _, event := range events {
+ if err := c.GenesisContractsClient.CommitState(event, syscall); err != nil {
+ return err
+ }
+ }
return nil
}
-func (c *Bor) SetHeimdallClient(h IHeimdallClient) {
+func (c *Bor) SetHeimdallClient(h heimdall.IHeimdallClient) {
c.HeimdallClient = h
}
-func (c *Bor) GetCurrentValidators(blockNumber uint64, signer libcommon.Address, getSpanForBlock func(blockNum uint64) (*span.HeimdallSpan, error)) ([]*valset.Validator, error) {
- return c.spanner.GetCurrentValidators(blockNumber, signer, getSpanForBlock)
-}
-
//
// Private methods
//
@@ -1501,7 +1598,7 @@ func getUpdatedValidatorSet(oldValidatorSet *valset.ValidatorSet, newVals []*val
}
if err := v.UpdateWithChangeSet(changes, logger); err != nil {
- logger.Error("Error while updating change set", "error", err)
+ logger.Error("[bor] Error while updating change set", "error", err)
}
return v
@@ -1510,3 +1607,54 @@ func getUpdatedValidatorSet(oldValidatorSet *valset.ValidatorSet, newVals []*val
func isSprintStart(number, sprint uint64) bool {
return number%sprint == 0
}
+
+// In bor, RLP encoding of BlockExtraData will be stored in the Extra field in the header
+type BlockExtraData struct {
+ // Validator bytes of bor
+ ValidatorBytes []byte
+
+ // length of TxDependency -> n (n = number of transactions in the block)
+ // length of TxDependency[i] -> k (k = a whole number)
+ // k elements in TxDependency[i] -> transaction indexes on which transaction i is dependent on
+ TxDependency [][]uint64
+}
+
+// Returns the Block-STM Transaction Dependency from the block header
+func GetTxDependency(b *types.Block) [][]uint64 {
+ tempExtra := b.Extra()
+
+ if len(tempExtra) < types.ExtraVanityLength+types.ExtraSealLength {
+ log.Error("length of extra less is than vanity and seal")
+ return nil
+ }
+
+ var blockExtraData BlockExtraData
+
+ if err := rlp.DecodeBytes(tempExtra[types.ExtraVanityLength:len(tempExtra)-types.ExtraSealLength], &blockExtraData); err != nil {
+ log.Error("error while decoding block extra data", "err", err)
+ return nil
+ }
+
+ return blockExtraData.TxDependency
+}
+
+func GetValidatorBytes(h *types.Header, config *chain.BorConfig) []byte {
+ tempExtra := h.Extra
+
+ if !config.IsParallelUniverse(h.Number.Uint64()) {
+ return tempExtra[types.ExtraVanityLength : len(tempExtra)-types.ExtraSealLength]
+ }
+
+ if len(tempExtra) < types.ExtraVanityLength+types.ExtraSealLength {
+ log.Error("length of extra less is than vanity and seal")
+ return nil
+ }
+
+ var blockExtraData BlockExtraData
+ if err := rlp.DecodeBytes(tempExtra[types.ExtraVanityLength:len(tempExtra)-types.ExtraSealLength], &blockExtraData); err != nil {
+ log.Error("error while decoding block extra data", "err", err)
+ return nil
+ }
+
+ return blockExtraData.ValidatorBytes
+}
diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go
index 3e25fd1c60e..352686e5034 100644
--- a/consensus/bor/bor_test.go
+++ b/consensus/bor/bor_test.go
@@ -2,11 +2,13 @@ package bor_test
import (
"context"
+ "encoding/json"
"fmt"
"math/big"
"testing"
"github.com/ledgerwatch/erigon-lib/chain"
+ "github.com/ledgerwatch/erigon-lib/common"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
@@ -15,6 +17,7 @@ import (
"github.com/ledgerwatch/erigon/consensus/bor/clerk"
"github.com/ledgerwatch/erigon/consensus/bor/contract"
"github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone"
"github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
"github.com/ledgerwatch/erigon/consensus/bor/valset"
"github.com/ledgerwatch/erigon/core"
@@ -102,12 +105,32 @@ func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error)
return 0, fmt.Errorf("TODO")
}
+func (h test_heimdall) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) {
+ return nil, fmt.Errorf("TODO")
+}
+
+func (h test_heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) {
+ return 0, fmt.Errorf("TODO")
+}
+
+func (h test_heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error {
+ return fmt.Errorf("TODO")
+}
+
+func (h test_heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) {
+ return "", fmt.Errorf("TODO")
+}
+
+func (h test_heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error {
+ return fmt.Errorf("TODO")
+}
+
func (h test_heimdall) Close() {}
type test_genesisContract struct {
}
-func (g test_genesisContract) CommitState(event *clerk.EventRecordWithTime, syscall consensus.SystemCall) error {
+func (g test_genesisContract) CommitState(event rlp.RawValue, syscall consensus.SystemCall) error {
return nil
}
@@ -152,9 +175,15 @@ func (r headerReader) GetTd(libcommon.Hash, uint64) *big.Int {
return nil
}
+func (r headerReader) BorSpan(spanId uint64) []byte {
+ b, _ := json.Marshal(&r.validator.heimdall.currentSpan)
+ return b
+}
+
type spanner struct {
*span.ChainSpanner
- currentSpan span.Span
+ validatorAddress common.Address
+ currentSpan span.Span
}
func (c spanner) GetCurrentSpan(_ consensus.SystemCall) (*span.Span, error) {
@@ -166,6 +195,16 @@ func (c *spanner) CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.S
return nil
}
+func (c *spanner) GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) {
+ return []*valset.Validator{
+ {
+ ID: 1,
+ Address: c.validatorAddress,
+ VotingPower: 1000,
+ ProposerPriority: 1,
+ }}, nil
+}
+
type validator struct {
*mock.MockSentry
heimdall *test_heimdall
@@ -179,14 +218,12 @@ func (v validator) generateChain(length int) (*core.ChainPack, error) {
}
func (v validator) IsProposer(block *types.Block) (bool, error) {
- return v.Engine.(*bor.Bor).IsProposer(headerReader{v}, block)
+ return v.Engine.(*bor.Bor).IsProposer(block.Header())
}
func (v validator) sealBlocks(blocks []*types.Block) ([]*types.Block, error) {
sealedBlocks := make([]*types.Block, 0, len(blocks))
- sealResults := make(chan *types.Block)
-
hr := headerReader{v}
for _, block := range blocks {
@@ -200,6 +237,8 @@ func (v validator) sealBlocks(blocks []*types.Block) ([]*types.Block, error) {
header.ParentHash = parent.Hash()
}
+ sealResults := make(chan *types.Block, 1)
+
if err := v.Engine.Seal(hr, block, sealResults, nil); err != nil {
return nil, err
}
@@ -227,17 +266,22 @@ func (v validator) verifyBlocks(blocks []*types.Block) error {
func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*types.Block) validator {
logger := log.Root()
+ validatorKey, _ := crypto.GenerateKey()
+ validatorAddress := crypto.PubkeyToAddress(validatorKey.PublicKey)
bor := bor.New(
heimdall.chainConfig,
memdb.New(""),
- &spanner{span.NewChainSpanner(contract.ValidatorSet(), heimdall.chainConfig, false, logger), span.Span{}},
+ nil, /* blockReader */
+ &spanner{span.NewChainSpanner(contract.ValidatorSet(), heimdall.chainConfig, false, logger), validatorAddress, span.Span{}},
heimdall,
test_genesisContract{},
logger,
)
- validatorKey, _ := crypto.GenerateKey()
- validatorAddress := crypto.PubkeyToAddress(validatorKey.PublicKey)
+ /*fmt.Printf("Private: 0x%s\nPublic: 0x%s\nAddress: %s\n",
+ hex.EncodeToString(crypto.FromECDSA(validatorKey)),
+ hex.EncodeToString(crypto.MarshalPubkey(&validatorKey.PublicKey)),
+ strings.ToLower(validatorAddress.Hex()))*/
if heimdall.validatorSet == nil {
heimdall.validatorSet = valset.NewValidatorSet([]*valset.Validator{
diff --git a/consensus/bor/contract/client.go b/consensus/bor/contract/client.go
index 4fc927aa2f2..09f2ba5a340 100644
--- a/consensus/bor/contract/client.go
+++ b/consensus/bor/contract/client.go
@@ -8,7 +8,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/accounts/abi"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/bor/clerk"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/log/v3"
)
@@ -56,27 +55,8 @@ func NewGenesisContractsClient(
}
}
-func (gc *GenesisContractsClient) CommitState(event *clerk.EventRecordWithTime, syscall consensus.SystemCall) error {
- eventRecord := event.BuildEventRecord()
-
- recordBytes, err := rlp.EncodeToBytes(eventRecord)
- if err != nil {
- return err
- }
-
- const method = "commitState"
-
- t := event.Time.Unix()
-
- data, err := gc.stateReceiverABI.Pack(method, big.NewInt(0).SetInt64(t), recordBytes)
- if err != nil {
- gc.logger.Error("Unable to pack tx for commitState", "err", err)
- return err
- }
-
- gc.logger.Info("→ committing new state", "eventRecord", event.String())
- _, err = syscall(gc.StateReceiverContract, data)
-
+func (gc *GenesisContractsClient) CommitState(event rlp.RawValue, syscall consensus.SystemCall) error {
+ _, err := syscall(gc.StateReceiverContract, event)
return err
}
@@ -85,7 +65,7 @@ func (gc *GenesisContractsClient) LastStateId(syscall consensus.SystemCall) (*bi
data, err := gc.stateReceiverABI.Pack(method)
if err != nil {
- gc.logger.Error("Unable to pack tx for LastStateId", "err", err)
+ gc.logger.Error("[bor] Unable to pack tx for LastStateId", "err", err)
return nil, err
}
diff --git a/consensus/bor/fake.go b/consensus/bor/fake.go
index fb07949bd69..fb79b7642da 100644
--- a/consensus/bor/fake.go
+++ b/consensus/bor/fake.go
@@ -5,7 +5,6 @@ import (
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/systemcontracts"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/log/v3"
)
@@ -14,8 +13,7 @@ type FakeBor struct {
*ethash.FakeEthash
}
-// NewFaker creates a bor consensus engine with a fake FakeEthash +
-// processing of fake bor system contracts
+// NewFaker creates a bor consensus engine with a FakeEthash
func NewFaker() *FakeBor {
return &FakeBor{
FakeEthash: ethash.NewFaker(),
@@ -24,8 +22,7 @@ func NewFaker() *FakeBor {
func (f *FakeBor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
- systemcontracts.UpgradeBuildInSystemContract(config, header.Number, state, logger)
return f.FakeEthash.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger)
}
diff --git a/consensus/bor/finality/api.go b/consensus/bor/finality/api.go
new file mode 100644
index 00000000000..288080e570b
--- /dev/null
+++ b/consensus/bor/finality/api.go
@@ -0,0 +1,56 @@
+package finality
+
+import (
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
+ "github.com/ledgerwatch/erigon/core/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+func GetFinalizedBlockNumber(tx kv.Tx) uint64 {
+ currentBlockNum := rawdb.ReadCurrentHeader(tx)
+
+ service := whitelist.GetWhitelistingService()
+
+ doExist, number, hash := service.GetWhitelistedMilestone()
+ if doExist && number <= currentBlockNum.Number.Uint64() {
+
+ blockHeader := rawdb.ReadHeaderByNumber(tx, number)
+
+ if blockHeader == nil {
+ return 0
+ }
+
+ if blockHeader.Hash() == hash {
+ return number
+ }
+ }
+
+ doExist, number, hash = service.GetWhitelistedCheckpoint()
+ if doExist && number <= currentBlockNum.Number.Uint64() {
+
+ blockHeader := rawdb.ReadHeaderByNumber(tx, number)
+
+ if blockHeader == nil {
+ return 0
+ }
+
+ if blockHeader.Hash() == hash {
+ return number
+ }
+ }
+
+ return 0
+}
+
+// CurrentFinalizedBlock retrieves the current finalized block of the canonical
+// chain. The block is retrieved from the blockchain's internal cache.
+func CurrentFinalizedBlock(tx kv.Tx, number uint64) *types.Block {
+ hash, err := rawdb.ReadCanonicalHash(tx, number)
+ if err != nil || hash == (common.Hash{}) {
+ return nil
+ }
+
+ return rawdb.ReadBlock(tx, hash, number)
+}
diff --git a/consensus/bor/finality/bor_verifier.go b/consensus/bor/finality/bor_verifier.go
new file mode 100644
index 00000000000..a8dde9dc1ce
--- /dev/null
+++ b/consensus/bor/finality/bor_verifier.go
@@ -0,0 +1,165 @@
+// nolint
+package finality
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/ledgerwatch/log/v3"
+
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/generics"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
+ "github.com/ledgerwatch/erigon/core/rawdb"
+)
+
+var (
+ // errMissingBlocks is returned when we don't have the blocks locally, yet.
+ errMissingBlocks = errors.New("missing blocks")
+
+ // errRootHash is returned when we aren't able to calculate the root hash
+ // locally for a range of blocks.
+ errRootHash = errors.New("failed to get local root hash")
+
+ // errHashMismatch is returned when the local hash doesn't match
+ // with the hash of checkpoint/milestone. It is the root hash of blocks
+ // in case of checkpoint and is end block hash in case of milestones.
+ errHashMismatch = errors.New("hash mismatch")
+
+ // errEndBlock is returned when we're unable to fetch a block locally.
+ errEndBlock = errors.New("failed to get end block")
+
+ //Metrics for collecting the rewindLength
+ rewindLengthMeter = metrics.GetOrCreateGauge("chain_autorewind_length")
+)
+
+type borVerifier struct {
+ verify func(ctx context.Context, config *config, start uint64, end uint64, hash string, isCheckpoint bool) (string, error)
+}
+
+func newBorVerifier() *borVerifier {
+ return &borVerifier{borVerify}
+}
+
+func borVerify(ctx context.Context, config *config, start uint64, end uint64, hash string, isCheckpoint bool) (string, error) {
+ roTx, err := config.chainDB.BeginRo(ctx)
+ if err != nil {
+ return hash, err
+ }
+ defer roTx.Rollback()
+
+ str := "milestone"
+ if isCheckpoint {
+ str = "checkpoint"
+ }
+
+ service := whitelist.GetWhitelistingService()
+
+ // check if we have the given blocks
+ currentBlock := rawdb.ReadCurrentBlockNumber(roTx)
+ if currentBlock == nil {
+ log.Debug("[bor] no current block marker yet: syncing...", "incoming", str)
+ return hash, errMissingBlocks
+ }
+
+ head := *currentBlock
+ if head < end {
+ log.Debug("[bor] current head block behind incoming", "block", str, "head", head, "end block", end)
+ return hash, errMissingBlocks
+ }
+
+ var localHash string
+
+ // verify the hash
+ if isCheckpoint {
+ var err error
+
+ // in case of checkpoint get the rootHash
+ localHash, err = config.borAPI.GetRootHash(start, end)
+
+ if err != nil {
+ log.Debug("[bor] Failed to get root hash of given block range while whitelisting checkpoint", "start", start, "end", end, "err", err)
+ return hash, errRootHash
+ }
+ } else {
+ // in case of milestone(isCheckpoint==false) get the hash of endBlock
+ block, err := config.blockReader.BlockByNumber(ctx, roTx, end)
+ if err != nil {
+ log.Debug("[bor] Failed to get end block hash while whitelisting milestone", "number", end, "err", err)
+ return hash, errEndBlock
+ }
+ if block == nil {
+ err := fmt.Errorf("[bor] block not found: %d", end)
+ log.Debug("[bor] Failed to get end block hash while whitelisting milestone", "number", end, "err", err)
+ return hash, err
+ }
+
+ localHash = fmt.Sprintf("%v", block.Hash())[2:]
+ }
+
+ //nolint
+ if localHash != hash {
+
+ if isCheckpoint {
+ log.Warn("[bor] Root hash mismatch while whitelisting checkpoint", "expected", localHash, "got", hash)
+ } else {
+ log.Warn("[bor] End block hash mismatch while whitelisting milestone", "expected", localHash, "got", hash)
+ }
+
+ var (
+ rewindTo uint64
+ doExist bool
+ )
+
+ if doExist, rewindTo, _ = service.GetWhitelistedMilestone(); doExist {
+
+ } else if doExist, rewindTo, _ = service.GetWhitelistedCheckpoint(); doExist {
+
+ } else {
+ if start <= 0 {
+ rewindTo = 0
+ } else {
+ rewindTo = start - 1
+ }
+ }
+
+ if head-rewindTo > 255 {
+ rewindTo = head - 255
+ }
+
+ if isCheckpoint {
+ log.Warn("[bor] Rewinding chain due to checkpoint root hash mismatch", "number", rewindTo)
+ } else {
+ log.Warn("[bor] Rewinding chain due to milestone endblock hash mismatch", "number", rewindTo)
+ }
+
+ rewindBack(head, rewindTo)
+
+ return hash, errHashMismatch
+ }
+
+ // fetch the end block hash
+ block, err := config.blockReader.BlockByNumber(ctx, roTx, end)
+ if err != nil {
+ log.Debug("[bor] Failed to get end block hash while whitelisting", "err", err)
+ return hash, errEndBlock
+ }
+ if block == nil {
+ log.Debug("[bor] Current header behind the end block", "block", end)
+ return hash, errEndBlock
+ }
+
+ hash = fmt.Sprintf("%v", block.Hash())
+
+ return hash, nil
+}
+
+// Stop the miner if the mining process is running and rewind back the chain
+func rewindBack(head uint64, rewindTo uint64) {
+ rewindLengthMeter.SetUint64(head - rewindTo)
+
+ // Chain cannot be rewinded from this routine
+ // hence we are using a shared variable
+ generics.BorMilestoneRewind.Store(&rewindTo)
+}
diff --git a/consensus/bor/finality/flags/flags.go b/consensus/bor/finality/flags/flags.go
new file mode 100644
index 00000000000..97090f4f08b
--- /dev/null
+++ b/consensus/bor/finality/flags/flags.go
@@ -0,0 +1,3 @@
+package flags
+
+var Milestone = true
diff --git a/consensus/bor/finality/generics/generics.go b/consensus/bor/finality/generics/generics.go
new file mode 100644
index 00000000000..d54b26fbbda
--- /dev/null
+++ b/consensus/bor/finality/generics/generics.go
@@ -0,0 +1,22 @@
+package generics
+
+import (
+ "sync/atomic"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+func Empty[T any]() (t T) {
+ return
+}
+
+// BorMilestoneRewind is used as a flag/variable
+// Flag: if equals 0, no rewind according to bor whitelisting service
+// Variable: if not equals 0, rewind chain back to BorMilestoneRewind
+var BorMilestoneRewind atomic.Pointer[uint64]
+
+type Response struct {
+ Headers []*types.Header
+ Hashes []libcommon.Hash
+}
diff --git a/consensus/bor/finality/rawdb/checkpoint.go b/consensus/bor/finality/rawdb/checkpoint.go
new file mode 100644
index 00000000000..eecf3a53236
--- /dev/null
+++ b/consensus/bor/finality/rawdb/checkpoint.go
@@ -0,0 +1,33 @@
+// nolint
+package rawdb
+
+import (
+ "errors"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+)
+
+var (
+ lastCheckpoint = []byte("LastCheckpoint")
+
+ ErrEmptyLastFinality = errors.New("empty response while getting last finality")
+ ErrIncorrectFinality = errors.New("last checkpoint in the DB is incorrect")
+ ErrIncorrectFinalityToStore = errors.New("failed to marshal the last finality struct")
+ ErrDBNotResponding = errors.New("failed to store the last finality struct")
+ ErrIncorrectLockFieldToStore = errors.New("failed to marshal the lockField struct ")
+ ErrIncorrectLockField = errors.New("lock field in the DB is incorrect")
+ ErrIncorrectFutureMilestoneFieldToStore = errors.New("failed to marshal the future milestone field struct ")
+ ErrIncorrectFutureMilestoneField = errors.New("future milestone field in the DB is incorrect")
+)
+
+type Checkpoint struct {
+ Finality
+}
+
+func (c *Checkpoint) clone() *Checkpoint {
+ return &Checkpoint{}
+}
+
+func (c *Checkpoint) block() (uint64, libcommon.Hash) {
+ return c.Block, c.Hash
+}
diff --git a/consensus/bor/finality/rawdb/milestone.go b/consensus/bor/finality/rawdb/milestone.go
new file mode 100644
index 00000000000..d5ac8f49621
--- /dev/null
+++ b/consensus/bor/finality/rawdb/milestone.go
@@ -0,0 +1,250 @@
+package rawdb
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/generics"
+ "github.com/ledgerwatch/log/v3"
+)
+
+var (
+ lastMilestone = []byte("LastMilestone")
+ lockFieldKey = []byte("LockField")
+ futureMilestoneKey = []byte("FutureMilestoneField")
+)
+
+type Finality struct {
+ Block uint64
+ Hash libcommon.Hash
+}
+
+type LockField struct {
+ Val bool
+ Block uint64
+ Hash libcommon.Hash
+ IdList map[string]struct{}
+}
+
+type FutureMilestoneField struct {
+ Order []uint64
+ List map[uint64]libcommon.Hash
+}
+
+func (f *Finality) set(block uint64, hash libcommon.Hash) {
+ f.Block = block
+ f.Hash = hash
+}
+
+type Milestone struct {
+ Finality
+}
+
+func (m *Milestone) clone() *Milestone {
+ return &Milestone{}
+}
+
+func (m *Milestone) block() (uint64, libcommon.Hash) {
+ return m.Block, m.Hash
+}
+
+func ReadFinality[T BlockFinality[T]](db kv.RwDB) (uint64, libcommon.Hash, error) {
+ lastTV, key := getKey[T]()
+
+ var data []byte
+
+ err := db.View(context.Background(), func(tx kv.Tx) error {
+ res, err := tx.GetOne(kv.BorFinality, key)
+ data = res
+ return err
+ })
+
+ if err != nil {
+ return 0, libcommon.Hash{}, fmt.Errorf("%w: empty response for %s", err, string(key))
+ }
+
+ if len(data) == 0 {
+ return 0, libcommon.Hash{}, fmt.Errorf("%w for %s", ErrEmptyLastFinality, string(key))
+ }
+
+ if err = json.Unmarshal(data, lastTV); err != nil {
+ log.Error(fmt.Sprintf("Unable to unmarshal the last %s block number in database", string(key)), "err", err)
+
+ return 0, libcommon.Hash{}, fmt.Errorf("%w(%v) for %s, data %v(%q)",
+ ErrIncorrectFinality, err, string(key), data, string(data))
+ }
+
+ block, hash := lastTV.block()
+
+ return block, hash, nil
+}
+
+func WriteLastFinality[T BlockFinality[T]](db kv.RwDB, block uint64, hash libcommon.Hash) error {
+ lastTV, key := getKey[T]()
+
+ lastTV.set(block, hash)
+
+ enc, err := json.Marshal(lastTV)
+ if err != nil {
+ log.Error(fmt.Sprintf("Failed to marshal the %s struct", string(key)), "err", err)
+
+ return fmt.Errorf("%w: %v for %s struct", ErrIncorrectFinalityToStore, err, string(key))
+ }
+
+ err = db.Update(context.Background(), func(tx kv.RwTx) error {
+ return tx.Put(kv.BorFinality, key, enc)
+ })
+
+ if err != nil {
+ log.Error(fmt.Sprintf("Failed to store the %s struct", string(key)), "err", err)
+
+ return fmt.Errorf("%w: %v for %s struct", ErrDBNotResponding, err, string(key))
+ }
+
+ return nil
+}
+
+type BlockFinality[T any] interface {
+ set(uint64, libcommon.Hash)
+ clone() T
+ block() (uint64, libcommon.Hash)
+}
+
+func getKey[T BlockFinality[T]]() (T, []byte) {
+ lastT := generics.Empty[T]().clone()
+
+ var key []byte
+
+ switch any(lastT).(type) {
+ case *Milestone:
+ key = lastMilestone
+ case *Checkpoint:
+ key = lastCheckpoint
+ }
+
+ return lastT, key
+}
+
+func WriteLockField(db kv.RwDB, val bool, block uint64, hash libcommon.Hash, idListMap map[string]struct{}) error {
+
+ lockField := LockField{
+ Val: val,
+ Block: block,
+ Hash: hash,
+ IdList: idListMap,
+ }
+
+ key := lockFieldKey
+
+ enc, err := json.Marshal(lockField)
+ if err != nil {
+ log.Error("Failed to marshal the lock field struct", "err", err)
+
+ return fmt.Errorf("%w: %v for lock field struct", ErrIncorrectLockFieldToStore, err)
+ }
+
+ err = db.Update(context.Background(), func(tx kv.RwTx) error {
+ return tx.Put(kv.BorFinality, key, enc)
+ })
+
+ if err != nil {
+ log.Error("Failed to store the lock field struct", "err", err)
+
+ return fmt.Errorf("%w: %v for lock field struct", ErrDBNotResponding, err)
+ }
+
+ return nil
+}
+
+func ReadLockField(db kv.RwDB) (bool, uint64, libcommon.Hash, map[string]struct{}, error) {
+ key := lockFieldKey
+ lockField := LockField{}
+
+ var data []byte
+ err := db.View(context.Background(), func(tx kv.Tx) error {
+ res, err := tx.GetOne(kv.BorFinality, key)
+ data = res
+ return err
+ })
+
+ if err != nil {
+ return false, 0, libcommon.Hash{}, nil, fmt.Errorf("%w: empty response for lock field", err)
+ }
+
+ if len(data) == 0 {
+ return false, 0, libcommon.Hash{}, nil, fmt.Errorf("%w for %s", ErrIncorrectLockField, string(key))
+ }
+
+ if err = json.Unmarshal(data, &lockField); err != nil {
+ log.Error(fmt.Sprintf("Unable to unmarshal the lock field in database"), "err", err)
+
+ return false, 0, libcommon.Hash{}, nil, fmt.Errorf("%w(%v) for lock field , data %v(%q)",
+ ErrIncorrectLockField, err, data, string(data))
+ }
+
+ val, block, hash, idList := lockField.Val, lockField.Block, lockField.Hash, lockField.IdList
+
+ return val, block, hash, idList, nil
+}
+
+func WriteFutureMilestoneList(db kv.RwDB, order []uint64, list map[uint64]libcommon.Hash) error {
+ futureMilestoneField := FutureMilestoneField{
+ Order: order,
+ List: list,
+ }
+
+ key := futureMilestoneKey
+
+ enc, err := json.Marshal(futureMilestoneField)
+ if err != nil {
+ log.Error("Failed to marshal the future milestone field struct", "err", err)
+
+ return fmt.Errorf("%w: %v for future milestone field struct", ErrIncorrectFutureMilestoneFieldToStore, err)
+ }
+
+ err = db.Update(context.Background(), func(tx kv.RwTx) error {
+ return tx.Put(kv.BorFinality, key, enc)
+ })
+
+ if err != nil {
+ log.Error("Failed to store the future milestone field struct", "err", err)
+
+ return fmt.Errorf("%w: %v for future milestone field struct", ErrDBNotResponding, err)
+ }
+
+ return nil
+}
+
+func ReadFutureMilestoneList(db kv.RwDB) ([]uint64, map[uint64]libcommon.Hash, error) {
+ key := futureMilestoneKey
+ futureMilestoneField := FutureMilestoneField{}
+
+ var data []byte
+ err := db.View(context.Background(), func(tx kv.Tx) error {
+ res, err := tx.GetOne(kv.BorFinality, key)
+ data = res
+ return err
+ })
+
+ if err != nil {
+ return nil, nil, fmt.Errorf("%w: empty response for future milestone field", err)
+ }
+
+ if len(data) == 0 {
+ return nil, nil, fmt.Errorf("%w for %s", ErrIncorrectLockField, string(key))
+ }
+
+ if err = json.Unmarshal(data, &futureMilestoneField); err != nil {
+ log.Error(fmt.Sprintf("Unable to unmarshal the future milestone field in database"), "err", err)
+
+ return nil, nil, fmt.Errorf("%w(%v) for future milestone field, data %v(%q)",
+ ErrIncorrectFutureMilestoneField, err, data, string(data))
+ }
+
+ order, list := futureMilestoneField.Order, futureMilestoneField.List
+
+ return order, list, nil
+}
diff --git a/consensus/bor/finality/whitelist.go b/consensus/bor/finality/whitelist.go
new file mode 100644
index 00000000000..76abfcc0d35
--- /dev/null
+++ b/consensus/bor/finality/whitelist.go
@@ -0,0 +1,237 @@
+package finality
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/flags"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall"
+ "github.com/ledgerwatch/erigon/turbo/services"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type config struct {
+ heimdall heimdall.IHeimdallClient
+ borDB kv.RwDB
+ chainDB kv.RwDB
+ blockReader services.BlockReader
+ logger log.Logger
+ borAPI BorAPI
+ closeCh chan struct{}
+}
+
+type BorAPI interface {
+ GetRootHash(start uint64, end uint64) (string, error)
+}
+
+func Whitelist(heimdall heimdall.IHeimdallClient, borDB kv.RwDB, chainDB kv.RwDB, blockReader services.BlockReader, logger log.Logger, borAPI BorAPI, closeCh chan struct{}) {
+ if !flags.Milestone {
+ return
+ }
+
+ config := &config{
+ heimdall: heimdall,
+ borDB: borDB,
+ chainDB: chainDB,
+ blockReader: blockReader,
+ logger: logger,
+ borAPI: borAPI,
+ closeCh: closeCh,
+ }
+
+ go startCheckpointWhitelistService(config)
+ go startMilestoneWhitelistService(config)
+ go startNoAckMilestoneService(config)
+ go startNoAckMilestoneByIDService(config)
+}
+
+const (
+ whitelistTimeout = 30 * time.Second
+ noAckMilestoneTimeout = 4 * time.Second
+)
+
+// StartCheckpointWhitelistService starts the goroutine to fetch checkpoints and update the
+// checkpoint whitelist map.
+func startCheckpointWhitelistService(config *config) {
+ const (
+ tickerDuration = 100 * time.Second
+ fnName = "whitelist checkpoint"
+ )
+
+ RetryHeimdallHandler(handleWhitelistCheckpoint, config, tickerDuration, whitelistTimeout, fnName)
+}
+
+// startMilestoneWhitelistService starts the goroutine to fetch milestiones and update the
+// milestone whitelist map.
+func startMilestoneWhitelistService(config *config) {
+ const (
+ tickerDuration = 12 * time.Second
+ fnName = "whitelist milestone"
+ )
+
+ RetryHeimdallHandler(handleMilestone, config, tickerDuration, whitelistTimeout, fnName)
+}
+
+func startNoAckMilestoneService(config *config) {
+ const (
+ tickerDuration = 6 * time.Second
+ fnName = "no-ack-milestone service"
+ )
+
+ RetryHeimdallHandler(handleNoAckMilestone, config, tickerDuration, noAckMilestoneTimeout, fnName)
+}
+
+func startNoAckMilestoneByIDService(config *config) {
+ const (
+ tickerDuration = 1 * time.Minute
+ fnName = "no-ack-milestone-by-id service"
+ )
+
+ RetryHeimdallHandler(handleNoAckMilestoneByID, config, tickerDuration, noAckMilestoneTimeout, fnName)
+}
+
+type heimdallHandler func(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error
+
+func RetryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration time.Duration, timeout time.Duration, fnName string) {
+ retryHeimdallHandler(fn, config, tickerDuration, timeout, fnName)
+}
+
+func retryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration time.Duration, timeout time.Duration, fnName string) {
+ // a shortcut helps with tests and early exit
+ select {
+ case <-config.closeCh:
+ return
+ default:
+ }
+
+ if config.heimdall == nil {
+ config.logger.Error("[bor] engine not available")
+ return
+ }
+
+ // first run for fetching milestones
+ firstCtx, cancel := context.WithTimeout(context.Background(), timeout)
+ err := fn(firstCtx, config.heimdall, config)
+
+ cancel()
+
+ if err != nil {
+ if !errors.Is(err, errMissingBlocks) {
+ config.logger.Warn(fmt.Sprintf("[bor] unable to start the %s service - first run", fnName), "err", err)
+ }
+ }
+
+ ticker := time.NewTicker(tickerDuration)
+ defer ticker.Stop()
+
+ for {
+ defer func() {
+ r := recover()
+ if r != nil {
+ log.Warn(fmt.Sprintf("[bor] service %s- run failed with panic", fnName), "err", r)
+ }
+ }()
+
+ select {
+ case <-ticker.C:
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ err := fn(ctx, config.heimdall, config)
+
+ cancel()
+
+ if err != nil {
+ if errors.Is(err, errMissingBlocks) {
+ config.logger.Debug(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err)
+ } else {
+ config.logger.Warn(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err)
+ }
+ }
+ case <-config.closeCh:
+ return
+ }
+ }
+}
+
+// handleWhitelistCheckpoint handles the checkpoint whitelist mechanism.
+func handleWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error {
+ service := whitelist.GetWhitelistingService()
+
+ // Create a new bor verifier, which will be used to verify checkpoints and milestones
+ verifier := newBorVerifier()
+ blockNum, blockHash, err := fetchWhitelistCheckpoint(ctx, heimdallClient, verifier, config)
+
+ // If the array is empty, we're bound to receive an error. Non-nill error and non-empty array
+ // means that array has partial elements and it failed for some block. We'll add those partial
+ // elements anyway.
+ if err != nil {
+ return err
+ }
+
+ service.ProcessCheckpoint(blockNum, blockHash)
+
+ return nil
+}
+
+// handleMilestone handles the milestone mechanism.
+func handleMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error {
+ service := whitelist.GetWhitelistingService()
+
+ // Create a new bor verifier, which will be used to verify checkpoints and milestones
+ verifier := newBorVerifier()
+ num, hash, err := fetchWhitelistMilestone(ctx, heimdallClient, verifier, config)
+
+ // If the current chain head is behind the received milestone, add it to the future milestone
+ // list. Also, the hash mismatch (end block hash) error will lead to rewind so also
+ // add that milestone to the future milestone list.
+ if errors.Is(err, errMissingBlocks) || errors.Is(err, errHashMismatch) {
+ service.ProcessFutureMilestone(num, hash)
+ return nil
+ }
+
+ if errors.Is(err, heimdall.ErrServiceUnavailable) {
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ service.ProcessMilestone(num, hash)
+
+ return nil
+}
+
+func handleNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error {
+ service := whitelist.GetWhitelistingService()
+ milestoneID, err := fetchNoAckMilestone(ctx, heimdallClient, config.logger)
+
+ if errors.Is(err, heimdall.ErrServiceUnavailable) {
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ service.RemoveMilestoneID(milestoneID)
+
+ return nil
+}
+
+func handleNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.IHeimdallClient, config *config) error {
+ service := whitelist.GetWhitelistingService()
+ milestoneIDs := service.GetMilestoneIDsList()
+
+ for _, milestoneID := range milestoneIDs {
+ err := fetchNoAckMilestoneByID(ctx, heimdallClient, milestoneID, config.logger)
+ if err == nil {
+ service.RemoveMilestoneID(milestoneID)
+ }
+ }
+
+ return nil
+}
diff --git a/consensus/bor/finality/whitelist/checkpoint.go b/consensus/bor/finality/whitelist/checkpoint.go
new file mode 100644
index 00000000000..fc4a1443610
--- /dev/null
+++ b/consensus/bor/finality/whitelist/checkpoint.go
@@ -0,0 +1,49 @@
+package whitelist
+
+import (
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+type checkpoint struct {
+ finality[*rawdb.Checkpoint]
+}
+type checkpointService interface {
+ finalityService
+}
+
+var (
+ //Metrics for collecting the whitelisted milestone number
+ whitelistedCheckpointNumberMeter = metrics.GetOrCreateGauge("chain_checkpoint_latest")
+
+ //Metrics for collecting the number of invalid chains received
+ checkpointChainMeter = metrics.GetOrCreateGauge("chain_checkpoint_isvalidchain")
+)
+
+// IsValidChain checks the validity of chain by comparing it
+// against the local checkpoint entry
+func (w *checkpoint) IsValidChain(currentHeader uint64, chain []*types.Header) bool {
+ w.finality.RLock()
+ defer w.finality.RUnlock()
+
+ res := w.finality.IsValidChain(currentHeader, chain)
+
+ if res {
+ checkpointChainMeter.Inc()
+ } else {
+ checkpointChainMeter.Dec()
+ }
+
+ return res
+}
+
+func (w *checkpoint) Process(block uint64, hash common.Hash) {
+ w.finality.Lock()
+ defer w.finality.Unlock()
+
+ w.finality.Process(block, hash)
+
+ whitelistedCheckpointNumberMeter.SetUint64(block)
+}
diff --git a/consensus/bor/finality/whitelist/finality.go b/consensus/bor/finality/whitelist/finality.go
new file mode 100644
index 00000000000..f1abbbf3df6
--- /dev/null
+++ b/consensus/bor/finality/whitelist/finality.go
@@ -0,0 +1,78 @@
+package whitelist
+
+import (
+ "sync"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type finality[T rawdb.BlockFinality[T]] struct {
+ sync.RWMutex
+ db kv.RwDB
+ Hash common.Hash // Whitelisted Hash, populated by reaching out to heimdall
+ Number uint64 // Number , populated by reaching out to heimdall
+ interval uint64 // Interval, until which we can allow importing
+ doExist bool
+}
+
+type finalityService interface {
+ IsValidChain(currentHeader uint64, chain []*types.Header) bool
+ Get() (bool, uint64, common.Hash)
+ Process(block uint64, hash common.Hash)
+ Purge()
+}
+
+// IsValidChain checks the validity of chain by comparing it
+// against the local checkpoint entry
+func (f *finality[T]) IsValidChain(currentHeader uint64, chain []*types.Header) bool {
+ // Return if we've received empty chain
+ if len(chain) == 0 {
+ return false
+ }
+
+ res := isValidChain(currentHeader, chain, f.doExist, f.Number, f.Hash, f.interval)
+
+ return res
+}
+
+func (f *finality[T]) Process(block uint64, hash common.Hash) {
+ f.doExist = true
+ f.Hash = hash
+ f.Number = block
+
+ err := rawdb.WriteLastFinality[T](f.db, block, hash)
+
+ if err != nil {
+ log.Error("Error in writing whitelist state to db", "err", err)
+ }
+}
+
+// Get returns the existing whitelisted
+// entries of checkpoint of the form (doExist,block number,block hash.)
+func (f *finality[T]) Get() (bool, uint64, common.Hash) {
+ f.RLock()
+ defer f.RUnlock()
+
+ if f.doExist {
+ return f.doExist, f.Number, f.Hash
+ }
+
+ block, hash, err := rawdb.ReadFinality[T](f.db)
+ if err != nil {
+ return false, f.Number, f.Hash
+ }
+
+ return true, block, hash
+}
+
+// Purge purges the whitlisted checkpoint
+func (f *finality[T]) Purge() {
+ f.Lock()
+ defer f.Unlock()
+
+ f.doExist = false
+}
diff --git a/consensus/bor/finality/whitelist/milestone.go b/consensus/bor/finality/whitelist/milestone.go
new file mode 100644
index 00000000000..0d80ed4b5a7
--- /dev/null
+++ b/consensus/bor/finality/whitelist/milestone.go
@@ -0,0 +1,291 @@
+package whitelist
+
+import (
+ "github.com/ledgerwatch/log/v3"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/flags"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+type milestone struct {
+ finality[*rawdb.Milestone]
+
+ LockedMilestoneNumber uint64 // Locked sprint number
+ LockedMilestoneHash common.Hash //Hash for the locked endBlock
+ Locked bool //
+ LockedMilestoneIDs map[string]struct{} //list of milestone ids
+
+ FutureMilestoneList map[uint64]common.Hash // Future Milestone list
+ FutureMilestoneOrder []uint64 // Future Milestone Order
+ MaxCapacity int //Capacity of future Milestone list
+}
+
+type milestoneService interface {
+ finalityService
+
+ GetMilestoneIDsList() []string
+ RemoveMilestoneID(milestoneId string)
+ LockMutex(endBlockNum uint64) bool
+ UnlockMutex(doLock bool, milestoneId string, endBlockNum uint64, endBlockHash common.Hash)
+ UnlockSprint(endBlockNum uint64)
+ ProcessFutureMilestone(num uint64, hash common.Hash)
+}
+
+var (
+ //Metrics for collecting the whitelisted milestone number
+ whitelistedMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_latest")
+
+ //Metrics for collecting the future milestone number
+ futureMilestoneMeter = metrics.GetOrCreateGauge("chain_milestone_future")
+
+ //Metrics for collecting the length of the MilestoneIds map
+ milestoneIdsLengthMeter = metrics.GetOrCreateGauge("chain_milestone_idslength")
+
+ //Metrics for collecting the number of valid chains received
+ milestoneChainMeter = metrics.GetOrCreateGauge("chain_milestone_isvalidchain")
+)
+
+// IsValidChain checks the validity of chain by comparing it
+// against the local milestone entries
+func (m *milestone) IsValidChain(currentHeader uint64, chain []*types.Header) bool {
+ //Checking for the milestone flag
+ if !flags.Milestone {
+ return true
+ }
+
+ m.finality.RLock()
+ defer m.finality.RUnlock()
+
+ var isValid = false
+ defer func() {
+ if isValid {
+ milestoneChainMeter.Inc()
+ } else {
+ milestoneChainMeter.Dec()
+ }
+ }()
+
+ res := m.finality.IsValidChain(currentHeader, chain)
+
+ if !res {
+ isValid = false
+ return false
+ }
+
+ if m.Locked && !m.IsReorgAllowed(chain, m.LockedMilestoneNumber, m.LockedMilestoneHash) {
+ isValid = false
+ return false
+ }
+
+ if !m.IsFutureMilestoneCompatible(chain) {
+ isValid = false
+ return false
+ }
+
+ isValid = true
+ return true
+}
+
+func (m *milestone) Process(block uint64, hash common.Hash) {
+ m.finality.Lock()
+ defer m.finality.Unlock()
+
+ m.finality.Process(block, hash)
+
+ for i := 0; i < len(m.FutureMilestoneOrder); i++ {
+ if m.FutureMilestoneOrder[i] <= block {
+ m.dequeueFutureMilestone()
+ } else {
+ break
+ }
+ }
+
+ whitelistedMilestoneMeter.SetUint64(block)
+
+ m.UnlockSprint(block)
+}
+
+// LockMutex This function will Lock the mutex at the time of voting
+func (m *milestone) LockMutex(endBlockNum uint64) bool {
+ m.finality.Lock()
+
+ if m.doExist && endBlockNum <= m.Number { //if endNum is less than whitelisted milestone, then we won't lock the sprint
+ log.Debug("[bor] endBlockNumber is less than or equal to latesMilestoneNumber", "endBlock Number", endBlockNum, "LatestMilestone Number", m.Number)
+ return false
+ }
+
+ if m.Locked && endBlockNum < m.LockedMilestoneNumber {
+ log.Debug("[bor] endBlockNum is less than locked milestone number", "endBlock Number", endBlockNum, "Locked Milestone Number", m.LockedMilestoneNumber)
+ return false
+ }
+
+ return true
+}
+
+// UnlockMutex This function will unlock the mutex locked in LockMutex
+func (m *milestone) UnlockMutex(doLock bool, milestoneId string, endBlockNum uint64, endBlockHash common.Hash) {
+ m.Locked = m.Locked || doLock
+
+ if doLock {
+ m.UnlockSprint(m.LockedMilestoneNumber)
+ m.Locked = true
+ m.LockedMilestoneHash = endBlockHash
+ m.LockedMilestoneNumber = endBlockNum
+ m.LockedMilestoneIDs[milestoneId] = struct{}{}
+ }
+
+ err := rawdb.WriteLockField(m.db, m.Locked, m.LockedMilestoneNumber, m.LockedMilestoneHash, m.LockedMilestoneIDs)
+ if err != nil {
+ log.Error("Error in writing lock data of milestone to db", "err", err)
+ }
+
+ milestoneIdsLengthMeter.SetInt(len(m.LockedMilestoneIDs))
+
+ m.finality.Unlock()
+}
+
+// UnlockSprint This function will unlock the locked sprint
+func (m *milestone) UnlockSprint(endBlockNum uint64) {
+ if endBlockNum < m.LockedMilestoneNumber {
+ return
+ }
+
+ m.Locked = false
+ m.purgeMilestoneIDsList()
+
+ err := rawdb.WriteLockField(m.db, m.Locked, m.LockedMilestoneNumber, m.LockedMilestoneHash, m.LockedMilestoneIDs)
+
+ if err != nil {
+ log.Error("[bor] Error in writing lock data of milestone to db", "err", err)
+ }
+}
+
+// RemoveMilestoneID This function will remove the stored milestoneID
+func (m *milestone) RemoveMilestoneID(milestoneId string) {
+ m.finality.Lock()
+ defer m.finality.Unlock()
+
+ delete(m.LockedMilestoneIDs, milestoneId)
+
+ if len(m.LockedMilestoneIDs) == 0 {
+ m.Locked = false
+ }
+
+ err := rawdb.WriteLockField(m.db, m.Locked, m.LockedMilestoneNumber, m.LockedMilestoneHash, m.LockedMilestoneIDs)
+
+ if err != nil {
+ log.Error("[bor] Error in writing lock data of milestone to db", "err", err)
+ }
+
+}
+
+// IsReorgAllowed This will check whether the incoming chain matches the locked sprint hash
+func (m *milestone) IsReorgAllowed(chain []*types.Header, lockedMilestoneNumber uint64, lockedMilestoneHash common.Hash) bool {
+ if chain[len(chain)-1].Number.Uint64() <= lockedMilestoneNumber { //Can't reorg if the end block of incoming
+ return false //chain is less than locked sprint number
+ }
+
+ for i := 0; i < len(chain); i++ {
+ if chain[i].Number.Uint64() == lockedMilestoneNumber {
+ return chain[i].Hash() == lockedMilestoneHash
+ }
+ }
+
+ return true
+}
+
+// GetMilestoneIDsList This will return the list of milestoneIDs stored.
+func (m *milestone) GetMilestoneIDsList() []string {
+ m.finality.RLock()
+ defer m.finality.RUnlock()
+
+ // fixme: use generics :)
+ keys := make([]string, 0, len(m.LockedMilestoneIDs))
+ for key := range m.LockedMilestoneIDs {
+ keys = append(keys, key)
+ }
+
+ return keys
+}
+
+// This is remove the milestoneIDs stored in the list.
+func (m *milestone) purgeMilestoneIDsList() {
+ m.LockedMilestoneIDs = make(map[string]struct{})
+}
+
+func (m *milestone) IsFutureMilestoneCompatible(chain []*types.Header) bool {
+ //Tip of the received chain
+ chainTipNumber := chain[len(chain)-1].Number.Uint64()
+
+ for i := len(m.FutureMilestoneOrder) - 1; i >= 0; i-- {
+ //Finding out the highest future milestone number
+ //which is less or equal to received chain tip
+ if chainTipNumber >= m.FutureMilestoneOrder[i] {
+ //Looking for the received chain 's particular block number(matching future milestone number)
+ for j := len(chain) - 1; j >= 0; j-- {
+ if chain[j].Number.Uint64() == m.FutureMilestoneOrder[i] {
+ endBlockNum := m.FutureMilestoneOrder[i]
+ endBlockHash := m.FutureMilestoneList[endBlockNum]
+
+ //Checking the received chain matches with future milestone
+ return chain[j].Hash() == endBlockHash
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+func (m *milestone) ProcessFutureMilestone(num uint64, hash common.Hash) {
+ if len(m.FutureMilestoneOrder) < m.MaxCapacity {
+ m.enqueueFutureMilestone(num, hash)
+ }
+
+ if num < m.LockedMilestoneNumber {
+ return
+ }
+
+ m.Locked = false
+ m.purgeMilestoneIDsList()
+
+ err := rawdb.WriteLockField(m.db, m.Locked, m.LockedMilestoneNumber, m.LockedMilestoneHash, m.LockedMilestoneIDs)
+
+ if err != nil {
+ log.Error("[bor] Error in writing lock data of milestone to db", "err", err)
+ }
+}
+
+// EnqueueFutureMilestone add the future milestone to the list
+func (m *milestone) enqueueFutureMilestone(key uint64, hash common.Hash) {
+ if _, ok := m.FutureMilestoneList[key]; ok {
+ log.Debug("[bor] Future milestone already exist", "endBlockNumber", key, "futureMilestoneHash", hash)
+ return
+ }
+
+ log.Debug("[bor] Enqueing new future milestone", "endBlockNumber", key, "futureMilestoneHash", hash)
+
+ m.FutureMilestoneList[key] = hash
+ m.FutureMilestoneOrder = append(m.FutureMilestoneOrder, key)
+
+ err := rawdb.WriteFutureMilestoneList(m.db, m.FutureMilestoneOrder, m.FutureMilestoneList)
+ if err != nil {
+ log.Error("[bor] Error in writing future milestone data to db", "err", err)
+ }
+
+ futureMilestoneMeter.SetUint64(key)
+}
+
+// DequeueFutureMilestone remove the future milestone entry from the list.
+func (m *milestone) dequeueFutureMilestone() {
+ delete(m.FutureMilestoneList, m.FutureMilestoneOrder[0])
+ m.FutureMilestoneOrder = m.FutureMilestoneOrder[1:]
+
+ err := rawdb.WriteFutureMilestoneList(m.db, m.FutureMilestoneOrder, m.FutureMilestoneList)
+ if err != nil {
+ log.Error("[bor] Error in writing future milestone data to db", "err", err)
+ }
+}
diff --git a/consensus/bor/finality/whitelist/service.go b/consensus/bor/finality/whitelist/service.go
new file mode 100644
index 00000000000..7bf7aa89819
--- /dev/null
+++ b/consensus/bor/finality/whitelist/service.go
@@ -0,0 +1,191 @@
+package whitelist
+
+import (
+ "errors"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+)
+
+var (
+ ErrMismatch = errors.New("mismatch error")
+ ErrNoRemote = errors.New("remote peer doesn't have a target block number")
+)
+
+type Service struct {
+ checkpointService
+ milestoneService
+}
+
+var ws *Service
+
+func RegisterService(db kv.RwDB) {
+ ws = NewService(db)
+}
+
+func GetWhitelistingService() *Service {
+ return ws
+}
+
+func NewService(db kv.RwDB) *Service {
+ var checkpointDoExist = true
+ checkpointNumber, checkpointHash, err := rawdb.ReadFinality[*rawdb.Checkpoint](db)
+
+ if err != nil {
+ checkpointDoExist = false
+ }
+
+ var milestoneDoExist = true
+
+ milestoneNumber, milestoneHash, err := rawdb.ReadFinality[*rawdb.Milestone](db)
+ if err != nil {
+ milestoneDoExist = false
+ }
+
+ locked, lockedMilestoneNumber, lockedMilestoneHash, lockedMilestoneIDs, err := rawdb.ReadLockField(db)
+ if err != nil || !locked {
+ locked = false
+ lockedMilestoneIDs = make(map[string]struct{})
+ }
+
+ order, list, err := rawdb.ReadFutureMilestoneList(db)
+ if err != nil {
+ order = make([]uint64, 0)
+ list = make(map[uint64]common.Hash)
+ }
+
+ return &Service{
+ &checkpoint{
+ finality[*rawdb.Checkpoint]{
+ doExist: checkpointDoExist,
+ Number: checkpointNumber,
+ Hash: checkpointHash,
+ interval: 256,
+ db: db,
+ },
+ },
+
+ &milestone{
+ finality: finality[*rawdb.Milestone]{
+ doExist: milestoneDoExist,
+ Number: milestoneNumber,
+ Hash: milestoneHash,
+ interval: 256,
+ db: db,
+ },
+
+ Locked: locked,
+ LockedMilestoneNumber: lockedMilestoneNumber,
+ LockedMilestoneHash: lockedMilestoneHash,
+ LockedMilestoneIDs: lockedMilestoneIDs,
+ FutureMilestoneList: list,
+ FutureMilestoneOrder: order,
+ MaxCapacity: 10,
+ },
+ }
+}
+
+func (s *Service) PurgeWhitelistedCheckpoint() error {
+ s.checkpointService.Purge()
+ return nil
+}
+
+func (s *Service) PurgeWhitelistedMilestone() error {
+ s.milestoneService.Purge()
+ return nil
+}
+
+func (s *Service) GetWhitelistedCheckpoint() (bool, uint64, common.Hash) {
+ return s.checkpointService.Get()
+}
+
+func (s *Service) GetWhitelistedMilestone() (bool, uint64, common.Hash) {
+ return s.milestoneService.Get()
+}
+
+func (s *Service) ProcessMilestone(endBlockNum uint64, endBlockHash common.Hash) {
+ s.milestoneService.Process(endBlockNum, endBlockHash)
+}
+
+func (s *Service) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) {
+ s.checkpointService.Process(endBlockNum, endBlockHash)
+}
+
+func (s *Service) IsValidChain(currentHeader uint64, chain []*types.Header) bool {
+ checkpointBool := s.checkpointService.IsValidChain(currentHeader, chain)
+ if !checkpointBool {
+ return checkpointBool
+ }
+
+ milestoneBool := s.milestoneService.IsValidChain(currentHeader, chain)
+ if !milestoneBool {
+ return milestoneBool
+ }
+
+ return true
+}
+
+func (s *Service) GetMilestoneIDsList() []string {
+ return s.milestoneService.GetMilestoneIDsList()
+}
+
+func splitChain(current uint64, chain []*types.Header) ([]*types.Header, []*types.Header) {
+ var (
+ pastChain []*types.Header
+ futureChain []*types.Header
+ first = chain[0].Number.Uint64()
+ last = chain[len(chain)-1].Number.Uint64()
+ )
+
+ if current >= first {
+ if len(chain) == 1 || current >= last {
+ pastChain = chain
+ } else {
+ pastChain = chain[:current-first+1]
+ }
+ }
+
+ if current < last {
+ if len(chain) == 1 || current < first {
+ futureChain = chain
+ } else {
+ futureChain = chain[current-first+1:]
+ }
+ }
+
+ return pastChain, futureChain
+}
+
+func isValidChain(currentHeader uint64, chain []*types.Header, doExist bool, number uint64, hash common.Hash, interval uint64) bool {
+ // Check if we have milestone to validate incoming chain in memory
+ if !doExist {
+ // We don't have any entry, no additional validation will be possible
+ return true
+ }
+
+ // Check if imported chain is less than whitelisted number
+ if chain[len(chain)-1].Number.Uint64() < number {
+ if currentHeader >= number { //If current tip of the chain is greater than whitelist number then return false
+ return false
+ } else {
+ return true
+ }
+ }
+
+ // Split the chain into past and future chain
+ pastChain, _ := splitChain(currentHeader, chain)
+
+ // Iterate over the chain and validate against the last milestone
+ // It will handle all cases when the incoming chain has atleast one milestone
+ for i := len(pastChain) - 1; i >= 0; i-- {
+ if pastChain[i].Number.Uint64() == number {
+ res := pastChain[i].Hash() == hash
+
+ return res
+ }
+ }
+
+ return true
+}
diff --git a/consensus/bor/finality/whitelist/service_test.go b/consensus/bor/finality/whitelist/service_test.go
new file mode 100644
index 00000000000..0a45e6fe712
--- /dev/null
+++ b/consensus/bor/finality/whitelist/service_test.go
@@ -0,0 +1,944 @@
+package whitelist
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/rawdb"
+ "github.com/ledgerwatch/erigon/core/types"
+ "github.com/stretchr/testify/require"
+
+ "pgregory.net/rapid"
+)
+
+// NewMockService creates a new mock whitelist service
+func NewMockService(db kv.RwDB) *Service {
+ return &Service{
+
+ &checkpoint{
+ finality[*rawdb.Checkpoint]{
+ doExist: false,
+ interval: 256,
+ db: db,
+ },
+ },
+
+ &milestone{
+ finality: finality[*rawdb.Milestone]{
+ doExist: false,
+ interval: 256,
+ db: db,
+ },
+ LockedMilestoneIDs: make(map[string]struct{}),
+ FutureMilestoneList: make(map[uint64]libcommon.Hash),
+ FutureMilestoneOrder: make([]uint64, 0),
+ MaxCapacity: 10,
+ },
+ }
+}
+
+// TestWhitelistCheckpoint checks the checkpoint whitelist setter and getter functions.
+func TestWhitelistedCheckpoint(t *testing.T) {
+ t.Parallel()
+
+ db := memdb.NewTestDB(t)
+
+ //Creating the service for the whitelisting the checkpoints
+ s := NewMockService(db)
+
+ cp := s.checkpointService.(*checkpoint)
+
+ require.Equal(t, cp.doExist, false, "expected false as no cp exist at this point")
+
+ _, _, err := rawdb.ReadFinality[*rawdb.Checkpoint](db)
+ require.NotNil(t, err, "Error should be nil while reading from the db")
+ //Adding the checkpoint
+ s.ProcessCheckpoint(11, libcommon.Hash{})
+
+ require.Equal(t, cp.doExist, true, "expected true as cp exist")
+
+ //Removing the checkpoint
+ s.PurgeWhitelistedCheckpoint()
+
+ require.Equal(t, cp.doExist, false, "expected false as no cp exist at this point")
+
+ //Adding the checkpoint
+ s.ProcessCheckpoint(12, libcommon.Hash{1})
+
+ // //Receiving the stored checkpoint
+ doExist, number, hash := s.GetWhitelistedCheckpoint()
+
+ // //Validating the values received
+ require.Equal(t, doExist, true, "expected true ascheckpoint exist at this point")
+ require.Equal(t, number, uint64(12), "expected number to be 11 but got", number)
+ require.Equal(t, hash, libcommon.Hash{1}, "expected the 1 hash but got", hash)
+ require.NotEqual(t, hash, libcommon.Hash{}, "expected the hash to be different from zero hash")
+
+ s.PurgeWhitelistedCheckpoint()
+ doExist, number, hash = s.GetWhitelistedCheckpoint()
+ //Validating the values received from the db, not memory
+ require.Equal(t, doExist, true, "expected true ascheckpoint exist at this point")
+ require.Equal(t, number, uint64(12), "expected number to be 11 but got", number)
+ require.Equal(t, hash, libcommon.Hash{1}, "expected the 1 hash but got", hash)
+ require.NotEqual(t, hash, libcommon.Hash{}, "expected the hash to be different from zero hash")
+
+ checkpointNumber, checkpointHash, err := rawdb.ReadFinality[*rawdb.Checkpoint](db)
+ require.Nil(t, err, "Error should be nil while reading from the db")
+ require.Equal(t, checkpointHash, libcommon.Hash{1}, "expected the 1 hash but got", hash)
+ require.Equal(t, checkpointNumber, uint64(12), "expected number to be 11 but got", number)
+}
+
+// TestMilestone checks the milestone whitelist setter and getter functions
+func TestMilestone(t *testing.T) {
+ t.Parallel()
+
+ db := memdb.NewTestDB(t)
+
+ s := NewMockService(db)
+
+ milestone := s.milestoneService.(*milestone)
+
+ //Checking for the variables when no milestone is Processed
+ require.Equal(t, milestone.doExist, false, "expected false as no milestone exist at this point")
+ require.Equal(t, milestone.Locked, false, "expected false as it was not locked")
+ require.Equal(t, milestone.LockedMilestoneNumber, uint64(0), "expected 0 as it was not initialized")
+
+ _, _, err := rawdb.ReadFinality[*rawdb.Milestone](db)
+ require.NotNil(t, err, "Error should be nil while reading from the db")
+
+ //Acquiring the mutex lock
+ milestone.LockMutex(11)
+ require.Equal(t, milestone.Locked, false, "expected false as sprint is not locked till this point")
+
+ //Releasing the mutex lock
+ milestone.UnlockMutex(true, "milestoneID1", uint64(11), common.Hash{})
+ require.Equal(t, milestone.LockedMilestoneNumber, uint64(11), "expected 11 as it was not initialized")
+ require.Equal(t, milestone.Locked, true, "expected true as sprint is locked now")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 1, "expected 1 as only 1 milestoneID has been entered")
+
+ _, ok := milestone.LockedMilestoneIDs["milestoneID1"]
+ require.True(t, ok, "milestoneID1 should exist in the LockedMilestoneIDs map")
+
+ _, ok = milestone.LockedMilestoneIDs["milestoneID2"]
+ require.False(t, ok, "milestoneID2 shouldn't exist in the LockedMilestoneIDs map")
+
+ milestone.LockMutex(11)
+ milestone.UnlockMutex(true, "milestoneID2", uint64(11), common.Hash{})
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 1, "expected 1 as only 1 milestoneID has been entered")
+
+ _, ok = milestone.LockedMilestoneIDs["milestoneID2"]
+ require.True(t, ok, "milestoneID2 should exist in the LockedMilestoneIDs map")
+
+ milestone.RemoveMilestoneID("milestoneID1")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 1, "expected 1 as one out of two has been removed in previous step")
+ require.Equal(t, milestone.Locked, true, "expected true as sprint is locked now")
+
+ milestone.RemoveMilestoneID("milestoneID2")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 0, "expected 1 as both the milestonesIDs has been removed in previous step")
+ require.Equal(t, milestone.Locked, false, "expected false")
+
+ milestone.LockMutex(11)
+ milestone.UnlockMutex(true, "milestoneID3", uint64(11), common.Hash{})
+ require.True(t, milestone.Locked, "expected true")
+ require.Equal(t, milestone.LockedMilestoneNumber, uint64(11), "Expected 11")
+
+ milestone.LockMutex(15)
+ require.True(t, milestone.Locked, "expected true")
+ require.Equal(t, milestone.LockedMilestoneNumber, uint64(11), "Expected 11")
+ milestone.UnlockMutex(true, "milestoneID4", uint64(15), common.Hash{})
+ require.True(t, milestone.Locked, "expected true as final confirmation regarding the lock has been made")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 1, "expected 1 as previous milestonesIDs has been removed in previous step")
+
+ //Adding the milestone
+ s.ProcessMilestone(11, common.Hash{})
+
+ require.True(t, milestone.Locked, "expected true as locked sprint is of number 15")
+ require.Equal(t, milestone.doExist, true, "expected true as milestone exist")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 1, "expected 1 as still last milestone of sprint number 15 exist")
+
+ //Reading from the Db
+ locked, lockedMilestoneNumber, lockedMilestoneHash, lockedMilestoneIDs, err := rawdb.ReadLockField(db)
+
+ require.Nil(t, err)
+ require.True(t, locked, "expected true as locked sprint is of number 15")
+ require.Equal(t, lockedMilestoneNumber, uint64(15), "Expected 15")
+ require.Equal(t, lockedMilestoneHash, common.Hash{}, "Expected", common.Hash{})
+ require.Equal(t, len(lockedMilestoneIDs), 1, "expected 1 as still last milestone of sprint number 15 exist")
+
+ _, ok = lockedMilestoneIDs["milestoneID4"]
+ require.True(t, ok, "expected true as milestoneIDList should contain 'milestoneID4'")
+
+ //Asking the lock for sprintNumber less than last whitelisted milestone
+ require.False(t, milestone.LockMutex(11), "Cant lock the sprintNumber less than equal to latest whitelisted milestone")
+ milestone.UnlockMutex(false, "", uint64(11), common.Hash{}) //Unlock is required after every lock to release the mutex
+
+ //Adding the milestone
+ s.ProcessMilestone(51, common.Hash{})
+ require.False(t, milestone.Locked, "expected false as lock from sprint number 15 is removed")
+ require.Equal(t, milestone.doExist, true, "expected true as milestone exist")
+ require.Equal(t, len(milestone.LockedMilestoneIDs), 0, "expected 0 as all the milestones have been removed")
+
+ //Reading from the Db
+ locked, _, _, lockedMilestoneIDs, err = rawdb.ReadLockField(db)
+
+ require.Nil(t, err)
+ require.False(t, locked, "expected true as locked sprint is of number 15")
+ require.Equal(t, len(lockedMilestoneIDs), 0, "expected 0 as milestoneID exist in the map")
+
+ //Removing the milestone
+ s.PurgeWhitelistedMilestone()
+
+ require.Equal(t, milestone.doExist, false, "expected false as no milestone exist at this point")
+
+ //Removing the milestone
+ s.ProcessMilestone(11, common.Hash{1})
+
+ doExist, number, hash := s.GetWhitelistedMilestone()
+
+ //validating the values received
+ require.Equal(t, doExist, true, "expected true as milestone exist at this point")
+ require.Equal(t, number, uint64(11), "expected number to be 11 but got", number)
+ require.Equal(t, hash, common.Hash{1}, "expected the 1 hash but got", hash)
+
+ s.PurgeWhitelistedMilestone()
+ doExist, number, hash = s.GetWhitelistedMilestone()
+
+ //Validating the values received from the db, not memory
+ require.Equal(t, doExist, true, "expected true as milestone exist at this point")
+ require.Equal(t, number, uint64(11), "expected number to be 11 but got", number)
+ require.Equal(t, hash, common.Hash{1}, "expected the 1 hash but got", hash)
+
+ milestoneNumber, milestoneHash, err := rawdb.ReadFinality[*rawdb.Milestone](db)
+ require.Nil(t, err, "Error should be nil while reading from the db")
+ require.Equal(t, milestoneHash, common.Hash{1}, "expected the 1 hash but got", hash)
+ require.Equal(t, milestoneNumber, uint64(11), "expected number to be 11 but got", number)
+
+ _, _, err = rawdb.ReadFutureMilestoneList(db)
+ require.NotNil(t, err, "Error should be not nil")
+
+ s.ProcessFutureMilestone(16, common.Hash{16})
+ require.Equal(t, len(milestone.FutureMilestoneOrder), 1, "expected length is 1 as we added only 1 future milestone")
+ require.Equal(t, milestone.FutureMilestoneOrder[0], uint64(16), "expected value is 16 but got", milestone.FutureMilestoneOrder[0])
+ require.Equal(t, milestone.FutureMilestoneList[16], common.Hash{16}, "expected value is", common.Hash{16}.String()[2:], "but got", milestone.FutureMilestoneList[16])
+
+ order, list, err := rawdb.ReadFutureMilestoneList(db)
+ require.Nil(t, err, "Error should be nil while reading from the db")
+ require.Equal(t, len(order), 1, "expected the 1 hash but got", len(order))
+ require.Equal(t, order[0], uint64(16), "expected number to be 16 but got", order[0])
+ require.Equal(t, list[order[0]], common.Hash{16}, "expected value is", common.Hash{16}.String()[2:], "but got", list[order[0]])
+
+ capicity := milestone.MaxCapacity
+ for i := 16; i <= 16*(capicity+1); i = i + 16 {
+ s.ProcessFutureMilestone(uint64(i), common.Hash{16})
+ }
+
+ require.Equal(t, len(milestone.FutureMilestoneOrder), capicity, "expected length is", capicity)
+ require.Equal(t, milestone.FutureMilestoneOrder[capicity-1], uint64(16*capicity), "expected value is", uint64(16*capicity), "but got", milestone.FutureMilestoneOrder[capicity-1])
+}
+
+// TestIsValidChain checks the IsValidChain function in isolation
+// for different cases by providing a mock current header and chain
+func TestIsValidChain(t *testing.T) {
+ t.Parallel()
+
+ db := memdb.NewTestDB(t)
+
+ s := NewMockService(db)
+ chainA := createMockChain(1, 20) // A1->A2...A19->A20
+
+ //Case1: no checkpoint whitelist and no milestone and no locking, should consider the chain as valid
+ res := s.IsValidChain(0, chainA)
+
+ require.Equal(t, res, true, "Expected chain to be valid")
+
+ tempChain := createMockChain(21, 22) // A21->A22
+
+ // add mock checkpoint entry
+ s.ProcessCheckpoint(tempChain[1].Number.Uint64(), tempChain[1].Hash())
+
+ //Make the mock chain with zero blocks
+ zeroChain := make([]*types.Header, 0)
+
+ //Case2: As input chain is of zero length,should consider the chain as invalid
+ res = s.IsValidChain(0, zeroChain)
+
+ require.Equal(t, res, false, "expected chain to be invalid", len(zeroChain))
+
+ //Case3A: As the received chain and current tip of local chain is behind the oldest whitelisted block entry, should consider
+ // the chain as valid
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ //Case3B: As the received chain is behind the oldest whitelisted block entry,but current tip is at par with whitelisted checkpoint, should consider
+ // the chain as invalid
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, false, "expected chain to be invalid ")
+
+ // add mock milestone entry
+ s.ProcessMilestone(tempChain[1].Number.Uint64(), tempChain[1].Hash())
+
+ //Case4A: As the received chain and current tip of local chain is behind the oldest whitelisted block entry, should consider
+ // the chain as valid
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ //Case4B: As the received chain is behind the oldest whitelisted block entry and but current tip is at par with whitelisted milestine, should consider
+ // the chain as invalid
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, false, "expected chain to be invalid")
+
+ //Remove the whitelisted checkpoint
+ s.PurgeWhitelistedCheckpoint()
+
+ //Case5: As the received chain is still invalid after removing the checkpoint as it is
+ //still behind the whitelisted milestone
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+ require.Equal(t, res, false, "expected chain to be invalid")
+
+ //Remove the whitelisted milestone
+ s.PurgeWhitelistedMilestone()
+
+ //At this stage there is no whitelisted milestone and checkpoint
+
+ checkpoint := s.checkpointService.(*checkpoint)
+ milestone := s.milestoneService.(*milestone)
+
+ //Locking for sprintNumber 15
+ milestone.LockMutex(chainA[len(chainA)-5].Number.Uint64())
+ milestone.UnlockMutex(true, "MilestoneID1", chainA[len(chainA)-5].Number.Uint64(), chainA[len(chainA)-5].Hash())
+
+ //Case6: As the received chain is valid as the locked sprintHash matches with the incoming chain.
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+ require.Equal(t, res, true, "expected chain to be valid as incoming chain matches with the locked value ")
+
+ hash3 := libcommon.Hash{3}
+
+ //Locking for sprintNumber 16 with different hash
+ milestone.LockMutex(chainA[len(chainA)-4].Number.Uint64())
+ milestone.UnlockMutex(true, "MilestoneID2", chainA[len(chainA)-4].Number.Uint64(), hash3)
+
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, false, "expected chain to be invalid as incoming chain does match with the locked value hash ")
+
+ //Locking for sprintNumber 19
+ milestone.LockMutex(chainA[len(chainA)-1].Number.Uint64())
+ milestone.UnlockMutex(true, "MilestoneID1", chainA[len(chainA)-1].Number.Uint64(), chainA[len(chainA)-1].Hash())
+
+ //Case7: As the received chain is valid as the locked sprintHash matches with the incoming chain.
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+
+ require.Equal(t, res, false, "expected chain to be invalid as incoming chain is less than the locked value ")
+
+ //Locking for sprintNumber 19
+ milestone.LockMutex(uint64(21))
+ milestone.UnlockMutex(true, "MilestoneID1", uint64(21), hash3)
+
+ //Case8: As the received chain is invalid as the locked sprintHash matches is ahead of incoming chain.
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+ require.Equal(t, res, false, "expected chain to be invalid as incoming chain is less than the locked value ")
+
+ //Unlocking the sprint
+ milestone.UnlockSprint(uint64(21))
+
+ // Clear checkpoint whitelist and add block A15 in whitelist
+ s.PurgeWhitelistedCheckpoint()
+ s.ProcessCheckpoint(chainA[15].Number.Uint64(), chainA[15].Hash())
+
+ require.Equal(t, checkpoint.doExist, true, "expected true as checkpoint exists.")
+
+ // case9: As the received chain is having valid checkpoint,should consider the chain as valid.
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ // add mock milestone entries
+ s.ProcessMilestone(tempChain[1].Number.Uint64(), tempChain[1].Hash())
+
+ // case10: Try importing a past chain having valid checkpoint, should
+ // consider the chain as invalid as still lastest milestone is ahead of the chain.
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+ require.Equal(t, res, false, "expected chain to be invalid")
+
+ // add mock milestone entries
+ s.ProcessMilestone(chainA[19].Number.Uint64(), chainA[19].Hash())
+
+ // case12: Try importing a chain having valid checkpoint and milestone, should
+ // consider the chain as valid
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+ require.Equal(t, res, true, "expected chain to be invalid")
+
+ // add mock milestone entries
+ s.ProcessMilestone(chainA[19].Number.Uint64(), chainA[19].Hash())
+
+ // case13: Try importing a past chain having valid checkpoint and milestone, should
+ // consider the chain as valid
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ // add mock milestone entries with wrong hash
+ s.ProcessMilestone(chainA[19].Number.Uint64(), chainA[18].Hash())
+
+ // case14: Try importing a past chain having valid checkpoint and milestone with wrong hash, should
+ // consider the chain as invalid
+ res = s.IsValidChain(chainA[len(chainA)-1].Number.Uint64(), chainA)
+ require.Equal(t, res, false, "expected chain to be invalid as hash mismatches")
+
+ // Clear milestone and add blocks A15 in whitelist
+ s.ProcessMilestone(chainA[15].Number.Uint64(), chainA[15].Hash())
+
+ // case16: Try importing a past chain having valid checkpoint, should
+ // consider the chain as valid
+ res = s.IsValidChain(tempChain[1].Number.Uint64(), chainA)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ // Clear checkpoint whitelist and mock blocks in whitelist
+ tempChain = createMockChain(20, 20) // A20
+
+ s.PurgeWhitelistedCheckpoint()
+ s.ProcessCheckpoint(tempChain[0].Number.Uint64(), tempChain[0].Hash())
+
+ require.Equal(t, checkpoint.doExist, true, "expected true")
+
+ // case17: Try importing a past chain having invalid checkpoint,should consider the chain as invalid
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainA)
+ require.Equal(t, res, false, "expected chain to be invalid")
+ // Not checking error here because we return nil in case of checkpoint mismatch
+
+ // case18: Try importing a future chain but within interval, should consider the chain as valid
+ res = s.IsValidChain(tempChain[len(tempChain)-1].Number.Uint64(), tempChain)
+ require.Equal(t, res, true, "expected chain to be invalid")
+
+ // create a future chain to be imported of length <= `checkpointInterval`
+ chainB := createMockChain(21, 30) // B21->B22...B29->B30
+
+ // case19: Try importing a future chain of acceptable length,should consider the chain as valid
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainB)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ s.PurgeWhitelistedCheckpoint()
+ s.PurgeWhitelistedMilestone()
+
+ chainB = createMockChain(21, 29) // C21->C22....C29
+
+ s.milestoneService.ProcessFutureMilestone(29, chainB[8].Hash())
+
+ // case20: Try importing a future chain which match the future milestone should the chain as valid
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainB)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ chainB = createMockChain(21, 27) // C21->C22...C39->C40...C->256
+
+ // case21: Try importing a chain whose end point is less than future milestone
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainB)
+ require.Equal(t, res, true, "expected chain to be valid")
+
+ chainB = createMockChain(30, 39) // C21->C22...C39->C40...C->256
+
+ //Processing wrong hash
+ s.milestoneService.ProcessFutureMilestone(38, chainB[9].Hash())
+
+ // case22: Try importing a future chain with mismatch future milestone
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainB)
+ require.Equal(t, res, false, "expected chain to be invalid")
+
+ chainB = createMockChain(40, 49) // C40->C41...C48->C49
+
+ // case23: Try importing a future chain whose starting point is ahead of latest future milestone
+ res = s.IsValidChain(tempChain[0].Number.Uint64(), chainB)
+ require.Equal(t, res, true, "expected chain to be invalid")
+
+}
+
+func TestPropertyBasedTestingMilestone(t *testing.T) {
+ db := memdb.NewTestDB(t)
+
+ rapid.Check(t, func(t *rapid.T) {
+
+ milestone := milestone{
+ finality: finality[*rawdb.Milestone]{
+ doExist: false,
+ Number: 0,
+ Hash: common.Hash{},
+ interval: 256,
+ db: db,
+ },
+
+ Locked: false,
+ LockedMilestoneNumber: 0,
+ LockedMilestoneHash: common.Hash{},
+ LockedMilestoneIDs: make(map[string]struct{}),
+ FutureMilestoneList: make(map[uint64]common.Hash),
+ FutureMilestoneOrder: make([]uint64, 0),
+ MaxCapacity: 10,
+ }
+
+ var (
+ milestoneEndNum = rapid.Uint64().Draw(t, "endBlock")
+ milestoneID = rapid.String().Draw(t, "MilestoneID")
+ doLock = rapid.Bool().Draw(t, "Voted")
+ )
+
+ val := milestone.LockMutex(milestoneEndNum)
+ if !val {
+ t.Error("LockMutex need to return true when there is no whitelisted milestone and locked milestone")
+ }
+
+ milestone.UnlockMutex(doLock, milestoneID, milestoneEndNum, common.Hash{})
+
+ if doLock {
+ //Milestone should not be whitelisted
+ if milestone.doExist {
+ t.Error("Milestone is not expected to be whitelisted")
+ }
+
+ //Local chain should be locked
+ if !milestone.Locked {
+ t.Error("Milestone is expected to be locked at", milestoneEndNum)
+ }
+
+ if milestone.LockedMilestoneNumber != milestoneEndNum {
+ t.Error("Locked milestone number is expected to be", milestoneEndNum)
+ }
+
+ if len(milestone.LockedMilestoneIDs) != 1 {
+ t.Error("List should contain 1 milestone")
+ }
+
+ _, ok := milestone.LockedMilestoneIDs[milestoneID]
+
+ if !ok {
+ t.Error("List doesn't contain correct milestoneID")
+ }
+ }
+
+ if !doLock {
+ if milestone.doExist {
+ t.Error("Milestone is not expected to be whitelisted")
+ }
+
+ if milestone.Locked {
+ t.Error("Milestone is expected not to be locked")
+ }
+
+ if milestone.LockedMilestoneNumber != 0 {
+ t.Error("Locked milestone number is expected to be", 0)
+ }
+
+ if len(milestone.LockedMilestoneIDs) != 0 {
+ t.Error("List should not contain milestone")
+ }
+
+ _, ok := milestone.LockedMilestoneIDs[milestoneID]
+
+ if ok {
+ t.Error("List shouldn't contain any milestoneID")
+ }
+ }
+
+ fitlerFn := func(i uint64) bool {
+ if i <= uint64(1000) {
+ return true
+ }
+
+ return false
+ }
+
+ var (
+ start = rapid.Uint64Max(milestoneEndNum).Draw(t, "start for mock chain")
+ end = rapid.Uint64Min(start).Filter(fitlerFn).Draw(t, "end for mock chain")
+ )
+
+ chainTemp := createMockChain(start, end)
+
+ val = milestone.IsValidChain(start, chainTemp)
+
+ if doLock && val {
+ t.Error("When the chain is locked at milestone, it should not pass IsValidChain for incompatible incoming chain")
+ }
+
+ if !doLock && !val {
+ t.Error("When the chain is not locked at milestone, it should pass IsValidChain for incoming chain")
+ }
+
+ var (
+ milestoneEndNum2 = rapid.Uint64().Draw(t, "endBlockNum 2")
+ milestoneID2 = rapid.String().Draw(t, "MilestoneID 2")
+ doLock2 = rapid.Bool().Draw(t, "Voted 2")
+ )
+
+ val = milestone.LockMutex(milestoneEndNum2)
+
+ if doLock && milestoneEndNum > milestoneEndNum2 && val {
+ t.Error("LockMutex need to return false as previous locked milestone is greater")
+ }
+
+ if doLock && milestoneEndNum <= milestoneEndNum2 && !val {
+ t.Error("LockMutex need to return true as previous locked milestone is less")
+ }
+
+ milestone.UnlockMutex(doLock2, milestoneID2, milestoneEndNum2, common.Hash{})
+
+ if doLock2 {
+ if milestone.doExist {
+ t.Error("Milestone is not expected to be whitelisted")
+ }
+
+ if !milestone.Locked {
+ t.Error("Milestone is expected to be locked at", milestoneEndNum2)
+ }
+
+ if milestone.LockedMilestoneNumber != milestoneEndNum2 {
+ t.Error("Locked milestone number is expected to be", milestoneEndNum)
+ }
+
+ if len(milestone.LockedMilestoneIDs) != 1 {
+ t.Error("List should contain 1 milestone")
+ }
+
+ _, ok := milestone.LockedMilestoneIDs[milestoneID2]
+
+ if !ok {
+ t.Error("List doesn't contain correct milestoneID")
+ }
+ }
+
+ if !doLock2 {
+ if milestone.doExist {
+ t.Error("Milestone is not expected to be whitelisted")
+ }
+
+ if !doLock && milestone.Locked {
+ t.Error("Milestone is expected not to be locked")
+ }
+
+ if doLock && !milestone.Locked {
+ t.Error("Milestone is expected to be locked at", milestoneEndNum)
+ }
+
+ if !doLock && milestone.LockedMilestoneNumber != 0 {
+ t.Error("Locked milestone number is expected to be", 0)
+ }
+
+ if doLock && milestone.LockedMilestoneNumber != milestoneEndNum {
+ t.Error("Locked milestone number is expected to be", milestoneEndNum)
+ }
+
+ if !doLock && len(milestone.LockedMilestoneIDs) != 0 {
+ t.Error("List should not contain milestone")
+ }
+
+ if doLock && len(milestone.LockedMilestoneIDs) != 1 {
+ t.Error("List should not contain milestone")
+ }
+
+ _, ok := milestone.LockedMilestoneIDs[milestoneID]
+
+ if !doLock && ok {
+ t.Error("List shouldn't contain any milestoneID")
+ }
+
+ if doLock && !ok {
+ t.Error("List should contain milestoneID")
+ }
+ }
+
+ var (
+ milestoneNum = rapid.Uint64().Draw(t, "milestone Number")
+ )
+
+ lockedValue := milestone.LockedMilestoneNumber
+
+ milestone.Process(milestoneNum, common.Hash{})
+
+ isChainLocked := doLock || doLock2
+
+ if !milestone.doExist {
+ t.Error("Should have the whitelisted milestone")
+ }
+
+ if milestone.finality.Number != milestoneNum {
+ t.Error("Should have the whitelisted milestone", milestoneNum)
+ }
+
+ if isChainLocked {
+ if milestoneNum < lockedValue {
+ if !milestone.Locked {
+ t.Error("Milestone is expected to be locked")
+ }
+ } else {
+ if milestone.Locked {
+ t.Error("Milestone is expected not to be locked")
+ }
+ }
+ }
+
+ var (
+ futureMilestoneNum = rapid.Uint64Min(milestoneNum).Draw(t, "future milestone Number")
+ )
+
+ isChainLocked = milestone.Locked
+
+ milestone.ProcessFutureMilestone(futureMilestoneNum, common.Hash{})
+
+ if isChainLocked {
+ if futureMilestoneNum < lockedValue {
+ if !milestone.Locked {
+ t.Error("Milestone is expected to be locked")
+ }
+ } else {
+ if milestone.Locked {
+ t.Error("Milestone is expected not to be locked")
+ }
+ }
+ }
+ })
+}
+
+func TestSplitChain(t *testing.T) {
+ t.Parallel()
+
+ type Result struct {
+ pastStart uint64
+ pastEnd uint64
+ futureStart uint64
+ futureEnd uint64
+ pastLength int
+ futureLength int
+ }
+
+ // Current chain is at block: X
+ // Incoming chain is represented as [N, M]
+ testCases := []struct {
+ name string
+ current uint64
+ chain []*types.Header
+ result Result
+ }{
+ {name: "X = 10, N = 11, M = 20", current: uint64(10), chain: createMockChain(11, 20), result: Result{futureStart: 11, futureEnd: 20, futureLength: 10}},
+ {name: "X = 10, N = 13, M = 20", current: uint64(10), chain: createMockChain(13, 20), result: Result{futureStart: 13, futureEnd: 20, futureLength: 8}},
+ {name: "X = 10, N = 2, M = 10", current: uint64(10), chain: createMockChain(2, 10), result: Result{pastStart: 2, pastEnd: 10, pastLength: 9}},
+ {name: "X = 10, N = 2, M = 9", current: uint64(10), chain: createMockChain(2, 9), result: Result{pastStart: 2, pastEnd: 9, pastLength: 8}},
+ {name: "X = 10, N = 2, M = 8", current: uint64(10), chain: createMockChain(2, 8), result: Result{pastStart: 2, pastEnd: 8, pastLength: 7}},
+ {name: "X = 10, N = 5, M = 15", current: uint64(10), chain: createMockChain(5, 15), result: Result{pastStart: 5, pastEnd: 10, pastLength: 6, futureStart: 11, futureEnd: 15, futureLength: 5}},
+ {name: "X = 10, N = 10, M = 20", current: uint64(10), chain: createMockChain(10, 20), result: Result{pastStart: 10, pastEnd: 10, pastLength: 1, futureStart: 11, futureEnd: 20, futureLength: 10}},
+ }
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ past, future := splitChain(tc.current, tc.chain)
+ require.Equal(t, len(past), tc.result.pastLength)
+ require.Equal(t, len(future), tc.result.futureLength)
+
+ if len(past) > 0 {
+ // Check if we have expected block/s
+ require.Equal(t, past[0].Number.Uint64(), tc.result.pastStart)
+ require.Equal(t, past[len(past)-1].Number.Uint64(), tc.result.pastEnd)
+ }
+
+ if len(future) > 0 {
+ // Check if we have expected block/s
+ require.Equal(t, future[0].Number.Uint64(), tc.result.futureStart)
+ require.Equal(t, future[len(future)-1].Number.Uint64(), tc.result.futureEnd)
+ }
+ })
+ }
+}
+
+//nolint:gocognit
+func TestSplitChainProperties(t *testing.T) {
+ t.Parallel()
+
+ // Current chain is at block: X
+ // Incoming chain is represented as [N, M]
+
+ currentChain := []int{0, 1, 2, 3, 10, 100} // blocks starting from genesis
+ blockDiffs := []int{0, 1, 2, 3, 4, 5, 9, 10, 11, 12, 90, 100, 101, 102}
+
+ caseParams := make(map[int]map[int]map[int]struct{}) // X -> N -> M
+
+ for _, current := range currentChain {
+ // past cases only + past to current
+ for _, diff := range blockDiffs {
+ from := current - diff
+
+ // use int type for everything to not care about underflow
+ if from < 0 {
+ continue
+ }
+
+ for _, diff := range blockDiffs {
+ to := current - diff
+
+ if to >= from {
+ addTestCaseParams(caseParams, current, from, to)
+ }
+ }
+ }
+
+ // future only + current to future
+ for _, diff := range blockDiffs {
+ from := current + diff
+
+ if from < 0 {
+ continue
+ }
+
+ for _, diff := range blockDiffs {
+ to := current + diff
+
+ if to >= from {
+ addTestCaseParams(caseParams, current, from, to)
+ }
+ }
+ }
+
+ // past-current-future
+ for _, diff := range blockDiffs {
+ from := current - diff
+
+ if from < 0 {
+ continue
+ }
+
+ for _, diff := range blockDiffs {
+ to := current + diff
+
+ if to >= from {
+ addTestCaseParams(caseParams, current, from, to)
+ }
+ }
+ }
+ }
+
+ type testCase struct {
+ current int
+ remoteStart int
+ remoteEnd int
+ }
+
+ var ts []testCase
+
+ // X -> N -> M
+ for x, nm := range caseParams {
+ for n, mMap := range nm {
+ for m := range mMap {
+ ts = append(ts, testCase{x, n, m})
+ }
+ }
+ }
+
+ //nolint:paralleltest
+ for i, tc := range ts {
+ tc := tc
+
+ name := fmt.Sprintf("test case: index = %d, X = %d, N = %d, M = %d", i, tc.current, tc.remoteStart, tc.remoteEnd)
+
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ chain := createMockChain(uint64(tc.remoteStart), uint64(tc.remoteEnd))
+
+ past, future := splitChain(uint64(tc.current), chain)
+
+ // properties
+ if len(past) > 0 {
+ // Check if the chain is ordered
+ isOrdered := sort.SliceIsSorted(past, func(i, j int) bool {
+ return past[i].Number.Uint64() < past[j].Number.Uint64()
+ })
+
+ require.True(t, isOrdered, "an ordered past chain expected: %v", past)
+
+ isSequential := sort.SliceIsSorted(past, func(i, j int) bool {
+ return past[i].Number.Uint64() == past[j].Number.Uint64()-1
+ })
+
+ require.True(t, isSequential, "a sequential past chain expected: %v", past)
+
+ // Check if current block >= past chain's last block
+ require.Equal(t, past[len(past)-1].Number.Uint64() <= uint64(tc.current), true)
+ }
+
+ if len(future) > 0 {
+ // Check if the chain is ordered
+ isOrdered := sort.SliceIsSorted(future, func(i, j int) bool {
+ return future[i].Number.Uint64() < future[j].Number.Uint64()
+ })
+
+ require.True(t, isOrdered, "an ordered future chain expected: %v", future)
+
+ isSequential := sort.SliceIsSorted(future, func(i, j int) bool {
+ return future[i].Number.Uint64() == future[j].Number.Uint64()-1
+ })
+
+ require.True(t, isSequential, "a sequential future chain expected: %v", future)
+
+ // Check if future chain's first block > current block
+ require.Equal(t, future[len(future)-1].Number.Uint64() > uint64(tc.current), true)
+ }
+
+ // Check if both chains are continuous
+ if len(past) > 0 && len(future) > 0 {
+ require.Equal(t, past[len(past)-1].Number.Uint64(), future[0].Number.Uint64()-1)
+ }
+
+ // Check if we get the original chain on appending both
+ gotChain := append(past, future...)
+ require.Equal(t, reflect.DeepEqual(gotChain, chain), true)
+ })
+ }
+}
+
+// createMockChain returns a chain with dummy headers
+// starting from `start` to `end` (inclusive)
+func createMockChain(start, end uint64) []*types.Header {
+ var (
+ i uint64
+ idx uint64
+ )
+
+ chain := make([]*types.Header, end-start+1)
+
+ for i = start; i <= end; i++ {
+ header := &types.Header{
+ Number: big.NewInt(int64(i)),
+ Time: uint64(time.Now().UnixMicro()) + i,
+ }
+ chain[idx] = header
+ idx++
+ }
+
+ return chain
+}
+
+// mXNM should be initialized
+func addTestCaseParams(mXNM map[int]map[int]map[int]struct{}, x, n, m int) {
+ //nolint:ineffassign
+ mNM, ok := mXNM[x]
+ if !ok {
+ mNM = make(map[int]map[int]struct{})
+ mXNM[x] = mNM
+ }
+
+ //nolint:ineffassign
+ _, ok = mNM[n]
+ if !ok {
+ mM := make(map[int]struct{})
+ mNM[n] = mM
+ }
+
+ mXNM[x][n][m] = struct{}{}
+}
diff --git a/consensus/bor/finality/whitelist_helpers.go b/consensus/bor/finality/whitelist_helpers.go
new file mode 100644
index 00000000000..54dbff49690
--- /dev/null
+++ b/consensus/bor/finality/whitelist_helpers.go
@@ -0,0 +1,140 @@
+package finality
+
+import (
+ "context"
+ "errors"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall"
+ "github.com/ledgerwatch/log/v3"
+)
+
+var (
+ // errCheckpoint is returned when we are unable to fetch the
+ // latest checkpoint from the local heimdall.
+ errCheckpoint = errors.New("failed to fetch latest checkpoint")
+
+ // errMilestone is returned when we are unable to fetch the
+ // latest milestone from the local heimdall.
+ errMilestone = errors.New("failed to fetch latest milestone")
+
+ ErrNotInRejectedList = errors.New("MilestoneID not in rejected list")
+)
+
+// fetchWhitelistCheckpoint fetches the latest checkpoint from it's local heimdall
+// and verifies the data against bor data.
+func fetchWhitelistCheckpoint(ctx context.Context, heimdallClient heimdall.IHeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) {
+ var (
+ blockNum uint64
+ blockHash common.Hash
+ )
+
+ // fetch the latest checkpoint from Heimdall
+ checkpoint, err := heimdallClient.FetchCheckpoint(ctx, -1)
+ if err != nil {
+ config.logger.Debug("[bor.heimdall] Failed to fetch latest checkpoint for whitelisting", "err", err)
+ return blockNum, blockHash, errCheckpoint
+ }
+
+ // Verify if the checkpoint fetched can be added to the local whitelist entry or not
+ // If verified, it returns the hash of the end block of the checkpoint. If not,
+ // it will return appropriate error.
+ hash, err := verifier.verify(ctx, config, checkpoint.StartBlock.Uint64(), checkpoint.EndBlock.Uint64(), checkpoint.RootHash.String()[2:], true)
+
+ if err != nil {
+ if errors.Is(err, errMissingBlocks) {
+ config.logger.Debug("[bor.heimdall] Got new checkpoint", "start", checkpoint.StartBlock.Uint64(), "end", checkpoint.EndBlock.Uint64(), "rootHash", checkpoint.RootHash.String())
+ config.logger.Debug("[bor.heimdall] Failed to whitelist checkpoint", "err", err)
+ } else {
+ config.logger.Info("[bor.heimdall] Got new checkpoint", "start", checkpoint.StartBlock.Uint64(), "end", checkpoint.EndBlock.Uint64(), "rootHash", checkpoint.RootHash.String())
+ config.logger.Warn("[bor.heimdall] Failed to whitelist checkpoint", "err", err)
+ }
+
+ return blockNum, blockHash, err
+ }
+
+ config.logger.Info("[bor.heimdall] Got new checkpoint", "start", checkpoint.StartBlock.Uint64(), "end", checkpoint.EndBlock.Uint64(), "rootHash", checkpoint.RootHash.String())
+
+ blockNum = checkpoint.EndBlock.Uint64()
+ blockHash = common.HexToHash(hash)
+
+ return blockNum, blockHash, nil
+}
+
+// fetchWhitelistMilestone fetches the latest milestone from it's local heimdall
+// and verifies the data against bor data.
+func fetchWhitelistMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, verifier *borVerifier, config *config) (uint64, common.Hash, error) {
+ var (
+ num uint64
+ hash common.Hash
+ )
+
+ // fetch latest milestone
+ milestone, err := heimdallClient.FetchMilestone(ctx)
+ if errors.Is(err, heimdall.ErrServiceUnavailable) {
+ config.logger.Debug("[bor.heimdall] Failed to fetch latest milestone for whitelisting", "err", err)
+ return num, hash, err
+ }
+
+ if err != nil {
+ config.logger.Warn("[bor.heimdall] Failed to fetch latest milestone for whitelisting", "err", err)
+ return num, hash, errMilestone
+ }
+
+ config.logger.Debug("[bor.heimdall] Got new milestone", "start", milestone.StartBlock.Uint64(), "end", milestone.EndBlock.Uint64())
+
+ num = milestone.EndBlock.Uint64()
+ hash = milestone.Hash
+
+ // Verify if the milestone fetched can be added to the local whitelist entry or not
+ // If verified, it returns the hash of the end block of the milestone. If not,
+ // it will return appropriate error.
+ _, err = verifier.verify(ctx, config, milestone.StartBlock.Uint64(), milestone.EndBlock.Uint64(), milestone.Hash.String()[2:], false)
+ if err != nil {
+ whitelist.GetWhitelistingService().UnlockSprint(milestone.EndBlock.Uint64())
+ return num, hash, err
+ }
+
+ return num, hash, nil
+}
+
+func fetchNoAckMilestone(ctx context.Context, heimdallClient heimdall.IHeimdallClient, logger log.Logger) (string, error) {
+ var (
+ milestoneID string
+ )
+
+ milestoneID, err := heimdallClient.FetchLastNoAckMilestone(ctx)
+ if errors.Is(err, heimdall.ErrServiceUnavailable) {
+ logger.Debug("[bor.heimdall] Failed to fetch latest no-ack milestone", "err", err)
+ return milestoneID, err
+ }
+
+ if err != nil {
+ logger.Warn("[bor.heimdall] Failed to fetch latest no-ack milestone", "err", err)
+ return milestoneID, errMilestone
+ }
+
+ return milestoneID, nil
+}
+
+func fetchNoAckMilestoneByID(ctx context.Context, heimdallClient heimdall.IHeimdallClient, milestoneID string, logger log.Logger) error {
+ err := heimdallClient.FetchNoAckMilestone(ctx, milestoneID)
+ if errors.Is(err, heimdall.ErrServiceUnavailable) {
+ logger.Debug("[bor.heimdall] Failed to fetch no-ack milestone by ID", "milestoneID", milestoneID, "err", err)
+ return err
+ }
+
+ // fixme: handle different types of errors
+ if errors.Is(err, ErrNotInRejectedList) {
+ logger.Warn("[bor.heimdall] MilestoneID not in rejected list", "milestoneID", milestoneID, "err", err)
+ return err
+ }
+
+ if err != nil {
+ logger.Warn("[bor.heimdall] Failed to fetch no-ack milestone by ID ", "milestoneID", milestoneID, "err", err)
+ return errMilestone
+ }
+
+ return nil
+}
diff --git a/consensus/bor/genesis.go b/consensus/bor/genesis.go
index 5096b33c57f..24b0964f454 100644
--- a/consensus/bor/genesis.go
+++ b/consensus/bor/genesis.go
@@ -4,11 +4,11 @@ import (
"math/big"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/bor/clerk"
+ "github.com/ledgerwatch/erigon/rlp"
)
//go:generate mockgen -destination=./genesis_contract_mock.go -package=bor . GenesisContract
type GenesisContract interface {
- CommitState(event *clerk.EventRecordWithTime, syscall consensus.SystemCall) error
+ CommitState(event rlp.RawValue, syscall consensus.SystemCall) error
LastStateId(syscall consensus.SystemCall) (*big.Int, error)
}
diff --git a/consensus/bor/heimall.go b/consensus/bor/heimall.go
deleted file mode 100644
index b28cec81e4c..00000000000
--- a/consensus/bor/heimall.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package bor
-
-import (
- "context"
-
- "github.com/ledgerwatch/erigon/consensus/bor/clerk"
- "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
- "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
-)
-
-//go:generate mockgen -destination=../../tests/bor/mocks/IHeimdallClient.go -package=mocks . IHeimdallClient
-type IHeimdallClient interface {
- StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error)
- Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error)
- FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error)
- FetchCheckpointCount(ctx context.Context) (int64, error)
- Close()
-}
-
-type HeimdallServer interface {
- StateSyncEvents(ctx context.Context, fromID uint64, to int64, limit int) (uint64, []*clerk.EventRecordWithTime, error)
- Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error)
- FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error)
- FetchCheckpointCount(ctx context.Context) (int64, error)
- Close()
-}
diff --git a/consensus/bor/heimdall/checkpoint/checkpoint.go b/consensus/bor/heimdall/checkpoint/checkpoint.go
index 258cadd7e76..ebced7beef8 100644
--- a/consensus/bor/heimdall/checkpoint/checkpoint.go
+++ b/consensus/bor/heimdall/checkpoint/checkpoint.go
@@ -1,6 +1,7 @@
package checkpoint
import (
+ "fmt"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -16,6 +17,18 @@ type Checkpoint struct {
Timestamp uint64 `json:"timestamp"`
}
+func (m Checkpoint) String() string {
+ return fmt.Sprintf(
+ "Checkpoint {%v (%d:%d) %v %v %v}",
+ m.Proposer.String(),
+ m.StartBlock,
+ m.EndBlock,
+ m.RootHash.Hex(),
+ m.BorChainID,
+ m.Timestamp,
+ )
+}
+
type CheckpointResponse struct {
Height string `json:"height"`
Result Checkpoint `json:"result"`
diff --git a/consensus/bor/heimdall/client.go b/consensus/bor/heimdall/client.go
index f0bcf6a54d5..717da531f60 100644
--- a/consensus/bor/heimdall/client.go
+++ b/consensus/bor/heimdall/client.go
@@ -11,22 +11,28 @@ import (
"sort"
"time"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+
"github.com/ledgerwatch/erigon/consensus/bor/clerk"
"github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone"
"github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
"github.com/ledgerwatch/log/v3"
)
var (
- // // ErrShutdownDetected is returned if a shutdown was detected
+ // ErrShutdownDetected is returned if a shutdown was detected
ErrShutdownDetected = errors.New("shutdown detected")
ErrNoResponse = errors.New("got a nil response")
ErrNotSuccessfulResponse = errors.New("error while fetching data from Heimdall")
+ ErrNotInRejectedList = errors.New("milestoneID doesn't exist in rejected list")
+ ErrNotInMilestoneList = errors.New("milestoneID doesn't exist in Heimdall")
+ ErrServiceUnavailable = errors.New("service unavailable")
)
const (
stateFetchLimit = 50
- apiHeimdallTimeout = 5 * time.Second
+ apiHeimdallTimeout = 10 * time.Second
retryCall = 5 * time.Second
)
@@ -67,8 +73,16 @@ func NewHeimdallClient(urlString string, logger log.Logger) *HeimdallClient {
const (
fetchStateSyncEventsFormat = "from-id=%d&to-time=%d&limit=%d"
fetchStateSyncEventsPath = "clerk/event-record/list"
- fetchCheckpoint = "/checkpoints/%s"
- fetchCheckpointCount = "/checkpoints/count"
+
+ fetchCheckpoint = "/checkpoints/%s"
+ fetchCheckpointCount = "/checkpoints/count"
+
+ fetchMilestone = "/milestone/latest"
+ fetchMilestoneCount = "/milestone/count"
+
+ fetchLastNoAckMilestone = "/milestone/lastNoAck"
+ fetchNoAckMilestone = "/milestone/noAck/%s"
+ fetchMilestoneID = "/milestone/ID/%s"
fetchSpanFormat = "bor/span/%d"
)
@@ -82,7 +96,7 @@ func (h *HeimdallClient) StateSyncEvents(ctx context.Context, fromID uint64, to
return nil, err
}
- h.logger.Debug("Fetching state sync events", "queryParams", url.RawQuery)
+ h.logger.Debug("[bor.heimdall] Fetching state sync events", "queryParams", url.RawQuery)
ctx = withRequestType(ctx, stateSyncRequest)
@@ -145,6 +159,23 @@ func (h *HeimdallClient) FetchCheckpoint(ctx context.Context, number int64) (*ch
return &response.Result, nil
}
+// FetchMilestone fetches the checkpoint from heimdall
+func (h *HeimdallClient) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) {
+ url, err := milestoneURL(h.urlString)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx = withRequestType(ctx, milestoneRequest)
+
+ response, err := FetchWithRetry[milestone.MilestoneResponse](ctx, h.client, url, h.closeCh, h.logger)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Result, nil
+}
+
// FetchCheckpointCount fetches the checkpoint count from heimdall
func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error) {
url, err := checkpointCountURL(h.urlString)
@@ -162,20 +193,105 @@ func (h *HeimdallClient) FetchCheckpointCount(ctx context.Context) (int64, error
return response.Result.Result, nil
}
+// FetchMilestoneCount fetches the milestone count from heimdall
+func (h *HeimdallClient) FetchMilestoneCount(ctx context.Context) (int64, error) {
+ url, err := milestoneCountURL(h.urlString)
+ if err != nil {
+ return 0, err
+ }
+
+ ctx = withRequestType(ctx, milestoneCountRequest)
+
+ response, err := FetchWithRetry[milestone.MilestoneCountResponse](ctx, h.client, url, h.closeCh, h.logger)
+ if err != nil {
+ return 0, err
+ }
+
+ return response.Result.Count, nil
+}
+
+// FetchLastNoAckMilestone fetches the last no-ack-milestone from heimdall
+func (h *HeimdallClient) FetchLastNoAckMilestone(ctx context.Context) (string, error) {
+ url, err := lastNoAckMilestoneURL(h.urlString)
+ if err != nil {
+ return "", err
+ }
+
+ ctx = withRequestType(ctx, milestoneLastNoAckRequest)
+
+ response, err := FetchWithRetry[milestone.MilestoneLastNoAckResponse](ctx, h.client, url, h.closeCh, h.logger)
+ if err != nil {
+ return "", err
+ }
+
+ return response.Result.Result, nil
+}
+
+// FetchNoAckMilestone fetches the last no-ack-milestone from heimdall
+func (h *HeimdallClient) FetchNoAckMilestone(ctx context.Context, milestoneID string) error {
+ url, err := noAckMilestoneURL(h.urlString, milestoneID)
+ if err != nil {
+ return err
+ }
+
+ ctx = withRequestType(ctx, milestoneNoAckRequest)
+
+ response, err := FetchWithRetry[milestone.MilestoneNoAckResponse](ctx, h.client, url, h.closeCh, h.logger)
+ if err != nil {
+ return err
+ }
+
+ if !response.Result.Result {
+ return fmt.Errorf("%w: milestoneID %q", ErrNotInRejectedList, milestoneID)
+ }
+
+ return nil
+}
+
+// FetchMilestoneID fetches the bool result from Heimdal whether the ID corresponding
+// to the given milestone is in process in Heimdall
+func (h *HeimdallClient) FetchMilestoneID(ctx context.Context, milestoneID string) error {
+ url, err := milestoneIDURL(h.urlString, milestoneID)
+ if err != nil {
+ return err
+ }
+
+ ctx = withRequestType(ctx, milestoneIDRequest)
+
+ response, err := FetchWithRetry[milestone.MilestoneIDResponse](ctx, h.client, url, h.closeCh, h.logger)
+
+ if err != nil {
+ return err
+ }
+
+ if !response.Result.Result {
+ return fmt.Errorf("%w: milestoneID %q", ErrNotInMilestoneList, milestoneID)
+ }
+
+ return nil
+}
+
// FetchWithRetry returns data from heimdall with retry
func FetchWithRetry[T any](ctx context.Context, client http.Client, url *url.URL, closeCh chan struct{}, logger log.Logger) (*T, error) {
// request data once
request := &Request{client: client, url: url, start: time.Now()}
result, err := Fetch[T](ctx, request)
-
if err == nil {
return result, nil
}
+ // 503 (Service Unavailable) is thrown when an endpoint isn't activated
+ // yet in heimdall. E.g. when the hardfork hasn't hit yet but heimdall
+ // is upgraded.
+ if errors.Is(err, ErrServiceUnavailable) {
+ logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "error", err)
+ return nil, err
+ }
+
// attempt counter
attempt := 1
- logger.Warn("an error while trying fetching from Heimdall", "attempt", attempt, "error", err)
+ logger.Warn("[bor.heimdall] an error while fetching", "path", url.Path, "attempt", attempt, "error", err)
// create a new ticker for retrying the request
ticker := time.NewTicker(retryCall)
@@ -185,26 +301,28 @@ func FetchWithRetry[T any](ctx context.Context, client http.Client, url *url.URL
retryLoop:
for {
- logger.Info("Retrying again in 5 seconds to fetch data from Heimdall", "path", url.Path, "attempt", attempt)
-
attempt++
select {
case <-ctx.Done():
- logger.Debug("Shutdown detected, terminating request by context.Done")
-
+ logger.Debug("[bor.heimdall] request canceled", "reason", ctx.Err(), "path", url.Path, "attempt", attempt)
return nil, ctx.Err()
case <-closeCh:
- logger.Debug("Shutdown detected, terminating request by closing")
+ logger.Debug("[bor.heimdall] shutdown detected, terminating request", "path", url.Path)
return nil, ErrShutdownDetected
case <-ticker.C:
request = &Request{client: client, url: url, start: time.Now()}
result, err = Fetch[T](ctx, request)
+ if errors.Is(err, ErrServiceUnavailable) {
+ logger.Debug("[bor.heimdall] service unavailable at the moment", "path", url.Path, "attempt", attempt, "error", err)
+ return nil, err
+ }
+
if err != nil {
if attempt%logEach == 0 {
- logger.Warn("an error while trying fetching from Heimdall", "attempt", attempt, "error", err)
+ logger.Warn("[bor.heimdall] an error while trying fetching", "path", url.Path, "attempt", attempt, "error", err)
}
continue retryLoop
@@ -215,16 +333,15 @@ retryLoop:
}
}
-// TODO: Uncomment once metrics are added
// Fetch fetches response from heimdall
func Fetch[T any](ctx context.Context, request *Request) (*T, error) {
- // isSuccessful := false
+ isSuccessful := false
- // defer func() {
- // if metrics.EnabledExpensive {
- // sendMetrics(ctx, request.start, isSuccessful)
- // }
- // }()
+ defer func() {
+ if metrics.EnabledExpensive {
+ sendMetrics(ctx, request.start, isSuccessful)
+ }
+ }()
result := new(T)
@@ -242,7 +359,7 @@ func Fetch[T any](ctx context.Context, request *Request) (*T, error) {
return nil, err
}
- // isSuccessful = true
+ isSuccessful = true
return result, nil
}
@@ -268,10 +385,30 @@ func checkpointURL(urlString string, number int64) (*url.URL, error) {
return makeURL(urlString, url, "")
}
+func milestoneURL(urlString string) (*url.URL, error) {
+ url := fetchMilestone
+ return makeURL(urlString, url, "")
+}
+
func checkpointCountURL(urlString string) (*url.URL, error) {
return makeURL(urlString, fetchCheckpointCount, "")
}
+func milestoneCountURL(urlString string) (*url.URL, error) {
+ return makeURL(urlString, fetchMilestoneCount, "")
+}
+func lastNoAckMilestoneURL(urlString string) (*url.URL, error) {
+ return makeURL(urlString, fetchLastNoAckMilestone, "")
+}
+
+func noAckMilestoneURL(urlString string, id string) (*url.URL, error) {
+ return makeURL(urlString, fmt.Sprintf(fetchNoAckMilestone, id), "")
+}
+
+func milestoneIDURL(urlString string, id string) (*url.URL, error) {
+ return makeURL(urlString, fmt.Sprintf(fetchMilestoneID, id), "")
+}
+
func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) {
u, err := url.Parse(urlString)
if err != nil {
@@ -298,9 +435,13 @@ func internalFetch(ctx context.Context, client http.Client, u *url.URL) ([]byte,
defer res.Body.Close()
+ if res.StatusCode == http.StatusServiceUnavailable {
+ return nil, fmt.Errorf("%w: response code %d", ErrServiceUnavailable, res.StatusCode)
+ }
+
// check status code
if res.StatusCode != 200 && res.StatusCode != 204 {
- return nil, fmt.Errorf("%w: response code %d", ErrNotSuccessfulResponse, res.StatusCode)
+ return nil, fmt.Errorf("%w: %s:response code %d", ErrNotSuccessfulResponse, u.String(), res.StatusCode)
}
// unmarshall data from buffer
diff --git a/consensus/bor/heimdall/heimall.go b/consensus/bor/heimdall/heimall.go
new file mode 100644
index 00000000000..2ef405290f2
--- /dev/null
+++ b/consensus/bor/heimdall/heimall.go
@@ -0,0 +1,42 @@
+package heimdall
+
+import (
+ "context"
+
+ "github.com/ledgerwatch/erigon/consensus/bor/clerk"
+ "github.com/ledgerwatch/erigon/consensus/bor/finality/generics"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span"
+)
+
+func MilestoneRewindPending() bool {
+ return generics.BorMilestoneRewind.Load() != nil && *generics.BorMilestoneRewind.Load() != 0
+}
+
+//go:generate mockgen -destination=../../tests/bor/mocks/IHeimdallClient.go -package=mocks . IHeimdallClient
+type IHeimdallClient interface {
+ StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error)
+ Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error)
+ FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error)
+ FetchCheckpointCount(ctx context.Context) (int64, error)
+ FetchMilestone(ctx context.Context) (*milestone.Milestone, error)
+ FetchMilestoneCount(ctx context.Context) (int64, error)
+ FetchNoAckMilestone(ctx context.Context, milestoneID string) error //Fetch the bool value whether milestone corresponding to the given id failed in the Heimdall
+ FetchLastNoAckMilestone(ctx context.Context) (string, error) //Fetch latest failed milestone id
+ FetchMilestoneID(ctx context.Context, milestoneID string) error //Fetch the bool value whether milestone corresponding to the given id is in process in Heimdall
+ Close()
+}
+
+type HeimdallServer interface {
+ StateSyncEvents(ctx context.Context, fromID uint64, to int64, limit int) (uint64, []*clerk.EventRecordWithTime, error)
+ Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, error)
+ FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error)
+ FetchCheckpointCount(ctx context.Context) (int64, error)
+ FetchMilestone(ctx context.Context) (*milestone.Milestone, error)
+ FetchMilestoneCount(ctx context.Context) (int64, error)
+ FetchNoAckMilestone(ctx context.Context, milestoneID string) error
+ FetchLastNoAckMilestone(ctx context.Context) (string, error)
+ FetchMilestoneID(ctx context.Context, milestoneID string) error
+ Close()
+}
diff --git a/consensus/bor/heimdall/metrics.go b/consensus/bor/heimdall/metrics.go
index ca7f190035f..0157c37766d 100644
--- a/consensus/bor/heimdall/metrics.go
+++ b/consensus/bor/heimdall/metrics.go
@@ -3,24 +3,30 @@ package heimdall
import (
"context"
"time"
+
+ "github.com/ledgerwatch/erigon-lib/metrics"
)
type (
requestTypeKey struct{}
requestType string
- // TODO: Uncomment once metrics are added
- // meter struct {
- // request map[bool]metrics.Meter // map[isSuccessful]metrics.Meter
- // timer metrics.Timer
- // }
+ meter struct {
+ request map[bool]metrics.Gauge
+ timer metrics.Summary
+ }
)
const (
- stateSyncRequest requestType = "state-sync"
- spanRequest requestType = "span"
- checkpointRequest requestType = "checkpoint"
- checkpointCountRequest requestType = "checkpoint-count"
+ stateSyncRequest requestType = "state-sync"
+ spanRequest requestType = "span"
+ checkpointRequest requestType = "checkpoint"
+ checkpointCountRequest requestType = "checkpoint-count"
+ milestoneRequest requestType = "milestone"
+ milestoneCountRequest requestType = "milestone-count"
+ milestoneNoAckRequest requestType = "milestone-no-ack"
+ milestoneLastNoAckRequest requestType = "milestone-last-no-ack"
+ milestoneIDRequest requestType = "milestone-id"
)
func withRequestType(ctx context.Context, reqType requestType) context.Context {
@@ -32,52 +38,50 @@ func getRequestType(ctx context.Context) (requestType, bool) {
return reqType, ok
}
-// TODO: Uncomment once metrics are added
-// var (
-// requestMeters = map[requestType]meter{
-// stateSyncRequest: {
-// request: map[bool]metrics.Meter{
-// true: metrics.NewRegisteredMeter("client/requests/statesync/valid", nil),
-// false: metrics.NewRegisteredMeter("client/requests/statesync/invalid", nil),
-// },
-// timer: metrics.NewRegisteredTimer("client/requests/statesync/duration", nil),
-// },
-// spanRequest: {
-// request: map[bool]metrics.Meter{
-// true: metrics.NewRegisteredMeter("client/requests/span/valid", nil),
-// false: metrics.NewRegisteredMeter("client/requests/span/invalid", nil),
-// },
-// timer: metrics.NewRegisteredTimer("client/requests/span/duration", nil),
-// },
-// checkpointRequest: {
-// request: map[bool]metrics.Meter{
-// true: metrics.NewRegisteredMeter("client/requests/checkpoint/valid", nil),
-// false: metrics.NewRegisteredMeter("client/requests/checkpoint/invalid", nil),
-// },
-// timer: metrics.NewRegisteredTimer("client/requests/checkpoint/duration", nil),
-// },
-// checkpointCountRequest: {
-// request: map[bool]metrics.Meter{
-// true: metrics.NewRegisteredMeter("client/requests/checkpointcount/valid", nil),
-// false: metrics.NewRegisteredMeter("client/requests/checkpointcount/invalid", nil),
-// },
-// timer: metrics.NewRegisteredTimer("client/requests/checkpointcount/duration", nil),
-// },
-// }
-// )
+var (
+ requestMeters = map[requestType]meter{
+ stateSyncRequest: {
+ request: map[bool]metrics.Gauge{
+ true: metrics.GetOrCreateGauge("client_requests_statesync_valid"),
+ false: metrics.GetOrCreateGauge("client_requests_statesync_invalid"),
+ },
+ timer: metrics.GetOrCreateSummary("client_requests_statesync_duration"),
+ },
+ spanRequest: {
+ request: map[bool]metrics.Gauge{
+ true: metrics.GetOrCreateGauge("client_requests_span_valid"),
+ false: metrics.GetOrCreateGauge("client_requests_span_invalid"),
+ },
+ timer: metrics.GetOrCreateSummary("client_requests_span_duration"),
+ },
+ checkpointRequest: {
+ request: map[bool]metrics.Gauge{
+ true: metrics.GetOrCreateGauge("client_requests_checkpoint_valid"),
+ false: metrics.GetOrCreateGauge("client_requests_checkpoint_invalid"),
+ },
+ timer: metrics.GetOrCreateSummary("client_requests_checkpoint_duration"),
+ },
+ checkpointCountRequest: {
+ request: map[bool]metrics.Gauge{
+ true: metrics.GetOrCreateGauge("client_requests_checkpointcount_valid"),
+ false: metrics.GetOrCreateGauge("client_requests_checkpointcount_invalid"),
+ },
+ timer: metrics.GetOrCreateSummary("client_requests_checkpointcount_duration"),
+ },
+ }
+)
-// TODO: Uncomment once metrics is added
func sendMetrics(ctx context.Context, start time.Time, isSuccessful bool) {
- // reqType, ok := getRequestType(ctx)
- // if !ok {
- // return
- // }
+ reqType, ok := getRequestType(ctx)
+ if !ok {
+ return
+ }
- // meters, ok := requestMeters[reqType]
- // if !ok {
- // return
- // }
+ meters, ok := requestMeters[reqType]
+ if !ok {
+ return
+ }
- // meters.request[isSuccessful].Mark(1)
- // meters.timer.Update(time.Since(start))
+ meters.request[isSuccessful].Set(1)
+ meters.timer.ObserveDuration(start)
}
diff --git a/consensus/bor/heimdall/milestone/milestone.go b/consensus/bor/heimdall/milestone/milestone.go
new file mode 100644
index 00000000000..a849f4461bd
--- /dev/null
+++ b/consensus/bor/heimdall/milestone/milestone.go
@@ -0,0 +1,58 @@
+package milestone
+
+import (
+ "math/big"
+
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
+)
+
+// milestone defines a response object type of bor milestone
+type Milestone struct {
+ Proposer libcommon.Address `json:"proposer"`
+ StartBlock *big.Int `json:"start_block"`
+ EndBlock *big.Int `json:"end_block"`
+ Hash libcommon.Hash `json:"hash"`
+ BorChainID string `json:"bor_chain_id"`
+ Timestamp uint64 `json:"timestamp"`
+}
+
+type MilestoneResponse struct {
+ Height string `json:"height"`
+ Result Milestone `json:"result"`
+}
+
+type MilestoneCount struct {
+ Count int64 `json:"count"`
+}
+
+type MilestoneCountResponse struct {
+ Height string `json:"height"`
+ Result MilestoneCount `json:"result"`
+}
+
+type MilestoneLastNoAck struct {
+ Result string `json:"result"`
+}
+
+type MilestoneLastNoAckResponse struct {
+ Height string `json:"height"`
+ Result MilestoneLastNoAck `json:"result"`
+}
+
+type MilestoneNoAck struct {
+ Result bool `json:"result"`
+}
+
+type MilestoneNoAckResponse struct {
+ Height string `json:"height"`
+ Result MilestoneNoAck `json:"result"`
+}
+
+type MilestoneID struct {
+ Result bool `json:"result"`
+}
+
+type MilestoneIDResponse struct {
+ Height string `json:"height"`
+ Result MilestoneID `json:"result"`
+}
diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go
index ed4b0e042d9..b3738c4774c 100644
--- a/consensus/bor/heimdall/span/spanner.go
+++ b/consensus/bor/heimdall/span/spanner.go
@@ -2,6 +2,7 @@ package span
import (
"encoding/hex"
+ "encoding/json"
"math/big"
"github.com/ledgerwatch/erigon-lib/chain"
@@ -37,7 +38,7 @@ func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, erro
data, err := c.validatorSet.Pack(method)
if err != nil {
- c.logger.Error("Unable to pack tx for getCurrentSpan", "error", err)
+ c.logger.Error("[bor] Unable to pack tx for getCurrentSpan", "error", err)
return nil, err
}
@@ -67,47 +68,30 @@ func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, erro
return &span, nil
}
-func (c *ChainSpanner) GetCurrentValidators(blockNumber uint64, signer libcommon.Address, getSpanForBlock func(blockNum uint64) (*HeimdallSpan, error)) ([]*valset.Validator, error) {
- // Use signer as validator in case of bor devent
- if c.withoutHeimdall {
- c.logger.Info("Spanner returning pre-set validator set")
- validators := []*valset.Validator{
- {
- ID: 1,
- Address: signer,
- VotingPower: 1000,
- ProposerPriority: 1,
- },
- }
-
- return validators, nil
+func (c *ChainSpanner) GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) {
+ // Use hardcoded bor devnet valset if chain-name = bor-devnet
+ if NetworkNameVals[c.chainConfig.ChainName] != nil && c.withoutHeimdall {
+ return NetworkNameVals[c.chainConfig.ChainName], nil
}
- span, err := getSpanForBlock(blockNumber)
- if err != nil {
+ spanBytes := chain.BorSpan(spanId)
+ var span HeimdallSpan
+ if err := json.Unmarshal(spanBytes, &span); err != nil {
return nil, err
}
return span.ValidatorSet.Validators, nil
}
-func (c *ChainSpanner) GetCurrentProducers(blockNumber uint64, signer libcommon.Address, getSpanForBlock func(blockNum uint64) (*HeimdallSpan, error)) ([]*valset.Validator, error) {
- // Use signer as validator in case of bor devent
- if c.withoutHeimdall {
- validators := []*valset.Validator{
- {
- ID: 1,
- Address: signer,
- VotingPower: 1000,
- ProposerPriority: 1,
- },
- }
-
- return validators, nil
+func (c *ChainSpanner) GetCurrentProducers(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error) {
+ // Use hardcoded bor devnet valset if chain-name = bor-devnet
+ if NetworkNameVals[c.chainConfig.ChainName] != nil && c.withoutHeimdall {
+ return NetworkNameVals[c.chainConfig.ChainName], nil
}
- span, err := getSpanForBlock(blockNumber)
- if err != nil {
+ spanBytes := chain.BorSpan(spanId)
+ var span HeimdallSpan
+ if err := json.Unmarshal(spanBytes, &span); err != nil {
return nil, err
}
@@ -144,7 +128,7 @@ func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, syscall consensus.S
return err
}
- c.logger.Debug("✅ Committing new span",
+ c.logger.Debug("[bor] ✅ Committing new span",
"id", heimdallSpan.ID,
"startBlock", heimdallSpan.StartBlock,
"endBlock", heimdallSpan.EndBlock,
@@ -161,7 +145,7 @@ func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, syscall consensus.S
producerBytes,
)
if err != nil {
- c.logger.Error("Unable to pack tx for commitSpan", "error", err)
+ c.logger.Error("[bor] Unable to pack tx for commitSpan", "error", err)
return err
}
diff --git a/consensus/bor/heimdall/span/testValidators.go b/consensus/bor/heimdall/span/testValidators.go
new file mode 100644
index 00000000000..29cf1cc2e6a
--- /dev/null
+++ b/consensus/bor/heimdall/span/testValidators.go
@@ -0,0 +1,41 @@
+package span
+
+import (
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/consensus/bor/valset"
+)
+
+// NetworkNameVals is a map of network name to validator set for tests/devnets
+var NetworkNameVals = make(map[string][]*valset.Validator)
+
+// Validator set for bor e2e test chain with 2 validator configuration
+var BorE2ETestChain2Valset = []*valset.Validator{
+ {
+ ID: 1,
+ Address: common.HexToAddress("71562b71999873DB5b286dF957af199Ec94617F7"),
+ VotingPower: 1000,
+ ProposerPriority: 1,
+ },
+ {
+ ID: 2,
+ Address: common.HexToAddress("9fB29AAc15b9A4B7F17c3385939b007540f4d791"),
+ VotingPower: 1000,
+ ProposerPriority: 2,
+ },
+}
+
+// Validator set for bor devnet-chain with 1 validator configuration
+var BorDevnetChainVals = []*valset.Validator{
+ {
+ ID: 1,
+ Address: common.HexToAddress("0x67b1d87101671b127f5f8714789C7192f7ad340e"),
+ VotingPower: 1000,
+ ProposerPriority: 1,
+ },
+}
+
+func init() {
+ NetworkNameVals[networkname.BorE2ETestChain2ValName] = BorE2ETestChain2Valset
+ NetworkNameVals[networkname.BorDevnetChainName] = BorDevnetChainVals
+}
diff --git a/consensus/bor/heimdallgrpc/milestone.go b/consensus/bor/heimdallgrpc/milestone.go
new file mode 100644
index 00000000000..ab39cbb3952
--- /dev/null
+++ b/consensus/bor/heimdallgrpc/milestone.go
@@ -0,0 +1,102 @@
+package heimdallgrpc
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone"
+
+ proto "github.com/maticnetwork/polyproto/heimdall"
+ protoutils "github.com/maticnetwork/polyproto/utils"
+)
+
+func (h *HeimdallGRPCClient) FetchMilestoneCount(ctx context.Context) (int64, error) {
+ h.logger.Info("Fetching milestone count")
+
+ res, err := h.client.FetchMilestoneCount(ctx, nil)
+ if err != nil {
+ return 0, err
+ }
+
+ h.logger.Info("Fetched milestone count")
+
+ return res.Result.Count, nil
+}
+
+func (h *HeimdallGRPCClient) FetchMilestone(ctx context.Context) (*milestone.Milestone, error) {
+ h.logger.Info("Fetching milestone")
+
+ res, err := h.client.FetchMilestone(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ h.logger.Info("Fetched milestone")
+
+ milestone := &milestone.Milestone{
+ StartBlock: new(big.Int).SetUint64(res.Result.StartBlock),
+ EndBlock: new(big.Int).SetUint64(res.Result.EndBlock),
+ Hash: protoutils.ConvertH256ToHash(res.Result.RootHash),
+ Proposer: protoutils.ConvertH160toAddress(res.Result.Proposer),
+ BorChainID: res.Result.BorChainID,
+ Timestamp: uint64(res.Result.Timestamp.GetSeconds()),
+ }
+
+ return milestone, nil
+}
+
+func (h *HeimdallGRPCClient) FetchLastNoAckMilestone(ctx context.Context) (string, error) {
+ h.logger.Info("Fetching latest no ack milestone Id")
+
+ res, err := h.client.FetchLastNoAckMilestone(ctx, nil)
+ if err != nil {
+ return "", err
+ }
+
+ h.logger.Info("Fetched last no-ack milestone")
+
+ return res.Result.Result, nil
+}
+
+func (h *HeimdallGRPCClient) FetchNoAckMilestone(ctx context.Context, milestoneID string) error {
+ req := &proto.FetchMilestoneNoAckRequest{
+ MilestoneID: milestoneID,
+ }
+
+ h.logger.Info("Fetching no ack milestone", "milestoneID", milestoneID)
+
+ res, err := h.client.FetchNoAckMilestone(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if !res.Result.Result {
+ return fmt.Errorf("Not in rejected list: milestoneID %q", milestoneID)
+ }
+
+ h.logger.Info("Fetched no ack milestone", "milestoneID", milestoneID)
+
+ return nil
+}
+
+func (h *HeimdallGRPCClient) FetchMilestoneID(ctx context.Context, milestoneID string) error {
+ req := &proto.FetchMilestoneIDRequest{
+ MilestoneID: milestoneID,
+ }
+
+ h.logger.Info("Fetching milestone id", "milestoneID", milestoneID)
+
+ res, err := h.client.FetchMilestoneID(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if !res.Result.Result {
+ return fmt.Errorf("This milestoneID %q does not exist", milestoneID)
+ }
+
+ h.logger.Info("Fetched milestone id", "milestoneID", milestoneID)
+
+ return nil
+}
diff --git a/consensus/bor/heimdallgrpc/server.go b/consensus/bor/heimdallgrpc/server.go
index 2953ab3ac1f..8139c33ddac 100644
--- a/consensus/bor/heimdallgrpc/server.go
+++ b/consensus/bor/heimdallgrpc/server.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/ledgerwatch/erigon-lib/gointerfaces"
- "github.com/ledgerwatch/erigon/consensus/bor"
+ "github.com/ledgerwatch/erigon/consensus/bor/heimdall"
"github.com/ledgerwatch/log/v3"
proto "github.com/maticnetwork/polyproto/heimdall"
"google.golang.org/grpc"
@@ -19,7 +19,7 @@ import (
type HeimdallGRPCServer struct {
proto.UnimplementedHeimdallServer
- heimdall bor.HeimdallServer
+ heimdall heimdall.HeimdallServer
logger log.Logger
}
@@ -27,7 +27,7 @@ func (h *HeimdallGRPCServer) Span(ctx context.Context, in *proto.SpanRequest) (*
result, err := h.heimdall.Span(ctx, in.ID)
if err != nil {
- h.logger.Error("Error while fetching span")
+ h.logger.Error("[bor.heimdall] Error while fetching span")
return nil, err
}
@@ -106,7 +106,7 @@ func (h *HeimdallGRPCServer) FetchCheckpointCount(ctx context.Context, in *empty
count, err := h.heimdall.FetchCheckpointCount(ctx)
if err != nil {
- h.logger.Error("Error while fetching checkpoint count")
+ h.logger.Error("[bor.heimdall] Error while fetching checkpoint count")
return nil, err
}
@@ -121,7 +121,7 @@ func (h *HeimdallGRPCServer) FetchCheckpoint(ctx context.Context, in *proto.Fetc
_ /*checkpoint*/, err := h.heimdall.FetchCheckpoint(ctx, in.ID)
if err != nil {
- h.logger.Error("Error while fetching checkpoint")
+ h.logger.Error("[bor.heimdall] Error while fetching checkpoint")
return nil, err
}
@@ -159,7 +159,7 @@ func (h *HeimdallGRPCServer) StateSyncEvents(req *proto.StateSyncEventsRequest,
height, events, err := h.heimdall.StateSyncEvents(context.Background(), fromId, int64(req.ToTime), int(req.Limit))
if err != nil {
- h.logger.Error("Error while fetching event records", "error", err)
+ h.logger.Error("[bor.heimdall] Error while fetching event records", "error", err)
return status.Errorf(codes.Internal, err.Error())
}
@@ -187,7 +187,7 @@ func (h *HeimdallGRPCServer) StateSyncEvents(req *proto.StateSyncEventsRequest,
})
if err != nil {
- h.logger.Error("Error while sending event record", "error", err)
+ h.logger.Error("[bor.heimdall] Error while sending event record", "error", err)
return status.Errorf(codes.Internal, err.Error())
}
@@ -204,7 +204,7 @@ func (h *HeimdallGRPCServer) StateSyncEvents(req *proto.StateSyncEventsRequest,
// StartHeimdallServer creates a heimdall GRPC server - which is implemented via the passed in client
// interface. It is intended for use in testing where more than a single test validator is required rather
// than to replace the maticnetwork implementation
-func StartHeimdallServer(shutDownCtx context.Context, heimdall bor.HeimdallServer, addr string, logger log.Logger) error {
+func StartHeimdallServer(shutDownCtx context.Context, heimdall heimdall.HeimdallServer, addr string, logger log.Logger) error {
grpcServer := grpc.NewServer(withLoggingUnaryInterceptor(logger))
proto.RegisterHeimdallServer(grpcServer,
&HeimdallGRPCServer{
@@ -219,16 +219,16 @@ func StartHeimdallServer(shutDownCtx context.Context, heimdall bor.HeimdallServe
go func() {
if err := grpcServer.Serve(lis); err != nil {
- logger.Error("failed to serve grpc server", "err", err)
+ logger.Error("[bor.heimdall] failed to serve grpc server", "err", err)
}
<-shutDownCtx.Done()
grpcServer.Stop()
lis.Close()
- logger.Info("GRPC Server stopped", "addr", addr)
+ logger.Info("[bor.heimdall] GRPC Server stopped", "addr", addr)
}()
- logger.Info("GRPC Server started", "addr", addr)
+ logger.Info("[bor.heimdall] GRPC Server started", "addr", addr)
return nil
}
@@ -242,7 +242,7 @@ func withLoggingUnaryInterceptor(logger log.Logger) grpc.ServerOption {
err = status.Errorf(codes.Internal, err.Error())
}
- logger.Debug("Request", "method", info.FullMethod, "duration", time.Since(start), "error", err)
+ logger.Debug("[bor.heimdall] Request", "method", info.FullMethod, "duration", time.Since(start), "error", err)
return h, err
})
diff --git a/consensus/bor/heimdallgrpc/state_sync.go b/consensus/bor/heimdallgrpc/state_sync.go
index 3cf93dc906a..e1b49e67d93 100644
--- a/consensus/bor/heimdallgrpc/state_sync.go
+++ b/consensus/bor/heimdallgrpc/state_sync.go
@@ -6,7 +6,6 @@ import (
"io"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus/bor/clerk"
proto "github.com/maticnetwork/polyproto/heimdall"
)
@@ -46,7 +45,7 @@ func (h *HeimdallGRPCClient) StateSyncEvents(ctx context.Context, fromID uint64,
EventRecord: clerk.EventRecord{
ID: event.ID,
Contract: libcommon.HexToAddress(event.Contract),
- Data: common.Hex2Bytes(event.Data[2:]),
+ Data: libcommon.Hex2Bytes(event.Data[2:]),
TxHash: libcommon.HexToHash(event.TxHash),
LogIndex: event.LogIndex,
ChainID: event.ChainID,
diff --git a/consensus/bor/snapshot.go b/consensus/bor/snapshot.go
index 0e8825dd5bb..836acf36343 100644
--- a/consensus/bor/snapshot.go
+++ b/consensus/bor/snapshot.go
@@ -37,7 +37,7 @@ const BorSeparate = "BorSeparate"
// newSnapshot creates a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use if for
// the genesis block.
-func newSnapshot(
+func NewSnapshot(
config *chain.BorConfig,
sigcache *lru.ARCCache[common.Hash, common.Address],
number uint64,
@@ -57,7 +57,7 @@ func newSnapshot(
}
// loadSnapshot loads an existing snapshot from the database.
-func loadSnapshot(config *chain.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) {
+func LoadSnapshot(config *chain.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) {
tx, err := db.BeginRo(context.Background())
if err != nil {
return nil, err
@@ -90,7 +90,7 @@ func loadSnapshot(config *chain.BorConfig, sigcache *lru.ARCCache[common.Hash, c
}
// store inserts the snapshot into the database.
-func (s *Snapshot) store(db kv.RwDB) error {
+func (s *Snapshot) Store(db kv.RwDB) error {
blob, err := json.Marshal(s)
if err != nil {
return err
@@ -118,7 +118,7 @@ func (s *Snapshot) copy() *Snapshot {
return cpy
}
-func (s *Snapshot) apply(headers []*types.Header, logger log.Logger) (*Snapshot, error) {
+func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger log.Logger) (*Snapshot, error) {
// Allow passing in no headers for cleaner code
if len(headers) == 0 {
return s, nil
@@ -145,31 +145,43 @@ func (s *Snapshot) apply(headers []*types.Header, logger log.Logger) (*Snapshot,
if number >= sprintLen {
delete(snap.Recents, number-sprintLen)
}
-
// Resolve the authorization key and check against signers
- signer, err := ecrecover(header, s.sigcache, s.config)
+ signer, err := Ecrecover(header, s.sigcache, s.config)
+
if err != nil {
return nil, err
}
+ var validSigner bool
+ var succession int
+
// check if signer is in validator set
if !snap.ValidatorSet.HasAddress(signer) {
- return nil, &UnauthorizedSignerError{number, signer.Bytes()}
+ return snap, &UnauthorizedSignerError{number, signer.Bytes()}
}
-
- if _, err = snap.GetSignerSuccessionNumber(signer); err != nil {
- return nil, err
+ if succession, err = snap.GetSignerSuccessionNumber(signer); err != nil {
+ return snap, err
}
// add recents
snap.Recents[number] = signer
+ validSigner = true
+
+ if parent != nil && header.Time < parent.Time+CalcProducerDelay(number, succession, s.config) {
+ return snap, &BlockTooSoonError{number, succession}
+ }
+ difficulty := snap.Difficulty(signer)
+ if header.Difficulty.Uint64() != difficulty {
+ return snap, &WrongDifficultyError{number, difficulty, header.Difficulty.Uint64(), signer.Bytes()}
+ }
+
// change validator set and change proposer
if number > 0 && (number+1)%sprintLen == 0 {
- if err := validateHeaderExtraField(header.Extra); err != nil {
- return nil, err
+ if err := ValidateHeaderExtraField(header.Extra); err != nil {
+ return snap, err
}
- validatorBytes := header.Extra[extraVanity : len(header.Extra)-extraSeal]
+ validatorBytes := GetValidatorBytes(header, s.config)
// get validators from headers and use that for new validator set
newVals, _ := valset.ParseValidators(validatorBytes)
@@ -177,10 +189,14 @@ func (s *Snapshot) apply(headers []*types.Header, logger log.Logger) (*Snapshot,
v.IncrementProposerPriority(1, logger)
snap.ValidatorSet = v
}
- }
- snap.Number += uint64(len(headers))
- snap.Hash = headers[len(headers)-1].Hash()
+ if number > 64 && !validSigner {
+ return snap, &UnauthorizedSignerError{number, signer.Bytes()}
+ }
+ parent = header
+ snap.Number = number
+ snap.Hash = header.Hash()
+ }
return snap, nil
}
diff --git a/consensus/bor/span.go b/consensus/bor/span.go
index 7365fd10c80..41e8abec8db 100644
--- a/consensus/bor/span.go
+++ b/consensus/bor/span.go
@@ -10,7 +10,7 @@ import (
//go:generate mockgen -destination=./span_mock.go -package=bor . Spanner
type Spanner interface {
GetCurrentSpan(syscall consensus.SystemCall) (*span.Span, error)
- GetCurrentValidators(blockNumber uint64, signer libcommon.Address, getSpanForBlock func(blockNum uint64) (*span.HeimdallSpan, error)) ([]*valset.Validator, error)
- GetCurrentProducers(blockNumber uint64, signer libcommon.Address, getSpanForBlock func(blockNum uint64) (*span.HeimdallSpan, error)) ([]*valset.Validator, error)
+ GetCurrentValidators(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error)
+ GetCurrentProducers(spanId uint64, signer libcommon.Address, chain consensus.ChainHeaderReader) ([]*valset.Validator, error)
CommitSpan(heimdallSpan span.HeimdallSpan, syscall consensus.SystemCall) error
}
diff --git a/consensus/bor/statefull/processor.go b/consensus/bor/statefull/processor.go
index 475a13bcd9b..a19844e0f28 100644
--- a/consensus/bor/statefull/processor.go
+++ b/consensus/bor/statefull/processor.go
@@ -11,7 +11,7 @@ import (
)
type ChainContext struct {
- Chain consensus.ChainHeaderReader
+ Chain consensus.ChainReader
Bor consensus.Engine
}
diff --git a/consensus/chain_reader.go b/consensus/chain_reader.go
index 795e2a856e4..f79de40c4cc 100644
--- a/consensus/chain_reader.go
+++ b/consensus/chain_reader.go
@@ -78,3 +78,11 @@ func (cr ChainReaderImpl) GetTd(hash libcommon.Hash, number uint64) *big.Int {
func (cr ChainReaderImpl) FrozenBlocks() uint64 {
return cr.BlockReader.FrozenBlocks()
}
+
+func (cr ChainReaderImpl) BorSpan(spanId uint64) []byte {
+ spanBytes, err := cr.BlockReader.Span(context.Background(), cr.Db, spanId)
+ if err != nil {
+ log.Error("BorSpan failed", "err", err)
+ }
+ return spanBytes
+}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index aaf4c6da083..6efe46f21ae 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -22,6 +22,8 @@ import (
"context"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"io"
"math/big"
"math/rand"
@@ -38,9 +40,7 @@ import (
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/common/debug"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
@@ -366,7 +366,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
}
func (c *Clique) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
- state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) {
+ state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) {
}
func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall,
@@ -378,7 +378,7 @@ func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, un
// rewards given.
func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.UncleHash = types.CalcUncleHash(nil)
@@ -389,7 +389,7 @@ func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *sta
// nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
) (*types.Block, types.Transactions, types.Receipts, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.UncleHash = types.CalcUncleHash(nil)
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index c69bbf349af..87167c13428 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -106,7 +106,7 @@ func TestReimportMirroredState(t *testing.T) {
}
// Insert the first two blocks and make sure the chain is valid
- if err := m.InsertChain(chain.Slice(0, 2), nil); err != nil {
+ if err := m.InsertChain(chain.Slice(0, 2)); err != nil {
t.Fatalf("failed to insert initial blocks: %v", err)
}
if err := m.DB.View(m.Ctx, func(tx kv.Tx) error {
@@ -123,7 +123,7 @@ func TestReimportMirroredState(t *testing.T) {
// Simulate a crash by creating a new chain on top of the database, without
// flushing the dirty states out. Insert the last block, triggering a sidechain
// reimport.
- if err := m.InsertChain(chain.Slice(2, chain.Length()), nil); err != nil {
+ if err := m.InsertChain(chain.Slice(2, chain.Length())); err != nil {
t.Fatalf("failed to insert final block: %v", err)
}
if err := m.DB.View(m.Ctx, func(tx kv.Tx) error {
diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go
index c478d6ec3b1..fb34b19c680 100644
--- a/consensus/clique/snapshot.go
+++ b/consensus/clique/snapshot.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"sort"
"time"
@@ -33,7 +34,6 @@ import (
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types"
)
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index bfdaa4e1d09..0f487177bfc 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -477,7 +477,7 @@ func TestClique(t *testing.T) {
chainX.Headers[k] = b.Header()
}
chainX.TopBlock = batches[j][len(batches[j])-1]
- if err = m.InsertChain(chainX, nil); err != nil {
+ if err = m.InsertChain(chainX); err != nil {
t.Errorf("test %d: failed to import batch %d, %v", i, j, err)
failed = true
break
@@ -493,7 +493,7 @@ func TestClique(t *testing.T) {
chainX.Headers[k] = b.Header()
}
chainX.TopBlock = batches[len(batches)-1][len(batches[len(batches)-1])-1]
- err = m.InsertChain(chainX, nil)
+ err = m.InsertChain(chainX)
if tt.failure != nil && err == nil {
t.Errorf("test %d: expected failure", i)
}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 6db756f042a..0a98706fa34 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -27,6 +27,7 @@ import (
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
+ "github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/log/v3"
)
@@ -54,6 +55,9 @@ type ChainHeaderReader interface {
// Number of blocks frozen in the block snapshots
FrozenBlocks() uint64
+
+ // Byte string representation of a bor span with given ID
+ BorSpan(spanId uint64) []byte
}
// ChainReader defines a small collection of methods needed to access the local
@@ -63,9 +67,10 @@ type ChainReader interface {
// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash libcommon.Hash, number uint64) *types.Block
- GetHeader(hash libcommon.Hash, number uint64) *types.Header
HasBlock(hash libcommon.Hash, number uint64) bool
+
+ BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue
}
type SystemCall func(contract libcommon.Address, data []byte) ([]byte, error)
@@ -117,6 +122,9 @@ type EngineReader interface {
CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall SystemCall,
) ([]Reward, error)
+
+ // Close terminates any background threads, DB's etc maintained by the consensus engine.
+ Close() error
}
// EngineReader are write methods of the consensus engine
@@ -136,7 +144,7 @@ type EngineWriter interface {
// Initialize runs any pre-transaction state modifications (e.g. epoch start)
Initialize(config *chain.Config, chain ChainHeaderReader, header *types.Header,
- state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall SysCallCustom)
+ state *state.IntraBlockState, syscall SysCallCustom, logger log.Logger)
// Finalize runs any post-transaction state modifications (e.g. block rewards)
// but does not assemble the block.
@@ -145,7 +153,7 @@ type EngineWriter interface {
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain ChainHeaderReader, syscall SystemCall, logger log.Logger,
+ chain ChainReader, syscall SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error)
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
@@ -155,7 +163,7 @@ type EngineWriter interface {
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain ChainHeaderReader, syscall SystemCall, call Call, logger log.Logger,
+ chain ChainReader, syscall SystemCall, call Call, logger log.Logger,
) (*types.Block, types.Transactions, types.Receipts, error)
// Seal generates a new sealing request for the given input block and pushes
@@ -177,9 +185,6 @@ type EngineWriter interface {
// APIs returns the RPC APIs this consensus engine provides.
APIs(chain ChainHeaderReader) []rpc.API
-
- // Close terminates any background threads maintained by the consensus engine.
- Close() error
}
// PoW is a consensus engine based on proof-of-work.
diff --git a/consensus/errors.go b/consensus/errors.go
index c88ae64b4e7..0659812b7e7 100644
--- a/consensus/errors.go
+++ b/consensus/errors.go
@@ -19,6 +19,11 @@ package consensus
import "errors"
var (
+ // ErrInvalidBlock is a generic error to wrap all non-transient genuine protocol validation errors.
+ // For example, ErrUnexpectedWithdrawals should be wrapped as ErrInvalidBlock,
+ // while an out-of-memory error should not.
+ ErrInvalidBlock = errors.New("invalid block")
+
// ErrUnknownAncestor is returned when validating a block requires an ancestor
// that is unknown.
ErrUnknownAncestor = errors.New("unknown ancestor")
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index 3c20c22439e..6ac5737b672 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -18,6 +18,7 @@ package ethash
import (
"encoding/binary"
+ common2 "github.com/ledgerwatch/erigon-lib/common"
"hash"
"math/big"
"reflect"
@@ -135,7 +136,7 @@ func seedHash(block uint64) []byte {
return seed
}
- h := common.NewHasher()
+ h := common2.NewHasher()
for i := 0; i < int(block/epochLength); i++ {
h.Sha.Reset()
@@ -151,7 +152,7 @@ func seedHash(block uint64) []byte {
}
}
- common.ReturnHasherToPool(h)
+ common2.ReturnHasherToPool(h)
return seed
}
diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go
index 722c83fadc3..d9fa5b1d400 100644
--- a/consensus/ethash/algorithm_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -19,13 +19,12 @@ package ethash
import (
"bytes"
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"io"
"reflect"
"testing"
"github.com/ledgerwatch/erigon-lib/common/length"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// prepare converts an ethash cache or dataset from a byte stream into the internal
diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go
index 7ee7d81b19f..0ecd3a819d2 100644
--- a/consensus/ethash/api.go
+++ b/consensus/ethash/api.go
@@ -18,10 +18,10 @@ package ethash
import (
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
)
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index b3ce172a5fb..5c92c2061d5 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -24,7 +24,7 @@ import (
"runtime"
"time"
- mapset "github.com/deckarep/golang-set"
+ mapset "github.com/deckarep/golang-set/v2"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -149,9 +149,9 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, header *types.He
return nil
}
-func getUncles(chain consensus.ChainReader, header *types.Header) (mapset.Set, map[libcommon.Hash]*types.Header) {
+func getUncles(chain consensus.ChainReader, header *types.Header) (mapset.Set[libcommon.Hash], map[libcommon.Hash]*types.Header) {
// Gather the set of past uncles and ancestors
- uncles, ancestors := mapset.NewSet(), make(map[libcommon.Hash]*types.Header)
+ uncles, ancestors := mapset.NewSet[libcommon.Hash](), make(map[libcommon.Hash]*types.Header)
number, parent := header.Number.Uint64()-1, header.ParentHash
for i := 0; i < 7; i++ {
@@ -178,7 +178,7 @@ func getUncles(chain consensus.ChainReader, header *types.Header) (mapset.Set, m
return uncles, ancestors
}
-func (ethash *Ethash) VerifyUncle(chain consensus.ChainHeaderReader, header *types.Header, uncle *types.Header, uncles mapset.Set, ancestors map[libcommon.Hash]*types.Header, seal bool) error {
+func (ethash *Ethash) VerifyUncle(chain consensus.ChainHeaderReader, header *types.Header, uncle *types.Header, uncles mapset.Set[libcommon.Hash], ancestors map[libcommon.Hash]*types.Header, seal bool) error {
// Make sure every uncle is rewarded only once
hash := uncle.Hash()
if uncles.Contains(hash) {
@@ -550,14 +550,17 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
func (ethash *Ethash) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
- state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) {
+ state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) {
+ if config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(header.Number) == 0 {
+ misc.ApplyDAOHardFork(state)
+ }
}
// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state on the header
func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(config, state, header, uncles)
@@ -568,7 +571,7 @@ func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
) (*types.Block, types.Transactions, types.Receipts, error) {
// Finalize block
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 813c5283e78..ef2840371de 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -32,13 +32,12 @@ import (
"unsafe"
"github.com/edsrzf/mmap-go"
- "github.com/hashicorp/golang-lru/simplelru"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
"github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg"
"github.com/ledgerwatch/erigon/common/debug"
cmath "github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/metrics"
"github.com/ledgerwatch/erigon/rpc"
"github.com/ledgerwatch/log/v3"
)
@@ -183,7 +182,7 @@ type lru struct {
mu sync.Mutex
// Items are kept in a LRU cache, but there is a special case:
// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
- cache *simplelru.LRU
+ cache *simplelru.LRU[uint64, any]
future uint64
futureItem interface{}
}
@@ -194,7 +193,7 @@ func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru
if maxItems <= 0 {
maxItems = 1
}
- cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
+ cache, _ := simplelru.NewLRU[uint64, any](maxItems, func(key uint64, value interface{}) {
log.Trace("Evicted ethash "+what, "epoch", key)
})
return &lru{what: what, new: new, cache: cache}
@@ -410,8 +409,8 @@ type Ethash struct {
datasets *lru // In memory datasets to avoid regenerating too often
// Mining related fields
- rand *rand.Rand // Properly seeded random source for nonces
- hashrate metrics.Meter // Meter tracking the average hashrate
+ rand *rand.Rand // Properly seeded random source for nonces
+ hashrate *hashRateMeter // Meter tracking the average hashrate
remote *remoteSealer
// The fields below are hooks for testing
@@ -439,7 +438,7 @@ func New(config ethashcfg.Config, notify []string, noverify bool) *Ethash {
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
- hashrate: metrics.NewMeterForced(),
+ hashrate: newHashRateMeter(),
}
if config.PowMode == ethashcfg.ModeShared {
ethash.shared = GetSharedEthash()
@@ -534,7 +533,7 @@ func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
func (ethash *Ethash) Hashrate() float64 {
// Short circuit if we are run the ethash in normal/test mode.
if (ethash.config.PowMode != ethashcfg.ModeNormal && ethash.config.PowMode != ethashcfg.ModeTest) || ethash.remote == nil {
- return ethash.hashrate.Rate1()
+ return ethash.hashrate.Rate()
}
var res = make(chan uint64, 1)
@@ -542,11 +541,11 @@ func (ethash *Ethash) Hashrate() float64 {
case ethash.remote.fetchRateCh <- res:
case <-ethash.remote.exitCh:
// Return local hashrate only if ethash is stopped.
- return ethash.hashrate.Rate1()
+ return ethash.hashrate.Rate()
}
// Gather total submitted hash rate of remote sealers.
- return ethash.hashrate.Rate1() + float64(<-res)
+ return ethash.hashrate.Rate() + float64(<-res)
}
// APIs implements consensus.Engine, returning the user facing RPC APIs.
diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go
index 805ebe9cf60..c0566237c68 100644
--- a/consensus/ethash/ethash_test.go
+++ b/consensus/ethash/ethash_test.go
@@ -17,13 +17,13 @@
package ethash
import (
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/big"
"testing"
"time"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/types"
)
diff --git a/consensus/ethash/fake.go b/consensus/ethash/fake.go
index 19a362a9751..70899866d27 100644
--- a/consensus/ethash/fake.go
+++ b/consensus/ethash/fake.go
@@ -3,7 +3,7 @@ package ethash
import (
"time"
- mapset "github.com/deckarep/golang-set"
+ mapset "github.com/deckarep/golang-set/v2"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg"
"github.com/ledgerwatch/log/v3"
@@ -87,7 +87,7 @@ func (f *FakeEthash) VerifyUncles(chain consensus.ChainReader, header *types.Hea
return nil
}
-func (f *FakeEthash) VerifyUncle(chain consensus.ChainHeaderReader, block *types.Header, uncle *types.Header, uncles mapset.Set, ancestors map[libcommon.Hash]*types.Header, seal bool) error {
+func (f *FakeEthash) VerifyUncle(chain consensus.ChainHeaderReader, block *types.Header, uncle *types.Header, uncles mapset.Set[libcommon.Hash], ancestors map[libcommon.Hash]*types.Header, seal bool) error {
err := f.Ethash.VerifyUncle(chain, block, uncle, uncles, ancestors, false)
if err != nil {
return err
diff --git a/consensus/ethash/meter.go b/consensus/ethash/meter.go
new file mode 100644
index 00000000000..a75b5c1f06e
--- /dev/null
+++ b/consensus/ethash/meter.go
@@ -0,0 +1,193 @@
+package ethash
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/erigon/common/debug"
+)
+
+func newHashRateMeter() *hashRateMeter {
+ m := newMeter()
+ arbiter.mu.Lock()
+ defer arbiter.mu.Unlock()
+ arbiter.meters[m] = struct{}{}
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// meterSnapshot is a read-only copy of another Meter.
+type meterSnapshot struct {
+ // WARNING: The `temp` field is accessed atomically.
+ // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is
+ // guaranteed to be so aligned, so take advantage of that. For more information,
+ // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ temp int64
+ count int64
+ rate float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *meterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*meterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *meterSnapshot) Rate() float64 { return m.rate }
+
+// Stop is a no-op.
+func (m *meterSnapshot) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type hashRateMeter struct {
+ lock sync.RWMutex
+ snapshot *meterSnapshot
+ a1 *ewma
+ startTime time.Time
+ stopped uint32
+}
+
+func newMeter() *hashRateMeter {
+ return &hashRateMeter{
+ snapshot: &meterSnapshot{},
+ a1: &ewma{alpha: 1 - math.Exp(-5.0/60.0/1)},
+ startTime: time.Now(),
+ }
+}
+
+// Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
+func (m *hashRateMeter) Stop() {
+ stopped := atomic.SwapUint32(&m.stopped, 1)
+ if stopped != 1 {
+ arbiter.mu.Lock()
+ delete(arbiter.meters, m)
+ arbiter.mu.Unlock()
+ }
+}
+
+// Count returns the number of events recorded.
+// It updates the meter to be as accurate as possible
+func (m *hashRateMeter) Count() int64 {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.updateMeter()
+ return m.snapshot.count
+}
+
+// Mark records the occurrence of n events.
+func (m *hashRateMeter) Mark(n int64) {
+ m.lock.Lock()
+ m.snapshot.temp = n
+ m.lock.Unlock()
+}
+
+// Rate returns the one-minute moving average rate of events per second.
+func (m *hashRateMeter) Rate() float64 {
+ m.lock.RLock()
+ defer m.lock.RUnlock()
+ return m.snapshot.rate
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *hashRateMeter) Snapshot() *meterSnapshot {
+ m.lock.RLock()
+ snapshot := *m.snapshot
+ m.lock.RUnlock()
+ return &snapshot
+}
+
+func (m *hashRateMeter) updateSnapshot() {
+ // should run with write lock held on m.lock
+ snapshot := m.snapshot
+ snapshot.rate = m.a1.Rate()
+}
+
+func (m *hashRateMeter) updateMeter() {
+ // should only run with write lock held on m.lock
+ n := atomic.SwapInt64(&m.snapshot.temp, 0)
+ m.snapshot.count += n
+ m.a1.Update(n)
+}
+
+func (m *hashRateMeter) tick() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.updateMeter()
+ m.a1.Tick()
+ m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// meters are references in a set for future stopping.
+type meterArbiter struct {
+ mu sync.RWMutex
+ started bool
+ meters map[*hashRateMeter]struct{}
+ ticker *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*hashRateMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+ defer debug.LogPanic()
+ for range ma.ticker.C {
+ ma.tickMeters()
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.mu.RLock()
+ defer ma.mu.RUnlock()
+ for meter := range ma.meters {
+ meter.tick()
+ }
+}
+
+// ewma is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type ewma struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *ewma) Rate() float64 {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.rate * float64(time.Second)
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *ewma) Tick() {
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5*time.Second)
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if a.init {
+ a.rate += a.alpha * (instantRate - a.rate)
+ } else {
+ a.init = true
+ a.rate = instantRate
+ }
+}
+
+// Update adds n uncounted events.
+func (a *ewma) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index 76a00da872f..d020147ff5a 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -21,6 +21,7 @@ import (
"context"
crand "crypto/rand"
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math"
"math/big"
"math/rand"
@@ -32,7 +33,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core/types"
)
diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go
index 6521efc9aee..c5544ef33bd 100644
--- a/consensus/merge/merge.go
+++ b/consensus/merge/merge.go
@@ -132,7 +132,7 @@ func (s *Merge) CalculateRewards(config *chain.Config, header *types.Header, unc
func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
) (types.Transactions, types.Receipts, error) {
if !misc.IsPoSHeader(header) {
return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger)
@@ -164,7 +164,7 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
func (s *Merge) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
- chain consensus.ChainHeaderReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+ chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
) (*types.Block, types.Transactions, types.Receipts, error) {
if !misc.IsPoSHeader(header) {
return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, call, logger)
@@ -242,12 +242,18 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent *
return consensus.ErrUnexpectedWithdrawals
}
- cancun := chain.Config().IsCancun(header.Time)
- if cancun {
- return misc.VerifyPresenceOfCancunHeaderFields(header)
- } else {
+ if !chain.Config().IsCancun(header.Time) {
return misc.VerifyAbsenceOfCancunHeaderFields(header)
}
+
+ if err := misc.VerifyPresenceOfCancunHeaderFields(header); err != nil {
+ return err
+ }
+ expectedExcessBlobGas := misc.CalcExcessBlobGas(chain.Config(), parent)
+ if *header.ExcessBlobGas != expectedExcessBlobGas {
+ return fmt.Errorf("invalid excessBlobGas: have %d, want %d", *header.ExcessBlobGas, expectedExcessBlobGas)
+ }
+ return nil
}
func (s *Merge) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
@@ -265,10 +271,16 @@ func (s *Merge) IsServiceTransaction(sender libcommon.Address, syscall consensus
return s.eth1Engine.IsServiceTransaction(sender, syscall)
}
-func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState, txs []types.Transaction, uncles []*types.Header, syscall consensus.SysCallCustom) {
- s.eth1Engine.Initialize(config, chain, header, state, txs, uncles, syscall)
+func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
+ state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger,
+) {
+ if !misc.IsPoSHeader(header) {
+ s.eth1Engine.Initialize(config, chain, header, state, syscall, logger)
+ }
if chain.Config().IsCancun(header.Time) {
- misc.ApplyBeaconRootEip4788(chain, header, state)
+ misc.ApplyBeaconRootEip4788(header.ParentBeaconBlockRoot, func(addr libcommon.Address, data []byte) ([]byte, error) {
+ return syscall(addr, data, state, header, false /* constCall */)
+ })
}
}
diff --git a/consensus/merge/merge_test.go b/consensus/merge/merge_test.go
index bf0558211d3..aee7810cd2f 100644
--- a/consensus/merge/merge_test.go
+++ b/consensus/merge/merge_test.go
@@ -41,6 +41,10 @@ func (r readerMock) FrozenBlocks() uint64 {
return 0
}
+func (r readerMock) BorSpan(spanId uint64) []byte {
+ return nil
+}
+
// The thing only that changes beetwen normal ethash checks other than POW, is difficulty
// and nonce so we are gonna test those
func TestVerifyHeaderDifficulty(t *testing.T) {
diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go
index bd43055066f..8cd278b253b 100644
--- a/consensus/misc/eip1559.go
+++ b/consensus/misc/eip1559.go
@@ -50,7 +50,7 @@ func VerifyEip1559Header(config *chain.Config, parent, header *types.Header, ski
expectedBaseFee := CalcBaseFee(config, parent)
if header.BaseFee.Cmp(expectedBaseFee) != 0 {
return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d",
- expectedBaseFee, header.BaseFee, parent.BaseFee, parent.GasUsed)
+ header.BaseFee, expectedBaseFee, parent.BaseFee, parent.GasUsed)
}
return nil
}
diff --git a/consensus/misc/eip4788.go b/consensus/misc/eip4788.go
index 099fdcbf6ae..26293004b28 100644
--- a/consensus/misc/eip4788.go
+++ b/consensus/misc/eip4788.go
@@ -1,31 +1,16 @@
package misc
import (
- "github.com/holiman/uint256"
+ "github.com/ledgerwatch/log/v3"
libcommon "github.com/ledgerwatch/erigon-lib/common"
-
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/params"
)
-func ApplyBeaconRootEip4788(chain consensus.ChainHeaderReader, header *types.Header, state *state.IntraBlockState) {
- historyStorageAddress := libcommon.BytesToAddress(params.HistoryStorageAddress)
- historicalRootsModulus := params.HistoricalRootsModulus
- timestampReduced := header.Time % historicalRootsModulus
- timestampExtended := timestampReduced + historicalRootsModulus
- timestampIndex := libcommon.BytesToHash((uint256.NewInt(timestampReduced)).Bytes())
- rootIndex := libcommon.BytesToHash(uint256.NewInt(timestampExtended).Bytes())
- parentBeaconBlockRootInt := *uint256.NewInt(0).SetBytes(header.ParentBeaconBlockRoot.Bytes())
- state.SetState(historyStorageAddress, ×tampIndex, *uint256.NewInt(header.Time))
- state.SetState(historyStorageAddress, &rootIndex, parentBeaconBlockRootInt)
-
- // Ensure that the historyStorageAddress has nonzero nonce to prevent wipe-out of its storage stipulated by EIP-161
- // (when the historyStorageAddress has zero balance).
- // See https://github.com/ethereum/EIPs/pull/7431
- if state.GetNonce(historyStorageAddress) == 0 {
- state.SetNonce(historyStorageAddress, 1)
+func ApplyBeaconRootEip4788(parentBeaconBlockRoot *libcommon.Hash, syscall consensus.SystemCall) {
+ _, err := syscall(params.BeaconRootsAddress, parentBeaconBlockRoot.Bytes())
+ if err != nil {
+ log.Warn("Failed to call beacon roots contract", "err", err)
}
}
diff --git a/consensus/misc/eip4788_test.go b/consensus/misc/eip4788_test.go
deleted file mode 100644
index 0b7e50272ec..00000000000
--- a/consensus/misc/eip4788_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package misc
-
-import (
- "crypto/sha256"
- "math/big"
- "testing"
-
- "github.com/holiman/uint256"
- "github.com/ledgerwatch/erigon-lib/chain"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/core/types/accounts"
- "github.com/ledgerwatch/erigon/core/vm"
- "github.com/ledgerwatch/erigon/params"
- "github.com/stretchr/testify/assert"
-)
-
-type dummyChainHeaderReader struct {
-}
-
-func (r dummyChainHeaderReader) Config() *chain.Config {
- return nil
-}
-
-func (r dummyChainHeaderReader) CurrentHeader() *types.Header {
- return nil
-}
-
-func (r dummyChainHeaderReader) GetHeader(libcommon.Hash, uint64) *types.Header {
- return nil
-}
-
-func (r dummyChainHeaderReader) GetHeaderByNumber(uint64) *types.Header {
- return nil
-}
-
-func (r dummyChainHeaderReader) GetHeaderByHash(libcommon.Hash) *types.Header {
- return nil
-}
-
-func (r dummyChainHeaderReader) GetTd(libcommon.Hash, uint64) *big.Int {
- return nil
-}
-
-func (r dummyChainHeaderReader) FrozenBlocks() uint64 {
- return 0
-}
-
-type dummyStateReader struct {
-}
-
-func (dsr *dummyStateReader) ReadAccountData(address libcommon.Address) (*accounts.Account, error) {
- return nil, nil
-}
-func (dsr *dummyStateReader) ReadAccountStorage(address libcommon.Address, incarnation uint64, key *libcommon.Hash) ([]byte, error) {
- return nil, nil
-}
-func (dsr *dummyStateReader) ReadAccountCode(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash) ([]byte, error) {
- return make([]byte, 0), nil
-}
-func (dsr *dummyStateReader) ReadAccountCodeSize(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash) (int, error) {
- return 0, nil
-}
-func (dsr *dummyStateReader) ReadAccountIncarnation(address libcommon.Address) (uint64, error) {
- return 0, nil
-}
-
-func TestApplyBeaconRoot(t *testing.T) {
- var mockReader dummyChainHeaderReader
- testHashBytes := sha256.Sum256([]byte("test"))
- testRootHash := libcommon.BytesToHash(testHashBytes[:])
- header := types.Header{
- ParentBeaconBlockRoot: &testRootHash,
- Time: 1,
- }
-
- var state state.IntraBlockState = *state.New(&dummyStateReader{})
-
- ApplyBeaconRootEip4788(mockReader, &header, &state)
- pc := vm.PrecompiledContractsCancun[libcommon.BytesToAddress(params.HistoryStorageAddress)]
- spc, ok := pc.(vm.StatefulPrecompiledContract)
- if !ok {
- t.Fatalf("Error instantiating pre-compile")
- }
- timestampParam := uint256.NewInt(1).Bytes32()
-
- res, err := spc.RunStateful(timestampParam[:], &state)
- if err != nil {
- t.Errorf("error %v", err)
- }
- assert.Equal(t, testRootHash.Bytes(), res, "Beacon root mismatch")
- t.Logf("result %v", res)
-}
diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go
index 7c8d2de92be..9b4eb159fb6 100644
--- a/consensus/misc/eip4844.go
+++ b/consensus/misc/eip4844.go
@@ -21,14 +21,14 @@ import (
"github.com/holiman/uint256"
+ "github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/common/fixedgas"
"github.com/ledgerwatch/erigon/core/types"
- "github.com/ledgerwatch/erigon/params"
)
// CalcExcessBlobGas implements calc_excess_blob_gas from EIP-4844
-func CalcExcessBlobGas(parent *types.Header) uint64 {
+func CalcExcessBlobGas(config *chain.Config, parent *types.Header) uint64 {
var excessBlobGas, blobGasUsed uint64
if parent.ExcessBlobGas != nil {
excessBlobGas = *parent.ExcessBlobGas
@@ -37,10 +37,10 @@ func CalcExcessBlobGas(parent *types.Header) uint64 {
blobGasUsed = *parent.BlobGasUsed
}
- if excessBlobGas+blobGasUsed < fixedgas.TargetBlobGasPerBlock {
+ if excessBlobGas+blobGasUsed < config.GetTargetBlobGasPerBlock() {
return 0
}
- return excessBlobGas + blobGasUsed - fixedgas.TargetBlobGasPerBlock
+ return excessBlobGas + blobGasUsed - config.GetTargetBlobGasPerBlock()
}
// FakeExponential approximates factor * e ** (num / denom) using a taylor expansion
@@ -99,8 +99,8 @@ func VerifyAbsenceOfCancunHeaderFields(header *types.Header) error {
return nil
}
-func GetBlobGasPrice(excessBlobGas uint64) (*uint256.Int, error) {
- return FakeExponential(uint256.NewInt(params.MinBlobGasPrice), uint256.NewInt(params.BlobGasPriceUpdateFraction), excessBlobGas)
+func GetBlobGasPrice(config *chain.Config, excessBlobGas uint64) (*uint256.Int, error) {
+ return FakeExponential(uint256.NewInt(config.GetMinBlobGasPrice()), uint256.NewInt(config.GetBlobGasPriceUpdateFraction()), excessBlobGas)
}
func GetBlobGasUsed(numBlobs int) uint64 {
diff --git a/consensus/misc/gaslimit.go b/consensus/misc/gaslimit.go
index 440a1629e26..16fab48373c 100644
--- a/consensus/misc/gaslimit.go
+++ b/consensus/misc/gaslimit.go
@@ -17,7 +17,6 @@
package misc
import (
- "errors"
"fmt"
"github.com/ledgerwatch/erigon/params"
@@ -36,7 +35,7 @@ func VerifyGaslimit(parentGasLimit, headerGasLimit uint64) error {
return fmt.Errorf("invalid gas limit: have %d, want %d +-= %d", headerGasLimit, parentGasLimit, limit-1)
}
if headerGasLimit < params.MinGasLimit {
- return errors.New("invalid gas limit below 5000")
+ return fmt.Errorf("invalid gas limit below %d", params.MinGasLimit)
}
return nil
}
diff --git a/core/allocs/amoy.json b/core/allocs/amoy.json
new file mode 100644
index 00000000000..df3d74e4526
--- /dev/null
+++ b/core/allocs/amoy.json
@@ -0,0 +1,17 @@
+{
+ "0000000000000000000000000000000000001000": {
+ "balance": "0x0",
+ "code": "0x608060405234801561001057600080fd5b50600436106101f05760003560e01c806360c8614d1161010f578063af26aa96116100a2578063d5b844eb11610071578063d5b844eb14610666578063dcf2793a14610684578063e3b7c924146106b6578063f59cf565146106d4576101f0565b8063af26aa96146105c7578063b71d7a69146105e7578063b7ab4db514610617578063c1b3c91914610636576101f0565b806370ba5707116100de57806370ba57071461052b57806398ab2b621461055b5780639d11b80714610579578063ae756451146105a9576101f0565b806360c8614d1461049c57806365b3a1e2146104bc57806366332354146104db578063687a9bd6146104f9576101f0565b80633434735f1161018757806344d6528f1161015657806344d6528f146103ee5780634dbc959f1461041e57806355614fcc1461043c578063582a8d081461046c576101f0565b80633434735f1461035257806335ddfeea1461037057806343ee8213146103a057806344c15cb1146103be576101f0565b806323f2a73f116101c357806323f2a73f146102a45780632bc06564146102d45780632de3a180146102f25780632eddf35214610322576101f0565b8063047a6c5b146101f55780630c35b1cb146102275780631270b5741461025857806323c2a2b414610288575b600080fd5b61020f600480360361020a9190810190612944565b610706565b60405161021e93929190613283565b60405180910390f35b610241600480360361023c9190810190612944565b61075d565b60405161024f9291906130a4565b60405180910390f35b610272600480360361026d919081019061296d565b610939565b60405161027f91906130db565b60405180910390f35b6102a2600480360361029d9190810190612a4c565b610a91565b005b6102be60048036036102b9919081019061296d565b61112a565b6040516102cb91906130db565b60405180910390f35b6102dc611281565b6040516102e99190613231565b60405180910390f35b61030c600480360361030791908101906128a1565b611286565b60405161031991906130f6565b60405180910390f35b61033c60048036036103379190810190612944565b611307565b6040516103499190613231565b60405180910390f35b61035a611437565b6040516103679190613089565b60405180910390f35b61038a600480360361038591908101906128dd565b61144f565b60405161039791906130db565b60405180910390f35b6103a861151a565b6040516103b591906130f6565b60405180910390f35b6103d860048036036103d391908101906129a9565b611531565b6040516103e59190613231565b60405180910390f35b6104086004803603610403919081019061296d565b611619565b6040516104159190613216565b60405180910390f35b610426611781565b6040516104339190613231565b60405180910390f35b61045660048036036104519190810190612826565b611791565b60405161046391906130db565b60405180910390f35b6104866004803603610481919081019061284f565b6117ab565b60405161049391906130f6565b60405180910390f35b6104a4611829565b6040516104b393929190613283565b60405180910390f35b6104c461189d565b6040516104d29291906130a4565b60405180910390f35b6104e361198e565b6040516104f09190613231565b60405180910390f35b610513600480360361050e9190810190612a10565b611993565b6040516105229392919061324c565b60405180910390f35b61054560048036036105409190810190612826565b6119f7565b60405161055291906130db565b60405180910390f35b610563611a11565b60405161057091906130f6565b60405180910390f35b610593600480360361058e9190810190612944565b611a28565b6040516105a09190613231565b60405180910390f35b6105b1611b59565b6040516105be91906130f6565b60405180910390f35b6105cf611b70565b6040516105de93929190613283565b60405180910390f35b61060160048036036105fc9190810190612944565b611bd1565b60405161060e9190613231565b60405180910390f35b61061f611cd1565b60405161062d9291906130a4565b60405180910390f35b610650600480360361064b9190810190612944565b611ce5565b60405161065d9190613231565b60405180910390f35b61066e611d06565b60405161067b91906132ba565b60405180910390f35b61069e60048036036106999190810190612a10565b611d0b565b6040516106ad9392919061324c565b60405180910390f35b6106be611d6f565b6040516106cb9190613231565b60405180910390f35b6106ee60048036036106e99190810190612944565b611d81565b6040516106fd93929190613283565b60405180910390f35b60008060006002600085815260200190815260200160002060000154600260008681526020019081526020016000206001015460026000878152602001908152602001600020600201549250925092509193909250565b60608060ff83116107795761077061189d565b91509150610934565b600061078484611bd1565b9050606060016000838152602001908152602001600020805490506040519080825280602002602001820160405280156107cd5781602001602082028038833980820191505090505b509050606060016000848152602001908152602001600020805490506040519080825280602002602001820160405280156108175781602001602082028038833980820191505090505b50905060008090505b60016000858152602001908152602001600020805490508110156109295760016000858152602001908152602001600020818154811061085c57fe5b906000526020600020906003020160020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061089a57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001600085815260200190815260200160002081815481106108f257fe5b90600052602060002090600302016001015482828151811061091057fe5b6020026020010181815250508080600101915050610820565b508181945094505050505b915091565b6000606060016000858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015610a0c578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190610970565b50505050905060008090505b8151811015610a84578373ffffffffffffffffffffffffffffffffffffffff16828281518110610a4457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff161415610a7757600192505050610a8b565b8080600101915050610a18565b5060009150505b92915050565b73fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b13576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b0a906131f6565b60405180910390fd5b6000610b1d611781565b90506000811415610b3157610b30611dab565b5b610b456001826120cc90919063ffffffff16565b8814610b86576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b7d90613176565b60405180910390fd5b868611610bc8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bbf906131d6565b60405180910390fd5b6000601060018989030181610bd957fe5b0614610c1a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c11906131b6565b60405180910390fd5b8660026000838152602001908152602001600020600101541115610c73576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c6a90613156565b60405180910390fd5b6000600260008a81526020019081526020016000206000015414610ccc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cc390613196565b60405180910390fd5b604051806060016040528089815260200188815260200187815250600260008a8152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600388908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008a815260200190815260200160002081610d669190612620565b506000600160008a815260200190815260200160002081610d879190612620565b506060610ddf610dda87878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506120eb565b612119565b905060008090505b8151811015610f51576060610e0e838381518110610e0157fe5b6020026020010151612119565b90506000808c81526020019081526020016000208054809190600101610e349190612620565b506040518060600160405280610e5d83600081518110610e5057fe5b60200260200101516121f6565b8152602001610e7f83600181518110610e7257fe5b60200260200101516121f6565b8152602001610ea183600281518110610e9457fe5b6020026020010151612267565b73ffffffffffffffffffffffffffffffffffffffff168152506000808d81526020019081526020016000208381548110610ed757fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610de7565b506060610fa9610fa486868080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506120eb565b612119565b905060008090505b815181101561111d576060610fd8838381518110610fcb57fe5b6020026020010151612119565b9050600160008d81526020019081526020016000208054809190600101610fff9190612620565b5060405180606001604052806110288360008151811061101b57fe5b60200260200101516121f6565b815260200161104a8360018151811061103d57fe5b60200260200101516121f6565b815260200161106c8360028151811061105f57fe5b6020026020010151612267565b73ffffffffffffffffffffffffffffffffffffffff16815250600160008e815260200190815260200160002083815481106110a357fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610fb1565b5050505050505050505050565b60006060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156111fc578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611160565b50505050905060008090505b8151811015611274578373ffffffffffffffffffffffffffffffffffffffff1682828151811061123457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff1614156112675760019250505061127b565b8080600101915050611208565b5060009150505b92915050565b601081565b60006002600160f81b84846040516020016112a393929190612ff6565b6040516020818303038152906040526040516112bf9190613033565b602060405180830381855afa1580156112dc573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506112ff9190810190612878565b905092915050565b60006060600080848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156113d9578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815250508152602001906001019061133d565b505050509050600080905060008090505b825181101561142c5761141d83828151811061140257fe5b602002602001015160200151836120cc90919063ffffffff16565b915080806001019150506113ea565b508092505050919050565b73fffffffffffffffffffffffffffffffffffffffe81565b600080600080859050600060218087518161146657fe5b04029050600081111561147f5761147c876117ab565b91505b6000602190505b818111611509576000600182038801519050818801519550806000602081106114ab57fe5b1a60f81b9450600060f81b857effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614156114f0576114e98685611286565b93506114fd565b6114fa8487611286565b93505b50602181019050611486565b508782149450505050509392505050565b60405161152690613074565b604051809103902081565b60008060009050600080905060008090505b84518167ffffffffffffffff16101561160c57606061156e868367ffffffffffffffff16604161228a565b90506000611585828961231690919063ffffffff16565b905061158f612652565b6115998a83611619565b90506115a58a8361112a565b80156115dc57508473ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16115b156115fe578194506115fb8160200151876120cc90919063ffffffff16565b95505b505050604181019050611543565b5081925050509392505050565b611621612652565b6060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156116f1578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611655565b50505050905060008090505b8151811015611779578373ffffffffffffffffffffffffffffffffffffffff1682828151811061172957fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff16141561176c5781818151811061175d57fe5b60200260200101519250611779565b80806001019150506116fd565b505092915050565b600061178c43611bd1565b905090565b60006117a461179e611781565b8361112a565b9050919050565b60006002600060f81b836040516020016117c6929190612fca565b6040516020818303038152906040526040516117e29190613033565b602060405180830381855afa1580156117ff573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506118229190810190612878565b9050919050565b60008060008061184a600161183c611781565b6120cc90919063ffffffff16565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b606080606060016040519080825280602002602001820160405280156118d25781602001602082028038833980820191505090505b509050736ab3d36c46ecfb9b9c0bd51cb1c3da5a2c81cea6816000815181106118f757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050606060016040519080825280602002602001820160405280156119635781602001602082028038833980820191505090505b5090506127108160008151811061197657fe5b60200260200101818152505081819350935050509091565b60ff81565b600160205281600052604060002081815481106119ac57fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b6000611a0a611a04611781565b83610939565b9050919050565b604051611a1d9061304a565b604051809103902081565b6000606060016000848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015611afb578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611a5f565b505050509050600080905060008090505b8251811015611b4e57611b3f838281518110611b2457fe5b602002602001015160200151836120cc90919063ffffffff16565b91508080600101915050611b0c565b508092505050919050565b604051611b659061305f565b604051809103902081565b600080600080611b7e611781565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b60008060038054905090505b6000811115611c9157611bee612689565b6002600060036001850381548110611c0257fe5b906000526020600020015481526020019081526020016000206040518060600160405290816000820154815260200160018201548152602001600282015481525050905083816020015111158015611c5f57506000816040015114155b8015611c6f575080604001518411155b15611c8257806000015192505050611ccc565b50808060019003915050611bdd565b5060006003805490501115611cc757600360016003805490500381548110611cb557fe5b90600052602060002001549050611ccc565b600090505b919050565b606080611cdd4361075d565b915091509091565b60038181548110611cf257fe5b906000526020600020016000915090505481565b600281565b60006020528160005260406000208181548110611d2457fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b600060104381611d7b57fe5b04905090565b60026020528060005260406000206000915090508060000154908060010154908060020154905083565b606080611db661189d565b8092508193505050600080905060405180606001604052808281526020016000815260200160ff815250600260008381526020019081526020016000206000820151816000015560208201518160010155604082015181600201559050506003819080600181540180825580915050906001820390600052602060002001600090919290919091505550600080600083815260200190815260200160002081611e5f9190612620565b5060006001600083815260200190815260200160002081611e809190612620565b5060008090505b8351811015611fa2576000808381526020019081526020016000208054809190600101611eb49190612620565b506040518060600160405280828152602001848381518110611ed257fe5b60200260200101518152602001858381518110611eeb57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff168152506000808481526020019081526020016000208281548110611f2957fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050611e87565b5060008090505b83518110156120c657600160008381526020019081526020016000208054809190600101611fd79190612620565b506040518060600160405280828152602001848381518110611ff557fe5b6020026020010151815260200185838151811061200e57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff1681525060016000848152602001908152602001600020828154811061204d57fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050611fa9565b50505050565b6000808284019050838110156120e157600080fd5b8091505092915050565b6120f36126aa565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061212482612420565b61212d57600080fd5b60006121388361246e565b905060608160405190808252806020026020018201604052801561217657816020015b6121636126c4565b81526020019060019003908161215b5790505b509050600061218885602001516124df565b8560200151019050600080600090505b848110156121e9576121a983612568565b91506040518060400160405280838152602001848152508482815181106121cc57fe5b602002602001018190525081830192508080600101915050612198565b5082945050505050919050565b600080826000015111801561221057506021826000015111155b61221957600080fd5b600061222883602001516124df565b9050600081846000015103905060008083866020015101905080519150602083101561225b57826020036101000a820491505b81945050505050919050565b6000601582600001511461227a57600080fd5b612283826121f6565b9050919050565b60608183018451101561229c57600080fd5b60608215600081146122b95760405191506020820160405261230a565b6040519150601f8416801560200281840101858101878315602002848b0101015b818310156122f757805183526020830192506020810190506122da565b50868552601f19601f8301166040525050505b50809150509392505050565b6000806000806041855114612331576000935050505061241a565b602085015192506040850151915060ff6041860151169050601b8160ff16101561235c57601b810190505b601b8160ff16141580156123745750601c8160ff1614155b15612385576000935050505061241a565b6000600187838686604051600081526020016040526040516123aa9493929190613111565b6020604051602081039080840390855afa1580156123cc573d6000803e3d6000fd5b505050602060405103519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561241257600080fd5b809450505050505b92915050565b600080826000015114156124375760009050612469565b60008083602001519050805160001a915060c060ff168260ff16101561246257600092505050612469565b6001925050505b919050565b6000808260000151141561248557600090506124da565b6000809050600061249984602001516124df565b84602001510190506000846000015185602001510190505b808210156124d3576124c282612568565b8201915082806001019350506124b1565b8293505050505b919050565b600080825160001a9050608060ff168110156124ff576000915050612563565b60b860ff16811080612524575060c060ff168110158015612523575060f860ff1681105b5b15612533576001915050612563565b60c060ff168110156125535760018060b80360ff16820301915050612563565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff168110156125895760019150612616565b60b860ff168110156125a6576001608060ff168203019150612615565b60c060ff168110156125d65760b78103600185019450806020036101000a85510460018201810193505050612614565b60f860ff168110156125f357600160c060ff168203019150612613565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b81548183558181111561264d5760030281600302836000526020600020918201910161264c91906126de565b5b505050565b60405180606001604052806000815260200160008152602001600073ffffffffffffffffffffffffffffffffffffffff1681525090565b60405180606001604052806000815260200160008152602001600081525090565b604051806040016040528060008152602001600081525090565b604051806040016040528060008152602001600081525090565b61273191905b8082111561272d5760008082016000905560018201600090556002820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506003016126e4565b5090565b90565b600081359050612743816134b3565b92915050565b600081359050612758816134ca565b92915050565b60008151905061276d816134ca565b92915050565b60008083601f84011261278557600080fd5b8235905067ffffffffffffffff81111561279e57600080fd5b6020830191508360018202830111156127b657600080fd5b9250929050565b600082601f8301126127ce57600080fd5b81356127e16127dc82613302565b6132d5565b915080825260208301602083018583830111156127fd57600080fd5b61280883828461345d565b50505092915050565b600081359050612820816134e1565b92915050565b60006020828403121561283857600080fd5b600061284684828501612734565b91505092915050565b60006020828403121561286157600080fd5b600061286f84828501612749565b91505092915050565b60006020828403121561288a57600080fd5b60006128988482850161275e565b91505092915050565b600080604083850312156128b457600080fd5b60006128c285828601612749565b92505060206128d385828601612749565b9150509250929050565b6000806000606084860312156128f257600080fd5b600061290086828701612749565b935050602061291186828701612749565b925050604084013567ffffffffffffffff81111561292e57600080fd5b61293a868287016127bd565b9150509250925092565b60006020828403121561295657600080fd5b600061296484828501612811565b91505092915050565b6000806040838503121561298057600080fd5b600061298e85828601612811565b925050602061299f85828601612734565b9150509250929050565b6000806000606084860312156129be57600080fd5b60006129cc86828701612811565b93505060206129dd86828701612749565b925050604084013567ffffffffffffffff8111156129fa57600080fd5b612a06868287016127bd565b9150509250925092565b60008060408385031215612a2357600080fd5b6000612a3185828601612811565b9250506020612a4285828601612811565b9150509250929050565b600080600080600080600060a0888a031215612a6757600080fd5b6000612a758a828b01612811565b9750506020612a868a828b01612811565b9650506040612a978a828b01612811565b955050606088013567ffffffffffffffff811115612ab457600080fd5b612ac08a828b01612773565b9450945050608088013567ffffffffffffffff811115612adf57600080fd5b612aeb8a828b01612773565b925092505092959891949750929550565b6000612b088383612b2c565b60208301905092915050565b6000612b208383612f9d565b60208301905092915050565b612b35816133d2565b82525050565b612b44816133d2565b82525050565b6000612b558261334e565b612b5f8185613389565b9350612b6a8361332e565b8060005b83811015612b9b578151612b828882612afc565b9750612b8d8361336f565b925050600181019050612b6e565b5085935050505092915050565b6000612bb382613359565b612bbd818561339a565b9350612bc88361333e565b8060005b83811015612bf9578151612be08882612b14565b9750612beb8361337c565b925050600181019050612bcc565b5085935050505092915050565b612c0f816133e4565b82525050565b612c26612c21826133f0565b61349f565b82525050565b612c358161341c565b82525050565b612c4c612c478261341c565b6134a9565b82525050565b6000612c5d82613364565b612c6781856133ab565b9350612c7781856020860161346c565b80840191505092915050565b6000612c906004836133c7565b91507f766f7465000000000000000000000000000000000000000000000000000000006000830152600482019050919050565b6000612cd0602d836133b6565b91507f537461727420626c6f636b206d7573742062652067726561746572207468616e60008301527f2063757272656e74207370616e000000000000000000000000000000000000006020830152604082019050919050565b6000612d36600f836133b6565b91507f496e76616c6964207370616e20696400000000000000000000000000000000006000830152602082019050919050565b6000612d766013836133b6565b91507f5370616e20616c726561647920657869737473000000000000000000000000006000830152602082019050919050565b6000612db66045836133b6565b91507f446966666572656e6365206265747765656e20737461727420616e6420656e6460008301527f20626c6f636b206d75737420626520696e206d756c7469706c6573206f66207360208301527f7072696e740000000000000000000000000000000000000000000000000000006040830152606082019050919050565b6000612e426005836133c7565b91507f38303030320000000000000000000000000000000000000000000000000000006000830152600582019050919050565b6000612e82600e836133c7565b91507f6865696d64616c6c2d38303030320000000000000000000000000000000000006000830152600e82019050919050565b6000612ec2602a836133b6565b91507f456e6420626c6f636b206d7573742062652067726561746572207468616e207360008301527f7461727420626c6f636b000000000000000000000000000000000000000000006020830152604082019050919050565b6000612f286012836133b6565b91507f4e6f742053797374656d204164646573732100000000000000000000000000006000830152602082019050919050565b606082016000820151612f716000850182612f9d565b506020820151612f846020850182612f9d565b506040820151612f976040850182612b2c565b50505050565b612fa681613446565b82525050565b612fb581613446565b82525050565b612fc481613450565b82525050565b6000612fd68285612c15565b600182019150612fe68284612c3b565b6020820191508190509392505050565b60006130028286612c15565b6001820191506130128285612c3b565b6020820191506130228284612c3b565b602082019150819050949350505050565b600061303f8284612c52565b915081905092915050565b600061305582612c83565b9150819050919050565b600061306a82612e35565b9150819050919050565b600061307f82612e75565b9150819050919050565b600060208201905061309e6000830184612b3b565b92915050565b600060408201905081810360008301526130be8185612b4a565b905081810360208301526130d28184612ba8565b90509392505050565b60006020820190506130f06000830184612c06565b92915050565b600060208201905061310b6000830184612c2c565b92915050565b60006080820190506131266000830187612c2c565b6131336020830186612fbb565b6131406040830185612c2c565b61314d6060830184612c2c565b95945050505050565b6000602082019050818103600083015261316f81612cc3565b9050919050565b6000602082019050818103600083015261318f81612d29565b9050919050565b600060208201905081810360008301526131af81612d69565b9050919050565b600060208201905081810360008301526131cf81612da9565b9050919050565b600060208201905081810360008301526131ef81612eb5565b9050919050565b6000602082019050818103600083015261320f81612f1b565b9050919050565b600060608201905061322b6000830184612f5b565b92915050565b60006020820190506132466000830184612fac565b92915050565b60006060820190506132616000830186612fac565b61326e6020830185612fac565b61327b6040830184612b3b565b949350505050565b60006060820190506132986000830186612fac565b6132a56020830185612fac565b6132b26040830184612fac565b949350505050565b60006020820190506132cf6000830184612fbb565b92915050565b6000604051905081810181811067ffffffffffffffff821117156132f857600080fd5b8060405250919050565b600067ffffffffffffffff82111561331957600080fd5b601f19601f8301169050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b60006133dd82613426565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b8381101561348a57808201518184015260208101905061346f565b83811115613499576000848401525b50505050565b6000819050919050565b6000819050919050565b6134bc816133d2565b81146134c757600080fd5b50565b6134d38161341c565b81146134de57600080fd5b50565b6134ea81613446565b81146134f557600080fd5b5056fea365627a7a7231582054ae86d53ba5464a9d9098ba148754fdba81f088cac08ff2e7c7bf6a46aae8a96c6578706572696d656e74616cf564736f6c63430005110040"
+ },
+ "0000000000000000000000000000000000001001": {
+ "balance": "0x0",
+ "code": "0x608060405234801561001057600080fd5b50600436106100415760003560e01c806319494a17146100465780633434735f146100e15780635407ca671461012b575b600080fd5b6100c76004803603604081101561005c57600080fd5b81019080803590602001909291908035906020019064010000000081111561008357600080fd5b82018360208201111561009557600080fd5b803590602001918460018302840111640100000000831117156100b757600080fd5b9091929391929390505050610149565b604051808215151515815260200191505060405180910390f35b6100e96104b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101336104ce565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610200576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061025761025285858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506104d4565b610502565b905060006102788260008151811061026b57fe5b60200260200101516105df565b905080600160005401146102f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103248360018151811061031757fe5b6020026020010151610650565b905060606103458460028151811061033857fe5b6020026020010151610673565b9050610350826106ff565b156104ab576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103aa57808201518184015260208101905061038f565b50505050905090810190601f1680156103d75780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f19650847f5a22725590b0a51c923940223f7458512164b1113359a735e86e7f27f44791ee88604051808215151515815260200191505060405180910390a250505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104dc61097f565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061050d82610718565b61051657600080fd5b600061052183610766565b905060608160405190808252806020026020018201604052801561055f57816020015b61054c610999565b8152602001906001900390816105445790505b509050600061057185602001516107d7565b8560200151019050600080600090505b848110156105d25761059283610860565b91506040518060400160405280838152602001848152508482815181106105b557fe5b602002602001018190525081830192508080600101915050610581565b5082945050505050919050565b60008082600001511180156105f957506021826000015111155b61060257600080fd5b600061061183602001516107d7565b9050600081846000015103905060008083866020015101905080519150602083101561064457826020036101000a820491505b81945050505050919050565b6000601582600001511461066357600080fd5b61066c826105df565b9050919050565b6060600082600001511161068657600080fd5b600061069583602001516107d7565b905060008184600001510390506060816040519080825280601f01601f1916602001820160405280156106d75781602001600182028038833980820191505090505b50905060008160200190506106f3848760200151018285610918565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b6000808260000151141561072f5760009050610761565b60008083602001519050805160001a915060c060ff168260ff16101561075a57600092505050610761565b6001925050505b919050565b6000808260000151141561077d57600090506107d2565b6000809050600061079184602001516107d7565b84602001510190506000846000015185602001510190505b808210156107cb576107ba82610860565b8201915082806001019350506107a9565b8293505050505b919050565b600080825160001a9050608060ff168110156107f757600091505061085b565b60b860ff1681108061081c575060c060ff16811015801561081b575060f860ff1681105b5b1561082b57600191505061085b565b60c060ff1681101561084b5760018060b80360ff1682030191505061085b565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff16811015610881576001915061090e565b60b860ff1681101561089e576001608060ff16820301915061090d565b60c060ff168110156108ce5760b78103600185019450806020036101000a8551046001820181019350505061090c565b60f860ff168110156108eb57600160c060ff16820301915061090b565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b60008114156109265761097a565b5b602060ff1681106109565782518252602060ff1683019250602060ff1682019150602060ff1681039050610927565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a72315820bf998627ab3f0decc9d4ae85871979b6f1fec222bbafeb2e0045daf58b93f4b864736f6c63430005110032"
+ },
+ "0000000000000000000000000000000000001010": {
+ "balance": "0x204fce28085b549b31600000",
+ "code": "0x6080604052600436106101b75760003560e01c80638da5cb5b116100ec578063b789543c1161008a578063e614d0d611610064578063e614d0d614610ac1578063ed9ef52414610aec578063f2fde38b14610b3d578063fc0c546a14610b8e576101b7565b8063b789543c146109e8578063cc79f97b14610a6b578063e306f77914610a96576101b7565b806395d89b41116100c657806395d89b4114610874578063a9059cbb14610904578063abceeba21461096a578063acd06cb314610995576101b7565b80638da5cb5b1461075e5780638f32d59b146107b55780639025e64c146107e4576101b7565b806347e7ef241161015957806370a082311161013357806370a08231146105a5578063715018a61461060a578063771282f61461062157806377d32e941461064c576101b7565b806347e7ef2414610482578063485cc955146104dd57806360f96a8f1461054e576101b7565b806319d27d9c1161019557806319d27d9c146102c85780632e1a7d4d146103cc578063313ce567146103fa57806342fc47fb1461042b576101b7565b806306fdde03146101bc5780631499c5921461024c57806318160ddd1461029d575b600080fd5b3480156101c857600080fd5b506101d1610be5565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156102115780820151818401526020810190506101f6565b50505050905090810190601f16801561023e5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025857600080fd5b5061029b6004803603602081101561026f57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c22565b005b3480156102a957600080fd5b506102b2610c90565b6040518082815260200191505060405180910390f35b3480156102d457600080fd5b5061038a600480360360a08110156102eb57600080fd5b810190808035906020019064010000000081111561030857600080fd5b82018360208201111561031a57600080fd5b8035906020019184600183028401116401000000008311171561033c57600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610ca6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103f8600480360360208110156103e257600080fd5b8101908080359060200190929190505050610e7b565b005b34801561040657600080fd5b5061040f610fcd565b604051808260ff1660ff16815260200191505060405180910390f35b34801561043757600080fd5b50610440610fd6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561048e57600080fd5b506104db600480360360408110156104a557600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610ffc565b005b3480156104e957600080fd5b5061054c6004803603604081101561050057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506111b8565b005b34801561055a57600080fd5b50610563611287565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156105b157600080fd5b506105f4600480360360208110156105c857600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506112ad565b6040518082815260200191505060405180910390f35b34801561061657600080fd5b5061061f6112ce565b005b34801561062d57600080fd5b5061063661139e565b6040518082815260200191505060405180910390f35b34801561065857600080fd5b5061071c6004803603604081101561066f57600080fd5b81019080803590602001909291908035906020019064010000000081111561069657600080fd5b8201836020820111156106a857600080fd5b803590602001918460018302840111640100000000831117156106ca57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506113a4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561076a57600080fd5b50610773611529565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107c157600080fd5b506107ca611552565b604051808215151515815260200191505060405180910390f35b3480156107f057600080fd5b506107f96115a9565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561083957808201518184015260208101905061081e565b50505050905090810190601f1680156108665780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561088057600080fd5b506108896115e2565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108c95780820151818401526020810190506108ae565b50505050905090810190601f1680156108f65780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6109506004803603604081101561091a57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061161f565b604051808215151515815260200191505060405180910390f35b34801561097657600080fd5b5061097f611645565b6040518082815260200191505060405180910390f35b3480156109a157600080fd5b506109ce600480360360208110156109b857600080fd5b81019080803590602001909291905050506116d2565b604051808215151515815260200191505060405180910390f35b3480156109f457600080fd5b50610a5560048036036080811015610a0b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919080359060200190929190803590602001909291905050506116f2565b6040518082815260200191505060405180910390f35b348015610a7757600080fd5b50610a80611712565b6040518082815260200191505060405180910390f35b348015610aa257600080fd5b50610aab611719565b6040518082815260200191505060405180910390f35b348015610acd57600080fd5b50610ad661171f565b6040518082815260200191505060405180910390f35b348015610af857600080fd5b50610b3b60048036036020811015610b0f57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506117ac565b005b348015610b4957600080fd5b50610b8c60048036036020811015610b6057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611903565b005b348015610b9a57600080fd5b50610ba3611920565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b6000808511610cb457600080fd5b6000831480610cc35750824311155b610d35576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f5369676e6174757265206973206578706972656400000000000000000000000081525060200191505060405180910390fd5b6000610d4c610d4633888888611946565b30611a1c565b9050600015156005600083815260200190815260200160002060009054906101000a900460ff16151514610de8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600f8152602001807f536967206465616374697661746564000000000000000000000000000000000081525060200191505060405180910390fd5b60016005600083815260200190815260200160002060006101000a81548160ff021916908315150217905550610e628189898080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506113a4565b9150610e6f828488611bab565b50509695505050505050565b60003390506000610e8b826112ad565b9050610ea283600654611f6890919063ffffffff16565b600681905550600083118015610eb757508234145b610f29576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610fa5876112ad565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b611004611552565b61100d57600080fd5b60008111801561104a5750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b61109f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602381526020018061226c6023913960400191505060405180910390fd5b60006110aa836112ad565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f193505050501580156110f7573d6000803e3d6000fd5b5061110d83600654611f8890919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f6858561118f896112ad565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff161561121e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260238152602001806122496023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061128382611fa7565b5050565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b6112d6611552565b6112df57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146113bf5760009350505050611523565b602085015192506040850151915060ff6041860151169050601b8160ff1610156113ea57601b810190505b601b8160ff16141580156114025750601c8160ff1614155b156114135760009350505050611523565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015611470573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561151f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013882000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611631576000905061163f565b61163c338484611bab565b90505b92915050565b6040518060800160405280605b8152602001612313605b91396040516020018082805190602001908083835b602083106116945780518252602082019150602081019050602083039250611671565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061170861170386868686611946565b61209f565b9050949350505050565b6201388281565b60015481565b60405180608001604052806052815260200161228f605291396040516020018082805190602001908083835b6020831061176e578051825260208201915060208101905060208303925061174b565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6117b4611552565b6117bd57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611843576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260328152602001806122e16032913960400191505060405180910390fd5b8073ffffffffffffffffffffffffffffffffffffffff16600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f1f9f3556dd336016cdf20adaead7d5c73665dba664b60e8c17e9a4eb91ce1d3960405160405180910390a380600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b61190b611552565b61191457600080fd5b61191d81611fa7565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000806040518060800160405280605b8152602001612313605b91396040516020018082805190602001908083835b602083106119985780518252602082019150602081019050602083039250611975565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060405180608001604052806052815260200161228f605291396040516020018082805190602001908083835b60208310611a6e5780518252602082019150602081019050602083039250611a4b565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001206040518060400160405280600d81526020017f4d61746963204e6574776f726b00000000000000000000000000000000000000815250805190602001206040518060400160405280600181526020017f3100000000000000000000000000000000000000000000000000000000000000815250805190602001206201388286604051602001808681526020018581526020018481526020018381526020018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200195505050505050604051602081830303815290604052805190602001209050611ba284826120b4565b91505092915050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611c2b57600080fd5b505afa158015611c3f573d6000803e3d6000fd5b505050506040513d6020811015611c5557600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611ce757600080fd5b505afa158015611cfb573d6000803e3d6000fd5b505050506040513d6020811015611d1157600080fd5b81019080805190602001909291905050509050611d2f8686866120f5565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611e3757600080fd5b505afa158015611e4b573d6000803e3d6000fd5b505050506040513d6020811015611e6157600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611eef57600080fd5b505afa158015611f03573d6000803e3d6000fd5b505050506040513d6020811015611f1957600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b600082821115611f7757600080fd5b600082840390508091505092915050565b600080828401905083811015611f9d57600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611fe157600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b60006120ad826001546120b4565b9050919050565b60006040517f190100000000000000000000000000000000000000000000000000000000000081528260028201528360228201526042812091505092915050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415612197576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501580156121dd573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e7472616374294368696c6420746f6b656e3a206e6577206368696c64206164647265737320697320746865207a65726f2061646472657373546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820eb7babc3f24d0f6948fed0801b988371ba0eb26b26d496fea92b7b57a72148e164736f6c63430005110032"
+ },
+ "6aB3d36C46ecFb9B9c0bD51CB1c3da5A2C81cea6": {
+ "balance": "0x3635c9adc5dea00000"
+ }
+}
\ No newline at end of file
diff --git a/core/allocs/holesky.json b/core/allocs/holesky.json
new file mode 100644
index 00000000000..09783fe6c7e
--- /dev/null
+++ b/core/allocs/holesky.json
@@ -0,0 +1,987 @@
+{
+ "0x0000000000000000000000000000000000000000": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000001": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000002": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000003": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000004": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000005": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000006": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000007": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000008": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000009": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000000f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000010": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000011": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000012": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000013": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000014": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000015": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000016": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000017": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000018": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000019": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000001f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000020": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000021": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000022": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000023": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000024": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000025": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000026": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000027": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000028": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000029": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000002f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000030": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000031": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000032": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000033": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000034": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000035": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000036": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000037": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000038": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000039": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000003f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000040": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000041": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000042": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000043": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000044": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000045": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000046": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000047": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000048": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000049": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000004f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000050": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000051": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000052": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000053": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000054": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000055": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000056": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000057": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000058": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000059": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000005f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000060": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000061": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000062": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000063": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000064": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000065": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000066": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000067": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000068": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000069": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000006f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000070": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000071": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000072": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000073": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000074": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000075": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000076": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000077": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000078": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000079": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000007f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000080": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000081": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000082": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000083": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000084": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000085": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000086": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000087": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000088": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000089": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000008f": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000090": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000091": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000092": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000093": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000094": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000095": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000096": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000097": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000098": {
+ "balance": "0x1"
+ },
+ "0x0000000000000000000000000000000000000099": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009a": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009b": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009c": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009d": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009e": {
+ "balance": "0x1"
+ },
+ "0x000000000000000000000000000000000000009f": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000a9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000aa": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ab": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ac": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ad": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ae": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000af": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000b9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ba": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000bb": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000bc": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000bd": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000be": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000bf": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000c9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ca": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000cb": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000cc": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000cd": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ce": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000cf": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000d9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000da": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000db": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000dc": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000dd": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000de": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000df": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000e9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ea": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000eb": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ec": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ed": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ee": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ef": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f0": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f1": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f2": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f3": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f4": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f5": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f6": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f7": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f8": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000f9": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000fa": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000fb": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000fc": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000fd": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000fe": {
+ "balance": "0x1"
+ },
+ "0x00000000000000000000000000000000000000ff": {
+ "balance": "0x1"
+ },
+ "0x4242424242424242424242424242424242424242": {
+ "balance": "0x0",
+ "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033",
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b",
+ "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71",
+ "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c",
+ "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c",
+ "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30",
+ "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1",
+ "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c",
+ "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193",
+ "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1",
+ "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b",
+ "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220",
+ "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f",
+ "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e",
+ "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784",
+ "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb",
+ "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb",
+ "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab",
+ "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4",
+ "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f",
+ "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa",
+ "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c",
+ "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167",
+ "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7",
+ "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0",
+ "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544",
+ "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765",
+ "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4",
+ "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1",
+ "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636",
+ "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c",
+ "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7"
+ }
+ },
+ "0x0000006916a87b82333f4245046623b23794C65C": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x0be949928Ff199c9EBA9E110db210AA5C94EFAd0": {
+ "balance": "0x7c13bc4b2c133c56000000"
+ },
+ "0x0C100000006d7b5e23a1eAEE637f28cA32Cd5b31": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x0C35317B7a96C454E2CB3d1A255D775Ab112cCc8": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x0d731cfabC5574329823F26d488416451d2ea376": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x0e79065B5F11b5BD1e62B935A600976ffF3754B9": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x105083929bF9bb22C26cB1777Ec92661170D4285": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x10F5d45854e038071485AC9e402308cF80D2d2fE": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x1268AD189526AC0b386faF06eFfC46779c340eE6": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x12Cba59f5A74DB81a12ff63C349Bd82CBF6007C2": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x1446D7f6dF00380F246d8211dE7f0FaBC4Fd248C": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x164e38a375247A784A81d420201AA8fe4E513921": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x1B7aA44088a0eA95bdc65fef6E5071E946Bf7d8f": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x222222222222cF64a76AE3d36859958c864fDA2c": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x2f14582947E292a2eCd20C430B46f2d27CFE213c": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x2f2c75B5Dd5D246194812b00eEb3B09c2c66e2eE": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x341c40b94bf2afbfa42573cb78f16ee15a056238": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x34f845773D4364999f2fbC7AA26ABDeE902cBb46": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x3C75594181e03E8ECD8468A0037F058a9dAfad79": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x462396E69dBfa455F405f4DD82F3014Af8003B72": {
+ "balance": "0xa56fa5b99019a5c8000000"
+ },
+ "0x49Df3CCa2670eB0D591146B16359fe336e476F29": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x4D0b04b405c6b62C7cFC3aE54759747e2C0b4662": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x4D496CcC28058B1D74B7a19541663E21154f9c84": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x509a7667aC8D0320e36172c192506a6188aA84f6": {
+ "balance": "0x7c13bc4b2c133c56000000"
+ },
+ "0x5180db0237291A6449DdA9ed33aD90a38787621c": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x52730f347dEf6BA09adfF62EaC60D5fEe8205BC4": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x5EAC0fBd3dfef8aE3efa3c5dc1aa193bc6033dFd": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x6a7aA9b882d50Bb7bc5Da1a244719C99f12F06a3": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x6Cc9397c3B38739daCbfaA68EaD5F5D77Ba5F455": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x762cA62ca2549ad806763B3Aa1eA317c429bDBDa": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x778F5F13C4Be78A3a4d7141BCB26999702f407CF": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x875D25Ee4bC604C71BaF6236a8488F22399BED4b": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x8dF7878d3571BEF5e5a744F96287C8D20386d75A": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0x9E415A096fF77650dc925dEA546585B4adB322B6": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xA0766B65A4f7B1da79a1AF79aC695456eFa28644": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xA29B144A449E414A472c60C7AAf1aaFfE329021D": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xa55395566b0b54395B3246f96A0bDc4b8a483df9": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xAC9ba72fb61aA7c31A95df0A8b6ebA6f41EF875e": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xB0498C15879db2eE5471d4926c5fAA25C9a09683": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xB19Fb4c1f280327e60Ed37b1Dc6EE77533539314": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xC21cB9C99C316d1863142F7dD86dd5496D81A8D6": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xc473d412dc52e349862209924c8981b2ee420768": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xC48E23C5F6e1eA0BaEf6530734edC3968f79Af2e": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xc6e2459991BfE27cca6d86722F35da23A1E4Cb97": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xD3994e4d3202dD23c8497d7F75bF1647d1DA1bb1": {
+ "balance": "0x19D971E4FE8401E74000000"
+ },
+ "0xDCA6e9B48Ea86AeBFDf9929949124042296b6e34": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xe0a2Bd4258D2768837BAa26A28fE71Dc079f84c7": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xEA28d002042fd9898D0Db016be9758eeAFE35C1E": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xEfA7454f1116807975A4750B46695E967850de5D": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xFBFd6Fa9F73Ac6A058E01259034C28001BEf8247": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xe0991E844041bE6F11B99da5b114b6bCf84EBd57": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x15E719b6AcAf1E4411Bf0f9576CB1D0dB161DdFc": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x346D827a75F98F0A7a324Ff80b7C3F90252E8baC": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x73b2e0E54510239E22cC936F0b4a6dE1acf0AbdE": {
+ "balance": "0x52b7d2dcc80cd2e4000000"
+ },
+ "0xBb977B2EE8a111D788B3477D242078d0B837E72b": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x834Dbf5A03e29c25bc55459cCe9c021EeBE676Ad": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xD1F77E4C1C45186e8653C489F90e008a73597296": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xb04aeF2a3d2D86B01006cCD4339A2e943d9c6480": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0xC9CA2bA9A27De1Db589d8c33Ab8EDFa2111b31fb": {
+ "balance": "0xd3c21bcecceda1000000"
+ },
+ "0x4BC656B34De23896fa6069C9862F355b740401aF": {
+ "balance": "0x084595161401484a000000"
+ }
+}
\ No newline at end of file
diff --git a/core/asm/asm_test.go b/core/asm/asm_test.go
index 3a20d2db747..e5b2ae9c0e9 100644
--- a/core/asm/asm_test.go
+++ b/core/asm/asm_test.go
@@ -23,6 +23,7 @@ import (
// Tests disassembling the instructions for valid evm code
func TestInstructionIteratorValid(t *testing.T) {
+ t.Parallel()
cnt := 0
script, _ := hex.DecodeString("61000000")
@@ -41,6 +42,7 @@ func TestInstructionIteratorValid(t *testing.T) {
// Tests disassembling the instructions for invalid evm code
func TestInstructionIteratorInvalid(t *testing.T) {
+ t.Parallel()
cnt := 0
script, _ := hex.DecodeString("6100")
@@ -56,6 +58,7 @@ func TestInstructionIteratorInvalid(t *testing.T) {
// Tests disassembling the instructions for empty evm code
func TestInstructionIteratorEmpty(t *testing.T) {
+ t.Parallel()
cnt := 0
script, _ := hex.DecodeString("")
diff --git a/core/asm/compiler_test.go b/core/asm/compiler_test.go
index ce9df436bd4..9e47f559c5c 100644
--- a/core/asm/compiler_test.go
+++ b/core/asm/compiler_test.go
@@ -21,6 +21,7 @@ import (
)
func TestCompiler(t *testing.T) {
+ t.Parallel()
tests := []struct {
input, output string
}{
diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go
index c3a97aa5087..453c2ca64ee 100644
--- a/core/asm/lex_test.go
+++ b/core/asm/lex_test.go
@@ -32,6 +32,7 @@ func lexAll(src string) []token {
}
func TestLexer(t *testing.T) {
+ t.Parallel()
tests := []struct {
input string
tokens []token
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 0548eda0c89..34ab7a2c21d 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -31,6 +31,7 @@ import (
// Tests that simple header verification works, for both good and bad blocks.
func TestHeaderVerification(t *testing.T) {
+ t.Parallel()
// Create a simple chain to verify
var (
gspec = &types.Genesis{Config: params.TestChainConfig}
@@ -62,7 +63,7 @@ func TestHeaderVerification(t *testing.T) {
}); err != nil {
panic(err)
}
- if err = m.InsertChain(chain.Slice(i, i+1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(i, i+1)); err != nil {
t.Fatalf("test %d: error inserting the block: %v", i, err)
}
@@ -72,6 +73,7 @@ func TestHeaderVerification(t *testing.T) {
// Tests that simple header with seal verification works, for both good and bad blocks.
func TestHeaderWithSealVerification(t *testing.T) {
+ t.Parallel()
// Create a simple chain to verify
var (
gspec = &types.Genesis{Config: params.TestChainAuraConfig}
@@ -104,7 +106,7 @@ func TestHeaderWithSealVerification(t *testing.T) {
}); err != nil {
panic(err)
}
- if err = m.InsertChain(chain.Slice(i, i+1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(i, i+1)); err != nil {
t.Fatalf("test %d: error inserting the block: %v", i, err)
}
diff --git a/core/blockchain.go b/core/blockchain.go
index a158ea6cc03..d750e6bbc28 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -21,34 +21,35 @@ import (
"fmt"
"time"
- metrics2 "github.com/VictoriaMetrics/metrics"
+ "github.com/ledgerwatch/log/v3"
"golang.org/x/crypto/sha3"
"golang.org/x/exp/slices"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/fixedgas"
-
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/ledgerwatch/erigon-lib/metrics"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/consensus"
- "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/core/vm/evmtypes"
"github.com/ledgerwatch/erigon/rlp"
- "github.com/ledgerwatch/log/v3"
)
var (
- BlockExecutionTimer = metrics2.GetOrCreateSummary("chain_execution_seconds")
+ blockExecutionTimer = metrics.GetOrCreateSummary("chain_execution_seconds")
)
type SyncMode string
const (
TriesInMemory = 128
+
+ // See gas_limit in https://github.com/gnosischain/specs/blob/master/execution/withdrawals.md
+ SysCallGasLimit = uint64(30_000_000)
)
type RejectedTx struct {
@@ -78,11 +79,11 @@ func ExecuteBlockEphemerally(
blockHashFunc func(n uint64) libcommon.Hash,
engine consensus.Engine, block *types.Block,
stateReader state.StateReader, stateWriter state.WriterWithChangeSets,
- chainReader consensus.ChainHeaderReader, getTracer func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error),
+ chainReader consensus.ChainReader, getTracer func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error),
logger log.Logger,
) (*EphemeralExecResult, error) {
- defer BlockExecutionTimer.UpdateDuration(time.Now())
+ defer blockExecutionTimer.ObserveDuration(time.Now())
block.Uncles()
ibs := state.New(stateReader)
header := block.Header()
@@ -90,7 +91,7 @@ func ExecuteBlockEphemerally(
usedGas := new(uint64)
usedBlobGas := new(uint64)
gp := new(GasPool)
- gp.AddGas(block.GasLimit()).AddBlobGas(fixedgas.MaxBlobGasPerBlock)
+ gp.AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock())
var (
rejectedTxs []*RejectedTx
@@ -98,15 +99,10 @@ func ExecuteBlockEphemerally(
receipts types.Receipts
)
- if !vmConfig.ReadOnly {
- if err := InitializeBlockExecution(engine, chainReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil {
- return nil, err
- }
+ if err := InitializeBlockExecution(engine, chainReader, block.Header(), chainConfig, ibs, logger); err != nil {
+ return nil, err
}
- if chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 {
- misc.ApplyDAOHardFork(ibs)
- }
noop := state.NewNoopWriter()
//fmt.Printf("====txs processing start: %d====\n", block.NumberU64())
for i, tx := range block.Transactions() {
@@ -187,7 +183,7 @@ func ExecuteBlockEphemerally(
stateSyncReceipt := &types.Receipt{}
if chainConfig.Consensus == chain.BorConsensus && len(blockLogs) > 0 {
- slices.SortStableFunc(blockLogs, func(i, j *types.Log) bool { return i.Index < j.Index })
+ slices.SortStableFunc(blockLogs, func(i, j *types.Log) int { return cmp.Compare(i.Index, j.Index) })
if len(blockLogs) > len(logs) {
stateSyncReceipt.Logs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()`
@@ -216,7 +212,8 @@ func SysCallContract(contract libcommon.Address, data []byte, chainConfig *chain
state.SystemAddress,
&contract,
0, u256.Num0,
- math.MaxUint64, u256.Num0,
+ SysCallGasLimit,
+ u256.Num0,
nil, nil,
data, nil, false,
true, // isFree
@@ -257,7 +254,8 @@ func SysCreate(contract libcommon.Address, data []byte, chainConfig chain.Config
contract,
nil, // to
0, u256.Num0,
- math.MaxUint64, u256.Num0,
+ SysCallGasLimit,
+ u256.Num0,
nil, nil,
data, nil, false,
true, // isFree
@@ -285,7 +283,7 @@ func FinalizeBlockExecution(
header *types.Header, txs types.Transactions, uncles []*types.Header,
stateWriter state.WriterWithChangeSets, cc *chain.Config,
ibs *state.IntraBlockState, receipts types.Receipts,
- withdrawals []*types.Withdrawal, headerReader consensus.ChainHeaderReader,
+ withdrawals []*types.Withdrawal, chainReader consensus.ChainReader,
isMining bool,
logger log.Logger,
) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) {
@@ -293,9 +291,9 @@ func FinalizeBlockExecution(
return SysCallContract(contract, data, cc, ibs, header, engine, false /* constCall */)
}
if isMining {
- newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, headerReader, syscall, nil, logger)
+ newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, nil, logger)
} else {
- _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, headerReader, syscall, logger)
+ _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, logger)
}
if err != nil {
return nil, nil, nil, err
@@ -311,10 +309,12 @@ func FinalizeBlockExecution(
return newBlock, newTxs, newReceipt, nil
}
-func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHeaderReader, header *types.Header, txs types.Transactions, uncles []*types.Header, cc *chain.Config, ibs *state.IntraBlockState) error {
- engine.Initialize(cc, chain, header, ibs, txs, uncles, func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) {
+func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHeaderReader, header *types.Header,
+ cc *chain.Config, ibs *state.IntraBlockState, logger log.Logger,
+) error {
+ engine.Initialize(cc, chain, header, ibs, func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) {
return SysCallContract(contract, data, cc, ibState, header, engine, constCall)
- })
+ }, logger)
noop := state.NewNoopWriter()
ibs.FinalizeTx(cc.Rules(header.Number.Uint64(), header.Time), noop)
return nil
diff --git a/core/chain_makers.go b/core/chain_makers.go
index f3b85d985db..25a96626be9 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -28,16 +28,15 @@ import (
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/merge"
"github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/core/state"
- "github.com/ledgerwatch/erigon/core/systemcontracts"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/params"
+ "github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/trie"
)
@@ -365,13 +364,12 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
if daoBlock := config.DAOForkBlock; daoBlock != nil {
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 {
- b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
- }
- if daoBlock.Cmp(b.header.Number) == 0 {
- misc.ApplyDAOHardFork(ibs)
+ b.header.Extra = libcommon.CopyBytes(params.DAOForkBlockExtra)
}
}
- systemcontracts.UpgradeBuildInSystemContract(config, b.header.Number, ibs, logger)
+ if b.engine != nil {
+ InitializeBlockExecution(b.engine, nil, b.header, config, ibs, logger)
+ }
// Execute any user modifications to the block
if gen != nil {
gen(i, b)
@@ -417,7 +415,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
return &ChainPack{Headers: headers, Blocks: blocks, Receipts: receipts, TopBlock: blocks[n-1]}, nil
}
-func hashKeyAndAddIncarnation(k []byte, h *common.Hasher) (newK []byte, err error) {
+func hashKeyAndAddIncarnation(k []byte, h *libcommon.Hasher) (newK []byte, err error) {
if len(k) == length.Addr {
newK = make([]byte, length.Hash)
} else {
@@ -522,8 +520,8 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4 bool) (hashRo
if err != nil {
return hashRoot, err
}
- h := common.NewHasher()
- defer common.ReturnHasherToPool(h)
+ h := libcommon.NewHasher()
+ defer libcommon.ReturnHasherToPool(h)
for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
if err != nil {
return hashRoot, fmt.Errorf("interate over plain state: %w", err)
@@ -533,11 +531,11 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4 bool) (hashRo
return hashRoot, fmt.Errorf("insert hashed key: %w", err)
}
if len(k) > length.Addr {
- if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedStorage, newK, libcommon.CopyBytes(v)); err != nil {
return hashRoot, fmt.Errorf("insert hashed key: %w", err)
}
} else {
- if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedAccounts, newK, libcommon.CopyBytes(v)); err != nil {
return hashRoot, fmt.Errorf("insert hashed key: %w", err)
}
}
@@ -603,7 +601,7 @@ func MakeEmptyHeader(parent *types.Header, chainConfig *chain.Config, timestamp
}
if chainConfig.IsCancun(header.Time) {
- excessBlobGas := misc.CalcExcessBlobGas(parent)
+ excessBlobGas := misc.CalcExcessBlobGas(chainConfig, parent)
header.ExcessBlobGas = &excessBlobGas
header.BlobGasUsed = new(uint64)
}
@@ -652,3 +650,7 @@ func (cr *FakeChainReader) GetBlock(hash libcommon.Hash, number uint64) *types.B
func (cr *FakeChainReader) HasBlock(hash libcommon.Hash, number uint64) bool { return false }
func (cr *FakeChainReader) GetTd(hash libcommon.Hash, number uint64) *big.Int { return nil }
func (cr *FakeChainReader) FrozenBlocks() uint64 { return 0 }
+func (cr *FakeChainReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue {
+ return nil
+}
+func (cr *FakeChainReader) BorSpan(spanId uint64) []byte { return nil }
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
index f137c4815f6..433c9221b18 100644
--- a/core/forkid/forkid.go
+++ b/core/forkid/forkid.go
@@ -26,12 +26,11 @@ import (
"reflect"
"strings"
- "github.com/ledgerwatch/erigon-lib/chain"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/log/v3"
"golang.org/x/exp/slices"
- "github.com/ledgerwatch/erigon/common"
+ "github.com/ledgerwatch/erigon-lib/chain"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
)
var (
@@ -55,12 +54,6 @@ type ID struct {
// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error
-// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head height and time.
-func NewID(config *chain.Config, genesis libcommon.Hash, headHeight, headTime uint64) ID {
- heightForks, timeForks := GatherForks(config)
- return NewIDFromForks(heightForks, timeForks, genesis, headHeight, headTime)
-}
-
func NewIDFromForks(heightForks, timeForks []uint64, genesis libcommon.Hash, headHeight, headTime uint64) ID {
// Calculate the starting checksum from the genesis hash
hash := crc32.ChecksumIEEE(genesis[:])
@@ -104,9 +97,9 @@ func NewFilterFromForks(heightForks, timeForks []uint64, genesis libcommon.Hash,
}
// NewStaticFilter creates a filter at block zero.
-func NewStaticFilter(config *chain.Config, genesis libcommon.Hash) Filter {
- heightForks, timeForks := GatherForks(config)
- return newFilter(heightForks, timeForks, genesis, 0, 0)
+func NewStaticFilter(config *chain.Config, genesisHash libcommon.Hash, genesisTime uint64) Filter {
+ heightForks, timeForks := GatherForks(config, genesisTime)
+ return newFilter(heightForks, timeForks, genesisHash, 0 /* headHeight */, genesisTime)
}
// Simple heuristic returning true if the value is a Unix time after 2 Dec 2022.
@@ -215,7 +208,7 @@ func checksumToBytes(hash uint32) [4]byte {
}
// GatherForks gathers all the known forks and creates a sorted list out of them.
-func GatherForks(config *chain.Config) (heightForks []uint64, timeForks []uint64) {
+func GatherForks(config *chain.Config, genesisTime uint64) (heightForks []uint64, timeForks []uint64) {
// Gather all the fork block numbers via reflection
kind := reflect.TypeOf(chain.Config{})
conf := reflect.ValueOf(config).Elem()
@@ -237,7 +230,10 @@ func GatherForks(config *chain.Config) (heightForks []uint64, timeForks []uint64
rule := conf.Field(i).Interface().(*big.Int)
if rule != nil {
if time {
- timeForks = append(timeForks, rule.Uint64())
+ t := rule.Uint64()
+ if t > genesisTime {
+ timeForks = append(timeForks, t)
+ }
} else {
heightForks = append(heightForks, rule.Uint64())
}
@@ -248,12 +244,16 @@ func GatherForks(config *chain.Config) (heightForks []uint64, timeForks []uint64
heightForks = append(heightForks, *config.Aura.PosdaoTransition)
}
+ if config.Bor != nil && config.Bor.AgraBlock != nil {
+ heightForks = append(heightForks, config.Bor.AgraBlock.Uint64())
+ }
+
// Sort the fork block numbers & times to permit chronological XOR
slices.Sort(heightForks)
slices.Sort(timeForks)
// Deduplicate block numbers/times applying to multiple forks
- heightForks = common.RemoveDuplicatesFromSorted(heightForks)
- timeForks = common.RemoveDuplicatesFromSorted(timeForks)
+ heightForks = libcommon.RemoveDuplicatesFromSorted(heightForks)
+ timeForks = libcommon.RemoveDuplicatesFromSorted(timeForks)
// Skip any forks in block 0, that's the genesis ruleset
if len(heightForks) > 0 && heightForks[0] == 0 {
heightForks = heightForks[1:]
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index d8894d81661..310beaa739a 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -24,7 +24,6 @@ import (
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/rlp"
)
@@ -34,6 +33,7 @@ import (
// Forks before Shanghai are triggered by the block number,
// while Shanghai and later forks are triggered by the block time.
func TestCreation(t *testing.T) {
+ t.Parallel()
type testcase struct {
head uint64
time uint64
@@ -142,10 +142,44 @@ func TestCreation(t *testing.T) {
{4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 0}}, // First Shanghai block
},
},
+ // Mumbai test cases
+ {
+ params.MumbaiChainConfig,
+ params.MumbaiGenesisHash,
+ []testcase{
+ {0, 0, ID{Hash: checksumToBytes(0xf6ef3fdf), Next: 2722000}},
+ {2722000, 0, ID{Hash: checksumToBytes(0x8647df30), Next: 13996000}}, // First Istanbul block
+ {13996000, 0, ID{Hash: checksumToBytes(0x06cc1179), Next: 22640000}}, // First Berlin block
+ {22640000, 0, ID{Hash: checksumToBytes(0x9adf950e), Next: 41874000}}, // First London block
+ {41874000, 0, ID{Hash: checksumToBytes(0x0c015a91), Next: 0}}, // First Agra block
+ },
+ },
+ // Amoy test cases
+ {
+ params.AmoyChainConfig,
+ params.AmoyGenesisHash,
+ []testcase{
+ {0, 0, ID{Hash: checksumToBytes(0xbe06a477), Next: 73100}},
+ {73100, 0, ID{Hash: checksumToBytes(0x135d2cd5), Next: 0}}, // First London, Jaipur, Delhi, Indore, Agra
+ },
+ },
+ // Bor mainnet test cases
+ {
+ params.BorMainnetChainConfig,
+ params.BorMainnetGenesisHash,
+ []testcase{
+ {0, 0, ID{Hash: checksumToBytes(0x0e07e722), Next: 3395000}},
+ {3395000, 0, ID{Hash: checksumToBytes(0x27806576), Next: 14750000}}, // First Istanbul block
+ {14750000, 0, ID{Hash: checksumToBytes(0x66e26adb), Next: 23850000}}, // First Berlin block
+ {23850000, 0, ID{Hash: checksumToBytes(0x4f2f71cc), Next: 50523000}}, // First London block
+ {50523000, 0, ID{Hash: checksumToBytes(0xdc08865c), Next: 0}}, // First Agra block
+ },
+ },
}
for i, tt := range tests {
for j, ttt := range tt.cases {
- if have := NewID(tt.config, tt.genesis, ttt.head, ttt.time); have != ttt.want {
+ heightForks, timeForks := GatherForks(tt.config, 0 /* genesisTime */)
+ if have := NewIDFromForks(heightForks, timeForks, tt.genesis, ttt.head, ttt.time); have != ttt.want {
t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
}
}
@@ -155,6 +189,7 @@ func TestCreation(t *testing.T) {
// TestValidation tests that a local peer correctly validates and accepts a remote
// fork ID.
func TestValidation(t *testing.T) {
+ t.Parallel()
tests := []struct {
head uint64
id ID
@@ -222,7 +257,7 @@ func TestValidation(t *testing.T) {
// fork) at block 7279999, before Petersburg. Local is incompatible.
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
}
- heightForks, timeForks := GatherForks(params.MainnetChainConfig)
+ heightForks, timeForks := GatherForks(params.MainnetChainConfig, 0 /* genesisTime */)
for i, tt := range tests {
filter := newFilter(heightForks, timeForks, params.MainnetGenesisHash, tt.head, 0)
if err := filter(tt.id); err != tt.err {
@@ -234,13 +269,14 @@ func TestValidation(t *testing.T) {
// Tests that IDs are properly RLP encoded (specifically important because we
// use uint32 to store the hash, but we need to encode it as [4]byte).
func TestEncoding(t *testing.T) {
+ t.Parallel()
tests := []struct {
id ID
want []byte
}{
- {ID{Hash: checksumToBytes(0), Next: 0}, common.Hex2Bytes("c6840000000080")},
- {ID{Hash: checksumToBytes(0xdeadbeef), Next: 0xBADDCAFE}, common.Hex2Bytes("ca84deadbeef84baddcafe,")},
- {ID{Hash: checksumToBytes(math.MaxUint32), Next: math.MaxUint64}, common.Hex2Bytes("ce84ffffffff88ffffffffffffffff")},
+ {ID{Hash: checksumToBytes(0), Next: 0}, libcommon.Hex2Bytes("c6840000000080")},
+ {ID{Hash: checksumToBytes(0xdeadbeef), Next: 0xBADDCAFE}, libcommon.Hex2Bytes("ca84deadbeef84baddcafe,")},
+ {ID{Hash: checksumToBytes(math.MaxUint32), Next: math.MaxUint64}, libcommon.Hex2Bytes("ce84ffffffff88ffffffffffffffff")},
}
for i, tt := range tests {
have, err := rlp.EncodeToBytes(tt.id)
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 05c7d4d778e..d5039236e7d 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/holiman/uint256"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/kv"
@@ -19,11 +20,11 @@ import (
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/params/networkname"
"github.com/ledgerwatch/log/v3"
)
func TestGenesisBlockHashes(t *testing.T) {
+ t.Parallel()
logger := log.New()
_, db, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil)
check := func(network string) {
@@ -45,6 +46,7 @@ func TestGenesisBlockHashes(t *testing.T) {
}
func TestGenesisBlockRoots(t *testing.T) {
+ t.Parallel()
require := require.New(t)
var err error
@@ -73,6 +75,7 @@ func TestGenesisBlockRoots(t *testing.T) {
}
func TestCommitGenesisIdempotency(t *testing.T) {
+ t.Parallel()
logger := log.New()
_, db, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil)
tx, err := db.BeginRw(context.Background())
@@ -94,6 +97,7 @@ func TestCommitGenesisIdempotency(t *testing.T) {
}
func TestAllocConstructor(t *testing.T) {
+ t.Parallel()
require := require.New(t)
assert := assert.New(t)
diff --git a/core/genesis_write.go b/core/genesis_write.go
index 7cbf4ccc03c..7c7f9750fe9 100644
--- a/core/genesis_write.go
+++ b/core/genesis_write.go
@@ -32,14 +32,15 @@ import (
"golang.org/x/exp/slices"
"github.com/ledgerwatch/erigon-lib/chain"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/consensus/merge"
"github.com/ledgerwatch/erigon/core/rawdb"
@@ -48,7 +49,6 @@ import (
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/params/networkname"
"github.com/ledgerwatch/erigon/turbo/trie"
)
@@ -343,6 +343,18 @@ func MainnetGenesisBlock() *types.Genesis {
}
}
+// HoleskyGenesisBlock returns the Holesky main net genesis block.
+func HoleskyGenesisBlock() *types.Genesis {
+ return &types.Genesis{
+ Config: params.HoleskyChainConfig,
+ Nonce: 4660,
+ GasLimit: 25000000,
+ Difficulty: big.NewInt(1),
+ Timestamp: 1695902100,
+ Alloc: readPrealloc("allocs/holesky.json"),
+ }
+}
+
// SepoliaGenesisBlock returns the Sepolia network genesis block.
func SepoliaGenesisBlock() *types.Genesis {
return &types.Genesis{
@@ -368,6 +380,7 @@ func GoerliGenesisBlock() *types.Genesis {
}
}
+// MumbaiGenesisBlock returns the Amoy network genesis block.
func MumbaiGenesisBlock() *types.Genesis {
return &types.Genesis{
Config: params.MumbaiChainConfig,
@@ -381,6 +394,20 @@ func MumbaiGenesisBlock() *types.Genesis {
}
}
+// AmoyGenesisBlock returns the Amoy network genesis block.
+func AmoyGenesisBlock() *types.Genesis {
+ return &types.Genesis{
+ Config: params.AmoyChainConfig,
+ Nonce: 0,
+ Timestamp: 1700225065,
+ GasLimit: 10000000,
+ Difficulty: big.NewInt(1),
+ Mixhash: libcommon.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ Coinbase: libcommon.HexToAddress("0x0000000000000000000000000000000000000000"),
+ Alloc: readPrealloc("allocs/amoy.json"),
+ }
+}
+
// BorMainnetGenesisBlock returns the Bor Mainnet network genesis block.
func BorMainnetGenesisBlock() *types.Genesis {
return &types.Genesis{
@@ -528,6 +555,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.Intra
go func() { // we may run inside write tx, can't open 2nd write tx in same goroutine
// TODO(yperbasis): use memdb.MemoryMutation instead
defer wg.Done()
+
genesisTmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen()
defer genesisTmpDB.Close()
var tx kv.RwTx
@@ -629,12 +657,16 @@ func GenesisBlockByChainName(chain string) *types.Genesis {
switch chain {
case networkname.MainnetChainName:
return MainnetGenesisBlock()
+ case networkname.HoleskyChainName:
+ return HoleskyGenesisBlock()
case networkname.SepoliaChainName:
return SepoliaGenesisBlock()
case networkname.GoerliChainName:
return GoerliGenesisBlock()
case networkname.MumbaiChainName:
return MumbaiGenesisBlock()
+ case networkname.AmoyChainName:
+ return AmoyGenesisBlock()
case networkname.BorMainnetChainName:
return BorMainnetGenesisBlock()
case networkname.BorDevnetChainName:
diff --git a/core/rawdb/accessors_account.go b/core/rawdb/accessors_account.go
index 254d4d9bb02..0607f04648e 100644
--- a/core/rawdb/accessors_account.go
+++ b/core/rawdb/accessors_account.go
@@ -23,7 +23,7 @@ import (
"github.com/ledgerwatch/erigon/core/types/accounts"
)
-func ReadAccount(db kv.Tx, addr libcommon.Address, acc *accounts.Account) (bool, error) {
+func ReadAccount(db kv.Getter, addr libcommon.Address, acc *accounts.Account) (bool, error) {
enc, err := db.GetOne(kv.PlainState, addr[:])
if err != nil {
return false, err
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 9ff8a9d9db0..c15de0e3de3 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -26,9 +26,10 @@ import (
"math/big"
"time"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
+
"github.com/gballet/go-verkle"
- common2 "github.com/ledgerwatch/erigon-lib/common"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/cmp"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
@@ -37,27 +38,30 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/log/v3"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/ethdb/cbor"
"github.com/ledgerwatch/erigon/rlp"
)
+const (
+ spanLength = 6400 // Number of blocks in a span
+ zerothSpanEnd = 255 // End block of 0th span
+)
+
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
-func ReadCanonicalHash(db kv.Getter, number uint64) (libcommon.Hash, error) {
+func ReadCanonicalHash(db kv.Getter, number uint64) (common.Hash, error) {
data, err := db.GetOne(kv.HeaderCanonical, hexutility.EncodeTs(number))
if err != nil {
- return libcommon.Hash{}, fmt.Errorf("failed ReadCanonicalHash: %w, number=%d", err, number)
+ return common.Hash{}, fmt.Errorf("failed ReadCanonicalHash: %w, number=%d", err, number)
}
if len(data) == 0 {
- return libcommon.Hash{}, nil
+ return common.Hash{}, nil
}
- return libcommon.BytesToHash(data), nil
+ return common.BytesToHash(data), nil
}
// WriteCanonicalHash stores the hash assigned to a canonical block number.
-func WriteCanonicalHash(db kv.Putter, hash libcommon.Hash, number uint64) error {
+func WriteCanonicalHash(db kv.Putter, hash common.Hash, number uint64) error {
if err := db.Put(kv.HeaderCanonical, hexutility.EncodeTs(number), hash.Bytes()); err != nil {
return fmt.Errorf("failed to store number to hash mapping: %w", err)
}
@@ -87,7 +91,7 @@ func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64, markChainAsBad bool) er
}
// IsCanonicalHashDeprecated determines whether a header with the given hash is on the canonical chain.
-func IsCanonicalHashDeprecated(db kv.Getter, hash libcommon.Hash) (bool, *uint64, error) {
+func IsCanonicalHashDeprecated(db kv.Getter, hash common.Hash) (bool, *uint64, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return false, nil, nil
@@ -96,19 +100,19 @@ func IsCanonicalHashDeprecated(db kv.Getter, hash libcommon.Hash) (bool, *uint64
if err != nil {
return false, nil, err
}
- return canonicalHash != (libcommon.Hash{}) && canonicalHash == hash, number, nil
+ return canonicalHash != (common.Hash{}) && canonicalHash == hash, number, nil
}
-func IsCanonicalHash(db kv.Getter, hash libcommon.Hash, number uint64) (bool, error) {
+func IsCanonicalHash(db kv.Getter, hash common.Hash, number uint64) (bool, error) {
canonicalHash, err := ReadCanonicalHash(db, number)
if err != nil {
return false, err
}
- return canonicalHash != (libcommon.Hash{}) && canonicalHash == hash, nil
+ return canonicalHash != (common.Hash{}) && canonicalHash == hash, nil
}
// ReadHeaderNumber returns the header number assigned to a hash.
-func ReadHeaderNumber(db kv.Getter, hash libcommon.Hash) *uint64 {
+func ReadHeaderNumber(db kv.Getter, hash common.Hash) *uint64 {
data, err := db.GetOne(kv.HeaderNumber, hash.Bytes())
if err != nil {
log.Error("ReadHeaderNumber failed", "err", err)
@@ -123,7 +127,7 @@ func ReadHeaderNumber(db kv.Getter, hash libcommon.Hash) *uint64 {
number := binary.BigEndian.Uint64(data)
return &number
}
-func ReadBadHeaderNumber(db kv.Getter, hash libcommon.Hash) (*uint64, error) {
+func ReadBadHeaderNumber(db kv.Getter, hash common.Hash) (*uint64, error) {
data, err := db.GetOne(kv.BadHeaderNumber, hash.Bytes())
if err != nil {
return nil, err
@@ -139,7 +143,7 @@ func ReadBadHeaderNumber(db kv.Getter, hash libcommon.Hash) (*uint64, error) {
}
// WriteHeaderNumber stores the hash->number mapping.
-func WriteHeaderNumber(db kv.Putter, hash libcommon.Hash, number uint64) error {
+func WriteHeaderNumber(db kv.Putter, hash common.Hash, number uint64) error {
if err := db.Put(kv.HeaderNumber, hash[:], hexutility.EncodeTs(number)); err != nil {
return err
}
@@ -147,19 +151,19 @@ func WriteHeaderNumber(db kv.Putter, hash libcommon.Hash, number uint64) error {
}
// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
-func ReadHeadHeaderHash(db kv.Getter) libcommon.Hash {
+func ReadHeadHeaderHash(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey))
if err != nil {
log.Error("ReadHeadHeaderHash failed", "err", err)
}
if len(data) == 0 {
- return libcommon.Hash{}
+ return common.Hash{}
}
- return libcommon.BytesToHash(data)
+ return common.BytesToHash(data)
}
// WriteHeadHeaderHash stores the hash of the current canonical head header.
-func WriteHeadHeaderHash(db kv.Putter, hash libcommon.Hash) error {
+func WriteHeadHeaderHash(db kv.Putter, hash common.Hash) error {
if err := db.Put(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey), hash.Bytes()); err != nil {
return fmt.Errorf("failed to store last header's hash: %w", err)
}
@@ -167,89 +171,89 @@ func WriteHeadHeaderHash(db kv.Putter, hash libcommon.Hash) error {
}
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
-func ReadHeadBlockHash(db kv.Getter) libcommon.Hash {
+func ReadHeadBlockHash(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.HeadBlockKey, []byte(kv.HeadBlockKey))
if err != nil {
log.Error("ReadHeadBlockHash failed", "err", err)
}
if len(data) == 0 {
- return libcommon.Hash{}
+ return common.Hash{}
}
- return libcommon.BytesToHash(data)
+ return common.BytesToHash(data)
}
// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db kv.Putter, hash libcommon.Hash) {
+func WriteHeadBlockHash(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.HeadBlockKey, []byte(kv.HeadBlockKey), hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
}
// ReadForkchoiceHead retrieves headBlockHash from the last Engine API forkChoiceUpdated.
-func ReadForkchoiceHead(db kv.Getter) libcommon.Hash {
+func ReadForkchoiceHead(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("headBlockHash"))
if err != nil {
log.Error("ReadForkchoiceHead failed", "err", err)
}
if len(data) == 0 {
- return libcommon.Hash{}
+ return common.Hash{}
}
- return libcommon.BytesToHash(data)
+ return common.BytesToHash(data)
}
// WriteForkchoiceHead stores headBlockHash from the last Engine API forkChoiceUpdated.
-func WriteForkchoiceHead(db kv.Putter, hash libcommon.Hash) {
+func WriteForkchoiceHead(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("headBlockHash"), hash[:]); err != nil {
log.Crit("Failed to store head block hash", "err", err)
}
}
// ReadForkchoiceSafe retrieves safeBlockHash from the last Engine API forkChoiceUpdated.
-func ReadForkchoiceSafe(db kv.Getter) libcommon.Hash {
+func ReadForkchoiceSafe(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("safeBlockHash"))
if err != nil {
log.Error("ReadForkchoiceSafe failed", "err", err)
- return libcommon.Hash{}
+ return common.Hash{}
}
if len(data) == 0 {
- return libcommon.Hash{}
+ return common.Hash{}
}
- return libcommon.BytesToHash(data)
+ return common.BytesToHash(data)
}
// WriteForkchoiceSafe stores safeBlockHash from the last Engine API forkChoiceUpdated.
-func WriteForkchoiceSafe(db kv.Putter, hash libcommon.Hash) {
+func WriteForkchoiceSafe(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("safeBlockHash"), hash[:]); err != nil {
log.Crit("Failed to store safe block hash", "err", err)
}
}
// ReadForkchoiceFinalized retrieves finalizedBlockHash from the last Engine API forkChoiceUpdated.
-func ReadForkchoiceFinalized(db kv.Getter) libcommon.Hash {
+func ReadForkchoiceFinalized(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("finalizedBlockHash"))
if err != nil {
log.Error("ReadForkchoiceFinalize failed", "err", err)
- return libcommon.Hash{}
+ return common.Hash{}
}
if len(data) == 0 {
- return libcommon.Hash{}
+ return common.Hash{}
}
- return libcommon.BytesToHash(data)
+ return common.BytesToHash(data)
}
// WriteForkchoiceFinalized stores finalizedBlockHash from the last Engine API forkChoiceUpdated.
-func WriteForkchoiceFinalized(db kv.Putter, hash libcommon.Hash) {
+func WriteForkchoiceFinalized(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("finalizedBlockHash"), hash[:]); err != nil {
log.Crit("Failed to safe finalized block hash", "err", err)
}
}
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
-func ReadHeaderRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.RawValue {
+func ReadHeaderRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue {
data, err := db.GetOne(kv.Headers, dbutils.HeaderKey(number, hash))
if err != nil {
log.Error("ReadHeaderRLP failed", "err", err)
@@ -258,7 +262,7 @@ func ReadHeaderRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.RawValu
}
// ReadHeader retrieves the block header corresponding to the hash.
-func ReadHeader(db kv.Getter, hash libcommon.Hash, number uint64) *types.Header {
+func ReadHeader(db kv.Getter, hash common.Hash, number uint64) *types.Header {
data := ReadHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
@@ -333,7 +337,7 @@ func WriteHeader(db kv.RwTx, header *types.Header) error {
}
return nil
}
-func WriteHeaderRaw(db kv.StatelessRwTx, number uint64, hash libcommon.Hash, headerRlp []byte, skipIndexing bool) error {
+func WriteHeaderRaw(db kv.StatelessRwTx, number uint64, hash common.Hash, headerRlp []byte, skipIndexing bool) error {
if err := db.Put(kv.Headers, dbutils.HeaderKey(number, hash), headerRlp); err != nil {
return err
}
@@ -347,7 +351,7 @@ func WriteHeaderRaw(db kv.StatelessRwTx, number uint64, hash libcommon.Hash, hea
}
// DeleteHeader - dangerous, use PruneBlocks/TruncateBlocks methods
-func DeleteHeader(db kv.Deleter, hash libcommon.Hash, number uint64) {
+func DeleteHeader(db kv.Deleter, hash common.Hash, number uint64) {
if err := db.Delete(kv.Headers, dbutils.HeaderKey(number, hash)); err != nil {
log.Crit("Failed to delete header", "err", err)
}
@@ -357,7 +361,7 @@ func DeleteHeader(db kv.Deleter, hash libcommon.Hash, number uint64) {
}
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
-func ReadBodyRLP(db kv.Tx, hash libcommon.Hash, number uint64) rlp.RawValue {
+func ReadBodyRLP(db kv.Tx, hash common.Hash, number uint64) rlp.RawValue {
body, _ := ReadBodyWithTransactions(db, hash, number)
bodyRlp, err := rlp.EncodeToBytes(body)
if err != nil {
@@ -365,7 +369,7 @@ func ReadBodyRLP(db kv.Tx, hash libcommon.Hash, number uint64) rlp.RawValue {
}
return bodyRlp
}
-func ReadStorageBodyRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.RawValue {
+func ReadStorageBodyRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue {
bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
@@ -373,7 +377,7 @@ func ReadStorageBodyRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.Ra
return bodyRlp
}
-func ReadStorageBody(db kv.Getter, hash libcommon.Hash, number uint64) (types.BodyForStorage, error) {
+func ReadStorageBody(db kv.Getter, hash common.Hash, number uint64) (types.BodyForStorage, error) {
bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
@@ -385,7 +389,7 @@ func ReadStorageBody(db kv.Getter, hash libcommon.Hash, number uint64) (types.Bo
return *bodyForStorage, nil
}
-func TxnByIdxInBlock(db kv.Getter, blockHash libcommon.Hash, blockNum uint64, txIdxInBlock int) (types.Transaction, error) {
+func TxnByIdxInBlock(db kv.Getter, blockHash common.Hash, blockNum uint64, txIdxInBlock int) (types.Transaction, error) {
b, err := ReadBodyForStorageByKey(db, dbutils.BlockBodyKey(blockNum, blockHash))
if err != nil {
return nil, err
@@ -483,7 +487,7 @@ func WriteRawTransactions(tx kv.RwTx, txs [][]byte, baseTxId uint64) error {
}
// WriteBodyForStorage stores an RLP encoded block body into the database.
-func WriteBodyForStorage(db kv.Putter, hash libcommon.Hash, number uint64, body *types.BodyForStorage) error {
+func WriteBodyForStorage(db kv.Putter, hash common.Hash, number uint64, body *types.BodyForStorage) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
@@ -497,14 +501,14 @@ func ReadBodyByNumber(db kv.Tx, number uint64) (*types.Body, uint64, uint32, err
if err != nil {
return nil, 0, 0, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
- if hash == (libcommon.Hash{}) {
+ if hash == (common.Hash{}) {
return nil, 0, 0, nil
}
body, baseTxId, txAmount := ReadBody(db, hash, number)
return body, baseTxId, txAmount, nil
}
-func ReadBodyWithTransactions(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Body, error) {
+func ReadBodyWithTransactions(db kv.Getter, hash common.Hash, number uint64) (*types.Body, error) {
body, baseTxId, txAmount := ReadBody(db, hash, number)
if body == nil {
return nil, nil
@@ -581,7 +585,7 @@ func ReadBodyForStorageByKey(db kv.Getter, k []byte) (*types.BodyForStorage, err
return bodyForStorage, nil
}
-func ReadBody(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Body, uint64, uint32) {
+func ReadBody(db kv.Getter, hash common.Hash, number uint64) (*types.Body, uint64, uint32) {
data := ReadStorageBodyRLP(db, hash, number)
if len(data) == 0 {
return nil, 0, 0
@@ -602,23 +606,23 @@ func ReadBody(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Body, ui
return body, bodyForStorage.BaseTxId + 1, bodyForStorage.TxAmount - 2 // 1 system txn in the begining of block, and 1 at the end
}
-func HasSenders(db kv.Getter, hash libcommon.Hash, number uint64) (bool, error) {
+func HasSenders(db kv.Getter, hash common.Hash, number uint64) (bool, error) {
return db.Has(kv.Senders, dbutils.BlockBodyKey(number, hash))
}
-func ReadSenders(db kv.Getter, hash libcommon.Hash, number uint64) ([]libcommon.Address, error) {
+func ReadSenders(db kv.Getter, hash common.Hash, number uint64) ([]common.Address, error) {
data, err := db.GetOne(kv.Senders, dbutils.BlockBodyKey(number, hash))
if err != nil {
return nil, fmt.Errorf("readSenders failed: %w", err)
}
- senders := make([]libcommon.Address, len(data)/length.Addr)
+ senders := make([]common.Address, len(data)/length.Addr)
for i := 0; i < len(senders); i++ {
copy(senders[i][:], data[i*length.Addr:])
}
return senders, nil
}
-func WriteRawBodyIfNotExists(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.RawBody) (ok bool, err error) {
+func WriteRawBodyIfNotExists(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) (ok bool, err error) {
exists, err := db.Has(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
return false, err
@@ -629,7 +633,7 @@ func WriteRawBodyIfNotExists(db kv.RwTx, hash libcommon.Hash, number uint64, bod
return WriteRawBody(db, hash, number, body)
}
-func WriteRawBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.RawBody) (ok bool, err error) {
+func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) (ok bool, err error) {
baseTxnID, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2)
if err != nil {
return false, err
@@ -650,7 +654,7 @@ func WriteRawBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Ra
return true, nil
}
-func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body) (err error) {
+func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) (err error) {
// Pre-processing
body.SendersFromTxs()
baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2)
@@ -672,7 +676,7 @@ func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body)
return nil
}
-func WriteSenders(db kv.Putter, hash libcommon.Hash, number uint64, senders []libcommon.Address) error {
+func WriteSenders(db kv.Putter, hash common.Hash, number uint64, senders []common.Address) error {
data := make([]byte, length.Addr*len(senders))
for i, sender := range senders {
copy(data[i*length.Addr:], sender[:])
@@ -684,7 +688,7 @@ func WriteSenders(db kv.Putter, hash libcommon.Hash, number uint64, senders []li
}
// DeleteBody removes all block body data associated with a hash.
-func DeleteBody(db kv.Deleter, hash libcommon.Hash, number uint64) {
+func DeleteBody(db kv.Deleter, hash common.Hash, number uint64) {
if err := db.Delete(kv.BlockBody, dbutils.BlockBodyKey(number, hash)); err != nil {
log.Crit("Failed to delete block body", "err", err)
}
@@ -705,7 +709,7 @@ func AppendCanonicalTxNums(tx kv.RwTx, from uint64) (err error) {
if err != nil {
return err
}
- if h == (libcommon.Hash{}) {
+ if h == (common.Hash{}) {
break
}
@@ -728,7 +732,7 @@ func AppendCanonicalTxNums(tx kv.RwTx, from uint64) (err error) {
}
// ReadTd retrieves a block's total difficulty corresponding to the hash.
-func ReadTd(db kv.Getter, hash libcommon.Hash, number uint64) (*big.Int, error) {
+func ReadTd(db kv.Getter, hash common.Hash, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.HeaderTD, dbutils.HeaderKey(number, hash))
if err != nil {
return nil, fmt.Errorf("failed ReadTd: %w", err)
@@ -743,7 +747,7 @@ func ReadTd(db kv.Getter, hash libcommon.Hash, number uint64) (*big.Int, error)
return td, nil
}
-func ReadTdByHash(db kv.Getter, hash libcommon.Hash) (*big.Int, error) {
+func ReadTdByHash(db kv.Getter, hash common.Hash) (*big.Int, error) {
headNumber := ReadHeaderNumber(db, hash)
if headNumber == nil {
return nil, nil
@@ -752,7 +756,7 @@ func ReadTdByHash(db kv.Getter, hash libcommon.Hash) (*big.Int, error) {
}
// WriteTd stores the total difficulty of a block into the database.
-func WriteTd(db kv.Putter, hash libcommon.Hash, number uint64, td *big.Int) error {
+func WriteTd(db kv.Putter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return fmt.Errorf("failed to RLP encode block total difficulty: %w", err)
@@ -830,7 +834,7 @@ func ReadRawReceipts(db kv.Tx, blockNum uint64) types.Receipts {
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
-func ReadReceipts(db kv.Tx, block *types.Block, senders []libcommon.Address) types.Receipts {
+func ReadReceipts(db kv.Tx, block *types.Block, senders []common.Address) types.Receipts {
if block == nil {
return nil
}
@@ -954,7 +958,7 @@ func ReceiptsAvailableFrom(tx kv.Tx) (uint64, error) {
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
-func ReadBlock(tx kv.Getter, hash libcommon.Hash, number uint64) *types.Block {
+func ReadBlock(tx kv.Getter, hash common.Hash, number uint64) *types.Block {
header := ReadHeader(tx, hash, number)
if header == nil {
return nil
@@ -968,12 +972,12 @@ func ReadBlock(tx kv.Getter, hash libcommon.Hash, number uint64) *types.Block {
// HasBlock - is more efficient than ReadBlock because doesn't read transactions.
// It's is not equivalent of HasHeader because headers and bodies written by different stages
-func HasBlock(db kv.Getter, hash libcommon.Hash, number uint64) bool {
+func HasBlock(db kv.Getter, hash common.Hash, number uint64) bool {
body := ReadStorageBodyRLP(db, hash, number)
return len(body) > 0
}
-func ReadBlockWithSenders(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Block, []libcommon.Address, error) {
+func ReadBlockWithSenders(db kv.Getter, hash common.Hash, number uint64) (*types.Block, []common.Address, error) {
block := ReadBlock(db, hash, number)
if block == nil {
return nil, nil, nil
@@ -1051,7 +1055,7 @@ func PruneBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
}
// Copying k because otherwise the same memory will be reused
// for the next key and Delete below will end up deleting 1 more record than required
- kCopy := common.CopyBytes(k)
+ kCopy := common.Copy(k)
if err = tx.Delete(kv.Senders, kCopy); err != nil {
return err
}
@@ -1066,6 +1070,74 @@ func PruneBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
return nil
}
+// PruneBorBlocks - delete [1, to) old blocks after moving it to snapshots.
+// keeps genesis in db: [1, to)
+// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs
+// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty
+func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
+ c, err := tx.Cursor(kv.BorEventNums)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ var blockNumBytes [8]byte
+ binary.BigEndian.PutUint64(blockNumBytes[:], blockTo)
+ k, v, err := c.Seek(blockNumBytes[:])
+ if err != nil {
+ return err
+ }
+ var eventIdTo uint64 = math.MaxUint64
+ if k != nil {
+ eventIdTo = binary.BigEndian.Uint64(v)
+ }
+ c1, err := tx.RwCursor(kv.BorEvents)
+ if err != nil {
+ return err
+ }
+ defer c1.Close()
+ counter := blocksDeleteLimit
+ for k, _, err = c1.First(); err == nil && k != nil && counter > 0; k, _, err = c1.Next() {
+ eventId := binary.BigEndian.Uint64(k)
+ if eventId >= eventIdTo {
+ break
+ }
+ if err = c1.DeleteCurrent(); err != nil {
+ return err
+ }
+ counter--
+ }
+ if err != nil {
+ return err
+ }
+ var firstSpanToKeep uint64
+ if blockTo > zerothSpanEnd {
+ firstSpanToKeep = 1 + (blockTo-zerothSpanEnd-1)/spanLength
+ }
+ c2, err := tx.RwCursor(kv.BorSpans)
+ if err != nil {
+ return err
+ }
+ defer c2.Close()
+ counter = blocksDeleteLimit
+ for k, _, err := c2.First(); err == nil && k != nil && counter > 0; k, _, err = c2.Next() {
+ spanId := binary.BigEndian.Uint64(k)
+ if spanId >= firstSpanToKeep {
+ break
+ }
+ if err = c2.DeleteCurrent(); err != nil {
+ return err
+ }
+ counter--
+ }
+ return nil
+}
+
+func TruncateCanonicalChain(ctx context.Context, db kv.RwTx, from uint64) error {
+ return db.ForEach(kv.HeaderCanonical, hexutility.EncodeTs(from), func(k, _ []byte) error {
+ return db.Delete(kv.HeaderCanonical, k)
+ })
+}
+
// TruncateBlocks - delete block >= blockFrom
// does decrement sequences of kv.EthTx and kv.NonCanonicalTxs
// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty
@@ -1091,7 +1163,7 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error {
}
// Copying k because otherwise the same memory will be reused
// for the next key and Delete below will end up deleting 1 more record than required
- kCopy := common.CopyBytes(k)
+ kCopy := common.Copy(k)
if err := tx.Delete(kv.Senders, kCopy); err != nil {
return err
}
@@ -1106,7 +1178,7 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
- log.Info("TruncateBlocks", "block", binary.BigEndian.Uint64(k))
+ log.Info("TruncateBlocks", "block", binary.BigEndian.Uint64(kCopy))
default:
}
return nil
@@ -1144,14 +1216,14 @@ func ReadHeaderByNumber(db kv.Getter, number uint64) *types.Header {
log.Error("ReadCanonicalHash failed", "err", err)
return nil
}
- if hash == (libcommon.Hash{}) {
+ if hash == (common.Hash{}) {
return nil
}
return ReadHeader(db, hash, number)
}
-func ReadHeaderByHash(db kv.Getter, hash libcommon.Hash) (*types.Header, error) {
+func ReadHeaderByHash(db kv.Getter, hash common.Hash) (*types.Header, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return nil, nil
@@ -1169,54 +1241,54 @@ func DeleteNewerEpochs(tx kv.RwTx, number uint64) error {
return tx.Delete(kv.Epoch, k)
})
}
-func ReadEpoch(tx kv.Tx, blockNum uint64, blockHash libcommon.Hash) (transitionProof []byte, err error) {
+func ReadEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) {
k := make([]byte, dbutils.NumberLength+length.Hash)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[dbutils.NumberLength:], blockHash[:])
return tx.GetOne(kv.Epoch, k)
}
-func FindEpochBeforeOrEqualNumber(tx kv.Tx, n uint64) (blockNum uint64, blockHash libcommon.Hash, transitionProof []byte, err error) {
+func FindEpochBeforeOrEqualNumber(tx kv.Tx, n uint64) (blockNum uint64, blockHash common.Hash, transitionProof []byte, err error) {
c, err := tx.Cursor(kv.Epoch)
if err != nil {
- return 0, libcommon.Hash{}, nil, err
+ return 0, common.Hash{}, nil, err
}
defer c.Close()
seek := hexutility.EncodeTs(n)
k, v, err := c.Seek(seek)
if err != nil {
- return 0, libcommon.Hash{}, nil, err
+ return 0, common.Hash{}, nil, err
}
if k != nil {
num := binary.BigEndian.Uint64(k)
if num == n {
- return n, libcommon.BytesToHash(k[dbutils.NumberLength:]), v, nil
+ return n, common.BytesToHash(k[dbutils.NumberLength:]), v, nil
}
}
k, v, err = c.Prev()
if err != nil {
- return 0, libcommon.Hash{}, nil, err
+ return 0, common.Hash{}, nil, err
}
if k == nil {
- return 0, libcommon.Hash{}, nil, nil
+ return 0, common.Hash{}, nil, nil
}
- return binary.BigEndian.Uint64(k), libcommon.BytesToHash(k[dbutils.NumberLength:]), v, nil
+ return binary.BigEndian.Uint64(k), common.BytesToHash(k[dbutils.NumberLength:]), v, nil
}
-func WriteEpoch(tx kv.RwTx, blockNum uint64, blockHash libcommon.Hash, transitionProof []byte) (err error) {
+func WriteEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) {
k := make([]byte, dbutils.NumberLength+length.Hash)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[dbutils.NumberLength:], blockHash[:])
return tx.Put(kv.Epoch, k, transitionProof)
}
-func ReadPendingEpoch(tx kv.Tx, blockNum uint64, blockHash libcommon.Hash) (transitionProof []byte, err error) {
+func ReadPendingEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) {
k := make([]byte, 8+32)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[8:], blockHash[:])
return tx.GetOne(kv.PendingEpoch, k)
}
-func WritePendingEpoch(tx kv.RwTx, blockNum uint64, blockHash libcommon.Hash, transitionProof []byte) (err error) {
+func WritePendingEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) {
k := make([]byte, 8+32)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[8:], blockHash[:])
@@ -1229,7 +1301,7 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In
return false, nil
}
- if terminalTotalDifficulty.Cmp(libcommon.Big0) == 0 {
+ if terminalTotalDifficulty.Cmp(common.Big0) == 0 {
return true, nil
}
header := ReadHeaderByNumber(db, blockNum)
@@ -1237,7 +1309,7 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In
return false, nil
}
- if header.Difficulty.Cmp(libcommon.Big0) == 0 {
+ if header.Difficulty.Cmp(common.Big0) == 0 {
return true, nil
}
@@ -1250,7 +1322,7 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In
}
// IsPosBlock returns true if the block number comes after POS transition or is the last POW block
-func IsPosBlock(db kv.Getter, blockHash libcommon.Hash) (trans bool, err error) {
+func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) {
header, err := ReadHeaderByHash(db, blockHash)
if err != nil {
return false, err
@@ -1259,7 +1331,7 @@ func IsPosBlock(db kv.Getter, blockHash libcommon.Hash) (trans bool, err error)
return false, nil
}
- return header.Difficulty.Cmp(libcommon.Big0) == 0, nil
+ return header.Difficulty.Cmp(common.Big0) == 0, nil
}
var SnapshotsKey = []byte("snapshots")
@@ -1324,7 +1396,7 @@ func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, l
}
select {
case <-ctx.Done():
- return common2.ErrStopped
+ return common.ErrStopped
default:
}
if err = c.DeleteCurrent(); err != nil {
@@ -1353,7 +1425,7 @@ func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint6
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum)
case <-ctx.Done():
- return common2.ErrStopped
+ return common.ErrStopped
default:
}
if err = tx.Delete(table, k); err != nil {
@@ -1363,22 +1435,22 @@ func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint6
return nil
}
-func ReadVerkleRoot(tx kv.Tx, blockNum uint64) (libcommon.Hash, error) {
+func ReadVerkleRoot(tx kv.Tx, blockNum uint64) (common.Hash, error) {
root, err := tx.GetOne(kv.VerkleRoots, hexutility.EncodeTs(blockNum))
if err != nil {
- return libcommon.Hash{}, err
+ return common.Hash{}, err
}
- return libcommon.BytesToHash(root), nil
+ return common.BytesToHash(root), nil
}
-func WriteVerkleRoot(tx kv.RwTx, blockNum uint64, root libcommon.Hash) error {
+func WriteVerkleRoot(tx kv.RwTx, blockNum uint64, root common.Hash) error {
return tx.Put(kv.VerkleRoots, hexutility.EncodeTs(blockNum), root[:])
}
func WriteVerkleNode(tx kv.RwTx, node verkle.VerkleNode) error {
var (
- root libcommon.Hash
+ root common.Hash
encoded []byte
err error
)
@@ -1391,7 +1463,7 @@ func WriteVerkleNode(tx kv.RwTx, node verkle.VerkleNode) error {
return tx.Put(kv.VerkleTrie, root[:], encoded)
}
-func ReadVerkleNode(tx kv.RwTx, root libcommon.Hash) (verkle.VerkleNode, error) {
+func ReadVerkleNode(tx kv.RwTx, root common.Hash) (verkle.VerkleNode, error) {
encoded, err := tx.GetOne(kv.VerkleTrie, root[:])
if err != nil {
return nil, err
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index a317d7b133e..9b6e9a84e77 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -41,6 +41,7 @@ import (
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -81,6 +82,7 @@ func TestHeaderStorage(t *testing.T) {
// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -149,6 +151,7 @@ func TestBodyStorage(t *testing.T) {
// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
require := require.New(t)
tx, err := m.DB.BeginRw(m.Ctx)
@@ -259,6 +262,7 @@ func TestBlockStorage(t *testing.T) {
// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -305,6 +309,7 @@ func TestPartialBlockStorage(t *testing.T) {
// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -349,6 +354,7 @@ func TestTdStorage(t *testing.T) {
// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -394,6 +400,7 @@ func TestCanonicalMappingStorage(t *testing.T) {
// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage2(t *testing.T) {
+ t.Parallel()
_, db := memdb.NewTestTx(t)
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
@@ -421,6 +428,7 @@ func TestHeadStorage2(t *testing.T) {
// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -444,6 +452,7 @@ func TestHeadStorage(t *testing.T) {
// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
@@ -536,6 +545,7 @@ func TestBlockReceiptStorage(t *testing.T) {
// Tests block storage and retrieval operations with withdrawals.
func TestBlockWithdrawalsStorage(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
require := require.New(t)
tx, err := m.DB.BeginRw(m.Ctx)
@@ -672,6 +682,7 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
// Tests pre-shanghai body to make sure withdrawals doesn't panic
func TestPreShanghaiBodyNoPanicOnWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const bodyRlp = "f902bef8bef85d0101019471562b71999873db5b286df957af199ec94617f701801ca023f4aad9a71341d2990012a732366c3bc8a4ce9ff54c05546a9487445ac67692a0290d3a1411c2a675a4c12c98af60e34ea4d689f0ddfe0250a9e09c0819dfe3bff85d0201029471562b71999873db5b286df957af199ec94617f701801ca0f824d7edc241758aca948ff34d3797e4e31003f76cc9e05fb9c19e967fc48113a070e1389f0fa23fe765a04b23e98f98db6d630e3a035c1c7c968142ababb85a1df901fbf901f8a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080808080808b7465737420686561646572a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
@@ -686,6 +697,7 @@ func TestPreShanghaiBodyNoPanicOnWithdrawals(t *testing.T) {
// Tests pre-shanghai bodyForStorage to make sure withdrawals doesn't panic
func TestPreShanghaiBodyForStorageNoPanicOnWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const bodyForStorageRlp = "c38002c0"
@@ -700,6 +712,7 @@ func TestPreShanghaiBodyForStorageNoPanicOnWithdrawals(t *testing.T) {
// Tests shanghai bodyForStorage to make sure withdrawals are present
func TestShanghaiBodyForStorageHasWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const bodyForStorageRlp = "f83f8002c0f83adc0f82157c94ff000000000000000000000000000000000000008203e8dc1082157d94ff000000000000000000000000000000000000008203e9"
@@ -715,6 +728,7 @@ func TestShanghaiBodyForStorageHasWithdrawals(t *testing.T) {
// Tests shanghai bodyForStorage to make sure when no withdrawals the slice is empty (not nil)
func TestShanghaiBodyForStorageNoWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const bodyForStorageRlp = "c48002c0c0c0"
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index dc7283bd653..6901a6c5eb3 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -34,6 +34,7 @@ import (
// Tests that positional lookup metadata can be stored and retrieved.
func TestLookupStorage(t *testing.T) {
+ t.Parallel()
tests := []struct {
name string
writeTxLookupEntries func(kv.Putter, *types.Block)
@@ -48,7 +49,9 @@ func TestLookupStorage(t *testing.T) {
}
for _, tc := range tests {
+ tc := tc
t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
m := mock.Mock(t)
br := m.BlockReader
tx, err := m.DB.BeginRw(m.Ctx)
diff --git a/core/rawdb/blockio/block_writer.go b/core/rawdb/blockio/block_writer.go
index a4a9e7702f0..73264cda1fa 100644
--- a/core/rawdb/blockio/block_writer.go
+++ b/core/rawdb/blockio/block_writer.go
@@ -3,13 +3,13 @@ package blockio
import (
"context"
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/etl"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/turbo/backup"
"github.com/ledgerwatch/log/v3"
@@ -109,3 +109,11 @@ func (w *BlockWriter) TruncateBodies(db kv.RoDB, tx kv.RwTx, from uint64) error
func (w *BlockWriter) PruneBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
return rawdb.PruneBlocks(tx, blockTo, blocksDeleteLimit)
}
+
+// PruneBorBlocks - [1, to) old blocks after moving it to snapshots.
+// keeps genesis in db
+// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs
+// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty
+func (w *BlockWriter) PruneBorBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
+ return rawdb.PruneBorBlocks(tx, blockTo, blocksDeleteLimit)
+}
diff --git a/core/rawdb/bor_receipts.go b/core/rawdb/bor_receipts.go
index 35027124ee3..d05c25f1a44 100644
--- a/core/rawdb/bor_receipts.go
+++ b/core/rawdb/bor_receipts.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -11,7 +12,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/ethdb/cbor"
"github.com/ledgerwatch/erigon/rlp"
@@ -164,7 +164,7 @@ func ReadBorTransaction(db kv.Tx, borTxHash libcommon.Hash) (types.Transaction,
return borTx, err
}
-func ReadBorTxLookupEntry(db kv.Tx, borTxHash libcommon.Hash) (*uint64, error) {
+func ReadBorTxLookupEntry(db kv.Getter, borTxHash libcommon.Hash) (*uint64, error) {
blockNumBytes, err := db.GetOne(kv.BorTxLookup, borTxHash.Bytes())
if err != nil {
return nil, err
diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go
index 93938e78e90..cb5e238b8c8 100644
--- a/core/rawdb/rawdbreset/reset_stages.go
+++ b/core/rawdb/rawdbreset/reset_stages.go
@@ -98,6 +98,18 @@ func ResetBlocks(tx kv.RwTx, db kv.RoDB, agg *state.AggregatorV3,
return nil
}
+func ResetBorHeimdall(ctx context.Context, tx kv.RwTx) error {
+ if err := tx.ClearBucket(kv.BorEventNums); err != nil {
+ return err
+ }
+ if err := tx.ClearBucket(kv.BorEvents); err != nil {
+ return err
+ }
+ if err := tx.ClearBucket(kv.BorSpans); err != nil {
+ return err
+ }
+ return clearStageProgress(tx, stages.BorHeimdall)
+}
func ResetSenders(ctx context.Context, db kv.RwDB, tx kv.RwTx) error {
if err := backup.ClearTables(ctx, db, tx, kv.Senders); err != nil {
return nil
diff --git a/core/rlp_test.go b/core/rlp_test.go
index 8ef1c13ba83..65b98dbff43 100644
--- a/core/rlp_test.go
+++ b/core/rlp_test.go
@@ -73,6 +73,7 @@ func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir
// TestRlpIterator tests that individual transactions can be picked out
// from blocks without full unmarshalling/marshalling
func TestRlpIterator(t *testing.T) {
+ t.Parallel()
for _, tt := range []struct {
txs int
uncles int
diff --git a/core/skip_analysis.go b/core/skip_analysis.go
index 96c9226bdd9..55d38fc15ec 100644
--- a/core/skip_analysis.go
+++ b/core/skip_analysis.go
@@ -20,8 +20,7 @@ import (
"sort"
"github.com/ledgerwatch/erigon-lib/chain"
-
- "github.com/ledgerwatch/erigon/params/networkname"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
)
// SkipAnalysis function tells us whether we can skip performing jumpdest analysis
diff --git a/core/state/access_list_test.go b/core/state/access_list_test.go
index 03ad2511203..10b6dab49f9 100644
--- a/core/state/access_list_test.go
+++ b/core/state/access_list_test.go
@@ -63,6 +63,7 @@ func verifySlots(t *testing.T, s *IntraBlockState, addrString string, slotString
}
func TestAccessList(t *testing.T) {
+ t.Parallel()
// Some helpers
addr := common.HexToAddress
slot := common.HexToHash
diff --git a/core/state/cached_reader2.go b/core/state/cached_reader2.go
index 58e63b4620f..915544319c8 100644
--- a/core/state/cached_reader2.go
+++ b/core/state/cached_reader2.go
@@ -3,12 +3,12 @@ package state
import (
"bytes"
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcache"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
diff --git a/core/state/change_set_writer.go b/core/state/change_set_writer.go
index b3da878448f..59de3a85f86 100644
--- a/core/state/change_set_writer.go
+++ b/core/state/change_set_writer.go
@@ -4,13 +4,13 @@ import (
"fmt"
"github.com/holiman/uint256"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
historyv22 "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
@@ -43,7 +43,7 @@ func NewChangeSetWriterPlain(db kv.RwTx, blockNumber uint64) *ChangeSetWriter {
func (w *ChangeSetWriter) GetAccountChanges() (*historyv22.ChangeSet, error) {
cs := historyv22.NewAccountChangeSet()
for address, val := range w.accountChanges {
- if err := cs.Add(common.CopyBytes(address[:]), val); err != nil {
+ if err := cs.Add(libcommon.CopyBytes(address[:]), val); err != nil {
return nil, err
}
}
@@ -71,6 +71,8 @@ func accountsEqual(a1, a2 *accounts.Account) bool {
return false
} else if a1.Balance.Cmp(&a2.Balance) != 0 {
return false
+ } else if a1.Incarnation != a2.Incarnation {
+ return false
}
if a1.IsEmptyCodeHash() {
if !a2.IsEmptyCodeHash() {
diff --git a/core/state/database_test.go b/core/state/database_test.go
index ce50eb03bcd..534605ec278 100644
--- a/core/state/database_test.go
+++ b/core/state/database_test.go
@@ -143,7 +143,7 @@ func TestCreate2Revive(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -157,7 +157,7 @@ func TestCreate2Revive(t *testing.T) {
require.NoError(t, err)
// BLOCK 2
- if err = m.InsertChain(chain.Slice(1, 2), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, 2)); err != nil {
t.Fatal(err)
}
@@ -179,7 +179,7 @@ func TestCreate2Revive(t *testing.T) {
require.NoError(t, err)
// BLOCK 3
- if err = m.InsertChain(chain.Slice(2, 3), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(2, 3)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -192,7 +192,7 @@ func TestCreate2Revive(t *testing.T) {
require.NoError(t, err)
// BLOCK 4
- if err = m.InsertChain(chain.Slice(3, 4), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(3, 4)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -349,7 +349,7 @@ func TestCreate2Polymorth(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -364,7 +364,7 @@ func TestCreate2Polymorth(t *testing.T) {
require.NoError(t, err)
// BLOCK 2
- if err = m.InsertChain(chain.Slice(1, 2), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, 2)); err != nil {
t.Fatal(err)
}
@@ -384,7 +384,7 @@ func TestCreate2Polymorth(t *testing.T) {
require.NoError(t, err)
// BLOCK 3
- if err = m.InsertChain(chain.Slice(2, 3), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(2, 3)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -397,7 +397,7 @@ func TestCreate2Polymorth(t *testing.T) {
require.NoError(t, err)
// BLOCK 4
- if err = m.InsertChain(chain.Slice(3, 4), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(3, 4)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -417,7 +417,7 @@ func TestCreate2Polymorth(t *testing.T) {
require.NoError(t, err)
// BLOCK 5
- if err = m.InsertChain(chain.Slice(4, 5), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(4, 5)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -438,6 +438,7 @@ func TestCreate2Polymorth(t *testing.T) {
}
func TestReorgOverSelfDestruct(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -534,7 +535,7 @@ func TestReorgOverSelfDestruct(t *testing.T) {
})
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -553,7 +554,7 @@ func TestReorgOverSelfDestruct(t *testing.T) {
require.NoError(t, err)
// BLOCKS 2 + 3
- if err = m.InsertChain(chain.Slice(1, chain.Length()), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, chain.Length())); err != nil {
t.Fatal(err)
}
@@ -567,7 +568,7 @@ func TestReorgOverSelfDestruct(t *testing.T) {
require.NoError(t, err)
// REORG of block 2 and 3, and insert new (empty) BLOCK 2, 3, and 4
- if err = m.InsertChain(longerChain.Slice(1, 4), nil); err != nil {
+ if err = m.InsertChain(longerChain.Slice(1, 4)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -586,6 +587,7 @@ func TestReorgOverSelfDestruct(t *testing.T) {
}
func TestReorgOverStateChange(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -675,7 +677,7 @@ func TestReorgOverStateChange(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -694,12 +696,12 @@ func TestReorgOverStateChange(t *testing.T) {
require.NoError(t, err)
// BLOCK 2
- if err = m.InsertChain(chain.Slice(1, chain.Length()), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, chain.Length())); err != nil {
t.Fatal(err)
}
// REORG of block 2 and 3, and insert new (empty) BLOCK 2, 3, and 4
- if err = m.InsertChain(longerChain.Slice(1, 3), nil); err != nil {
+ if err = m.InsertChain(longerChain.Slice(1, 3)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -734,6 +736,7 @@ func (b BucketsStats) Size() uint64 {
}
func TestCreateOnExistingStorage(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -802,7 +805,7 @@ func TestCreateOnExistingStorage(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -824,6 +827,7 @@ func TestCreateOnExistingStorage(t *testing.T) {
}
func TestReproduceCrash(t *testing.T) {
+ t.Parallel()
// This example was taken from Ropsten contract that used to cause a crash
// it is created in the block 598915 and then there are 3 transactions modifying
// its storage in the same block:
@@ -865,6 +869,7 @@ func TestReproduceCrash(t *testing.T) {
}
}
func TestEip2200Gas(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -938,7 +943,7 @@ func TestEip2200Gas(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -960,6 +965,7 @@ func TestEip2200Gas(t *testing.T) {
// Create contract, drop trie, reload trie from disk and add block with contract call
func TestWrongIncarnation(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -1024,7 +1030,7 @@ func TestWrongIncarnation(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
@@ -1051,7 +1057,7 @@ func TestWrongIncarnation(t *testing.T) {
require.NoError(t, err)
// BLOCKS 2
- if err = m.InsertChain(chain.Slice(1, 2), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, 2)); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -1072,6 +1078,7 @@ func TestWrongIncarnation(t *testing.T) {
// create acc, deploy to it contract, reorg to state without contract
func TestWrongIncarnation2(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -1169,12 +1176,12 @@ func TestWrongIncarnation2(t *testing.T) {
require.NoError(t, err)
// BLOCK 1
- if err = m.InsertChain(chain.Slice(0, 1), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 1)); err != nil {
t.Fatal(err)
}
// BLOCKS 2
- if err = m.InsertChain(chain.Slice(1, chain.Length()), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(1, chain.Length())); err != nil {
t.Fatal(err)
}
@@ -1199,7 +1206,7 @@ func TestWrongIncarnation2(t *testing.T) {
})
require.NoError(t, err)
// REORG of block 2 and 3, and insert new (empty) BLOCK 2, 3, and 4
- if err = m.InsertChain(longerChain.Slice(1, longerChain.Length()), nil); err != nil {
+ if err = m.InsertChain(longerChain.Slice(1, longerChain.Length())); err != nil {
t.Fatal(err)
}
@@ -1221,6 +1228,7 @@ func TestWrongIncarnation2(t *testing.T) {
}
func TestChangeAccountCodeBetweenBlocks(t *testing.T) {
+ t.Parallel()
contract := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624")
_, tx := memdb.NewTestTx(t)
@@ -1258,6 +1266,7 @@ func TestChangeAccountCodeBetweenBlocks(t *testing.T) {
// TestCacheCodeSizeSeparately makes sure that we don't store CodeNodes for code sizes
func TestCacheCodeSizeSeparately(t *testing.T) {
+ t.Parallel()
contract := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624")
//root := libcommon.HexToHash("0xb939e5bcf5809adfb87ab07f0795b05b95a1d64a90f0eddd0c3123ac5b433854")
@@ -1290,6 +1299,7 @@ func TestCacheCodeSizeSeparately(t *testing.T) {
// TestCacheCodeSizeInTrie makes sure that we dont just read from the DB all the time
func TestCacheCodeSizeInTrie(t *testing.T) {
+ t.Parallel()
t.Skip("switch to TG state readers/writers")
contract := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624")
root := libcommon.HexToHash("0xb939e5bcf5809adfb87ab07f0795b05b95a1d64a90f0eddd0c3123ac5b433854")
@@ -1332,6 +1342,7 @@ func TestCacheCodeSizeInTrie(t *testing.T) {
}
func TestRecreateAndRewind(t *testing.T) {
+ t.Parallel()
// Configure and generate a sample block chain
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -1369,7 +1380,7 @@ func TestRecreateAndRewind(t *testing.T) {
case 1:
// Calculate the address of the Phoenix and create handle to phoenix contract
var codeHash libcommon.Hash
- if codeHash, err = common.HashData(common.FromHex(contracts.PhoenixBin)); err != nil {
+ if codeHash, err = libcommon.HashData(common.FromHex(contracts.PhoenixBin)); err != nil {
panic(err)
}
phoenixAddress = crypto.CreateAddress2(reviveAddress, [32]byte{}, codeHash.Bytes())
@@ -1431,7 +1442,7 @@ func TestRecreateAndRewind(t *testing.T) {
case 1:
// Calculate the address of the Phoenix and create handle to phoenix contract
var codeHash libcommon.Hash
- if codeHash, err = common.HashData(common.FromHex(contracts.PhoenixBin)); err != nil {
+ if codeHash, err = libcommon.HashData(common.FromHex(contracts.PhoenixBin)); err != nil {
panic(err)
}
phoenixAddress = crypto.CreateAddress2(reviveAddress, [32]byte{}, codeHash.Bytes())
@@ -1472,7 +1483,7 @@ func TestRecreateAndRewind(t *testing.T) {
}
// BLOCKS 1 and 2
- if err = m.InsertChain(chain.Slice(0, 2), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(0, 2)); err != nil {
t.Fatal(err)
}
@@ -1493,7 +1504,7 @@ func TestRecreateAndRewind(t *testing.T) {
require.NoError(t, err)
// Block 3 and 4
- if err = m.InsertChain(chain.Slice(2, chain.Length()), nil); err != nil {
+ if err = m.InsertChain(chain.Slice(2, chain.Length())); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -1512,7 +1523,7 @@ func TestRecreateAndRewind(t *testing.T) {
require.NoError(t, err)
// Reorg
- if err = m.InsertChain(longerChain, nil); err != nil {
+ if err = m.InsertChain(longerChain); err != nil {
t.Fatal(err)
}
err = m.DB.View(context.Background(), func(tx kv.Tx) error {
@@ -1531,6 +1542,7 @@ func TestRecreateAndRewind(t *testing.T) {
}
func TestTxLookupUnwind(t *testing.T) {
+ t.Parallel()
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
@@ -1572,10 +1584,10 @@ func TestTxLookupUnwind(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err = m.InsertChain(chain1, nil); err != nil {
+ if err = m.InsertChain(chain1); err != nil {
t.Fatal(err)
}
- if err = m.InsertChain(chain2, nil); err != nil {
+ if err = m.InsertChain(chain2); err != nil {
t.Fatal(err)
}
var count uint64
diff --git a/core/state/db_state_reader.go b/core/state/db_state_reader.go
index 21f91d5e981..11ba22559b8 100644
--- a/core/state/db_state_reader.go
+++ b/core/state/db_state_reader.go
@@ -3,13 +3,12 @@ package state
import (
"bytes"
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/VictoriaMetrics/fastcache"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
@@ -52,7 +51,7 @@ func (dbr *DbStateReader) ReadAccountData(address libcommon.Address) (*accounts.
}
if !ok {
var err error
- if addrHash, err1 := common.HashData(address[:]); err1 == nil {
+ if addrHash, err1 := libcommon.HashData(address[:]); err1 == nil {
enc, err = dbr.db.GetOne(kv.HashedAccounts, addrHash[:])
} else {
return nil, err1
@@ -75,11 +74,11 @@ func (dbr *DbStateReader) ReadAccountData(address libcommon.Address) (*accounts.
}
func (dbr *DbStateReader) ReadAccountStorage(address libcommon.Address, incarnation uint64, key *libcommon.Hash) ([]byte, error) {
- addrHash, err := common.HashData(address[:])
+ addrHash, err := libcommon.HashData(address[:])
if err != nil {
return nil, err
}
- seckey, err1 := common.HashData(key[:])
+ seckey, err1 := libcommon.HashData(key[:])
if err1 != nil {
return nil, err1
}
diff --git a/core/state/db_state_writer.go b/core/state/db_state_writer.go
index 0853e902062..33c3c762091 100644
--- a/core/state/db_state_writer.go
+++ b/core/state/db_state_writer.go
@@ -7,13 +7,13 @@ import (
"github.com/RoaringBitmap/roaring/roaring64"
"github.com/holiman/uint256"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
+ dbutils2 "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/ethdb"
@@ -66,7 +66,7 @@ func (dsw *DbStateWriter) UpdateAccountData(address libcommon.Address, original,
if err := dsw.csw.UpdateAccountData(address, original, account); err != nil {
return err
}
- addrHash, err := common.HashData(address[:])
+ addrHash, err := libcommon.HashData(address[:])
if err != nil {
return err
}
@@ -75,6 +75,15 @@ func (dsw *DbStateWriter) UpdateAccountData(address libcommon.Address, original,
if err := dsw.db.Put(kv.HashedAccounts, addrHash[:], value); err != nil {
return err
}
+
+ if account.Incarnation == 0 && original.Incarnation > 0 {
+ var b [8]byte
+ binary.BigEndian.PutUint64(b[:], original.Incarnation)
+ if err := dsw.db.Put(kv.IncarnationMap, address[:], b[:]); err != nil {
+ return err
+ }
+ }
+
return nil
}
@@ -82,7 +91,7 @@ func (dsw *DbStateWriter) DeleteAccount(address libcommon.Address, original *acc
if err := dsw.csw.DeleteAccount(address, original); err != nil {
return err
}
- addrHash, err := common.HashData(address[:])
+ addrHash, err := libcommon.HashData(address[:])
if err != nil {
return err
}
@@ -107,12 +116,12 @@ func (dsw *DbStateWriter) UpdateAccountCode(address libcommon.Address, incarnati
if err := dsw.db.Put(kv.Code, codeHash[:], code); err != nil {
return err
}
- addrHash, err := common.HashData(address.Bytes())
+ addrHash, err := libcommon.HashData(address.Bytes())
if err != nil {
return err
}
//save contract to codeHash mapping
- if err := dsw.db.Put(kv.ContractCode, dbutils.GenerateStoragePrefix(addrHash[:], incarnation), codeHash[:]); err != nil {
+ if err := dsw.db.Put(kv.ContractCode, dbutils2.GenerateStoragePrefix(addrHash[:], incarnation), codeHash[:]); err != nil {
return err
}
return nil
@@ -126,15 +135,15 @@ func (dsw *DbStateWriter) WriteAccountStorage(address libcommon.Address, incarna
if *original == *value {
return nil
}
- seckey, err := common.HashData(key[:])
+ seckey, err := libcommon.HashData(key[:])
if err != nil {
return err
}
- addrHash, err := common.HashData(address[:])
+ addrHash, err := libcommon.HashData(address[:])
if err != nil {
return err
}
- compositeKey := dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey)
+ compositeKey := dbutils2.GenerateCompositeStorageKey(addrHash, incarnation, seckey)
v := value.Bytes()
if len(v) == 0 {
@@ -181,9 +190,9 @@ func (dsw *DbStateWriter) WriteHistory() error {
func writeIndex(blocknum uint64, changes *historyv2.ChangeSet, bucket string, changeDb kv.RwTx) error {
buf := bytes.NewBuffer(nil)
for _, change := range changes.Changes {
- k := dbutils.CompositeKeyWithoutIncarnation(change.Key)
+ k := dbutils2.CompositeKeyWithoutIncarnation(change.Key)
- index, err := bitmapdb.Get64(changeDb, bucket, k, 0, math.MaxUint32)
+ index, err := bitmapdb.Get64(changeDb, bucket, k, math.MaxUint32, math.MaxUint32)
if err != nil {
return fmt.Errorf("find chunk failed: %w", err)
}
@@ -193,7 +202,7 @@ func writeIndex(blocknum uint64, changes *historyv2.ChangeSet, bucket string, ch
if _, err = chunk.WriteTo(buf); err != nil {
return err
}
- return changeDb.Put(bucket, chunkKey, common.CopyBytes(buf.Bytes()))
+ return changeDb.Put(bucket, chunkKey, libcommon.CopyBytes(buf.Bytes()))
}); err != nil {
return err
}
diff --git a/core/state/dump.go b/core/state/dump.go
index d217b1442a7..fff70b3ddb6 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
@@ -28,7 +29,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/turbo/trie"
@@ -273,7 +273,7 @@ func (d *Dumper) DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo
}
loc := k[20:]
account.Storage[libcommon.BytesToHash(loc).String()] = common.Bytes2Hex(vs)
- h, _ := common.HashData(loc)
+ h, _ := libcommon.HashData(loc)
t.Update(h.Bytes(), libcommon.Copy(vs))
}
} else {
@@ -284,7 +284,7 @@ func (d *Dumper) DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo
d.blockNumber,
func(_, loc, vs []byte) (bool, error) {
account.Storage[libcommon.BytesToHash(loc).String()] = common.Bytes2Hex(vs)
- h, _ := common.HashData(loc)
+ h, _ := libcommon.HashData(loc)
t.Update(h.Bytes(), libcommon.Copy(vs))
return true, nil
}); err != nil {
diff --git a/core/state/history_test.go b/core/state/history_test.go
index ab243534bba..eb6e75154dd 100644
--- a/core/state/history_test.go
+++ b/core/state/history_test.go
@@ -8,6 +8,8 @@ import (
"strconv"
"testing"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
+
"github.com/davecgh/go-spew/spew"
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -19,8 +21,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/core/state/historyv2read"
"github.com/ledgerwatch/erigon/core/types/accounts"
@@ -29,6 +29,7 @@ import (
)
func TestMutationDeleteTimestamp(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
acc := make([]*accounts.Account, 10)
@@ -88,6 +89,7 @@ func TestMutationDeleteTimestamp(t *testing.T) {
}
func TestMutationCommit(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
numOfAccounts := 5
@@ -292,6 +294,7 @@ func randomAccount(t *testing.T) (*accounts.Account, libcommon.Address) {
*/
func TestWalkAsOfStatePlain(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
emptyVal := uint256.NewInt(0)
@@ -377,7 +380,7 @@ func TestWalkAsOfStatePlain(t *testing.T) {
for _, addr := range addrs {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, 2, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block2.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block2.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -393,7 +396,7 @@ func TestWalkAsOfStatePlain(t *testing.T) {
}
for _, addr := range addrs {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, 4, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block4.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block4.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -424,7 +427,7 @@ func TestWalkAsOfStatePlain(t *testing.T) {
}
for _, addr := range addrs {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, 6, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block6.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block6.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -452,6 +455,7 @@ func TestWalkAsOfStatePlain(t *testing.T) {
}
func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
emptyVal := uint256.NewInt(0)
@@ -555,7 +559,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
copy(startKey[:length.Addr], addr1.Bytes())
if err := WalkAsOfStorage(tx, addr1, historyv2read.DefaultIncarnation, libcommon.Hash{}, 2, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block2.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block2.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -569,7 +573,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
Changes: make([]historyv2.Change, 0),
}
if err := WalkAsOfStorage(tx, addr1, historyv2read.DefaultIncarnation, libcommon.Hash{}, 4, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block4.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block4.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -597,7 +601,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
block4.Changes = block4.Changes[:0]
for _, addr := range []libcommon.Address{addr1, addr2} {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, 4, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block4.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block4.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -617,7 +621,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
Changes: make([]historyv2.Change, 0),
}
if err := WalkAsOfStorage(tx, addr1, historyv2read.DefaultIncarnation, libcommon.Hash{}, 6, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block6.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block6.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -641,7 +645,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
block6.Changes = block6.Changes[:0]
for _, addr := range []libcommon.Address{addr1, addr2} {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, 6, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- err := block6.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v))
+ err := block6.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v))
if err != nil {
t.Fatal(err)
}
@@ -658,6 +662,7 @@ func TestWalkAsOfUsingFixedBytesStatePlain(t *testing.T) {
}
func TestWalkAsOfAccountPlain(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
emptyValAcc := accounts.NewAccount()
@@ -681,7 +686,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) {
addrHashes := make([]libcommon.Hash, numOfAccounts)
for i := uint8(0); i < numOfAccounts; i++ {
addrs[i] = libcommon.Address{i + 1}
- addrHash, _ := common.HashData(addrs[i].Bytes())
+ addrHash, _ := libcommon.HashData(addrs[i].Bytes())
addrHashes[i] = addrHash
}
@@ -730,7 +735,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) {
})
if err := WalkAsOfAccounts(tx, libcommon.Address{}, 2, func(k []byte, v []byte) (b bool, e error) {
- innerErr := block2.Add(common.CopyBytes(k), common.CopyBytes(v))
+ innerErr := block2.Add(libcommon.CopyBytes(k), libcommon.CopyBytes(v))
if innerErr != nil {
t.Fatal(innerErr)
}
@@ -762,7 +767,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) {
}
if err := WalkAsOfAccounts(tx, libcommon.Address{}, 4, func(k []byte, v []byte) (b bool, e error) {
- innerErr := block4.Add(common.CopyBytes(k), common.CopyBytes(v))
+ innerErr := block4.Add(libcommon.CopyBytes(k), libcommon.CopyBytes(v))
if innerErr != nil {
t.Fatal(innerErr)
}
@@ -794,7 +799,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) {
}
if err := WalkAsOfAccounts(tx, libcommon.Address{}, 6, func(k []byte, v []byte) (b bool, e error) {
- innerErr := block6.Add(common.CopyBytes(k), common.CopyBytes(v))
+ innerErr := block6.Add(libcommon.CopyBytes(k), libcommon.CopyBytes(v))
if innerErr != nil {
t.Fatal(innerErr)
}
@@ -806,6 +811,7 @@ func TestWalkAsOfAccountPlain(t *testing.T) {
}
func TestWalkAsOfAccountPlain_WithChunks(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
emptyValAcc := accounts.NewAccount()
@@ -829,7 +835,7 @@ func TestWalkAsOfAccountPlain_WithChunks(t *testing.T) {
addrHashes := make([]libcommon.Hash, numOfAccounts)
for i := uint8(0); i < numOfAccounts; i++ {
addrs[i] = libcommon.Address{i + 1}
- addrHash, _ := common.HashData(addrs[i].Bytes())
+ addrHash, _ := libcommon.HashData(addrs[i].Bytes())
addrHashes[i] = addrHash
}
@@ -923,7 +929,7 @@ func TestWalkAsOfAccountPlain_WithChunks(t *testing.T) {
}
if err := WalkAsOfAccounts(tx, libcommon.Address{}, blockNum, func(k []byte, v []byte) (b bool, e error) {
- innerErr := obtained.Add(common.CopyBytes(k), common.CopyBytes(v))
+ innerErr := obtained.Add(libcommon.CopyBytes(k), libcommon.CopyBytes(v))
if innerErr != nil {
t.Fatal(innerErr)
}
@@ -957,6 +963,7 @@ func TestWalkAsOfAccountPlain_WithChunks(t *testing.T) {
}
func TestWalkAsOfStoragePlain_WithChunks(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
numOfAccounts := uint8(4)
@@ -964,7 +971,7 @@ func TestWalkAsOfStoragePlain_WithChunks(t *testing.T) {
addrHashes := make([]libcommon.Hash, numOfAccounts)
for i := uint8(0); i < numOfAccounts; i++ {
addrs[i] = libcommon.Address{i + 1}
- addrHash, _ := common.HashData(addrs[i].Bytes())
+ addrHash, _ := libcommon.HashData(addrs[i].Bytes())
addrHashes[i] = addrHash
}
key := libcommon.Hash{123}
@@ -1057,7 +1064,7 @@ func TestWalkAsOfStoragePlain_WithChunks(t *testing.T) {
for _, addr := range addrs {
if err := WalkAsOfStorage(tx, addr, historyv2read.DefaultIncarnation, libcommon.Hash{}, blockNum, func(kAddr, kLoc []byte, v []byte) (b bool, e error) {
- if innerErr := obtained.Add(append(common.CopyBytes(kAddr), kLoc...), common.CopyBytes(v)); innerErr != nil {
+ if innerErr := obtained.Add(append(libcommon.CopyBytes(kAddr), kLoc...), libcommon.CopyBytes(v)); innerErr != nil {
t.Fatal(innerErr)
}
return true, nil
diff --git a/core/state/history_walk.go b/core/state/history_walk.go
index f14446f122a..686d3e8905e 100644
--- a/core/state/history_walk.go
+++ b/core/state/history_walk.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/RoaringBitmap/roaring/roaring64"
libcommon "github.com/ledgerwatch/erigon-lib/common"
@@ -12,7 +13,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/ethdb"
)
diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go
index 68225e687a1..2f27468702d 100644
--- a/core/state/intra_block_state.go
+++ b/core/state/intra_block_state.go
@@ -547,23 +547,25 @@ func (sdb *IntraBlockState) createObject(addr libcommon.Address, previous *state
func (sdb *IntraBlockState) CreateAccount(addr libcommon.Address, contractCreation bool) {
var prevInc uint64
previous := sdb.getStateObject(addr)
- if contractCreation {
- if previous != nil && previous.selfdestructed {
- prevInc = previous.data.Incarnation
+ if previous != nil && previous.selfdestructed {
+ prevInc = previous.data.Incarnation
+ } else {
+ if inc, err := sdb.stateReader.ReadAccountIncarnation(addr); err == nil {
+ prevInc = inc
} else {
- if inc, err := sdb.stateReader.ReadAccountIncarnation(addr); err == nil {
- prevInc = inc
- } else {
- sdb.savedErr = err
- }
+ sdb.savedErr = err
}
}
+ if previous != nil && prevInc < previous.data.PrevIncarnation {
+ prevInc = previous.data.PrevIncarnation
+ }
newObj := sdb.createObject(addr, previous)
if previous != nil && !previous.selfdestructed {
newObj.data.Balance.Set(&previous.data.Balance)
}
newObj.data.Initialised = true
+ newObj.data.PrevIncarnation = prevInc
if contractCreation {
newObj.createdContract = true
@@ -686,24 +688,6 @@ func (sdb *IntraBlockState) FinalizeTx(chainRules *chain.Rules, stateWriter Stat
return nil
}
-func (sdb *IntraBlockState) SoftFinalise() {
- for addr := range sdb.journal.dirties {
- _, exist := sdb.stateObjects[addr]
- if !exist {
- // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
- // That tx goes out of gas, and although the notion of 'touched' does not exist there, the
- // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake,
- // it will persist in the journal even though the journal is reverted. In this special circumstance,
- // it may exist in `sdb.journal.dirties` but not in `sdb.stateObjects`.
- // Thus, we can safely ignore it here
- continue
- }
- sdb.stateObjectsDirty[addr] = struct{}{}
- }
- // Invalidate journal because reverting across transactions is not allowed.
- sdb.clearJournalAndRefund()
-}
-
// CommitBlock finalizes the state by removing the self destructed objects
// and clears the journal as well as the refunds.
func (sdb *IntraBlockState) CommitBlock(chainRules *chain.Rules, stateWriter StateWriter) error {
@@ -810,15 +794,17 @@ func (sdb *IntraBlockState) Prepare(rules *chain.Rules, sender, coinbase libcomm
}
// AddAddressToAccessList adds the given address to the access list
-func (sdb *IntraBlockState) AddAddressToAccessList(addr libcommon.Address) {
- if sdb.accessList.AddAddress(addr) {
+func (sdb *IntraBlockState) AddAddressToAccessList(addr libcommon.Address) (addrMod bool) {
+ addrMod = sdb.accessList.AddAddress(addr)
+ if addrMod {
sdb.journal.append(accessListAddAccountChange{&addr})
}
+ return addrMod
}
// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
-func (sdb *IntraBlockState) AddSlotToAccessList(addr libcommon.Address, slot libcommon.Hash) {
- addrMod, slotMod := sdb.accessList.AddSlot(addr, slot)
+func (sdb *IntraBlockState) AddSlotToAccessList(addr libcommon.Address, slot libcommon.Hash) (addrMod, slotMod bool) {
+ addrMod, slotMod = sdb.accessList.AddSlot(addr, slot)
if addrMod {
// In practice, this should not happen, since there is no way to enter the
// scope of 'address' without having the 'address' become already added
@@ -832,6 +818,7 @@ func (sdb *IntraBlockState) AddSlotToAccessList(addr libcommon.Address, slot lib
slot: &slot,
})
}
+ return addrMod, slotMod
}
// AddressInAccessList returns true if the given address is in the access list.
@@ -839,7 +826,6 @@ func (sdb *IntraBlockState) AddressInAccessList(addr libcommon.Address) bool {
return sdb.accessList.ContainsAddress(addr)
}
-// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
func (sdb *IntraBlockState) SlotInAccessList(addr libcommon.Address, slot libcommon.Hash) (addressPresent bool, slotPresent bool) {
return sdb.accessList.Contains(addr, slot)
}
diff --git a/core/state/intra_block_state_test.go b/core/state/intra_block_state_test.go
index 08f4b28899f..4a909986a5e 100644
--- a/core/state/intra_block_state_test.go
+++ b/core/state/intra_block_state_test.go
@@ -38,6 +38,7 @@ import (
)
func TestSnapshotRandom(t *testing.T) {
+ t.Parallel()
config := &quick.Config{MaxCount: 1000}
err := quick.Check((*snapshotTest).run, config)
if cerr, ok := err.(*quick.CheckError); ok {
@@ -326,6 +327,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *IntraBlockState) error {
}
func TestTransientStorage(t *testing.T) {
+ t.Parallel()
state := New(nil)
key := libcommon.Hash{0x01}
diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go
index 433ffeccd25..9f1337f4e95 100644
--- a/core/state/plain_readonly.go
+++ b/core/state/plain_readonly.go
@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"sort"
"github.com/google/btree"
@@ -30,8 +31,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/log/v3"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/state/historyv2read"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
@@ -133,7 +132,7 @@ func (s *PlainState) ForEachStorage(addr libcommon.Address, startLocation libcom
// Skip deleted entries
return true, nil
}
- keyHash, err1 := common.HashData(kLoc)
+ keyHash, err1 := libcommon.HashData(kLoc)
if err1 != nil {
return false, err1
}
@@ -289,8 +288,8 @@ func (s *PlainState) WriteAccountStorage(address libcommon.Address, incarnation
t = btree.New(16)
s.storage[address] = t
}
- h := common.NewHasher()
- defer common.ReturnHasherToPool(h)
+ h := libcommon.NewHasher()
+ defer libcommon.ReturnHasherToPool(h)
_, err := h.Sha.Write(key[:])
if err != nil {
return err
diff --git a/core/state/plain_state_reader.go b/core/state/plain_state_reader.go
index 0b43033f259..0db63b4dcdb 100644
--- a/core/state/plain_state_reader.go
+++ b/core/state/plain_state_reader.go
@@ -3,11 +3,11 @@ package state
import (
"bytes"
"encoding/binary"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
diff --git a/core/state/plain_state_writer.go b/core/state/plain_state_writer.go
index 367139027df..eb3361e58ae 100644
--- a/core/state/plain_state_writer.go
+++ b/core/state/plain_state_writer.go
@@ -4,10 +4,11 @@ import (
"encoding/binary"
"github.com/holiman/uint256"
+
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/turbo/shards"
)
@@ -56,6 +57,14 @@ func (w *PlainStateWriter) UpdateAccountData(address libcommon.Address, original
w.accumulator.ChangeAccount(address, account.Incarnation, value)
}
+ if account.Incarnation == 0 && original.Incarnation > 0 {
+ var b [8]byte
+ binary.BigEndian.PutUint64(b[:], original.Incarnation)
+ if err := w.db.Put(kv.IncarnationMap, address[:], b[:]); err != nil {
+ return err
+ }
+ }
+
return w.db.Put(kv.PlainState, address[:], value)
}
diff --git a/core/state/recon_writer_inc.go b/core/state/recon_writer_inc.go
index 05862023dbe..a6faade2c8d 100644
--- a/core/state/recon_writer_inc.go
+++ b/core/state/recon_writer_inc.go
@@ -2,14 +2,13 @@ package state
import (
"bytes"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
libstate "github.com/ledgerwatch/erigon-lib/state"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types/accounts"
)
@@ -60,7 +59,7 @@ func (w *StateReconWriterInc) UpdateAccountCode(address libcommon.Address, incar
return nil
}
if len(code) > 0 {
- w.rs.Put(kv.CodeR, codeHashBytes, nil, common.CopyBytes(code), w.txNum)
+ w.rs.Put(kv.CodeR, codeHashBytes, nil, libcommon.CopyBytes(code), w.txNum)
w.rs.Put(kv.PlainContractR, dbutils.PlainGenerateStoragePrefix(addr, FirstContractIncarnation), nil, codeHashBytes, w.txNum)
} else {
w.rs.Delete(kv.PlainContractD, dbutils.PlainGenerateStoragePrefix(addr, FirstContractIncarnation), nil, w.txNum)
@@ -85,7 +84,7 @@ func (w *StateReconWriterInc) DeleteAccount(address libcommon.Address, original
for k, _, err = c.Seek(addr); err == nil && bytes.HasPrefix(k, addr); k, _, err = c.Next() {
//fmt.Printf("delete account storage [%x] [%x]=>{} txNum: %d\n", address, k[20+8:], w.txNum)
if len(k) > 20 {
- w.rs.Delete(kv.PlainStateD, addr, common.CopyBytes(k[20+8:]), w.txNum)
+ w.rs.Delete(kv.PlainStateD, addr, libcommon.CopyBytes(k[20+8:]), w.txNum)
}
}
if err != nil {
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index 756501a8afe..6ed8f08400c 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -10,20 +10,20 @@ import (
"time"
"unsafe"
- "github.com/VictoriaMetrics/metrics"
"github.com/holiman/uint256"
+ "github.com/ledgerwatch/log/v3"
+ btree2 "github.com/tidwall/btree"
+
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/etl"
"github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/ledgerwatch/erigon-lib/metrics"
libstate "github.com/ledgerwatch/erigon-lib/state"
- "github.com/ledgerwatch/log/v3"
- btree2 "github.com/tidwall/btree"
-
"github.com/ledgerwatch/erigon/cmd/state/exec22"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/turbo/shards"
@@ -32,7 +32,7 @@ import (
const CodeSizeTable = "CodeSize"
const StorageTable = "Storage"
-var ExecTxsDone = metrics.NewCounter(`exec_txs_done`)
+var execTxsDone = metrics.NewCounter(`exec_txs_done`)
type StateV3 struct {
lock sync.RWMutex
@@ -275,7 +275,7 @@ func (rs *StateV3) RegisterSender(txTask *exec22.TxTask) bool {
}
func (rs *StateV3) CommitTxNum(sender *common.Address, txNum uint64, in *exec22.QueueWithRetry) (count int) {
- ExecTxsDone.Inc()
+ execTxsDone.Inc()
rs.triggerLock.Lock()
defer rs.triggerLock.Unlock()
@@ -652,7 +652,9 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, ag
return nil
}
-func (rs *StateV3) DoneCount() uint64 { return ExecTxsDone.Get() }
+func (rs *StateV3) DoneCount() uint64 {
+ return execTxsDone.GetValueUint64()
+}
func (rs *StateV3) SizeEstimate() (r uint64) {
rs.lock.RLock()
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 7a81456b31f..f0f2242ab25 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -209,6 +209,7 @@ func (s *StateSuite) TestSnapshotEmpty(c *checker.C) {
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
w := NewPlainState(tx, 1, nil)
state := New(NewPlainState(tx, 1, nil))
@@ -324,6 +325,7 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
}
func TestDump(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
w := NewPlainStateWriter(tx, tx, 0)
state := New(NewPlainStateReader(tx))
diff --git a/core/state/temporal/kv_temporal.go b/core/state/temporal/kv_temporal.go
index 3ebd94952d2..8be4940df75 100644
--- a/core/state/temporal/kv_temporal.go
+++ b/core/state/temporal/kv_temporal.go
@@ -105,7 +105,7 @@ func (db *DB) Agg() *state.AggregatorV3 { return db.agg }
func (db *DB) InternalDB() kv.RwDB { return db.RwDB }
func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) {
- kvTx, err := db.RwDB.BeginRo(ctx)
+ kvTx, err := db.RwDB.BeginRo(ctx) //nolint:gocritic
if err != nil {
return nil, err
}
diff --git a/core/state_processor.go b/core/state_processor.go
index 672385fa7f2..c5b81a49786 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -34,7 +34,7 @@ import (
// indicating the block was invalid.
func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs *state.IntraBlockState,
stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas, usedBlobGas *uint64,
- evm vm.VMInterface, cfg vm.Config) (*types.Receipt, []byte, error) {
+ evm *vm.EVM, cfg vm.Config) (*types.Receipt, []byte, error) {
rules := evm.ChainRules()
msg, err := tx.AsMessage(*types.MakeSigner(config, header.Number.Uint64(), header.Time), header.BaseFee, rules)
if err != nil {
@@ -86,7 +86,7 @@ func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *G
receipt.GasUsed = result.UsedGas
// if the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
- receipt.ContractAddress = crypto.CreateAddress(evm.TxContext().Origin, tx.GetNonce())
+ receipt.ContractAddress = crypto.CreateAddress(evm.Origin, tx.GetNonce())
}
// Set the receipt logs and create a bloom for filtering
receipt.Logs = ibs.GetLogs(tx.Hash())
diff --git a/core/state_transition.go b/core/state_transition.go
index 18c0c567e09..3972dc21e1e 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -24,7 +24,6 @@ import (
"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
types2 "github.com/ledgerwatch/erigon-lib/types"
- "github.com/ledgerwatch/erigon/common"
cmath "github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/consensus/misc"
@@ -66,7 +65,7 @@ type StateTransition struct {
value *uint256.Int
data []byte
state evmtypes.IntraBlockState
- evm vm.VMInterface
+ evm *vm.EVM
//some pre-allocated intermediate variables
sharedBuyGas *uint256.Int
@@ -120,7 +119,7 @@ func (result *ExecutionResult) Return() []byte {
if result.Err != nil {
return nil
}
- return common.CopyBytes(result.ReturnData)
+ return libcommon.CopyBytes(result.ReturnData)
}
// Revert returns the concrete revert reason if the execution is aborted by `REVERT`
@@ -129,7 +128,7 @@ func (result *ExecutionResult) Revert() []byte {
if result.Err != vm.ErrExecutionReverted {
return nil
}
- return common.CopyBytes(result.ReturnData)
+ return libcommon.CopyBytes(result.ReturnData)
}
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
@@ -151,7 +150,7 @@ func IntrinsicGas(data []byte, accessList types2.AccessList, isContractCreation
}
// NewStateTransition initialises and returns a new state transition object.
-func NewStateTransition(evm vm.VMInterface, msg Message, gp *GasPool) *StateTransition {
+func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition {
isBor := evm.ChainConfig().Bor != nil
return &StateTransition{
gp: gp,
@@ -180,8 +179,8 @@ func NewStateTransition(evm vm.VMInterface, msg Message, gp *GasPool) *StateTran
// state and would never be accepted within a block.
// `refunds` is false when it is not required to apply gas refunds
// `gasBailout` is true when it is not required to fail transaction if the balance is not enough to pay gas.
-// for trace_call to replicate OE/Pariry behaviour
-func ApplyMessage(evm vm.VMInterface, msg Message, gp *GasPool, refunds bool, gasBailout bool) (*ExecutionResult, error) {
+// for trace_call to replicate OE/Parity behaviour
+func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool, refunds bool, gasBailout bool) (*ExecutionResult, error) {
return NewStateTransition(evm, msg, gp).TransitionDb(refunds, gasBailout)
}
@@ -194,33 +193,33 @@ func (st *StateTransition) to() libcommon.Address {
}
func (st *StateTransition) buyGas(gasBailout bool) error {
- mgval := st.sharedBuyGas
- mgval.SetUint64(st.msg.Gas())
- mgval, overflow := mgval.MulOverflow(mgval, st.gasPrice)
+ gasVal := st.sharedBuyGas
+ gasVal.SetUint64(st.msg.Gas())
+ gasVal, overflow := gasVal.MulOverflow(gasVal, st.gasPrice)
if overflow {
return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex())
}
// compute blob fee for eip-4844 data blobs if any
- dgval := new(uint256.Int)
+ blobGasVal := new(uint256.Int)
if st.evm.ChainRules().IsCancun {
- if st.evm.Context().ExcessBlobGas == nil {
+ if st.evm.Context.ExcessBlobGas == nil {
return fmt.Errorf("%w: Cancun is active but ExcessBlobGas is nil", ErrInternalFailure)
}
- blobGasPrice, err := misc.GetBlobGasPrice(*st.evm.Context().ExcessBlobGas)
+ blobGasPrice, err := misc.GetBlobGasPrice(st.evm.ChainConfig(), *st.evm.Context.ExcessBlobGas)
if err != nil {
return err
}
- _, overflow = dgval.MulOverflow(blobGasPrice, new(uint256.Int).SetUint64(st.msg.BlobGas()))
+ blobGasVal, overflow = blobGasVal.MulOverflow(blobGasPrice, new(uint256.Int).SetUint64(st.msg.BlobGas()))
if overflow {
- return fmt.Errorf("%w: overflow converting blob gas: %v", ErrInsufficientFunds, dgval)
+ return fmt.Errorf("%w: overflow converting blob gas: %v", ErrInsufficientFunds, blobGasVal)
}
if err := st.gp.SubBlobGas(st.msg.BlobGas()); err != nil {
return err
}
}
- balanceCheck := mgval
+ balanceCheck := gasVal
if st.gasFeeCap != nil {
balanceCheck = st.sharedBuyGasBalance.SetUint64(st.msg.Gas())
balanceCheck, overflow = balanceCheck.MulOverflow(balanceCheck, st.gasFeeCap)
@@ -231,7 +230,7 @@ func (st *StateTransition) buyGas(gasBailout bool) error {
if overflow {
return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex())
}
- balanceCheck, overflow = balanceCheck.AddOverflow(balanceCheck, dgval)
+ balanceCheck, overflow = balanceCheck.AddOverflow(balanceCheck, blobGasVal)
if overflow {
return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex())
}
@@ -253,8 +252,8 @@ func (st *StateTransition) buyGas(gasBailout bool) error {
st.initialGas = st.msg.Gas()
if subBalance {
- st.state.SubBalance(st.msg.From(), mgval)
- st.state.SubBalance(st.msg.From(), dgval)
+ st.state.SubBalance(st.msg.From(), gasVal)
+ st.state.SubBalance(st.msg.From(), blobGasVal)
}
return nil
}
@@ -301,16 +300,16 @@ func (st *StateTransition) preCheck(gasBailout bool) error {
if st.evm.ChainRules().IsLondon {
// Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call)
if !st.evm.Config().NoBaseFee || !st.gasFeeCap.IsZero() || !st.tip.IsZero() {
- if err := CheckEip1559TxGasFeeCap(st.msg.From(), st.gasFeeCap, st.tip, st.evm.Context().BaseFee, st.msg.IsFree()); err != nil {
+ if err := CheckEip1559TxGasFeeCap(st.msg.From(), st.gasFeeCap, st.tip, st.evm.Context.BaseFee, st.msg.IsFree()); err != nil {
return err
}
}
}
if st.msg.BlobGas() > 0 && st.evm.ChainRules().IsCancun {
- if st.evm.Context().ExcessBlobGas == nil {
+ if st.evm.Context.ExcessBlobGas == nil {
return fmt.Errorf("%w: Cancun is active but ExcessBlobGas is nil", ErrInternalFailure)
}
- blobGasPrice, err := misc.GetBlobGasPrice(*st.evm.Context().ExcessBlobGas)
+ blobGasPrice, err := misc.GetBlobGasPrice(st.evm.ChainConfig(), *st.evm.Context.ExcessBlobGas)
if err != nil {
return err
}
@@ -318,7 +317,7 @@ func (st *StateTransition) preCheck(gasBailout bool) error {
if blobGasPrice.Cmp(maxFeePerBlobGas) > 0 {
return fmt.Errorf("%w: address %v, maxFeePerBlobGas: %v blobGasPrice: %v, excessBlobGas: %v",
ErrMaxFeePerBlobGas,
- st.msg.From().Hex(), st.msg.MaxFeePerBlobGas(), blobGasPrice, st.evm.Context().ExcessBlobGas)
+ st.msg.From().Hex(), st.msg.MaxFeePerBlobGas(), blobGasPrice, st.evm.Context.ExcessBlobGas)
}
}
@@ -339,7 +338,7 @@ func (st *StateTransition) preCheck(gasBailout bool) error {
// However if any consensus issue encountered, return the error directly with
// nil evm execution result.
func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*ExecutionResult, error) {
- coinbase := st.evm.Context().Coinbase
+ coinbase := st.evm.Context.Coinbase
var input1 *uint256.Int
var input2 *uint256.Int
@@ -389,7 +388,7 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi
var bailout bool
// Gas bailout (for trace_call) should only be applied if there is not sufficient balance to perform value transfer
if gasBailout {
- if !msg.Value().IsZero() && !st.evm.Context().CanTransfer(st.state, msg.From(), msg.Value()) {
+ if !msg.Value().IsZero() && !st.evm.Context.CanTransfer(st.state, msg.From(), msg.Value()) {
bailout = true
}
}
@@ -430,8 +429,8 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi
}
effectiveTip := st.gasPrice
if rules.IsLondon {
- if st.gasFeeCap.Gt(st.evm.Context().BaseFee) {
- effectiveTip = cmath.Min256(st.tip, new(uint256.Int).Sub(st.gasFeeCap, st.evm.Context().BaseFee))
+ if st.gasFeeCap.Gt(st.evm.Context.BaseFee) {
+ effectiveTip = cmath.Min256(st.tip, new(uint256.Int).Sub(st.gasFeeCap, st.evm.Context.BaseFee))
} else {
effectiveTip = u256.Num0
}
@@ -439,10 +438,12 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi
amount := new(uint256.Int).SetUint64(st.gasUsed())
amount.Mul(amount, effectiveTip) // gasUsed * effectiveTip = how much goes to the block producer (miner, validator)
st.state.AddBalance(coinbase, amount)
- if !msg.IsFree() && rules.IsLondon && rules.IsEip1559FeeCollector {
- burntContractAddress := *st.evm.ChainConfig().Eip1559FeeCollector
- burnAmount := new(uint256.Int).Mul(new(uint256.Int).SetUint64(st.gasUsed()), st.evm.Context().BaseFee)
- st.state.AddBalance(burntContractAddress, burnAmount)
+ if !msg.IsFree() && rules.IsLondon {
+ burntContractAddress := st.evm.ChainConfig().GetBurntContract(st.evm.Context.BlockNumber)
+ if burntContractAddress != nil {
+ burnAmount := new(uint256.Int).Mul(new(uint256.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee)
+ st.state.AddBalance(*burntContractAddress, burnAmount)
+ }
}
if st.isBor {
// Deprecating transfer log and will be removed in future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected going forward with EIP1559
diff --git a/core/system_contract_lookup.go b/core/system_contract_lookup.go
index 0e26363ba5d..2905904a2d3 100644
--- a/core/system_contract_lookup.go
+++ b/core/system_contract_lookup.go
@@ -1,49 +1,58 @@
package core
import (
- "encoding/hex"
"fmt"
+ "strconv"
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/systemcontracts"
+ "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/params"
- "github.com/ledgerwatch/erigon/params/networkname"
)
func init() {
- // Initialise systemContractCodeLookup
- for _, chainName := range []string{networkname.BorMainnetChainName, networkname.MumbaiChainName, networkname.BorDevnetChainName} {
+ // Initialise SystemContractCodeLookup
+ for _, chainName := range []string{networkname.BorMainnetChainName, networkname.MumbaiChainName, networkname.AmoyChainName, networkname.BorDevnetChainName} {
byChain := map[libcommon.Address][]libcommon.CodeRecord{}
systemcontracts.SystemContractCodeLookup[chainName] = byChain
// Apply genesis with the block number 0
genesisBlock := GenesisBlockByChainName(chainName)
- for addr, alloc := range genesisBlock.Alloc {
- if len(alloc.Code) > 0 {
- list := byChain[addr]
- codeHash, err := common.HashData(alloc.Code)
- if err != nil {
- panic(fmt.Errorf("failed to hash system contract code: %s", err.Error()))
- }
- list = append(list, libcommon.CodeRecord{BlockNumber: 0, CodeHash: codeHash})
- byChain[addr] = list
- }
- }
+ allocToCodeRecords(genesisBlock.Alloc, byChain, 0)
// Process upgrades
chainConfig := params.ChainConfigByChainName(chainName)
- if chainConfig.Bor != nil && chainConfig.Bor.CalcuttaBlock != nil {
- blockNum := chainConfig.Bor.CalcuttaBlock.Uint64()
- if blockNum != 0 {
- addCodeRecords(systemcontracts.CalcuttaUpgrade[chainName], blockNum, byChain)
+ for blockNumStr, genesisAlloc := range chainConfig.Bor.BlockAlloc {
+ blockNum, err := strconv.ParseUint(blockNumStr, 10, 64)
+ if err != nil {
+ panic(fmt.Errorf("failed to parse block number in BlockAlloc: %s", err.Error()))
}
+ alloc, err := types.DecodeGenesisAlloc(genesisAlloc)
+ if err != nil {
+ panic(fmt.Errorf("failed to decode block alloc: %v", err))
+ }
+ allocToCodeRecords(alloc, byChain, blockNum)
}
}
addGnosisSpecialCase()
}
+func allocToCodeRecords(alloc types.GenesisAlloc, byChain map[libcommon.Address][]libcommon.CodeRecord, blockNum uint64) {
+ for addr, account := range alloc {
+ if len(account.Code) > 0 {
+ list := byChain[addr]
+ codeHash, err := libcommon.HashData(account.Code)
+ if err != nil {
+ panic(fmt.Errorf("failed to hash system contract code: %s", err.Error()))
+ }
+ list = append(list, libcommon.CodeRecord{BlockNumber: blockNum, CodeHash: codeHash})
+ byChain[addr] = list
+ }
+ }
+}
+
// some hard coding for gnosis chain here to solve a historical problem with the token contract being re-written
// and losing the history for it in the DB. Temporary hack until erigon 3 arrives
func addGnosisSpecialCase() {
@@ -55,7 +64,7 @@ func addGnosisSpecialCase() {
oldContractCode := hexutility.FromHex("0x6080604052600436106101b65763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166305d2035b81146101bb57806306fdde03146101e4578063095ea7b31461026e5780630b26cf661461029257806318160ddd146102b557806323b872dd146102dc57806330adf81f14610306578063313ce5671461031b5780633644e51514610346578063395093511461035b5780634000aea01461037f57806340c10f19146103b057806342966c68146103d457806354fd4d50146103ec578063661884631461040157806369ffa08a1461042557806370a082311461044c578063715018a61461046d578063726600ce146104825780637d64bcb4146104a35780637ecebe00146104b8578063859ba28c146104d95780638da5cb5b1461051a5780638fcbaf0c1461054b57806395d89b4114610589578063a457c2d71461059e578063a9059cbb146105c2578063b753a98c146105e6578063bb35783b1461060a578063cd59658314610634578063d73dd62314610649578063dd62ed3e1461066d578063f2d5d56b14610694578063f2fde38b146106b8578063ff9e884d146106d9575b600080fd5b3480156101c757600080fd5b506101d0610700565b604080519115158252519081900360200190f35b3480156101f057600080fd5b506101f9610721565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561023357818101518382015260200161021b565b50505050905090810190601f1680156102605780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561027a57600080fd5b506101d0600160a060020a03600435166024356107af565b34801561029e57600080fd5b506102b3600160a060020a0360043516610803565b005b3480156102c157600080fd5b506102ca61085d565b60408051918252519081900360200190f35b3480156102e857600080fd5b506101d0600160a060020a0360043581169060243516604435610863565b34801561031257600080fd5b506102ca610a32565b34801561032757600080fd5b50610330610a56565b6040805160ff9092168252519081900360200190f35b34801561035257600080fd5b506102ca610a5f565b34801561036757600080fd5b506101d0600160a060020a0360043516602435610a65565b34801561038b57600080fd5b506101d060048035600160a060020a0316906024803591604435918201910135610a78565b3480156103bc57600080fd5b506101d0600160a060020a0360043516602435610b89565b3480156103e057600080fd5b506102b3600435610c94565b3480156103f857600080fd5b506101f9610ca1565b34801561040d57600080fd5b506101d0600160a060020a0360043516602435610cd8565b34801561043157600080fd5b506102b3600160a060020a0360043581169060243516610db5565b34801561045857600080fd5b506102ca600160a060020a0360043516610df1565b34801561047957600080fd5b506102b3610e0c565b34801561048e57600080fd5b506101d0600160a060020a0360043516610e23565b3480156104af57600080fd5b506101d0610e37565b3480156104c457600080fd5b506102ca600160a060020a0360043516610e3e565b3480156104e557600080fd5b506104ee610e50565b6040805167ffffffffffffffff9485168152928416602084015292168183015290519081900360600190f35b34801561052657600080fd5b5061052f610e5b565b60408051600160a060020a039092168252519081900360200190f35b34801561055757600080fd5b506102b3600160a060020a0360043581169060243516604435606435608435151560ff60a4351660c43560e435610e6a565b34801561059557600080fd5b506101f9611171565b3480156105aa57600080fd5b506101d0600160a060020a03600435166024356111cb565b3480156105ce57600080fd5b506101d0600160a060020a03600435166024356111d7565b3480156105f257600080fd5b506102b3600160a060020a0360043516602435611202565b34801561061657600080fd5b506102b3600160a060020a036004358116906024351660443561120d565b34801561064057600080fd5b5061052f61121e565b34801561065557600080fd5b506101d0600160a060020a036004351660243561122d565b34801561067957600080fd5b506102ca600160a060020a03600435811690602435166112b4565b3480156106a057600080fd5b506102b3600160a060020a03600435166024356112df565b3480156106c457600080fd5b506102b3600160a060020a03600435166112ea565b3480156106e557600080fd5b506102ca600160a060020a036004358116906024351661130a565b60065474010000000000000000000000000000000000000000900460ff1681565b6000805460408051602060026001851615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107a75780601f1061077c576101008083540402835291602001916107a7565b820191906000526020600020905b81548152906001019060200180831161078a57829003601f168201915b505050505081565b336000818152600560209081526040808320600160a060020a03871680855290835281842086905581518681529151939490939092600080516020611a13833981519152928290030190a350600192915050565b600654600160a060020a0316331461081a57600080fd5b61082381611327565b151561082e57600080fd5b6007805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b60045490565b600080600160a060020a038516151561087b57600080fd5b600160a060020a038416151561089057600080fd5b600160a060020a0385166000908152600360205260409020546108b9908463ffffffff61132f16565b600160a060020a0380871660009081526003602052604080822093909355908616815220546108ee908463ffffffff61134116565b600160a060020a0380861660008181526003602090815260409182902094909455805187815290519193928916926000805160206119f383398151915292918290030190a3600160a060020a0385163314610a1c5761094d85336112b4565b905060001981146109b757610968818463ffffffff61132f16565b600160a060020a038616600081815260056020908152604080832033808552908352928190208590558051948552519193600080516020611a13833981519152929081900390910190a3610a1c565b600160a060020a0385166000908152600a602090815260408083203384529091529020541580610a1157506109ea611354565b600160a060020a0386166000908152600a6020908152604080832033845290915290205410155b1515610a1c57600080fd5b610a27858585611358565b506001949350505050565b7fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb81565b60025460ff1681565b60085481565b6000610a71838361122d565b9392505050565b600084600160a060020a03811615801590610a9c5750600160a060020a0381163014155b1515610aa757600080fd5b610ab186866113ef565b1515610abc57600080fd5b85600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16878787604051808481526020018060200182810382528484828181526020019250808284376040519201829003965090945050505050a3610b3186611327565b15610b7d57610b7233878787878080601f016020809104026020016040519081016040528093929190818152602001838380828437506113fb945050505050565b1515610b7d57600080fd5b50600195945050505050565b600654600090600160a060020a03163314610ba357600080fd5b60065474010000000000000000000000000000000000000000900460ff1615610bcb57600080fd5b600454610bde908363ffffffff61134116565b600455600160a060020a038316600090815260036020526040902054610c0a908363ffffffff61134116565b600160a060020a038416600081815260036020908152604091829020939093558051858152905191927f0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d412139688592918290030190a2604080518381529051600160a060020a038516916000916000805160206119f38339815191529181900360200190a350600192915050565b610c9e3382611591565b50565b60408051808201909152600181527f3100000000000000000000000000000000000000000000000000000000000000602082015281565b336000908152600560209081526040808320600160a060020a0386168452909152812054808310610d2c57336000908152600560209081526040808320600160a060020a0388168452909152812055610d61565b610d3c818463ffffffff61132f16565b336000908152600560209081526040808320600160a060020a03891684529091529020555b336000818152600560209081526040808320600160a060020a038916808552908352928190205481519081529051929392600080516020611a13833981519152929181900390910190a35060019392505050565b600654600160a060020a03163314610dcc57600080fd5b80600160a060020a0381161515610de257600080fd5b610dec8383611680565b505050565b600160a060020a031660009081526003602052604090205490565b600654600160a060020a031633146101b657600080fd5b600754600160a060020a0390811691161490565b6000806000fd5b60096020526000908152604090205481565b600260036000909192565b600654600160a060020a031681565b600080600160a060020a038a161515610e8257600080fd5b600160a060020a0389161515610e9757600080fd5b861580610eab575086610ea8611354565b11155b1515610eb657600080fd5b600854604080517fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb602080830191909152600160a060020a03808f16838501528d166060830152608082018c905260a082018b905289151560c0808401919091528351808403909101815260e090920192839052815191929182918401908083835b60208310610f575780518252601f199092019160209182019101610f38565b51815160209384036101000a6000190180199092169116179052604080519290940182900382207f190100000000000000000000000000000000000000000000000000000000000083830152602283019790975260428083019790975283518083039097018752606290910192839052855192945084935085019190508083835b60208310610ff75780518252601f199092019160209182019101610fd8565b51815160209384036101000a600019018019909216911617905260408051929094018290038220600080845283830180875282905260ff8d1684870152606084018c9052608084018b905294519098506001965060a080840196509194601f19820194509281900390910191865af1158015611077573d6000803e3d6000fd5b50505060206040510351600160a060020a03168a600160a060020a03161415156110a057600080fd5b600160a060020a038a16600090815260096020526040902080546001810190915588146110cc57600080fd5b856110d85760006110dc565b6000195b600160a060020a03808c166000908152600560209081526040808320938e16835292905220819055905085611112576000611114565b865b600160a060020a03808c166000818152600a60209081526040808320948f1680845294825291829020949094558051858152905192939192600080516020611a13833981519152929181900390910190a350505050505050505050565b60018054604080516020600284861615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107a75780601f1061077c576101008083540402835291602001916107a7565b6000610a718383610cd8565b60006111e383836113ef565b15156111ee57600080fd5b6111f9338484611358565b50600192915050565b610dec338383610863565b611218838383610863565b50505050565b600754600160a060020a031690565b336000908152600560209081526040808320600160a060020a0386168452909152812054611261908363ffffffff61134116565b336000818152600560209081526040808320600160a060020a038916808552908352928190208590558051948552519193600080516020611a13833981519152929081900390910190a350600192915050565b600160a060020a03918216600090815260056020908152604080832093909416825291909152205490565b610dec823383610863565b600654600160a060020a0316331461130157600080fd5b610c9e816116ac565b600a60209081526000928352604080842090915290825290205481565b6000903b1190565b60008282111561133b57fe5b50900390565b8181018281101561134e57fe5b92915050565b4290565b61136182611327565b80156113885750604080516000815260208101909152611386908490849084906113fb565b155b15610dec5761139682610e23565b156113a057600080fd5b60408051600160a060020a0380861682528416602082015280820183905290517f11249f0fc79fc134a15a10d1da8291b79515bf987e036ced05b9ec119614070b9181900360600190a1505050565b6000610a71838361172a565b600083600160a060020a031663a4c0ed367c0100000000000000000000000000000000000000000000000000000000028685856040516024018084600160a060020a0316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561148c578181015183820152602001611474565b50505050905090810190601f1680156114b95780820380516001836020036101000a031916815260200191505b5060408051601f198184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909916989098178852518151919790965086955093509150819050838360005b8381101561154757818101518382015260200161152f565b50505050905090810190601f1680156115745780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1979650505050505050565b600160a060020a0382166000908152600360205260409020548111156115b657600080fd5b600160a060020a0382166000908152600360205260409020546115df908263ffffffff61132f16565b600160a060020a03831660009081526003602052604090205560045461160b908263ffffffff61132f16565b600455604080518281529051600160a060020a038416917fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5919081900360200190a2604080518281529051600091600160a060020a038516916000805160206119f38339815191529181900360200190a35050565b600160a060020a038216151561169e57611699816117f9565b6116a8565b6116a88282611805565b5050565b600160a060020a03811615156116c157600080fd5b600654604051600160a060020a038084169216907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a36006805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b3360009081526003602052604081205482111561174657600080fd5b600160a060020a038316151561175b57600080fd5b3360009081526003602052604090205461177b908363ffffffff61132f16565b3360009081526003602052604080822092909255600160a060020a038516815220546117ad908363ffffffff61134116565b600160a060020a0384166000818152600360209081526040918290209390935580518581529051919233926000805160206119f38339815191529281900390910190a350600192915050565b30316116a882826118a3565b604080517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015290518391600091600160a060020a038416916370a0823191602480830192602092919082900301818787803b15801561186a57600080fd5b505af115801561187e573d6000803e3d6000fd5b505050506040513d602081101561189457600080fd5b5051905061121884848361190b565b604051600160a060020a0383169082156108fc029083906000818181858888f1935050505015156116a85780826118d86119c2565b600160a060020a039091168152604051908190036020019082f080158015611904573d6000803e3d6000fd5b5050505050565b60408051600160a060020a03841660248201526044808201849052825180830390910181526064909101909152602081810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fa9059cbb000000000000000000000000000000000000000000000000000000001781528251606093600093909290918491828a5af160005193508392508080156101b65750506000835111156119ba578115156119ba57600080fd5b505050505050565b6040516021806119d2833901905600608060405260405160208060218339810160405251600160a060020a038116ff00ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a165627a7a72305820da715ff88e0288dbae664bb8af2f148726bdc8c499fecf88153280d022031e780029")
newContractCode := hexutility.FromHex("0x6080604052600436106101b35763ffffffff60e060020a60003504166305d2035b81146101b857806306fdde03146101e1578063095ea7b31461026b5780630b26cf661461028f57806318160ddd146102b257806323b872dd146102d957806330adf81f14610303578063313ce567146103185780633644e5151461034357806339509351146103585780634000aea01461037c57806340c10f19146103ad57806342966c68146103d157806354fd4d50146103e957806366188463146103fe57806369ffa08a1461042257806370a0823114610449578063715018a61461046a578063726600ce1461047f5780637d64bcb4146104a05780637ecebe00146104b5578063859ba28c146104d65780638da5cb5b146105175780638fcbaf0c1461054857806395d89b4114610586578063a457c2d71461059b578063a9059cbb146105bf578063b753a98c146105e3578063bb35783b14610607578063c6a1dedf14610631578063cd59658314610646578063d505accf1461065b578063d73dd62314610694578063dd62ed3e146106b8578063f2d5d56b146106df578063f2fde38b14610703578063ff9e884d14610724575b600080fd5b3480156101c457600080fd5b506101cd61074b565b604080519115158252519081900360200190f35b3480156101ed57600080fd5b506101f661076c565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610230578181015183820152602001610218565b50505050905090810190601f16801561025d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561027757600080fd5b506101cd600160a060020a03600435166024356107fa565b34801561029b57600080fd5b506102b0600160a060020a0360043516610810565b005b3480156102be57600080fd5b506102c761086a565b60408051918252519081900360200190f35b3480156102e557600080fd5b506101cd600160a060020a0360043581169060243516604435610870565b34801561030f57600080fd5b506102c7610a38565b34801561032457600080fd5b5061032d610a5c565b6040805160ff9092168252519081900360200190f35b34801561034f57600080fd5b506102c7610a65565b34801561036457600080fd5b506101cd600160a060020a0360043516602435610a6b565b34801561038857600080fd5b506101cd60048035600160a060020a0316906024803591604435918201910135610aac565b3480156103b957600080fd5b506101cd600160a060020a0360043516602435610bbd565b3480156103dd57600080fd5b506102b0600435610cc8565b3480156103f557600080fd5b506101f6610cd5565b34801561040a57600080fd5b506101cd600160a060020a0360043516602435610d0c565b34801561042e57600080fd5b506102b0600160a060020a0360043581169060243516610de9565b34801561045557600080fd5b506102c7600160a060020a0360043516610e0e565b34801561047657600080fd5b506102b0610e29565b34801561048b57600080fd5b506101cd600160a060020a0360043516610e40565b3480156104ac57600080fd5b506101cd610e54565b3480156104c157600080fd5b506102c7600160a060020a0360043516610e5b565b3480156104e257600080fd5b506104eb610e6d565b6040805167ffffffffffffffff9485168152928416602084015292168183015290519081900360600190f35b34801561052357600080fd5b5061052c610e78565b60408051600160a060020a039092168252519081900360200190f35b34801561055457600080fd5b506102b0600160a060020a0360043581169060243516604435606435608435151560ff60a4351660c43560e435610e87565b34801561059257600080fd5b506101f6610fc5565b3480156105a757600080fd5b506101cd600160a060020a036004351660243561101f565b3480156105cb57600080fd5b506101cd600160a060020a0360043516602435611032565b3480156105ef57600080fd5b506102b0600160a060020a0360043516602435611054565b34801561061357600080fd5b506102b0600160a060020a0360043581169060243516604435611064565b34801561063d57600080fd5b506102c7611075565b34801561065257600080fd5b5061052c611099565b34801561066757600080fd5b506102b0600160a060020a036004358116906024351660443560643560ff6084351660a43560c4356110a8565b3480156106a057600080fd5b506101cd600160a060020a0360043516602435611184565b3480156106c457600080fd5b506102c7600160a060020a036004358116906024351661120b565b3480156106eb57600080fd5b506102b0600160a060020a0360043516602435611236565b34801561070f57600080fd5b506102b0600160a060020a0360043516611241565b34801561073057600080fd5b506102c7600160a060020a0360043581169060243516611261565b60065474010000000000000000000000000000000000000000900460ff1681565b6000805460408051602060026001851615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107f25780601f106107c7576101008083540402835291602001916107f2565b820191906000526020600020905b8154815290600101906020018083116107d557829003601f168201915b505050505081565b600061080733848461127e565b50600192915050565b600654600160a060020a0316331461082757600080fd5b610830816112c0565b151561083b57600080fd5b6007805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b60045490565b600080600160a060020a038516151561088857600080fd5b600160a060020a038416151561089d57600080fd5b600160a060020a0385166000908152600360205260409020546108c6908463ffffffff6112c816565b600160a060020a0380871660009081526003602052604080822093909355908616815220546108fb908463ffffffff6112da16565b600160a060020a038086166000818152600360209081526040918290209490945580518781529051919392891692600080516020611d7283398151915292918290030190a3600160a060020a0385163314610a225761095a853361120b565b905060001981146109c457610975818463ffffffff6112c816565b600160a060020a038616600081815260056020908152604080832033808552908352928190208590558051948552519193600080516020611d92833981519152929081900390910190a3610a22565b600160a060020a0385166000908152600a602090815260408083203384529091529020541580610a175750600160a060020a0385166000908152600a602090815260408083203384529091529020544211155b1515610a2257600080fd5b610a2d8585856112ed565b506001949350505050565b7f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c981565b60025460ff1681565b60085481565b336000818152600560209081526040808320600160a060020a03871684529091528120549091610807918590610aa7908663ffffffff6112da16565b61127e565b600084600160a060020a03811615801590610ad05750600160a060020a0381163014155b1515610adb57600080fd5b610ae58686611324565b1515610af057600080fd5b85600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16878787604051808481526020018060200182810382528484828181526020019250808284376040519201829003965090945050505050a3610b65866112c0565b15610bb157610ba633878787878080601f01602080910402602001604051908101604052809392919081815260200183838082843750611330945050505050565b1515610bb157600080fd5b50600195945050505050565b600654600090600160a060020a03163314610bd757600080fd5b60065474010000000000000000000000000000000000000000900460ff1615610bff57600080fd5b600454610c12908363ffffffff6112da16565b600455600160a060020a038316600090815260036020526040902054610c3e908363ffffffff6112da16565b600160a060020a038416600081815260036020908152604091829020939093558051858152905191927f0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d412139688592918290030190a2604080518381529051600160a060020a03851691600091600080516020611d728339815191529181900360200190a350600192915050565b610cd233826114ad565b50565b60408051808201909152600181527f3100000000000000000000000000000000000000000000000000000000000000602082015281565b336000908152600560209081526040808320600160a060020a0386168452909152812054808310610d6057336000908152600560209081526040808320600160a060020a0388168452909152812055610d95565b610d70818463ffffffff6112c816565b336000908152600560209081526040808320600160a060020a03891684529091529020555b336000818152600560209081526040808320600160a060020a038916808552908352928190205481519081529051929392600080516020611d92833981519152929181900390910190a35060019392505050565b600654600160a060020a03163314610e0057600080fd5b610e0a828261159c565b5050565b600160a060020a031660009081526003602052604090205490565b600654600160a060020a031633146101b357600080fd5b600754600160a060020a0390811691161490565b6000806000fd5b60096020526000908152604090205481565b600260056000909192565b600654600160a060020a031681565b600080861580610e975750864211155b1515610ea257600080fd5b604080517fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb6020820152600160a060020a03808d16828401528b166060820152608081018a905260a0810189905287151560c0808301919091528251808303909101815260e0909101909152610f17906115da565b9150610f25828686866116e1565b600160a060020a038b8116911614610f3c57600080fd5b600160a060020a038a1660009081526009602052604090208054600181019091558814610f6857600080fd5b85610f74576000610f78565b6000195b905085610f86576000610f88565b865b600160a060020a03808c166000908152600a60209081526040808320938e1683529290522055610fb98a8a836118e3565b50505050505050505050565b60018054604080516020600284861615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156107f25780601f106107c7576101008083540402835291602001916107f2565b600061102b8383610d0c565b9392505050565b600061103e8383611324565b151561104957600080fd5b6108073384846112ed565b61105f338383610870565b505050565b61106f838383610870565b50505050565b7fea2aa0a1be11a07ed86d755c93467f4f82362b452371d1ba94d1715123511acb81565b600754600160a060020a031690565b600080428610156110b857600080fd5b600160a060020a03808a1660008181526009602090815260409182902080546001810190915582517f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c99281019290925281830193909352928b166060840152608083018a905260a0830182905260c08084018a90528151808503909101815260e090930190529250611149906115da565b9050611157818686866116e1565b600160a060020a038a811691161461116e57600080fd5b61117989898961127e565b505050505050505050565b336000908152600560209081526040808320600160a060020a03861684529091528120546111b8908363ffffffff6112da16565b336000818152600560209081526040808320600160a060020a038916808552908352928190208590558051948552519193600080516020611d92833981519152929081900390910190a350600192915050565b600160a060020a03918216600090815260056020908152604080832093909416825291909152205490565b61105f823383610870565b600654600160a060020a0316331461125857600080fd5b610cd281611a3e565b600a60209081526000928352604080842090915290825290205481565b6112898383836118e3565b60001981141561105f57600160a060020a038084166000908152600a60209081526040808320938616835292905290812055505050565b6000903b1190565b6000828211156112d457fe5b50900390565b818101828110156112e757fe5b92915050565b6112f682610e40565b1561105f5760408051600081526020810190915261131990849084908490611330565b151561105f57600080fd5b600061102b8383611abc565b600083600160a060020a031663a4c0ed3660e060020a028685856040516024018084600160a060020a0316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156113a8578181015183820152602001611390565b50505050905090810190601f1680156113d55780820380516001836020036101000a031916815260200191505b5060408051601f198184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909916989098178852518151919790965086955093509150819050838360005b8381101561146357818101518382015260200161144b565b50505050905090810190601f1680156114905780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1979650505050505050565b600160a060020a0382166000908152600360205260409020548111156114d257600080fd5b600160a060020a0382166000908152600360205260409020546114fb908263ffffffff6112c816565b600160a060020a038316600090815260036020526040902055600454611527908263ffffffff6112c816565b600455604080518281529051600160a060020a038416917fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5919081900360200190a2604080518281529051600091600160a060020a03851691600080516020611d728339815191529181900360200190a35050565b80600160a060020a03811615156115b257600080fd5b600160a060020a03831615156115d0576115cb82611b8b565b61105f565b61105f8383611b97565b6000600854826040518082805190602001908083835b6020831061160f5780518252601f1990920191602091820191016115f0565b51815160209384036101000a6000190180199092169116179052604080519290940182900382207f190100000000000000000000000000000000000000000000000000000000000083830152602283019790975260428083019790975283518083039097018752606290910192839052855192945084935085019190508083835b602083106116af5780518252601f199092019160209182019101611690565b5181516020939093036101000a6000190180199091169216919091179052604051920182900390912095945050505050565b6000808460ff16601b14806116f957508460ff16601c145b1515611775576040805160e560020a62461bcd02815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202776272076616c60448201527f7565000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b7f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0831115611813576040805160e560020a62461bcd02815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202773272076616c60448201527f7565000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b60408051600080825260208083018085528a905260ff8916838501526060830188905260808301879052925160019360a0808501949193601f19840193928390039091019190865af115801561186d573d6000803e3d6000fd5b5050604051601f190151915050600160a060020a03811615156118da576040805160e560020a62461bcd02815260206004820152601860248201527f45434453413a20696e76616c6964207369676e61747572650000000000000000604482015290519081900360640190fd5b95945050505050565b600160a060020a0383161515611968576040805160e560020a62461bcd028152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f7265737300000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b600160a060020a03821615156119ee576040805160e560020a62461bcd02815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f7373000000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b600160a060020a0380841660008181526005602090815260408083209487168084529482529182902085905581518581529151600080516020611d928339815191529281900390910190a3505050565b600160a060020a0381161515611a5357600080fd5b600654604051600160a060020a038084169216907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a36006805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b33600090815260036020526040812054821115611ad857600080fd5b600160a060020a0383161515611aed57600080fd5b33600090815260036020526040902054611b0d908363ffffffff6112c816565b3360009081526003602052604080822092909255600160a060020a03851681522054611b3f908363ffffffff6112da16565b600160a060020a038416600081815260036020908152604091829020939093558051858152905191923392600080516020611d728339815191529281900390910190a350600192915050565b3031610e0a8282611c44565b604080517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015290518391600091600160a060020a038416916370a0823191602480830192602092919082900301818787803b158015611bfc57600080fd5b505af1158015611c10573d6000803e3d6000fd5b505050506040513d6020811015611c2657600080fd5b5051905061106f600160a060020a038516848363ffffffff611cac16565b604051600160a060020a0383169082156108fc029083906000818181858888f193505050501515610e0a578082611c79611d41565b600160a060020a039091168152604051908190036020019082f080158015611ca5573d6000803e3d6000fd5b5050505050565b82600160a060020a031663a9059cbb83836040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b158015611d0f57600080fd5b505af1158015611d23573d6000803e3d6000fd5b505050503d1561105f5760206000803e600051151561105f57600080fd5b604051602180611d51833901905600608060405260405160208060218339810160405251600160a060020a038116ff00ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a165627a7a72305820b96bb0733a3e45fdddafa592f51114d0cf16cad047ad60b9b91ae91eb772c6940029")
- codeHash, err := common.HashData(oldContractCode)
+ codeHash, err := libcommon.HashData(oldContractCode)
if err != nil {
panic("could not get code hash from old gnosis token contract")
}
@@ -64,7 +73,7 @@ func addGnosisSpecialCase() {
CodeHash: codeHash,
})
- codeHash, err = common.HashData(newContractCode)
+ codeHash, err = libcommon.HashData(newContractCode)
if err != nil {
panic("could not get code hash from new gnosis token contract")
}
@@ -75,19 +84,3 @@ func addGnosisSpecialCase() {
byChain[address] = list
}
-
-func addCodeRecords(upgrade *systemcontracts.Upgrade, blockNum uint64, byChain map[libcommon.Address][]libcommon.CodeRecord) {
- for _, config := range upgrade.Configs {
- list := byChain[config.ContractAddr]
- code, err := hex.DecodeString(config.Code)
- if err != nil {
- panic(fmt.Errorf("failed to decode system contract code: %s", err.Error()))
- }
- codeHash, err := common.HashData(code)
- if err != nil {
- panic(fmt.Errorf("failed to hash system contract code: %s", err.Error()))
- }
- list = append(list, libcommon.CodeRecord{BlockNumber: blockNum, CodeHash: codeHash})
- byChain[config.ContractAddr] = list
- }
-}
diff --git a/core/systemcontracts/const.go b/core/systemcontracts/const.go
deleted file mode 100644
index ab3315e726f..00000000000
--- a/core/systemcontracts/const.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package systemcontracts
-
-import (
- libcommon "github.com/ledgerwatch/erigon-lib/common"
-)
-
-var (
- // genesis contracts
- ValidatorContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001000")
- SlashContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001001")
- SystemRewardContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001002")
- LightClientContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001003")
- TokenHubContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001004")
- RelayerIncentivizeContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001005")
- RelayerHubContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001006")
- GovHubContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001007")
- TokenManagerContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001008")
- MaticTokenContract = libcommon.HexToAddress("0x0000000000000000000000000000000000001010")
- CrossChainContract = libcommon.HexToAddress("0x0000000000000000000000000000000000002000")
- StakingContract = libcommon.HexToAddress("0x0000000000000000000000000000000000002001")
-)
diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go
index 677aea242fe..e9d6d005539 100644
--- a/core/systemcontracts/upgrade.go
+++ b/core/systemcontracts/upgrade.go
@@ -1,38 +1,10 @@
package systemcontracts
import (
- "encoding/hex"
- "fmt"
- "math/big"
-
- "github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/core/vm/evmtypes"
- "github.com/ledgerwatch/log/v3"
-
- "github.com/ledgerwatch/erigon/params/networkname"
)
-type UpgradeConfig struct {
- BeforeUpgrade upgradeHook
- AfterUpgrade upgradeHook
- ContractAddr libcommon.Address
- CommitUrl string
- Code string
-}
-
-type Upgrade struct {
- UpgradeName string
- Configs []*UpgradeConfig
-}
-
-type upgradeHook func(blockNumber *big.Int, contractAddr libcommon.Address, statedb evmtypes.IntraBlockState) error
-
var (
- //upgrade config
-
- CalcuttaUpgrade = make(map[string]*Upgrade)
-
// SystemContractCodeLookup is used to address a flaw in the upgrade logic of the system contracts. Since they are updated directly, without first being self-destructed
// and then re-created, the usual incarnation logic does not get activated, and all historical records of the code of these contracts are retrieved as the most
// recent version. This problem will not exist in erigon3, but until then, a workaround will be used to access code of such contracts through this structure
@@ -40,88 +12,3 @@ var (
// to be used in binary search to determine correct historical code
SystemContractCodeLookup = map[string]map[libcommon.Address][]libcommon.CodeRecord{}
)
-
-func init() {
- CalcuttaUpgrade[networkname.BorMainnetChainName] = &Upgrade{
- UpgradeName: "calcutta",
- Configs: []*UpgradeConfig{
- {
- ContractAddr: MaticTokenContract,
- Code: "60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611546565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154c565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115d9565b005b348015610b2e57600080fd5b50610b376115f6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161c90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da76023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163c90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d846023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165b565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600181526020017f890000000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611753565b90505b92915050565b6040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b10565b611be6565b9050949350505050565b608981565b60015481565b604051806080016040528060528152602001611dca605291396040516020018082805190602001908083835b6020831061159b5780518252602082019150602081019050602083039250611578565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e1611381565b6115ea57600080fd5b6115f38161165b565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162b57600080fd5b600082840390508091505092915050565b60008082840190508381101561165157600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169557600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d357600080fd5b505afa1580156117e7573d6000803e3d6000fd5b505050506040513d60208110156117fd57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561188f57600080fd5b505afa1580156118a3573d6000803e3d6000fd5b505050506040513d60208110156118b957600080fd5b810190808051906020019092919050505090506118d7868686611c30565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119df57600080fd5b505afa1580156119f3573d6000803e3d6000fd5b505050506040513d6020811015611a0957600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9757600080fd5b505afa158015611aab573d6000803e3d6000fd5b505050506040513d6020811015611ac157600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b60208310611b625780518252602082019150602081019050602083039250611b3f565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d18573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820a4a6f71a98ac3fc613c3a8f1e2e11b9eb9b6b39f125f7d9508916c2b8fb02c7164736f6c63430005100032",
- },
- },
- }
-
- CalcuttaUpgrade[networkname.MumbaiChainName] = &Upgrade{
- UpgradeName: "calcutta",
- Configs: []*UpgradeConfig{
- {
- ContractAddr: MaticTokenContract,
- Code: "60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611548565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154e565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115db565b005b348015610b2e57600080fd5b50610b376115f8565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161e90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da96023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163e90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d866023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165d565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611755565b90505b92915050565b6040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b12565b611be8565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611dcc605291396040516020018082805190602001908083835b6020831061159d578051825260208201915060208101905060208303925061157a565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e3611381565b6115ec57600080fd5b6115f58161165d565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162d57600080fd5b600082840390508091505092915050565b60008082840190508381101561165357600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169757600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d557600080fd5b505afa1580156117e9573d6000803e3d6000fd5b505050506040513d60208110156117ff57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561189157600080fd5b505afa1580156118a5573d6000803e3d6000fd5b505050506040513d60208110156118bb57600080fd5b810190808051906020019092919050505090506118d9868686611c32565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119e157600080fd5b505afa1580156119f5573d6000803e3d6000fd5b505050506040513d6020811015611a0b57600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9957600080fd5b505afa158015611aad573d6000803e3d6000fd5b505050506040513d6020811015611ac357600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b60208310611b645780518252602082019150602081019050602083039250611b41565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d1a573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820ccd6c2a9c259832bbb367986ee06cd87af23022681b0cb22311a864b701d939564736f6c63430005100032",
- },
- },
- }
-
- CalcuttaUpgrade[networkname.BorDevnetChainName] = &Upgrade{
- UpgradeName: "calcutta",
- Configs: []*UpgradeConfig{
- {
- ContractAddr: MaticTokenContract,
- Code: "60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611546565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154c565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115d9565b005b348015610b2e57600080fd5b50610b376115f6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161c90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da76023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163c90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d846023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165b565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600181526020017f890000000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611753565b90505b92915050565b6040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b10565b611be6565b9050949350505050565b608981565b60015481565b604051806080016040528060528152602001611dca605291396040516020018082805190602001908083835b6020831061159b5780518252602082019150602081019050602083039250611578565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e1611381565b6115ea57600080fd5b6115f38161165b565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162b57600080fd5b600082840390508091505092915050565b60008082840190508381101561165157600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169557600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d357600080fd5b505afa1580156117e7573d6000803e3d6000fd5b505050506040513d60208110156117fd57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561188f57600080fd5b505afa1580156118a3573d6000803e3d6000fd5b505050506040513d60208110156118b957600080fd5b810190808051906020019092919050505090506118d7868686611c30565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119df57600080fd5b505afa1580156119f3573d6000803e3d6000fd5b505050506040513d6020811015611a0957600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9757600080fd5b505afa158015611aab573d6000803e3d6000fd5b505050506040513d6020811015611ac157600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b60208310611b625780518252602082019150602081019050602083039250611b3f565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d18573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820a4a6f71a98ac3fc613c3a8f1e2e11b9eb9b6b39f125f7d9508916c2b8fb02c7164736f6c63430005100032",
- },
- },
- }
-}
-
-func UpgradeBuildInSystemContract(config *chain.Config, blockNumber *big.Int, statedb evmtypes.IntraBlockState, logger log.Logger) {
- if config == nil || blockNumber == nil || statedb == nil {
- return
- }
-
- if config.Bor != nil && config.Bor.IsOnCalcutta(blockNumber) {
- applySystemContractUpgrade(CalcuttaUpgrade[config.ChainName], blockNumber, statedb, logger)
- }
-
- /*
- apply other upgrades
- */
-}
-
-func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb evmtypes.IntraBlockState, logger log.Logger) {
- if upgrade == nil {
- logger.Info("Empty upgrade config", "height", blockNumber.String())
- return
- }
-
- logger.Info(fmt.Sprintf("Apply upgrade %s at height %d", upgrade.UpgradeName, blockNumber.Int64()))
- for _, cfg := range upgrade.Configs {
- logger.Info(fmt.Sprintf("Upgrade contract %s to commit %s", cfg.ContractAddr.String(), cfg.CommitUrl))
-
- if cfg.BeforeUpgrade != nil {
- err := cfg.BeforeUpgrade(blockNumber, cfg.ContractAddr, statedb)
- if err != nil {
- panic(fmt.Errorf("contract address: %s, execute beforeUpgrade error: %s", cfg.ContractAddr.String(), err.Error()))
- }
- }
-
- newContractCode, err := hex.DecodeString(cfg.Code)
- if err != nil {
- panic(fmt.Errorf("failed to decode new contract code: %s", err.Error()))
- }
-
- prevContractCode := statedb.GetCode(cfg.ContractAddr)
- if len(prevContractCode) == 0 && len(newContractCode) > 0 {
- // system contracts defined after genesis need to be explicitly created
- statedb.CreateAccount(cfg.ContractAddr, true)
- }
-
- statedb.SetCode(cfg.ContractAddr, newContractCode)
-
- if cfg.AfterUpgrade != nil {
- err := cfg.AfterUpgrade(blockNumber, cfg.ContractAddr, statedb)
- if err != nil {
- panic(fmt.Errorf("contract address: %s, execute afterUpgrade error: %s", cfg.ContractAddr.String(), err.Error()))
- }
- }
- }
-}
diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go
index 562205d3327..0f6131ea91e 100644
--- a/core/types/access_list_tx.go
+++ b/core/types/access_list_tx.go
@@ -28,9 +28,9 @@ import (
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
types2 "github.com/ledgerwatch/erigon-lib/types"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/rlp"
)
@@ -52,7 +52,7 @@ func (tx AccessListTx) copy() *AccessListTx {
},
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
- Data: common.CopyBytes(tx.Data),
+ Data: libcommon.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
Value: new(uint256.Int),
@@ -93,13 +93,8 @@ func (tx *AccessListTx) Unwrap() Transaction {
// EncodingSize returns the RLP encoding size of the whole transaction envelope
func (tx AccessListTx) EncodingSize() int {
payloadSize, _, _, _ := tx.payloadSize()
- envelopeSize := payloadSize
// Add envelope size and type size
- if payloadSize >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize)))
- }
- envelopeSize += 2
- return envelopeSize
+ return 1 + rlp2.ListPrefixLen(payloadSize) + payloadSize
}
// payloadSize calculates the RLP encoding size of transaction, without TxType and envelope
@@ -127,26 +122,10 @@ func (tx AccessListTx) payloadSize() (payloadSize int, nonceLen, gasLen, accessL
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(tx.Value)
// size of Data
- payloadSize++
- switch len(tx.Data) {
- case 0:
- case 1:
- if tx.Data[0] >= 128 {
- payloadSize++
- }
- default:
- if len(tx.Data) >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(len(tx.Data))))
- }
- payloadSize += len(tx.Data)
- }
+ payloadSize += rlp2.StringLen(tx.Data)
// size of AccessList
- payloadSize++
accessListLen = accessListSize(tx.AccessList)
- if accessListLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(accessListLen)))
- }
- payloadSize += accessListLen
+ payloadSize += rlp2.ListPrefixLen(accessListLen) + accessListLen
// size of V
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(&tx.V)
@@ -164,18 +143,10 @@ func accessListSize(al types2.AccessList) int {
for _, tuple := range al {
tupleLen := 21 // For the address
// size of StorageKeys
- tupleLen++
// Each storage key takes 33 bytes
storageLen := 33 * len(tuple.StorageKeys)
- if storageLen >= 56 {
- tupleLen += libcommon.BitLenToByteLen(bits.Len(uint(storageLen))) // BE encoding of the length of the storage keys
- }
- tupleLen += storageLen
- accessListLen++
- if tupleLen >= 56 {
- accessListLen += libcommon.BitLenToByteLen(bits.Len(uint(tupleLen))) // BE encoding of the length of the storage keys
- }
- accessListLen += tupleLen
+ tupleLen += rlp2.ListPrefixLen(storageLen) + storageLen
+ accessListLen += rlp2.ListPrefixLen(tupleLen) + tupleLen
}
return accessListLen
}
@@ -183,13 +154,9 @@ func accessListSize(al types2.AccessList) int {
func encodeAccessList(al types2.AccessList, w io.Writer, b []byte) error {
for _, tuple := range al {
tupleLen := 21
- tupleLen++
// Each storage key takes 33 bytes
storageLen := 33 * len(tuple.StorageKeys)
- if storageLen >= 56 {
- tupleLen += libcommon.BitLenToByteLen(bits.Len(uint(storageLen))) // BE encoding of the length of the storage keys
- }
- tupleLen += storageLen
+ tupleLen += rlp2.ListPrefixLen(storageLen) + storageLen
if err := EncodeStructSizePrefix(tupleLen, w, b); err != nil {
return err
}
@@ -320,12 +287,8 @@ func (tx AccessListTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLe
// EncodeRLP implements rlp.Encoder
func (tx AccessListTx) EncodeRLP(w io.Writer) error {
payloadSize, nonceLen, gasLen, accessListLen := tx.payloadSize()
- envelopeSize := payloadSize
- if payloadSize >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize)))
- }
// size of struct prefix and TxType
- envelopeSize += 2
+ envelopeSize := 1 + rlp2.ListPrefixLen(payloadSize) + payloadSize
var b [33]byte
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
diff --git a/core/types/accounts/account.go b/core/types/accounts/account.go
index 1953249c652..c89150c7429 100644
--- a/core/types/accounts/account.go
+++ b/core/types/accounts/account.go
@@ -8,6 +8,7 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/rlp"
@@ -17,12 +18,13 @@ import (
// These objects are stored in the main account trie.
// DESCRIBED: docs/programmers_guide/guide.md#ethereum-state
type Account struct {
- Initialised bool
- Nonce uint64
- Balance uint256.Int
- Root libcommon.Hash // merkle root of the storage trie
- CodeHash libcommon.Hash // hash of the bytecode
- Incarnation uint64
+ Initialised bool
+ Nonce uint64
+ Balance uint256.Int
+ Root libcommon.Hash // merkle root of the storage trie
+ CodeHash libcommon.Hash // hash of the bytecode
+ Incarnation uint64
+ PrevIncarnation uint64
}
const (
@@ -77,8 +79,6 @@ func (a *Account) EncodingLengthForStorage() uint {
}
func (a *Account) EncodingLengthForHashing() uint {
- var structLength uint
-
balanceBytes := 0
if !a.Balance.LtUint64(128) {
balanceBytes = a.Balance.ByteLen()
@@ -86,17 +86,11 @@ func (a *Account) EncodingLengthForHashing() uint {
nonceBytes := rlp.IntLenExcludingHead(a.Nonce)
- structLength += uint(balanceBytes + nonceBytes + 2)
+ structLength := balanceBytes + nonceBytes + 2
structLength += 66 // Two 32-byte arrays + 2 prefixes
- if structLength < 56 {
- return 1 + structLength
- }
-
- lengthBytes := libcommon.BitLenToByteLen(bits.Len(structLength))
-
- return uint(1+lengthBytes) + structLength
+ return uint(rlp2.ListPrefixLen(structLength) + structLength)
}
func (a *Account) EncodeForStorage(buffer []byte) {
diff --git a/core/types/accounts/account_proof.go b/core/types/accounts/account_proof.go
index 7653445d1d1..9239e6b3fcf 100644
--- a/core/types/accounts/account_proof.go
+++ b/core/types/accounts/account_proof.go
@@ -2,9 +2,8 @@ package accounts
import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// Result structs for GetProof
diff --git a/core/types/accounts/account_test.go b/core/types/accounts/account_test.go
index 6383a1440e7..25eb2038876 100644
--- a/core/types/accounts/account_test.go
+++ b/core/types/accounts/account_test.go
@@ -10,6 +10,7 @@ import (
)
func TestEmptyAccount(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 100,
@@ -31,6 +32,7 @@ func TestEmptyAccount(t *testing.T) {
}
func TestEmptyAccount2(t *testing.T) {
+ t.Parallel()
encodedAccount := Account{}
b := make([]byte, encodedAccount.EncodingLengthForStorage())
@@ -45,6 +47,7 @@ func TestEmptyAccount2(t *testing.T) {
// fails if run package tests
// account_test.go:57: cant decode the account malformed RLP for Account(c064): prefixLength(1) + dataLength(0) != sliceLength(2) �d
func TestEmptyAccount_BufferStrangeBehaviour(t *testing.T) {
+ t.Parallel()
a := Account{}
encodedAccount := make([]byte, a.EncodingLengthForStorage())
@@ -57,6 +60,7 @@ func TestEmptyAccount_BufferStrangeBehaviour(t *testing.T) {
}
func TestAccountEncodeWithCode(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 2,
@@ -79,6 +83,7 @@ func TestAccountEncodeWithCode(t *testing.T) {
}
func TestAccountEncodeWithCodeWithStorageSizeHack(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 2,
@@ -101,6 +106,7 @@ func TestAccountEncodeWithCodeWithStorageSizeHack(t *testing.T) {
}
func TestAccountEncodeWithoutCode(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 2,
@@ -123,6 +129,7 @@ func TestAccountEncodeWithoutCode(t *testing.T) {
}
func TestEncodeAccountWithEmptyBalanceNonNilContractAndNotZeroIncarnation(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 0,
@@ -143,6 +150,7 @@ func TestEncodeAccountWithEmptyBalanceNonNilContractAndNotZeroIncarnation(t *tes
isAccountsEqual(t, a, decodedAccount)
}
func TestEncodeAccountWithEmptyBalanceAndNotZeroIncarnation(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 0,
@@ -191,6 +199,7 @@ func isAccountsEqual(t *testing.T, src, dst Account) {
}
func TestIncarnationForEmptyAccount(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 100,
@@ -216,6 +225,7 @@ func TestIncarnationForEmptyAccount(t *testing.T) {
}
func TestEmptyIncarnationForEmptyAccount2(t *testing.T) {
+ t.Parallel()
a := Account{}
encodedAccount := make([]byte, a.EncodingLengthForStorage())
@@ -235,6 +245,7 @@ func TestEmptyIncarnationForEmptyAccount2(t *testing.T) {
}
func TestIncarnationWithNonEmptyAccount(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 2,
@@ -261,6 +272,7 @@ func TestIncarnationWithNonEmptyAccount(t *testing.T) {
}
func TestIncarnationWithNoIncarnation(t *testing.T) {
+ t.Parallel()
a := Account{
Initialised: true,
Nonce: 2,
diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go
index 94c99cb5d5b..d2bede77c40 100644
--- a/core/types/blob_tx.go
+++ b/core/types/blob_tx.go
@@ -4,13 +4,13 @@ import (
"fmt"
"io"
"math/big"
- "math/bits"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/fixedgas"
+ rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
types2 "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/erigon/rlp"
@@ -102,12 +102,8 @@ func (stx BlobTx) payloadSize() (payloadSize, nonceLen, gasLen, accessListLen, b
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(stx.MaxFeePerBlobGas)
// size of BlobVersionedHashes
- payloadSize++
blobHashesLen = blobVersionedHashesSize(stx.BlobVersionedHashes)
- if blobHashesLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(blobHashesLen)))
- }
- payloadSize += blobHashesLen
+ payloadSize += rlp2.ListPrefixLen(blobHashesLen) + blobHashesLen
return
}
@@ -150,18 +146,12 @@ func (stx BlobTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, ga
return err
}
// encode To
- if stx.To == nil {
- b[0] = 128
- } else {
- b[0] = 128 + 20
- }
+ b[0] = 128 + 20
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if stx.To != nil {
- if _, err := w.Write(stx.To.Bytes()); err != nil {
- return err
- }
+ if _, err := w.Write(stx.To.Bytes()); err != nil {
+ return err
}
// encode Value
if err := stx.Value.EncodeRLP(w); err != nil {
@@ -208,12 +198,8 @@ func (stx BlobTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, ga
func (stx BlobTx) EncodeRLP(w io.Writer) error {
payloadSize, nonceLen, gasLen, accessListLen, blobHashesLen := stx.payloadSize()
- envelopeSize := payloadSize
- if payloadSize >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize)))
- }
// size of struct prefix and TxType
- envelopeSize += 2
+ envelopeSize := 1 + rlp2.ListPrefixLen(payloadSize) + payloadSize
var b [33]byte
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
@@ -276,13 +262,11 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error {
if b, err = s.Bytes(); err != nil {
return err
}
- if len(b) > 0 && len(b) != 20 {
+ if len(b) != 20 {
return fmt.Errorf("wrong size for To: %d", len(b))
}
- if len(b) > 0 {
- stx.To = &libcommon.Address{}
- copy((*stx.To)[:], b)
- }
+ stx.To = &libcommon.Address{}
+ copy((*stx.To)[:], b)
if b, err = s.Uint256Bytes(); err != nil {
return err
diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go
index cc06694272c..654e53e7122 100644
--- a/core/types/blob_tx_wrapper.go
+++ b/core/types/blob_tx_wrapper.go
@@ -271,7 +271,7 @@ func (txw *BlobTxWrapper) ValidateBlobTransactionWrapper() error {
// the following check isn't strictly necessary as it would be caught by blob gas processing
// (and hence it is not explicitly in the spec for this function), but it doesn't hurt to fail
// early in case we are getting spammed with too many blobs or there is a bug somewhere:
- if uint64(l1) > fixedgas.MaxBlobsPerBlock {
+ if uint64(l1) > fixedgas.DefaultMaxBlobsPerBlock {
return fmt.Errorf("number of blobs exceeds max: %v", l1)
}
kzgCtx := libkzg.Ctx()
@@ -345,110 +345,6 @@ func (txw *BlobTxWrapper) IsContractDeploy() bool { return txw.Tx.IsContractDepl
func (txw *BlobTxWrapper) Unwrap() Transaction { return &txw.Tx }
-func (txw BlobTxWrapper) EncodingSize() int {
- total, _, _, _, _ := txw.payloadSize()
- envelopeSize := total
- // Add envelope size and type size
- if total >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(total)))
- }
- envelopeSize += 2
- return envelopeSize
-}
-
-func (txw BlobTxWrapper) payloadSize() (int, int, int, int, int) {
- total := 1
- txSize, _, _, _, _ := txw.Tx.payloadSize()
- if txSize >= 56 {
- total += libcommon.BitLenToByteLen(bits.Len(uint(txSize)))
- }
- total += txSize
-
- total++
- commitmentsSize := txw.Commitments.payloadSize()
- if commitmentsSize >= 56 {
- total += libcommon.BitLenToByteLen(bits.Len(uint(commitmentsSize)))
- }
- total += commitmentsSize
-
- total++
- blobsSize := txw.Blobs.payloadSize()
- if blobsSize >= 56 {
- total += libcommon.BitLenToByteLen(bits.Len(uint(blobsSize)))
- }
- total += blobsSize
-
- total++
- proofsSize := txw.Proofs.payloadSize()
- if proofsSize >= 56 {
- total += libcommon.BitLenToByteLen(bits.Len(uint(proofsSize)))
- }
- total += proofsSize
- return total, txSize, commitmentsSize, blobsSize, proofsSize
-}
-
-func (txw BlobTxWrapper) encodePayload(w io.Writer, b []byte, total, txSize, commitmentsSize, blobsSize, proofsSize int) error {
- // prefix, encode txw payload size
- if err := EncodeStructSizePrefix(total, w, b); err != nil {
- return err
- }
-
- txPayloadSize, nonceLen, gasLen, accessListLen, blobHashesLen := txw.Tx.payloadSize()
-
- if err := txw.Tx.encodePayload(w, b, txPayloadSize, nonceLen, gasLen, accessListLen, blobHashesLen); err != nil {
- return err
- }
-
- if err := txw.Blobs.encodePayload(w, b, blobsSize); err != nil {
- return err
- }
- if err := txw.Commitments.encodePayload(w, b, commitmentsSize); err != nil {
- return err
- }
- if err := txw.Proofs.encodePayload(w, b, proofsSize); err != nil {
- return err
- }
- return nil
-}
-
-func (txw *BlobTxWrapper) MarshalBinary(w io.Writer) error {
- total, txSize, commitmentsSize, blobsSize, proofsSize := txw.payloadSize()
- var b [33]byte
- // encode TxType
- b[0] = BlobTxType
- if _, err := w.Write(b[:1]); err != nil {
- return err
- }
- if err := txw.encodePayload(w, b[:], total, txSize, commitmentsSize, blobsSize, proofsSize); err != nil {
- return err
- }
- return nil
-}
-
-func (txw BlobTxWrapper) EncodeRLP(w io.Writer) error {
- total, txSize, commitmentsSize, proofsSize, blobsSize := txw.payloadSize()
- envelopeSize := total
- if total >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(total)))
- }
- // size of struct prefix and TxType
- envelopeSize += 2
- var b [33]byte
- // envelope
- if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
- return err
- }
- // encode TxType
- b[0] = BlobTxType
- if _, err := w.Write(b[:1]); err != nil {
- return err
- }
- if err := txw.encodePayload(w, b[:], total, txSize, commitmentsSize, proofsSize, blobsSize); err != nil {
- return err
- }
- return nil
-}
-
func (txw *BlobTxWrapper) DecodeRLP(s *rlp.Stream) error {
_, err := s.List()
if err != nil {
@@ -473,3 +369,15 @@ func (txw *BlobTxWrapper) DecodeRLP(s *rlp.Stream) error {
return s.ListEnd()
}
+
+// We deliberately encode only the transaction payload because the only case we need to serialize
+// blobs/commitments/proofs is when we reply to GetPooledTransactions (and that's handled by the txpool).
+func (txw BlobTxWrapper) EncodingSize() int {
+ return txw.Tx.EncodingSize()
+}
+func (txw *BlobTxWrapper) MarshalBinary(w io.Writer) error {
+ return txw.Tx.MarshalBinary(w)
+}
+func (txw BlobTxWrapper) EncodeRLP(w io.Writer) error {
+ return txw.Tx.EncodeRLP(w)
+}
diff --git a/core/types/block.go b/core/types/block.go
index 748cbcb7a59..1a434098bbf 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -24,23 +24,26 @@ import (
"fmt"
"io"
"math/big"
- "math/bits"
"reflect"
"sync/atomic"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
"github.com/gballet/go-verkle"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/rlp"
)
var (
EmptyRootHash = libcommon.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
EmptyUncleHash = rlpHash([]*Header(nil))
+
+ ExtraVanityLength = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
+ ExtraSealLength = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
)
// A BlockNonce is a 64-bit hash which proves (combined with the
@@ -128,25 +131,11 @@ func (h *Header) EncodingSize() int {
encodingSize++
encodingSize += rlp.IntLenExcludingHead(h.Time)
// size of Extra
- encodingSize++
- switch len(h.Extra) {
- case 0:
- case 1:
- if h.Extra[0] >= 128 {
- encodingSize++
- }
- default:
- if len(h.Extra) >= 56 {
- encodingSize += libcommon.BitLenToByteLen(bits.Len(uint(len(h.Extra))))
- }
- encodingSize += len(h.Extra)
- }
+ encodingSize += rlp2.StringLen(h.Extra)
if len(h.AuRaSeal) != 0 {
- encodingSize += 1 + rlp.IntLenExcludingHead(h.AuRaStep) + 1 + len(h.AuRaSeal)
- if len(h.AuRaSeal) >= 56 {
- encodingSize += libcommon.BitLenToByteLen(bits.Len(uint(len(h.AuRaSeal))))
- }
+ encodingSize += 1 + rlp.IntLenExcludingHead(h.AuRaStep)
+ encodingSize += rlp2.ListPrefixLen(len(h.AuRaSeal)) + len(h.AuRaSeal)
} else {
encodingSize += 33 /* MixDigest */ + 9 /* BlockNonce */
}
@@ -175,26 +164,12 @@ func (h *Header) EncodingSize() int {
if h.Verkle {
// Encoding of Verkle Proof
- encodingSize++
- switch len(h.VerkleProof) {
- case 0:
- case 1:
- if h.VerkleProof[0] >= 128 {
- encodingSize++
- }
- default:
- if len(h.VerkleProof) >= 56 {
- encodingSize += libcommon.BitLenToByteLen(bits.Len(uint(len(h.VerkleProof))))
- }
- encodingSize += len(h.VerkleProof)
- }
- encodingSize++
-
+ encodingSize += rlp2.StringLen(h.VerkleProof)
var tmpBuffer bytes.Buffer
if err := rlp.Encode(&tmpBuffer, h.VerkleKeyVals); err != nil {
panic(err)
}
- encodingSize += tmpBuffer.Len()
+ encodingSize += rlp2.ListPrefixLen(tmpBuffer.Len()) + tmpBuffer.Len()
}
return encodingSize
@@ -641,6 +616,23 @@ type RawBlock struct {
Body *RawBody
}
+func (r RawBlock) AsBlock() (*Block, error) {
+ b := &Block{header: r.Header}
+ b.uncles = r.Body.Uncles
+ b.withdrawals = r.Body.Withdrawals
+
+ txs := make([]Transaction, len(r.Body.Transactions))
+ for i, tx := range r.Body.Transactions {
+ var err error
+ if txs[i], err = DecodeTransaction(tx); err != nil {
+ return nil, err
+ }
+ }
+ b.transactions = txs
+
+ return b, nil
+}
+
// Block represents an entire block in the Ethereum blockchain.
type Block struct {
header *Header
@@ -681,45 +673,25 @@ func (rb RawBody) EncodingSize() int {
func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen int) {
// size of Transactions
- payloadSize++
for _, tx := range rb.Transactions {
txsLen += len(tx)
}
- if txsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(txsLen)))
- }
- payloadSize += txsLen
+ payloadSize += rlp2.ListPrefixLen(txsLen) + txsLen
// size of Uncles
- payloadSize++
for _, uncle := range rb.Uncles {
- unclesLen++
uncleLen := uncle.EncodingSize()
- if uncleLen >= 56 {
- unclesLen += libcommon.BitLenToByteLen(bits.Len(uint(uncleLen)))
- }
- unclesLen += uncleLen
+ unclesLen += rlp2.ListPrefixLen(uncleLen) + uncleLen
}
- if unclesLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(unclesLen)))
- }
- payloadSize += unclesLen
+ payloadSize += rlp2.ListPrefixLen(unclesLen) + unclesLen
// size of Withdrawals
if rb.Withdrawals != nil {
- payloadSize++
for _, withdrawal := range rb.Withdrawals {
- withdrawalsLen++
withdrawalLen := withdrawal.EncodingSize()
- if withdrawalLen >= 56 {
- withdrawalLen += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalLen)))
- }
- withdrawalsLen += withdrawalLen
+ withdrawalsLen += rlp2.ListPrefixLen(withdrawalLen) + withdrawalLen
}
- if withdrawalsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalsLen)))
- }
- payloadSize += withdrawalsLen
+ payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen
}
return payloadSize, txsLen, unclesLen, withdrawalsLen
@@ -836,9 +808,6 @@ func (rb *RawBody) DecodeRLP(s *rlp.Stream) error {
}
func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen int) {
-
- payloadSize++
-
baseTxIdLen := 1 + rlp.IntLenExcludingHead(bfs.BaseTxId)
txAmountLen := 1 + rlp.IntLenExcludingHead(uint64(bfs.TxAmount))
@@ -847,33 +816,18 @@ func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen
// size of Uncles
for _, uncle := range bfs.Uncles {
- unclesLen++
uncleLen := uncle.EncodingSize()
- if uncleLen >= 56 {
- unclesLen += libcommon.BitLenToByteLen(bits.Len(uint(uncleLen)))
- }
- unclesLen += uncleLen
- }
- if unclesLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(unclesLen)))
+ unclesLen += rlp2.ListPrefixLen(uncleLen) + uncleLen
}
- payloadSize += unclesLen
+ payloadSize += rlp2.ListPrefixLen(unclesLen) + unclesLen
// size of Withdrawals
if bfs.Withdrawals != nil {
- payloadSize++
for _, withdrawal := range bfs.Withdrawals {
- withdrawalsLen++
withdrawalLen := withdrawal.EncodingSize()
- if withdrawalLen >= 56 {
- withdrawalLen += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalLen)))
- }
- withdrawalsLen += withdrawalLen
- }
- if withdrawalsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalsLen)))
+ withdrawalsLen += rlp2.ListPrefixLen(withdrawalLen) + withdrawalLen
}
- payloadSize += withdrawalsLen
+ payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen
}
return payloadSize, unclesLen, withdrawalsLen
@@ -995,50 +949,26 @@ func (bb Body) EncodingSize() int {
func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) {
// size of Transactions
- payloadSize++
for _, tx := range bb.Transactions {
- txsLen++
txLen := tx.EncodingSize()
- if txLen >= 56 {
- txsLen += libcommon.BitLenToByteLen(bits.Len(uint(txLen)))
- }
- txsLen += txLen
- }
- if txsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(txsLen)))
+ txsLen += rlp2.ListPrefixLen(txLen) + txLen
}
- payloadSize += txsLen
+ payloadSize += rlp2.ListPrefixLen(txsLen) + txsLen
// size of Uncles
- payloadSize++
for _, uncle := range bb.Uncles {
- unclesLen++
uncleLen := uncle.EncodingSize()
- if uncleLen >= 56 {
- unclesLen += libcommon.BitLenToByteLen(bits.Len(uint(uncleLen)))
- }
- unclesLen += uncleLen
- }
- if unclesLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(unclesLen)))
+ unclesLen += rlp2.ListPrefixLen(uncleLen) + uncleLen
}
- payloadSize += unclesLen
+ payloadSize += rlp2.ListPrefixLen(unclesLen) + unclesLen
// size of Withdrawals
if bb.Withdrawals != nil {
- payloadSize++
for _, withdrawal := range bb.Withdrawals {
- withdrawalsLen++
withdrawalLen := withdrawal.EncodingSize()
- if withdrawalLen >= 56 {
- withdrawalLen += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalLen)))
- }
- withdrawalsLen += withdrawalLen
+ withdrawalsLen += rlp2.ListPrefixLen(withdrawalLen) + withdrawalLen
}
- if withdrawalsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalsLen)))
- }
- payloadSize += withdrawalsLen
+ payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen
}
return payloadSize, txsLen, unclesLen, withdrawalsLen
@@ -1342,58 +1272,30 @@ func (bb *Block) DecodeRLP(s *rlp.Stream) error {
func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) {
// size of Header
- payloadSize++
headerLen := bb.header.EncodingSize()
- if headerLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(headerLen)))
- }
- payloadSize += headerLen
+ payloadSize += rlp2.ListPrefixLen(headerLen) + headerLen
// size of Transactions
- payloadSize++
for _, tx := range bb.transactions {
- txsLen++
txLen := tx.EncodingSize()
- if txLen >= 56 {
- txsLen += libcommon.BitLenToByteLen(bits.Len(uint(txLen)))
- }
- txsLen += txLen
- }
- if txsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(txsLen)))
+ txsLen += rlp2.ListPrefixLen(txLen) + txLen
}
- payloadSize += txsLen
+ payloadSize += rlp2.ListPrefixLen(txsLen) + txsLen
// size of Uncles
- payloadSize++
for _, uncle := range bb.uncles {
- unclesLen++
uncleLen := uncle.EncodingSize()
- if uncleLen >= 56 {
- unclesLen += libcommon.BitLenToByteLen(bits.Len(uint(uncleLen)))
- }
- unclesLen += uncleLen
- }
- if unclesLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(unclesLen)))
+ unclesLen += rlp2.ListPrefixLen(uncleLen) + uncleLen
}
- payloadSize += unclesLen
+ payloadSize += rlp2.ListPrefixLen(unclesLen) + unclesLen
// size of Withdrawals
if bb.withdrawals != nil {
- payloadSize++
for _, withdrawal := range bb.withdrawals {
- withdrawalsLen++
withdrawalLen := withdrawal.EncodingSize()
- if withdrawalLen >= 56 {
- withdrawalLen += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalLen)))
- }
- withdrawalsLen += withdrawalLen
+ withdrawalsLen += rlp2.ListPrefixLen(withdrawalLen) + withdrawalLen
}
- if withdrawalsLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(withdrawalsLen)))
- }
- payloadSize += withdrawalsLen
+ payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen
}
return payloadSize, txsLen, unclesLen, withdrawalsLen
@@ -1477,7 +1379,7 @@ func (b *Block) ParentHash() libcommon.Hash { return b.header.ParentHash }
func (b *Block) TxHash() libcommon.Hash { return b.header.TxHash }
func (b *Block) ReceiptHash() libcommon.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() libcommon.Hash { return b.header.UncleHash }
-func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
+func (b *Block) Extra() []byte { return libcommon.CopyBytes(b.header.Extra) }
func (b *Block) BaseFee() *big.Int {
if b.header.BaseFee == nil {
return nil
@@ -1552,14 +1454,24 @@ func (b *Block) SanityCheck() error {
return b.header.SanityCheck()
}
-// HashCheck checks that uncle, transaction, and withdrawals hashes are correct.
+// HashCheck checks that transactions, receipts, uncles and withdrawals hashes are correct.
func (b *Block) HashCheck() error {
- if hash := CalcUncleHash(b.Uncles()); hash != b.UncleHash() {
- return fmt.Errorf("block has invalid uncle hash: have %x, exp: %x", hash, b.UncleHash())
- }
if hash := DeriveSha(b.Transactions()); hash != b.TxHash() {
return fmt.Errorf("block has invalid transaction hash: have %x, exp: %x", hash, b.TxHash())
}
+
+ if len(b.transactions) > 0 && b.ReceiptHash() == EmptyRootHash {
+ return fmt.Errorf("block has empty receipt hash: %x but it includes %x transactions", b.ReceiptHash(), len(b.transactions))
+ }
+
+ if len(b.transactions) == 0 && b.ReceiptHash() != EmptyRootHash {
+ return fmt.Errorf("block has non-empty receipt hash: %x but no transactions", b.ReceiptHash())
+ }
+
+ if hash := CalcUncleHash(b.Uncles()); hash != b.UncleHash() {
+ return fmt.Errorf("block has invalid uncle hash: have %x, exp: %x", hash, b.UncleHash())
+ }
+
if b.WithdrawalsHash() == nil {
if b.Withdrawals() != nil {
return errors.New("header missing WithdrawalsHash")
@@ -1598,9 +1510,15 @@ func CopyTxs(in Transactions) Transactions {
if err != nil {
panic(fmt.Errorf("DecodeTransactions failed: %w", err))
}
- for i := 0; i < len(in); i++ {
- if s, ok := in[i].GetSender(); ok {
- out[i].SetSender(s)
+ for i, tx := range in {
+ if txWrapper, ok := tx.(*BlobTxWrapper); ok {
+ blobTx := out[i].(*BlobTx)
+ out[i] = &BlobTxWrapper{
+ Tx: *blobTx,
+ Commitments: txWrapper.Commitments.copy(),
+ Blobs: txWrapper.Blobs.copy(),
+ Proofs: txWrapper.Proofs.copy(),
+ }
}
}
return out
diff --git a/core/types/block_test.go b/core/types/block_test.go
index d42fe6b0823..4e2a8303d3f 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -28,6 +28,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
types2 "github.com/ledgerwatch/erigon-lib/types"
+ "github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -39,8 +40,89 @@ import (
"github.com/ledgerwatch/erigon/rlp"
)
+// the following 2 functions are replica for the test
+// This is a replica of `bor.GetValidatorBytes` function
+// This was needed because currently, `IsParallelUniverse` will always return false.
+func GetValidatorBytesTest(h *Header) []byte {
+ if len(h.Extra) < ExtraVanityLength+ExtraSealLength {
+ log.Error("length of extra is less than vanity and seal")
+ return nil
+ }
+
+ var blockExtraData BlockExtraDataTest
+ if err := rlp.DecodeBytes(h.Extra[ExtraVanityLength:len(h.Extra)-ExtraSealLength], &blockExtraData); err != nil {
+ log.Error("error while decoding block extra data", "err", err)
+ return nil
+ }
+
+ return blockExtraData.ValidatorBytes
+}
+
+func GetTxDependencyTest(b *Block) [][]uint64 {
+ if len(b.header.Extra) < ExtraVanityLength+ExtraSealLength {
+ log.Error("length of extra less is than vanity and seal")
+ return nil
+ }
+
+ var blockExtraData BlockExtraDataTest
+ if err := rlp.DecodeBytes(b.header.Extra[ExtraVanityLength:len(b.header.Extra)-ExtraSealLength], &blockExtraData); err != nil {
+ log.Error("error while decoding block extra data", "err", err)
+ return nil
+ }
+
+ return blockExtraData.TxDependency
+}
+
+type BlockExtraDataTest struct {
+ // Validator bytes of bor
+ ValidatorBytes []byte
+
+ // length of TxDependency -> n (n = number of transactions in the block)
+ // length of TxDependency[i] -> k (k = a whole number)
+ // k elements in TxDependency[i] -> transaction indexes on which transaction i is dependent on
+ TxDependency [][]uint64
+}
+
+func TestTxDependencyBlockDecoding(t *testing.T) {
+ t.Parallel()
+
+ blockEnc := common.FromHex("f90270f9026ba00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8825208845506eb07b8710000000000000000000000000000000000000000000000000000000000000000cf8776616c20736574c6c20201c201800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff498880000000000000000c0c0")
+
+ var block Block
+
+ if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
+ t.Fatal("decode error: ", err)
+ }
+ check := func(f string, got, want interface{}) {
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%s mismatch: got %v, want %v", f, got, want)
+ }
+ }
+
+ check("Coinbase", block.Coinbase(), libcommon.HexToAddress("8888f1f195afa192cfee860698584c030f4c9db1"))
+ check("MixDigest", block.MixDigest(), libcommon.HexToHash("bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff498"))
+ check("Root", block.Root(), libcommon.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
+ check("Time", block.Time(), uint64(1426516743))
+
+ validatorBytes := GetValidatorBytesTest(block.header)
+ txDependency := GetTxDependencyTest(&block)
+
+ check("validatorBytes", validatorBytes, []byte("val set"))
+ check("txDependency", txDependency, [][]uint64{{2, 1}, {1, 0}})
+
+ ourBlockEnc, err := rlp.EncodeToBytes(&block)
+
+ if err != nil {
+ t.Fatal("encode error: ", err)
+ }
+ if !bytes.Equal(ourBlockEnc, blockEnc) {
+ t.Errorf("encoded block mismatch:\ngot: %x\nwant: %x", ourBlockEnc, blockEnc)
+ }
+}
+
// from bcValidBlockTest.json, "SimpleTx"
func TestBlockEncoding(t *testing.T) {
+ t.Parallel()
blockEnc := common.FromHex("f90260f901f9a083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4f861f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1c0")
var block Block
if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
@@ -64,7 +146,7 @@ func TestBlockEncoding(t *testing.T) {
check("Size", block.Size(), common.StorageSize(len(blockEnc)))
var tx1 Transaction = NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil)
- tx1, _ = tx1.WithSignature(*LatestSignerForChainID(nil), common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
+ tx1, _ = tx1.WithSignature(*LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
check("len(Transactions)", len(block.Transactions()), 1)
check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash())
ourBlockEnc, err := rlp.EncodeToBytes(&block)
@@ -77,6 +159,7 @@ func TestBlockEncoding(t *testing.T) {
}
func TestEIP1559BlockEncoding(t *testing.T) {
+ t.Parallel()
blockEnc := common.FromHex("f9030bf901fea083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4843b9aca00f90106f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b8a302f8a0018080843b9aca008301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8c0")
var block Block
if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
@@ -102,7 +185,7 @@ func TestEIP1559BlockEncoding(t *testing.T) {
check("BaseFee", block.BaseFee(), new(big.Int).SetUint64(params.InitialBaseFee))
var tx1 Transaction = NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), new(uint256.Int).SetUint64(10), 50000, new(uint256.Int).SetUint64(10), nil)
- tx1, _ = tx1.WithSignature(*LatestSignerForChainID(nil), common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
+ tx1, _ = tx1.WithSignature(*LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
addr := libcommon.HexToAddress("0x0000000000000000000000000000000000000001")
accesses := types2.AccessList{types2.AccessTuple{
@@ -125,7 +208,7 @@ func TestEIP1559BlockEncoding(t *testing.T) {
Tip: u256.Num0,
AccessList: accesses,
}
- tx2, err := tx2.WithSignature(*LatestSignerForChainID(big.NewInt(1)), common.Hex2Bytes("fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a800"))
+ tx2, err := tx2.WithSignature(*LatestSignerForChainID(big.NewInt(1)), libcommon.Hex2Bytes("fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a800"))
if err != nil {
t.Fatal("invalid signature error: ", err)
}
@@ -144,6 +227,7 @@ func TestEIP1559BlockEncoding(t *testing.T) {
}
func TestEIP2718BlockEncoding(t *testing.T) {
+ t.Parallel()
blockEnc := common.FromHex("f90319f90211a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a0e6e49996c7ec59f7a23d22b83239a60151512c65613bf84a0d7da336399ebc4aa0cafe75574d59780665a97fbfd11365c7545aa8f1abf4e5e12e8243334ef7286bb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000820200832fefd882a410845506eb0796636f6f6c65737420626c6f636b206f6e20636861696ea0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4f90101f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b89e01f89b01800a8301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000001a03dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335a0476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef14c0")
var block Block
if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
@@ -177,7 +261,7 @@ func TestEIP2718BlockEncoding(t *testing.T) {
},
GasPrice: ten,
}
- sig := common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")
+ sig := libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")
tx1, _ = tx1.WithSignature(*LatestSignerForChainID(nil), sig)
chainID, _ := uint256.FromBig(big.NewInt(1))
@@ -195,7 +279,7 @@ func TestEIP2718BlockEncoding(t *testing.T) {
},
AccessList: types2.AccessList{{Address: addr, StorageKeys: []libcommon.Hash{{0}}}},
}
- sig2 := common.Hex2Bytes("3dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef1401")
+ sig2 := libcommon.Hex2Bytes("3dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef1401")
tx2, _ = tx2.WithSignature(*LatestSignerForChainID(big.NewInt(1)), sig2)
check("len(Transactions)", len(block.Transactions()), 2)
@@ -213,6 +297,7 @@ func TestEIP2718BlockEncoding(t *testing.T) {
}
func TestUncleHash(t *testing.T) {
+ t.Parallel()
uncles := make([]*Header, 0)
h := CalcUncleHash(uncles)
exp := libcommon.HexToHash("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
@@ -277,6 +362,7 @@ func makeBenchBlock() *Block {
}
func TestCanEncodeAndDecodeRawBody(t *testing.T) {
+ t.Parallel()
body := &RawBody{
Uncles: []*Header{
{
@@ -319,12 +405,12 @@ func TestCanEncodeAndDecodeRawBody(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- rlpBytes := common.CopyBytes(writer.Bytes())
+ rlpBytes := libcommon.CopyBytes(writer.Bytes())
writer.Reset()
writer.WriteString(hexutility.Encode(rlpBytes))
var rawBody RawBody
- fromHex := common.CopyBytes(common.FromHex(writer.String()))
+ fromHex := libcommon.CopyBytes(common.FromHex(writer.String()))
bodyReader := bytes.NewReader(fromHex)
stream := rlp.NewStream(bodyReader, 0)
@@ -359,6 +445,7 @@ func TestCanEncodeAndDecodeRawBody(t *testing.T) {
}
func TestAuRaHeaderEncoding(t *testing.T) {
+ t.Parallel()
difficulty, ok := new(big.Int).SetString("8398142613866510000000000000000000000000000000", 10)
require.True(t, ok)
@@ -390,6 +477,7 @@ func TestAuRaHeaderEncoding(t *testing.T) {
}
func TestWithdrawalsEncoding(t *testing.T) {
+ t.Parallel()
header := Header{
ParentHash: libcommon.HexToHash("0x8b00fcf1e541d371a3a1b79cc999a85cc3db5ee5637b5159646e1acd3613fd15"),
Coinbase: libcommon.HexToAddress("0x571846e42308df2dad8ed792f44a8bfddf0acb4d"),
@@ -443,6 +531,7 @@ func TestWithdrawalsEncoding(t *testing.T) {
}
func TestBlockRawBodyPreShanghai(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const rawBodyForStorageRlp = "f901f4c0f901f0f901eda00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
@@ -457,6 +546,7 @@ func TestBlockRawBodyPreShanghai(t *testing.T) {
}
func TestBlockRawBodyPostShanghaiNoWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const rawBodyForStorageRlp = "f901f5c0f901f0f901eda00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0"
@@ -472,6 +562,7 @@ func TestBlockRawBodyPostShanghaiNoWithdrawals(t *testing.T) {
}
func TestBlockRawBodyPostShanghaiWithdrawals(t *testing.T) {
+ t.Parallel()
require := require.New(t)
const rawBodyForStorageRlp = "f90230c0f901f0f901eda00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f83adc0f82157c94ff000000000000000000000000000000000000008203e8dc1082157d94ff000000000000000000000000000000000000008203e9"
@@ -486,3 +577,29 @@ func TestBlockRawBodyPostShanghaiWithdrawals(t *testing.T) {
require.Equal(0, len(body.Transactions))
require.Equal(2, len(body.Withdrawals))
}
+
+func TestCopyTxs(t *testing.T) {
+ var txs Transactions
+ txs = append(txs, &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 0,
+ Value: new(uint256.Int).SetUint64(10000),
+ Gas: 50000,
+ Data: []byte("Sparta"),
+ },
+ GasPrice: new(uint256.Int).SetUint64(10),
+ })
+
+ populateBlobTxs()
+ for _, tx := range dummyBlobTxs {
+ txs = append(txs, tx)
+ }
+
+ populateBlobWrapperTxs()
+ for _, tx := range dummyBlobWrapperTxs {
+ txs = append(txs, tx)
+ }
+
+ copies := CopyTxs(txs)
+ assert.Equal(t, txs, copies)
+}
diff --git a/core/types/bloom9_test.go b/core/types/bloom9_test.go
index 97b1f8b9f8e..d6e2a469254 100644
--- a/core/types/bloom9_test.go
+++ b/core/types/bloom9_test.go
@@ -28,6 +28,7 @@ import (
)
func TestBloom(t *testing.T) {
+ t.Parallel()
positive := []string{
"testtest",
"test",
@@ -58,6 +59,7 @@ func TestBloom(t *testing.T) {
// TestBloomExtensively does some more thorough tests
func TestBloomExtensively(t *testing.T) {
+ t.Parallel()
var exp = libcommon.HexToHash("c8d3ca65cdb4874300a9e39475508f23ed6da09fdbc487f89a2dcf50b09eb263")
var b Bloom
// Add 100 "random" things
diff --git a/core/types/bor_receipt.go b/core/types/bor_receipt.go
index a993de490c4..16b30605a9d 100644
--- a/core/types/bor_receipt.go
+++ b/core/types/bor_receipt.go
@@ -1,13 +1,13 @@
package types
import (
+ "github.com/ledgerwatch/erigon-lib/kv/dbutils"
"math/big"
"sort"
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/crypto"
)
diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go
index 0fa8b865569..5ee0cf037d9 100644
--- a/core/types/dynamic_fee_tx.go
+++ b/core/types/dynamic_fee_tx.go
@@ -21,15 +21,14 @@ import (
"fmt"
"io"
"math/big"
- "math/bits"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
types2 "github.com/ledgerwatch/erigon-lib/types"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/rlp"
)
@@ -82,7 +81,7 @@ func (tx DynamicFeeTransaction) copy() *DynamicFeeTransaction {
},
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
- Data: common.CopyBytes(tx.Data),
+ Data: libcommon.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are copied below.
Value: new(uint256.Int),
@@ -117,13 +116,8 @@ func (tx DynamicFeeTransaction) GetAccessList() types2.AccessList {
func (tx DynamicFeeTransaction) EncodingSize() int {
payloadSize, _, _, _ := tx.payloadSize()
- envelopeSize := payloadSize
// Add envelope size and type size
- if payloadSize >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize)))
- }
- envelopeSize += 2
- return envelopeSize
+ return 1 + rlp2.ListPrefixLen(payloadSize) + payloadSize
}
func (tx DynamicFeeTransaction) payloadSize() (payloadSize int, nonceLen, gasLen, accessListLen int) {
@@ -153,26 +147,10 @@ func (tx DynamicFeeTransaction) payloadSize() (payloadSize int, nonceLen, gasLen
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(tx.Value)
// size of Data
- payloadSize++
- switch len(tx.Data) {
- case 0:
- case 1:
- if tx.Data[0] >= 128 {
- payloadSize++
- }
- default:
- if len(tx.Data) >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(len(tx.Data))))
- }
- payloadSize += len(tx.Data)
- }
+ payloadSize += rlp2.StringLen(tx.Data)
// size of AccessList
- payloadSize++
accessListLen = accessListSize(tx.AccessList)
- if accessListLen >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(accessListLen)))
- }
- payloadSize += accessListLen
+ payloadSize += rlp2.ListPrefixLen(accessListLen) + accessListLen
// size of V
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(&tx.V)
@@ -296,12 +274,8 @@ func (tx DynamicFeeTransaction) encodePayload(w io.Writer, b []byte, payloadSize
func (tx DynamicFeeTransaction) EncodeRLP(w io.Writer) error {
payloadSize, nonceLen, gasLen, accessListLen := tx.payloadSize()
- envelopeSize := payloadSize
- if payloadSize >= 56 {
- envelopeSize += libcommon.BitLenToByteLen(bits.Len(uint(payloadSize)))
- }
// size of struct prefix and TxType
- envelopeSize += 2
+ envelopeSize := 1 + rlp2.ListPrefixLen(payloadSize) + payloadSize
var b [33]byte
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
diff --git a/core/types/gen_erigon_log_json.go b/core/types/gen_erigon_log_json.go
index 2360e2fdba0..02e6505047c 100644
--- a/core/types/gen_erigon_log_json.go
+++ b/core/types/gen_erigon_log_json.go
@@ -5,11 +5,10 @@ package types
import (
"encoding/json"
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var _ = (*logMarshaling)(nil)
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 17b70798b48..9eee9eeb7d2 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -5,12 +5,11 @@ package types
import (
"encoding/json"
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var _ = (*headerMarshaling)(nil)
diff --git a/core/types/gen_log_json.go b/core/types/gen_log_json.go
index e3db5873dc4..abd1dfd2ebe 100644
--- a/core/types/gen_log_json.go
+++ b/core/types/gen_log_json.go
@@ -5,11 +5,10 @@ package types
import (
"encoding/json"
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var _ = (*logMarshaling)(nil)
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index 8a898648386..34b64b591b7 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -5,12 +5,11 @@ package types
import (
"encoding/json"
"errors"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var _ = (*receiptMarshaling)(nil)
diff --git a/core/types/gen_withdrawal_json.go b/core/types/gen_withdrawal_json.go
index c56eb61537d..6ed318cc54b 100644
--- a/core/types/gen_withdrawal_json.go
+++ b/core/types/gen_withdrawal_json.go
@@ -4,9 +4,9 @@ package types
import (
"encoding/json"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var _ = (*withdrawalMarshaling)(nil)
diff --git a/core/types/genesis.go b/core/types/genesis.go
index 7f41e39d616..d6f0e8cbc21 100644
--- a/core/types/genesis.go
+++ b/core/types/genesis.go
@@ -41,39 +41,35 @@ var ErrGenesisNoConfig = errors.New("genesis has no chain configuration")
// Genesis specifies the header fields, state of a genesis block. It also defines hard
// fork switch-over blocks through the chain configuration.
type Genesis struct {
- Config *chain.Config `json:"config"`
- Nonce uint64 `json:"nonce"`
- Timestamp uint64 `json:"timestamp"`
- ExtraData []byte `json:"extraData"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- Difficulty *big.Int `json:"difficulty" gencodec:"required"`
- Mixhash common.Hash `json:"mixHash"`
- Coinbase common.Address `json:"coinbase"`
- BaseFee *big.Int `json:"baseFeePerGas"`
- BlobGasUsed *uint64 `json:"blobGasUsed"`
- ExcessBlobGas *uint64 `json:"excessBlobGas"`
- Alloc GenesisAlloc `json:"alloc" gencodec:"required"`
- AuRaStep uint64 `json:"auRaStep"`
- AuRaSeal []byte `json:"auRaSeal"`
- ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
+ Config *chain.Config `json:"config"`
+ Nonce uint64 `json:"nonce"`
+ Timestamp uint64 `json:"timestamp"`
+ ExtraData []byte `json:"extraData"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *big.Int `json:"difficulty" gencodec:"required"`
+ Mixhash common.Hash `json:"mixHash"`
+ Coinbase common.Address `json:"coinbase"`
+ Alloc GenesisAlloc `json:"alloc" gencodec:"required"`
+
+ AuRaStep uint64 `json:"auRaStep"`
+ AuRaSeal []byte `json:"auRaSeal"`
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
+
+ // Header fields added in London and later hard forks
+ BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
+ BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
+ ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` // EIP-4788
}
// GenesisAlloc specifies the initial state that is part of the genesis block.
type GenesisAlloc map[common.Address]GenesisAccount
-type AuthorityRoundSeal struct {
- /// Seal step.
- Step uint64 `json:"step"`
- /// Seal signature.
- Signature common.Hash `json:"signature"`
-}
-
func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
m := make(map[common2.UnprefixedAddress]GenesisAccount)
if err := json.Unmarshal(data, &m); err != nil {
@@ -86,6 +82,21 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil
}
+func DecodeGenesisAlloc(i interface{}) (GenesisAlloc, error) {
+ var alloc GenesisAlloc
+
+ b, err := json.Marshal(i)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := json.Unmarshal(b, &alloc); err != nil {
+ return nil, err
+ }
+
+ return alloc, nil
+}
+
// GenesisAccount is an account in the state of the genesis block.
// Either use "constructor" for deployment code or "code" directly for the final code.
type GenesisAccount struct {
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 5150a5b9a08..56d5e8235d4 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -8,7 +8,6 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/trie"
)
@@ -25,6 +24,7 @@ func genTransactions(n uint64) Transactions {
}
func TestEncodeUint(t *testing.T) {
+ t.Parallel()
for i := 0; i < 64000; i++ {
bbOld := bytes.NewBuffer(make([]byte, 10))
bbNew := bytes.NewBuffer(make([]byte, 10))
@@ -42,6 +42,7 @@ func TestEncodeUint(t *testing.T) {
}
func TestDeriveSha(t *testing.T) {
+ t.Parallel()
tests := []DerivableList{
Transactions{},
genTransactions(1),
@@ -84,7 +85,7 @@ func legacyDeriveSha(list DerivableList) libcommon.Hash {
valbuf.Reset()
_ = rlp.Encode(keybuf, uint(i))
list.EncodeIndex(i, valbuf)
- trie.Update(keybuf.Bytes(), common.CopyBytes(valbuf.Bytes()))
+ trie.Update(keybuf.Bytes(), libcommon.CopyBytes(valbuf.Bytes()))
}
return trie.Hash()
}
diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go
index 92c12f2f964..7e183d6b630 100644
--- a/core/types/legacy_tx.go
+++ b/core/types/legacy_tx.go
@@ -21,14 +21,13 @@ import (
"fmt"
"io"
"math/big"
- "math/bits"
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ rlp2 "github.com/ledgerwatch/erigon-lib/rlp"
types2 "github.com/ledgerwatch/erigon-lib/types"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/rlp"
)
@@ -175,7 +174,7 @@ func (tx LegacyTx) copy() *LegacyTx {
},
Nonce: tx.Nonce,
To: tx.To, // TODO: copy pointed-to address
- Data: common.CopyBytes(tx.Data),
+ Data: libcommon.CopyBytes(tx.Data),
Gas: tx.Gas,
// These are initialized below.
Value: new(uint256.Int),
@@ -215,19 +214,7 @@ func (tx LegacyTx) payloadSize() (payloadSize int, nonceLen, gasLen int) {
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(tx.Value)
// size of Data
- payloadSize++
- switch len(tx.Data) {
- case 0:
- case 1:
- if tx.Data[0] >= 128 {
- payloadSize++
- }
- default:
- if len(tx.Data) >= 56 {
- payloadSize += libcommon.BitLenToByteLen(bits.Len(uint(len(tx.Data))))
- }
- payloadSize += len(tx.Data)
- }
+ payloadSize += rlp2.StringLen(tx.Data)
// size of V
payloadSize++
payloadSize += rlp.Uint256LenExcludingHead(&tx.V)
diff --git a/core/types/log.go b/core/types/log.go
index 552300f49a6..f566bf0c372 100644
--- a/core/types/log.go
+++ b/core/types/log.go
@@ -17,12 +17,12 @@
package types
import (
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"io"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/rlp"
)
diff --git a/core/types/log_test.go b/core/types/log_test.go
index 408bb33aaef..c6ecbc348f1 100644
--- a/core/types/log_test.go
+++ b/core/types/log_test.go
@@ -22,10 +22,10 @@ import (
"reflect"
"testing"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
"github.com/davecgh/go-spew/spew"
libcommon "github.com/ledgerwatch/erigon-lib/common"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
var unmarshalLogTests = map[string]struct {
@@ -106,6 +106,7 @@ var unmarshalLogTests = map[string]struct {
}
func TestUnmarshalLog(t *testing.T) {
+ t.Parallel()
dumper := spew.ConfigState{DisableMethods: true, Indent: " "}
for name, test := range unmarshalLogTests {
var log *Log
@@ -136,6 +137,7 @@ func checkError(t *testing.T, testname string, got, want error) bool {
}
func TestFilterLogsTopics(t *testing.T) {
+ t.Parallel()
// hashes and addresses to make test more readable
var (
A libcommon.Hash = [32]byte{1}
diff --git a/core/types/receipt.go b/core/types/receipt.go
index bc3a4d9a7ba..e5689de13f0 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -20,13 +20,13 @@ import (
"bytes"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"io"
"math/big"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/rlp"
)
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index cef57f007db..4eb2f1a9d67 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -35,6 +35,7 @@ import (
)
func TestDecodeEmptyTypedReceipt(t *testing.T) {
+ t.Parallel()
input := []byte{0x80}
var r Receipt
err := rlp.DecodeBytes(input, &r)
@@ -44,6 +45,7 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
}
func TestLegacyReceiptDecoding(t *testing.T) {
+ t.Parallel()
tests := []struct {
name string
encode func(*Receipt) ([]byte, error)
@@ -78,7 +80,9 @@ func TestLegacyReceiptDecoding(t *testing.T) {
receipt.Bloom = CreateBloom(Receipts{receipt})
for _, tc := range tests {
+ tc := tc
t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
enc, err := tc.encode(receipt)
if err != nil {
t.Fatalf("Error encoding receipt: %v", err)
@@ -126,6 +130,7 @@ func encodeAsStoredReceiptRLP(want *Receipt) ([]byte, error) {
// Tests that receipt data can be correctly derived from the contextual infos
func TestDeriveFields(t *testing.T) {
+ t.Parallel()
// Create a few transactions to have receipts for
to2 := libcommon.HexToAddress("0x2")
to3 := libcommon.HexToAddress("0x3")
@@ -262,6 +267,7 @@ func TestDeriveFields(t *testing.T) {
// TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt
// rlp decoder, which failed due to a shadowing error.
func TestTypedReceiptEncodingDecoding(t *testing.T) {
+ t.Parallel()
var payload = common.FromHex("f9043eb9010c01f90108018262d4b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010c01f901080182cd14b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010d01f901090183013754b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010d01f90109018301a194b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0")
check := func(bundle []*Receipt) {
t.Helper()
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 61cdedd3fe3..079b008791d 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -35,7 +35,6 @@ import (
"github.com/ledgerwatch/erigon-lib/common/fixedgas"
types2 "github.com/ledgerwatch/erigon-lib/types"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/rlp"
@@ -95,7 +94,7 @@ type Transaction interface {
GetSender() (libcommon.Address, bool)
SetSender(libcommon.Address)
IsContractDeploy() bool
- Unwrap() Transaction // If this is a network wrapper, returns the unwrapped tx. Otherwiwes returns itself.
+ Unwrap() Transaction // If this is a network wrapper, returns the unwrapped tx. Otherwise returns itself.
}
// TransactionMisc is collection of miscelaneous fields for transaction that is supposed to be embedded into concrete
@@ -246,7 +245,7 @@ func MarshalTransactionsBinary(txs Transactions) ([][]byte, error) {
if err != nil {
return nil, err
}
- result[i] = common.CopyBytes(buf.Bytes())
+ result[i] = libcommon.CopyBytes(buf.Bytes())
}
return result, nil
}
@@ -372,7 +371,7 @@ func (s *TxByPriceAndTime) Pop() interface{} {
old := *s
n := len(old)
x := old[n-1]
- old[n-1] = nil
+ old[n-1] = nil // avoid memory leak
*s = old[0 : n-1]
return x
}
@@ -500,6 +499,7 @@ func (t *TransactionsFixedOrder) Peek() Transaction {
// Shift replaces the current best head with the next one from the same account.
func (t *TransactionsFixedOrder) Shift() {
+ t.Transactions[0] = nil // avoid memory leak
t.Transactions = t.Transactions[1:]
}
@@ -507,6 +507,7 @@ func (t *TransactionsFixedOrder) Shift() {
// the same account. This should be used when a transaction cannot be executed
// and hence all subsequent ones should be discarded from the same account.
func (t *TransactionsFixedOrder) Pop() {
+ t.Transactions[0] = nil // avoid memory leak
t.Transactions = t.Transactions[1:]
}
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index c2d460a6643..b529e517988 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"github.com/holiman/uint256"
"github.com/valyala/fastjson"
@@ -11,8 +12,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
types2 "github.com/ledgerwatch/erigon-lib/types"
-
- "github.com/ledgerwatch/erigon/common/hexutil"
)
// txJSON is the JSON representation of transactions.
diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go
index 76e306528ea..6c669f5f9fd 100644
--- a/core/types/transaction_signing_test.go
+++ b/core/types/transaction_signing_test.go
@@ -23,11 +23,11 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/crypto"
)
func TestEIP1559Signing(t *testing.T) {
+ t.Parallel()
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -48,6 +48,7 @@ func TestEIP1559Signing(t *testing.T) {
}
func TestEIP155Signing(t *testing.T) {
+ t.Parallel()
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -67,6 +68,7 @@ func TestEIP155Signing(t *testing.T) {
}
func TestEIP155ChainId(t *testing.T) {
+ t.Parallel()
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -99,6 +101,7 @@ func TestEIP155ChainId(t *testing.T) {
}
func TestEIP155SigningVitalik(t *testing.T) {
+ t.Parallel()
// Test vectors come from http://vitalik.ca/files/eip155_testvec.txt
for i, test := range []struct {
txRlp, addr string
@@ -116,7 +119,7 @@ func TestEIP155SigningVitalik(t *testing.T) {
} {
signer := LatestSignerForChainID(big.NewInt(1))
- tx, err := DecodeTransaction(common.Hex2Bytes(test.txRlp))
+ tx, err := DecodeTransaction(libcommon.Hex2Bytes(test.txRlp))
if err != nil {
t.Errorf("%d: %v", i, err)
continue
@@ -137,6 +140,7 @@ func TestEIP155SigningVitalik(t *testing.T) {
}
func TestChainId(t *testing.T) {
+ t.Parallel()
key, _ := defaultTestKey()
var tx Transaction = NewTransaction(0, libcommon.Address{}, new(uint256.Int), 0, new(uint256.Int), nil)
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 162906fe276..cdb05b69680 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -63,7 +63,7 @@ var (
common.FromHex("5544"),
).WithSignature(
*LatestSignerForChainID(nil),
- common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"),
+ libcommon.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"),
)
emptyEip2718Tx = &AccessListTx{
@@ -82,7 +82,7 @@ var (
signedEip2718Tx, _ = emptyEip2718Tx.WithSignature(
*LatestSignerForChainID(big.NewInt(1)),
- common.Hex2Bytes("c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b266032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d3752101"),
+ libcommon.Hex2Bytes("c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b266032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d3752101"),
)
dynFeeTx = &DynamicFeeTransaction{
@@ -100,11 +100,12 @@ var (
signedDynFeeTx, _ = dynFeeTx.WithSignature(
*LatestSignerForChainID(big.NewInt(1)),
- common.Hex2Bytes("c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b266032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d3752101"),
+ libcommon.Hex2Bytes("c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b266032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d3752101"),
)
)
func TestDecodeEmptyInput(t *testing.T) {
+ t.Parallel()
input := []byte{}
_, err := DecodeTransaction(input)
if !errors.Is(err, io.EOF) {
@@ -113,6 +114,7 @@ func TestDecodeEmptyInput(t *testing.T) {
}
func TestDecodeEmptyTypedTx(t *testing.T) {
+ t.Parallel()
input := []byte{0x80}
_, err := DecodeTransaction(input)
if !errors.Is(err, rlp.EOL) {
@@ -121,6 +123,7 @@ func TestDecodeEmptyTypedTx(t *testing.T) {
}
func TestTransactionSigHash(t *testing.T) {
+ t.Parallel()
if emptyTx.SigningHash(nil) != libcommon.HexToHash("c775b99e7ad12f50d819fcd602390467e28141316969f4b57f0626f74fe3b386") {
t.Errorf("empty transaction hash mismatch, got %x", emptyTx.SigningHash(nil))
}
@@ -130,6 +133,7 @@ func TestTransactionSigHash(t *testing.T) {
}
func TestTransactionEncode(t *testing.T) {
+ t.Parallel()
txb, err := rlp.EncodeToBytes(rightvrsTx)
if err != nil {
t.Fatalf("encode error: %v", err)
@@ -142,6 +146,7 @@ func TestTransactionEncode(t *testing.T) {
}
func TestEIP2718TransactionSigHash(t *testing.T) {
+ t.Parallel()
if emptyEip2718Tx.SigningHash(big.NewInt(1)) != libcommon.HexToHash("49b486f0ec0a60dfbbca2d30cb07c9e8ffb2a2ff41f29a1ab6737475f6ff69f3") {
t.Errorf("empty EIP-2718 transaction hash mismatch, got %x", emptyEip2718Tx.SigningHash(big.NewInt(1)))
}
@@ -233,6 +238,7 @@ func TestEIP2930Signer(t *testing.T) {
}
func TestEIP2718TransactionEncode(t *testing.T) {
+ t.Parallel()
// RLP representation
{
have, err := rlp.EncodeToBytes(signedEip2718Tx)
@@ -260,6 +266,7 @@ func TestEIP2718TransactionEncode(t *testing.T) {
}
}
func TestEIP1559TransactionEncode(t *testing.T) {
+ t.Parallel()
{
var buf bytes.Buffer
if err := signedDynFeeTx.MarshalBinary(&buf); err != nil {
@@ -289,8 +296,9 @@ func defaultTestKey() (*ecdsa.PrivateKey, libcommon.Address) {
}
func TestRecipientEmpty(t *testing.T) {
+ t.Parallel()
_, addr := defaultTestKey()
- tx, err := decodeTx(common.Hex2Bytes("f8498080808080011ca09b16de9d5bdee2cf56c28d16275a4da68cd30273e2525f3959f5d62557489921a0372ebd8fb3345f7db7b5a86d42e24d36e983e259b0664ceb8c227ec9af572f3d"))
+ tx, err := decodeTx(libcommon.Hex2Bytes("f8498080808080011ca09b16de9d5bdee2cf56c28d16275a4da68cd30273e2525f3959f5d62557489921a0372ebd8fb3345f7db7b5a86d42e24d36e983e259b0664ceb8c227ec9af572f3d"))
if err != nil {
t.Fatal(err)
}
@@ -305,9 +313,10 @@ func TestRecipientEmpty(t *testing.T) {
}
func TestRecipientNormal(t *testing.T) {
+ t.Parallel()
_, addr := defaultTestKey()
- tx, err := decodeTx(common.Hex2Bytes("f85d80808094000000000000000000000000000000000000000080011ca0527c0d8f5c63f7b9f41324a7c8a563ee1190bcbf0dac8ab446291bdbf32f5c79a0552c4ef0a09a04395074dab9ed34d3fbfb843c2f2546cc30fe89ec143ca94ca6"))
+ tx, err := decodeTx(libcommon.Hex2Bytes("f85d80808094000000000000000000000000000000000000000080011ca0527c0d8f5c63f7b9f41324a7c8a563ee1190bcbf0dac8ab446291bdbf32f5c79a0552c4ef0a09a04395074dab9ed34d3fbfb843c2f2546cc30fe89ec143ca94ca6"))
if err != nil {
t.Fatal(err)
}
@@ -325,6 +334,7 @@ func TestRecipientNormal(t *testing.T) {
// decreasing order, but at the same time with increasing nonces when issued by
// the same account.
func TestTransactionPriceNonceSort(t *testing.T) {
+ t.Parallel()
// Generate a batch of accounts to start with
keys := make([]*ecdsa.PrivateKey, 25)
for i := 0; i < len(keys); i++ {
@@ -384,6 +394,7 @@ func TestTransactionPriceNonceSort(t *testing.T) {
// Tests that if multiple transactions have the same price, the ones seen earlier
// are prioritized to avoid network spam attacks aiming for a specific ordering.
func TestTransactionTimeSort(t *testing.T) {
+ t.Parallel()
// Generate a batch of accounts to start with
keys := make([]*ecdsa.PrivateKey, 5)
for i := 0; i < len(keys); i++ {
@@ -437,6 +448,7 @@ func TestTransactionTimeSort(t *testing.T) {
// TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON.
func TestTransactionCoding(t *testing.T) {
+ t.Parallel()
key, err := crypto.GenerateKey()
if err != nil {
t.Fatalf("could not generate key: %v", err)
@@ -563,19 +575,6 @@ func encodeDecodeBinary(tx Transaction) (Transaction, error) {
return parsedTx, nil
}
-func encodeDecodeWrappedBinary(tx *BlobTxWrapper) (*BlobTxWrapper, error) {
- var buf bytes.Buffer
- var err error
- if err = tx.MarshalBinary(&buf); err != nil {
- return nil, fmt.Errorf("rlp encoding failed: %w", err)
- }
- var parsedTx Transaction
- if parsedTx, err = UnmarshalWrappedTransactionFromBinary(buf.Bytes()); err != nil {
- return nil, fmt.Errorf("rlp decoding failed: %w", err)
- }
- return parsedTx.(*BlobTxWrapper), nil
-}
-
func assertEqual(orig Transaction, cpy Transaction) error {
// compare nonce, price, gaslimit, recipient, amount, payload, V, R, S
if want, got := orig.Hash(), cpy.Hash(); want != got {
@@ -686,12 +685,9 @@ func newRandBlobTx() *BlobTx {
To: randAddr(),
Value: uint256.NewInt(rand.Uint64()),
Data: randData(),
- // V: *uint256.NewInt(rand.Uint64()),
- // R: *uint256.NewInt(rand.Uint64()),
- // S: *uint256.NewInt(rand.Uint64()),
- V: *uint256.NewInt(0),
- R: *uint256.NewInt(rand.Uint64()),
- S: *uint256.NewInt(rand.Uint64()),
+ V: *uint256.NewInt(0),
+ R: *uint256.NewInt(rand.Uint64()),
+ S: *uint256.NewInt(rand.Uint64()),
},
ChainID: uint256.NewInt(rand.Uint64()),
Tip: uint256.NewInt(rand.Uint64()),
@@ -699,7 +695,7 @@ func newRandBlobTx() *BlobTx {
AccessList: randAccessList(),
},
MaxFeePerBlobGas: uint256.NewInt(rand.Uint64()),
- BlobVersionedHashes: randHashes(randIntInRange(0, 6)),
+ BlobVersionedHashes: randHashes(randIntInRange(1, 6)),
}
return stx
}
@@ -791,16 +787,9 @@ func populateBlobTxs() {
}
func populateBlobWrapperTxs() {
- for i := 0; i < N-1; i++ {
+ for i := 0; i < N; i++ {
dummyBlobWrapperTxs[i] = newRandBlobWrapper()
}
-
- dummyBlobWrapperTxs[N-1] = &BlobTxWrapper{
- Tx: *newRandBlobTx(),
- Commitments: nil,
- Blobs: nil,
- Proofs: nil,
- }
}
func TestBlobTxEncodeDecode(t *testing.T) {
@@ -825,38 +814,5 @@ func TestBlobTxEncodeDecode(t *testing.T) {
if err = assertEqual(dummyBlobTxs[i], tx); err != nil {
t.Fatal(err)
}
-
- }
-}
-
-func TestBlobTxWrappedEncodeDecode(t *testing.T) {
- rand.Seed(time.Now().UnixNano())
- populateBlobWrapperTxs()
- for i := 0; i < N; i++ {
- tx, err := encodeDecodeWrappedBinary(dummyBlobWrapperTxs[i])
- if err != nil {
- t.Fatal(err)
- }
- if err := assertEqual(dummyBlobWrapperTxs[i], tx); err != nil {
- t.Fatal(err)
- }
- if err := assertEqualBlobWrapper(dummyBlobWrapperTxs[i], tx); err != nil {
- t.Fatal(err)
- }
-
- // JSON
- // fails in ValidateBlobTransactionWrapper()
- // error during proof verification: invalid infinity point encoding
-
- // jtx, err := encodeDecodeJSON(dummyBlobWrapperTxs[i])
- // if err != nil {
- // t.Fatal(err)
- // }
- // if err = assertEqual(dummyBlobWrapperTxs[i], jtx); err != nil {
- // t.Fatal(err)
- // }
- // if err := assertEqualBlobWrapper(dummyBlobWrapperTxs[i], jtx.(*BlobTxWrapper)); err != nil {
- // t.Fatal(err)
- // }
}
}
diff --git a/core/types/withdrawal.go b/core/types/withdrawal.go
index c5ec8274a77..5dede6a0abc 100644
--- a/core/types/withdrawal.go
+++ b/core/types/withdrawal.go
@@ -19,15 +19,12 @@ package types
import (
"bytes"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"io"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/types/clonable"
- "github.com/ledgerwatch/erigon-lib/types/ssz"
- "github.com/ledgerwatch/erigon/cl/merkle_tree"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/rlp"
)
@@ -42,11 +39,6 @@ type Withdrawal struct {
Amount uint64 `json:"amount"` // value of withdrawal in GWei
}
-func (obj *Withdrawal) Equal(other *Withdrawal) bool {
- return obj.Index == other.Index && obj.Validator == other.Validator &&
- obj.Address == other.Address && obj.Amount == other.Amount
-}
-
func (obj *Withdrawal) EncodingSize() int {
encodingSize := 21 /* Address */
encodingSize++
@@ -84,34 +76,6 @@ func (obj *Withdrawal) EncodeRLP(w io.Writer) error {
return rlp.EncodeInt(obj.Amount, w, b[:])
}
-func (obj *Withdrawal) EncodeSSZ(buf []byte) ([]byte, error) {
- buf = append(buf, ssz.Uint64SSZ(obj.Index)...)
- buf = append(buf, ssz.Uint64SSZ(obj.Validator)...)
- buf = append(buf, obj.Address[:]...)
- buf = append(buf, ssz.Uint64SSZ(obj.Amount)...)
- return buf, nil
-}
-
-func (obj *Withdrawal) DecodeSSZ(buf []byte, _ int) error {
- if len(buf) < obj.EncodingSizeSSZ() {
- return fmt.Errorf("[Withdrawal] err: %s", ssz.ErrLowBufferSize)
- }
- obj.Index = ssz.UnmarshalUint64SSZ(buf)
- obj.Validator = ssz.UnmarshalUint64SSZ(buf[8:])
- copy(obj.Address[:], buf[16:])
- obj.Amount = ssz.UnmarshalUint64SSZ(buf[36:])
- return nil
-}
-
-func (obj *Withdrawal) EncodingSizeSSZ() int {
- // Validator Index (8 bytes) + Index (8 bytes) + Amount (8 bytes) + address length
- return 24 + length.Addr
-}
-
-func (obj *Withdrawal) HashSSZ() ([32]byte, error) { // the [32]byte is temporary
- return merkle_tree.HashTreeRoot(obj.Index, obj.Validator, obj.Address[:], obj.Amount)
-}
-
func (obj *Withdrawal) DecodeRLP(s *rlp.Stream) error {
_, err := s.List()
if err != nil {
diff --git a/core/types/withdrawal_test.go b/core/types/withdrawal_test.go
index fdb11401dc7..65824cc3a67 100644
--- a/core/types/withdrawal_test.go
+++ b/core/types/withdrawal_test.go
@@ -5,12 +5,10 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/ledgerwatch/erigon/common"
)
func TestWithdrawalsHash(t *testing.T) {
+ t.Parallel()
w := &Withdrawal{
Index: 0,
Validator: 0,
@@ -23,24 +21,3 @@ func TestWithdrawalsHash(t *testing.T) {
// Its Keccak should be returned, not the node itself.
assert.Equal(t, libcommon.HexToHash("82cc6fbe74c41496b382fcdf25216c5af7bdbb5a3929e8f2e61bd6445ab66436"), hash)
}
-
-// Test taken from: https://github.com/ethereum/consensus-spec-tests/tree/master/tests/mainnet/capella/ssz_static/Withdrawal/ssz_random/case_1
-var testWithdrawalEncodedSSZ = common.Hex2Bytes("09b99ded9629457f21c3c177a3cf80dedbbcbcbeee17b2395d5d3f839fc1ba3559d1a73ef53b8a5325e25ad2")
-var testWithdrawalsSSZHash = libcommon.HexToHash("c1ec17957781f09ab3d8dbfcdfaa6c3b40a1679d3d124588f77a2da5ebb3555f")
-var testWithdrawal = &Withdrawal{
- Index: 9170781944418253065,
- Validator: 16033042974434771745,
- Address: libcommon.HexToAddress("0xdbbcbcbeee17b2395d5d3f839fc1ba3559d1a73e"),
- Amount: 15157676145812061173,
-}
-
-func TestWithdrawalSSZ(t *testing.T) {
- withdrawal := &Withdrawal{}
- require.NoError(t, withdrawal.DecodeSSZ(testWithdrawalEncodedSSZ, 0))
- require.Equal(t, withdrawal, testWithdrawal)
- a, _ := withdrawal.EncodeSSZ(nil)
- require.Equal(t, a, testWithdrawalEncodedSSZ)
- hashSSZ, err := withdrawal.HashSSZ()
- require.NoError(t, err)
- require.Equal(t, libcommon.Hash(hashSSZ), testWithdrawalsSSZHash)
-}
diff --git a/core/vm/analysis_test.go b/core/vm/analysis_test.go
index c2265de4659..8392c602bcf 100644
--- a/core/vm/analysis_test.go
+++ b/core/vm/analysis_test.go
@@ -22,11 +22,11 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/crypto"
)
func TestJumpDestAnalysis(t *testing.T) {
+ t.Parallel()
tests := []struct {
code []byte
exp uint64
@@ -63,7 +63,7 @@ func BenchmarkJumpdestAnalysisEmpty_1200k(bench *testing.B) {
}
func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) {
- code := common.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
+ code := libcommon.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
codeBitmap(code)
@@ -82,7 +82,7 @@ func BenchmarkJumpdestHashing_1200k(bench *testing.B) {
}
func BenchmarkJumpDest(b *testing.B) {
- code := common.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
+ code := libcommon.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
pc := new(uint256.Int)
hash := libcommon.Hash{1, 2, 3, 4, 5}
@@ -90,7 +90,7 @@ func BenchmarkJumpDest(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
- contract := NewContract(contractRef, contractRef, nil, 0, false /* skipAnalysis */)
+ contract := NewContract(contractRef, libcommon.Address{}, nil, 0, false /* skipAnalysis */)
contract.Code = code
contract.CodeHash = hash
diff --git a/core/vm/contract.go b/core/vm/contract.go
index a3225f1517d..7d6d7daa6ba 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -46,7 +46,7 @@ type Contract struct {
// needs to be initialised to that of the caller's caller.
CallerAddress libcommon.Address
caller ContractRef
- self ContractRef
+ self libcommon.Address
jumpdests map[libcommon.Hash][]uint64 // Aggregated result of JUMPDEST analysis.
analysis []uint64 // Locally cached result of JUMPDEST analysis
skipAnalysis bool
@@ -61,8 +61,8 @@ type Contract struct {
}
// NewContract returns a new contract environment for the execution of EVM.
-func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas uint64, skipAnalysis bool) *Contract {
- c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object}
+func NewContract(caller ContractRef, addr libcommon.Address, value *uint256.Int, gas uint64, skipAnalysis bool) *Contract {
+ c := &Contract{CallerAddress: caller.Address(), caller: caller, self: addr}
if parent, ok := caller.(*Contract); ok {
// Reuse JUMPDEST analysis from parent context if available.
@@ -176,7 +176,7 @@ func (c *Contract) UseGas(gas uint64) (ok bool) {
// Address returns the contracts address
func (c *Contract) Address() libcommon.Address {
- return c.self.Address()
+ return c.self
}
// Value returns the contract's value (sent to it from it's caller)
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index cec4ac12a8a..ccc55074486 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -17,10 +17,10 @@
package vm
import (
- "bytes"
"crypto/sha256"
"encoding/binary"
"errors"
+ "github.com/ledgerwatch/erigon-lib/crypto/blake2b"
"math/big"
"github.com/holiman/uint256"
@@ -31,9 +31,7 @@ import (
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/math"
- "github.com/ledgerwatch/erigon/core/vm/evmtypes"
"github.com/ledgerwatch/erigon/crypto"
- "github.com/ledgerwatch/erigon/crypto/blake2b"
"github.com/ledgerwatch/erigon/crypto/bls12381"
"github.com/ledgerwatch/erigon/crypto/bn256"
@@ -51,12 +49,6 @@ type PrecompiledContract interface {
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
}
-type StatefulPrecompiledContract interface {
- RequiredGas(input []byte) uint64 // RequiredPrice calculates the contract gas use
- RunStateful(input []byte, state evmtypes.IntraBlockState) ([]byte, error) // Run runs the precompiled contract
- Run(input []byte) ([]byte, error) // Run runs the precompiled contract
-}
-
// PrecompiledContractsHomestead contains the default set of pre-compiled Ethereum
// contracts used in the Frontier and Homestead releases.
var PrecompiledContractsHomestead = map[libcommon.Address]PrecompiledContract{
@@ -108,21 +100,16 @@ var PrecompiledContractsBerlin = map[libcommon.Address]PrecompiledContract{
}
var PrecompiledContractsCancun = map[libcommon.Address]PrecompiledContract{
- libcommon.BytesToAddress([]byte{1}): &ecrecover{},
- libcommon.BytesToAddress([]byte{2}): &sha256hash{},
- libcommon.BytesToAddress([]byte{3}): &ripemd160hash{},
- libcommon.BytesToAddress([]byte{4}): &dataCopy{},
- libcommon.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
- libcommon.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
- libcommon.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
- libcommon.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
- libcommon.BytesToAddress([]byte{9}): &blake2F{},
- libcommon.BytesToAddress([]byte{0x0a}): &pointEvaluation{},
- libcommon.BytesToAddress(params.HistoryStorageAddress): &parentBeaconBlockRoot{},
-}
-
-var StatefulPrecompile = map[libcommon.Address]bool{
- libcommon.BytesToAddress(params.HistoryStorageAddress): true,
+ libcommon.BytesToAddress([]byte{0x01}): &ecrecover{},
+ libcommon.BytesToAddress([]byte{0x02}): &sha256hash{},
+ libcommon.BytesToAddress([]byte{0x03}): &ripemd160hash{},
+ libcommon.BytesToAddress([]byte{0x04}): &dataCopy{},
+ libcommon.BytesToAddress([]byte{0x05}): &bigModExp{eip2565: true},
+ libcommon.BytesToAddress([]byte{0x06}): &bn256AddIstanbul{},
+ libcommon.BytesToAddress([]byte{0x07}): &bn256ScalarMulIstanbul{},
+ libcommon.BytesToAddress([]byte{0x08}): &bn256PairingIstanbul{},
+ libcommon.BytesToAddress([]byte{0x09}): &blake2F{},
+ libcommon.BytesToAddress([]byte{0x0a}): &pointEvaluation{},
}
// PrecompiledContractsBLS contains the set of pre-compiled Ethereum
@@ -186,19 +173,14 @@ func ActivePrecompiles(rules *chain.Rules) []libcommon.Address {
// - the returned bytes,
// - the _remaining_ gas,
// - any error that occurred
-func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, state evmtypes.IntraBlockState) (ret []byte, remainingGas uint64, err error) {
+func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64,
+) (ret []byte, remainingGas uint64, err error) {
gasCost := p.RequiredGas(input)
if suppliedGas < gasCost {
return nil, 0, ErrOutOfGas
}
suppliedGas -= gasCost
- var output []byte
- sp, isStateful := p.(StatefulPrecompiledContract)
- if isStateful {
- output, err = sp.RunStateful(input, state)
- } else {
- output, err = p.Run(input)
- }
+ output, err := p.Run(input)
return output, suppliedGas, err
}
@@ -997,7 +979,7 @@ func (c *bls12381Pairing) Run(input []byte) ([]byte, error) {
return nil, errBLS12381G2PointSubgroup
}
- // Update pairing engine with G1 and G2 ponits
+ // Update pairing engine with G1 and G2 points
e.AddPair(p1, p2)
}
// Prepare 32 byte output
@@ -1116,36 +1098,3 @@ func (c *pointEvaluation) RequiredGas(input []byte) uint64 {
func (c *pointEvaluation) Run(input []byte) ([]byte, error) {
return libkzg.PointEvaluationPrecompile(input)
}
-
-type parentBeaconBlockRoot struct{}
-
-func (c *parentBeaconBlockRoot) RequiredGas(input []byte) uint64 {
- return params.ParentBeaconBlockRootGas
-}
-
-func (c *parentBeaconBlockRoot) Run(input []byte) ([]byte, error) {
- return nil, nil
-}
-
-func (c *parentBeaconBlockRoot) RunStateful(input []byte, state evmtypes.IntraBlockState) ([]byte, error) {
- timestampParam := input[:32]
- if len(timestampParam) < 32 {
- return nil, errors.New("timestamp param too short")
- }
-
- timestampReduced := uint256.NewInt(0).SetBytes(timestampParam).Uint64() % params.HistoricalRootsModulus
- timestampIndex := libcommon.BigToHash(libcommon.Big256.SetUint64((timestampReduced)))
- recordedTimestamp := uint256.NewInt(0)
- root := uint256.NewInt(0)
- state.GetState(libcommon.BytesToAddress(params.HistoryStorageAddress), ×tampIndex, recordedTimestamp)
-
- recordedTimestampBytes := recordedTimestamp.Bytes32()
- if !bytes.Equal(recordedTimestampBytes[:], timestampParam) {
- return make([]byte, 32), nil
- }
- timestampExtended := timestampReduced + params.HistoricalRootsModulus
- rootIndex := libcommon.BigToHash(libcommon.Big256.SetUint64((timestampExtended)))
- state.GetState(libcommon.BytesToAddress(params.HistoryStorageAddress), &rootIndex, root)
- res := root.Bytes32()
- return res[:], nil
-}
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index bb3a9eeda4f..a9ca23ef1b6 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -96,10 +96,11 @@ var blake2FMalformedInputTests = []precompiledFailureTest{
func testPrecompiled(t *testing.T, addr string, test precompiledTest) {
p := allPrecompiles[libcommon.HexToAddress(addr)]
- in := common.Hex2Bytes(test.Input)
+ in := libcommon.Hex2Bytes(test.Input)
gas := p.RequiredGas(in)
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
- if res, _, err := RunPrecompiledContract(p, in, gas, nil); err != nil {
+ t.Parallel()
+ if res, _, err := RunPrecompiledContract(p, in, gas); err != nil {
t.Error(err)
} else if common.Bytes2Hex(res) != test.Expected {
t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res))
@@ -108,7 +109,7 @@ func testPrecompiled(t *testing.T, addr string, test precompiledTest) {
t.Errorf("%v: gas wrong, expected %d, got %d", test.Name, expGas, gas)
}
// Verify that the precompile did not touch the input buffer
- exp := common.Hex2Bytes(test.Input)
+ exp := libcommon.Hex2Bytes(test.Input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
@@ -117,16 +118,17 @@ func testPrecompiled(t *testing.T, addr string, test precompiledTest) {
func testPrecompiledOOG(t *testing.T, addr string, test precompiledTest) {
p := allPrecompiles[libcommon.HexToAddress(addr)]
- in := common.Hex2Bytes(test.Input)
+ in := libcommon.Hex2Bytes(test.Input)
gas := p.RequiredGas(in) - 1
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
- _, _, err := RunPrecompiledContract(p, in, gas, nil)
+ t.Parallel()
+ _, _, err := RunPrecompiledContract(p, in, gas)
if err.Error() != "out of gas" {
t.Errorf("Expected error [out of gas], got [%v]", err)
}
// Verify that the precompile did not touch the input buffer
- exp := common.Hex2Bytes(test.Input)
+ exp := libcommon.Hex2Bytes(test.Input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
@@ -135,15 +137,16 @@ func testPrecompiledOOG(t *testing.T, addr string, test precompiledTest) {
func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing.T) {
p := allPrecompiles[libcommon.HexToAddress(addr)]
- in := common.Hex2Bytes(test.Input)
+ in := libcommon.Hex2Bytes(test.Input)
gas := p.RequiredGas(in)
t.Run(test.Name, func(t *testing.T) {
- _, _, err := RunPrecompiledContract(p, in, gas, nil)
+ t.Parallel()
+ _, _, err := RunPrecompiledContract(p, in, gas)
if err.Error() != test.ExpectedError {
t.Errorf("Expected error [%v], got [%v]", test.ExpectedError, err)
}
// Verify that the precompile did not touch the input buffer
- exp := common.Hex2Bytes(test.Input)
+ exp := libcommon.Hex2Bytes(test.Input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
@@ -155,7 +158,7 @@ func benchmarkPrecompiled(b *testing.B, addr string, test precompiledTest) {
return
}
p := allPrecompiles[libcommon.HexToAddress(addr)]
- in := common.Hex2Bytes(test.Input)
+ in := libcommon.Hex2Bytes(test.Input)
reqGas := p.RequiredGas(in)
var (
@@ -170,7 +173,7 @@ func benchmarkPrecompiled(b *testing.B, addr string, test precompiledTest) {
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
copy(data, in)
- res, _, err = RunPrecompiledContract(p, data, reqGas, nil)
+ res, _, err = RunPrecompiledContract(p, data, reqGas)
}
bench.StopTimer()
elapsed := uint64(time.Since(start))
@@ -247,6 +250,7 @@ func BenchmarkPrecompiledBn256Add(b *testing.B) { benchJson("bn256Add", "06", b)
// Tests OOG
func TestPrecompiledModExpOOG(t *testing.T) {
+ t.Parallel()
modexpTests, err := loadJson("modexp")
if err != nil {
t.Fatal(err)
@@ -268,6 +272,7 @@ func TestPrecompiledBlake2F(t *testing.T) { testJson("blake2F", "09", t) }
func BenchmarkPrecompiledBlake2F(b *testing.B) { benchJson("blake2F", "09", b) }
func TestPrecompileBlake2FMalformedInput(t *testing.T) {
+ t.Parallel()
for _, test := range blake2FMalformedInputTests {
testPrecompiledFailure("09", test, t)
}
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 4b2a825f40f..8d48f1a7b33 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -24,10 +24,12 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon/consensus/misc"
"github.com/ledgerwatch/erigon/params"
)
var activators = map[int]func(*JumpTable){
+ 7516: enable7516,
6780: enable6780,
5656: enable5656,
4844: enable4844,
@@ -215,7 +217,7 @@ func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
// opBaseFee implements BASEFEE opcode
func opBaseFee(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error) {
- baseFee := interpreter.evm.Context().BaseFee
+ baseFee := interpreter.evm.Context.BaseFee
callContext.Stack.Push(baseFee)
return nil, nil
}
@@ -258,8 +260,8 @@ func enable4844(jt *JumpTable) {
// opBlobHash implements the BLOBHASH opcode
func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
idx := scope.Stack.Peek()
- if idx.LtUint64(uint64(len(interpreter.evm.TxContext().BlobHashes))) {
- hash := interpreter.evm.TxContext().BlobHashes[idx.Uint64()]
+ if idx.LtUint64(uint64(len(interpreter.evm.BlobHashes))) {
+ hash := interpreter.evm.BlobHashes[idx.Uint64()]
idx.SetBytes(hash.Bytes())
} else {
idx.Clear()
@@ -303,3 +305,25 @@ func enable6780(jt *JumpTable) {
numPush: 0,
}
}
+
+// opBlobBaseFee implements the BLOBBASEFEE opcode
+func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error) {
+ excessBlobGas := interpreter.evm.Context.ExcessBlobGas
+ blobBaseFee, err := misc.GetBlobGasPrice(interpreter.evm.ChainConfig(), *excessBlobGas)
+ if err != nil {
+ return nil, err
+ }
+ callContext.Stack.Push(blobBaseFee)
+ return nil, nil
+}
+
+// enable7516 applies EIP-7516 (BLOBBASEFEE opcode)
+// - Adds an opcode that returns the current block's blob base fee.
+func enable7516(jt *JumpTable) {
+ jt[BLOBBASEFEE] = &operation{
+ execute: opBlobBaseFee,
+ constantGas: GasQuickStep,
+ numPop: 0,
+ numPush: 1,
+ }
+}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index ce69c8eed57..dc5dce586c8 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -68,8 +68,8 @@ func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, err
// The EVM should never be reused and is not thread safe.
type EVM struct {
// Context provides auxiliary blockchain related information
- context evmtypes.BlockContext
- txContext evmtypes.TxContext
+ Context evmtypes.BlockContext
+ evmtypes.TxContext
// IntraBlockState gives access to the underlying state
intraBlockState evmtypes.IntraBlockState
@@ -96,8 +96,8 @@ type EVM struct {
// only ever be used *once*.
func NewEVM(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, state evmtypes.IntraBlockState, chainConfig *chain.Config, vmConfig Config) *EVM {
evm := &EVM{
- context: blockCtx,
- txContext: txCtx,
+ Context: blockCtx,
+ TxContext: txCtx,
intraBlockState: state,
config: vmConfig,
chainConfig: chainConfig,
@@ -112,7 +112,7 @@ func NewEVM(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, state evmt
// Reset resets the EVM with a new transaction context.Reset
// This is not threadsafe and should only be done very cautiously.
func (evm *EVM) Reset(txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState) {
- evm.txContext = txCtx
+ evm.TxContext = txCtx
evm.intraBlockState = ibs
// ensure the evm is reset to be used again
@@ -120,8 +120,8 @@ func (evm *EVM) Reset(txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState) {
}
func (evm *EVM) ResetBetweenBlocks(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState, vmConfig Config, chainRules *chain.Rules) {
- evm.context = blockCtx
- evm.txContext = txCtx
+ evm.Context = blockCtx
+ evm.TxContext = txCtx
evm.intraBlockState = ibs
evm.config = vmConfig
evm.chainRules = chainRules
@@ -170,7 +170,7 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp
}
if typ == CALL || typ == CALLCODE {
// Fail if we're trying to transfer more than the available balance
- if !value.IsZero() && !evm.context.CanTransfer(evm.intraBlockState, caller.Address(), value) {
+ if !value.IsZero() && !evm.Context.CanTransfer(evm.intraBlockState, caller.Address(), value) {
if !bailout {
return nil, gas, ErrInsufficientBalance
}
@@ -205,7 +205,7 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp
}
evm.intraBlockState.CreateAccount(addr, false)
}
- evm.context.Transfer(evm.intraBlockState, caller.Address(), addr, value, bailout)
+ evm.Context.Transfer(evm.intraBlockState, caller.Address(), addr, value, bailout)
} else if typ == STATICCALL {
// We do an AddBalance of zero here, just in order to trigger a touch.
// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
@@ -233,7 +233,7 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp
// It is allowed to call precompiles, even via delegatecall
if isPrecompile {
- ret, gas, err = RunPrecompiledContract(p, input, gas, evm.intraBlockState)
+ ret, gas, err = RunPrecompiledContract(p, input, gas)
} else if len(code) == 0 {
// If the account has no code, we can abort here
// The depth-check is already done, and precompiles handled above
@@ -248,11 +248,11 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp
codeHash := evm.intraBlockState.GetCodeHash(addrCopy)
var contract *Contract
if typ == CALLCODE {
- contract = NewContract(caller, AccountRef(caller.Address()), value, gas, evm.config.SkipAnalysis)
+ contract = NewContract(caller, caller.Address(), value, gas, evm.config.SkipAnalysis)
} else if typ == DELEGATECALL {
- contract = NewContract(caller, AccountRef(caller.Address()), value, gas, evm.config.SkipAnalysis).AsDelegate()
+ contract = NewContract(caller, caller.Address(), value, gas, evm.config.SkipAnalysis).AsDelegate()
} else {
- contract = NewContract(caller, AccountRef(addrCopy), value, gas, evm.config.SkipAnalysis)
+ contract = NewContract(caller, addrCopy, value, gas, evm.config.SkipAnalysis)
}
contract.SetCallCode(&addrCopy, codeHash, code)
readOnly := false
@@ -352,7 +352,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
err = ErrDepth
return nil, libcommon.Address{}, gas, err
}
- if !evm.context.CanTransfer(evm.intraBlockState, caller.Address(), value) {
+ if !evm.Context.CanTransfer(evm.intraBlockState, caller.Address(), value) {
err = ErrInsufficientBalance
return nil, libcommon.Address{}, gas, err
}
@@ -381,11 +381,11 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
if evm.chainRules.IsSpuriousDragon {
evm.intraBlockState.SetNonce(address, 1)
}
- evm.context.Transfer(evm.intraBlockState, caller.Address(), address, value, false /* bailout */)
+ evm.Context.Transfer(evm.intraBlockState, caller.Address(), address, value, false /* bailout */)
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
- contract := NewContract(caller, AccountRef(address), value, gas, evm.config.SkipAnalysis)
+ contract := NewContract(caller, address, value, gas, evm.config.SkipAnalysis)
contract.SetCodeOptionalHash(&address, codeAndHash)
if evm.config.NoRecursion && depth > 0 {
@@ -476,16 +476,6 @@ func (evm *EVM) ChainRules() *chain.Rules {
return evm.chainRules
}
-// Context returns the EVM's BlockContext
-func (evm *EVM) Context() evmtypes.BlockContext {
- return evm.context
-}
-
-// TxContext returns the EVM's TxContext
-func (evm *EVM) TxContext() evmtypes.TxContext {
- return evm.txContext
-}
-
// IntraBlockState returns the EVM's IntraBlockState
func (evm *EVM) IntraBlockState() evmtypes.IntraBlockState {
return evm.intraBlockState
diff --git a/core/vm/evm_test.go b/core/vm/evm_test.go
index 286fb9664e8..9ef0efa7ed4 100644
--- a/core/vm/evm_test.go
+++ b/core/vm/evm_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/core/vm/evmtypes"
"github.com/ledgerwatch/erigon/params"
@@ -12,6 +13,7 @@ import (
)
func TestInterpreterReadonly(t *testing.T) {
+ t.Parallel()
rapid.Check(t, func(t *rapid.T) {
env := NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
@@ -39,7 +41,7 @@ func TestInterpreterReadonly(t *testing.T) {
dummyContract := NewContract(
&dummyContractRef{},
- &dummyContractRef{},
+ libcommon.Address{},
new(uint256.Int),
0,
false,
@@ -117,6 +119,7 @@ func TestInterpreterReadonly(t *testing.T) {
}
func TestReadonlyBasicCases(t *testing.T) {
+ t.Parallel()
cases := []struct {
testName string
readonlySliceTest []bool
@@ -266,6 +269,7 @@ func TestReadonlyBasicCases(t *testing.T) {
}
t.Run(testcase.testName+evmsTestcase.suffix, func(t *testing.T) {
+ t.Parallel()
readonlySliceTest := testcase.readonlySliceTest
env := NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
@@ -292,7 +296,7 @@ func TestReadonlyBasicCases(t *testing.T) {
dummyContract := NewContract(
&dummyContractRef{},
- &dummyContractRef{},
+ libcommon.Address{},
new(uint256.Int),
0,
false,
@@ -384,7 +388,7 @@ func (st *testSequential) Run(_ *Contract, _ []byte, _ bool) ([]byte, error) {
nextContract := NewContract(
&dummyContractRef{},
- &dummyContractRef{},
+ libcommon.Address{},
new(uint256.Int),
0,
false,
diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go
index ac2012a8158..4b919f6b3e3 100644
--- a/core/vm/evmtypes/evmtypes.go
+++ b/core/vm/evmtypes/evmtypes.go
@@ -97,13 +97,12 @@ type IntraBlockState interface {
precompiles []common.Address, txAccesses types2.AccessList)
AddressInAccessList(addr common.Address) bool
- SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool)
// AddAddressToAccessList adds the given address to the access list. This operation is safe to perform
// even if the feature/fork is not active yet
- AddAddressToAccessList(addr common.Address)
+ AddAddressToAccessList(addr common.Address) (addrMod bool)
// AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
// even if the feature/fork is not active yet
- AddSlotToAccessList(addr common.Address, slot common.Hash)
+ AddSlotToAccessList(addr common.Address, slot common.Hash) (addrMod, slotMod bool)
RevertToSnapshot(int)
Snapshot() int
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index f4495765f4c..90b39100c3e 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -68,7 +68,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
// EXTCODECOPY (stack position 3)
// RETURNDATACOPY (stack position 2)
func memoryCopierGas(stackpos int) gasFunc {
- return func(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ return func(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
// Gas for expanding the memory
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
@@ -99,7 +99,7 @@ var (
gasReturnDataCopy = memoryCopierGas(2)
)
-func gasSStore(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasSStore(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
value, x := stack.Back(1), stack.Back(0)
key := libcommon.Hash(x.Bytes32())
var current uint256.Int
@@ -182,7 +182,7 @@ func gasSStore(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *M
// 2.2.2. If original value equals new value (this storage slot is reset):
// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
-func gasSStoreEIP2200(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
// If we fail the minimum gas availability invariant, fail (0)
if contract.Gas <= params.SstoreSentryGasEIP2200 {
return 0, errors.New("not enough gas for reentrancy sentry")
@@ -226,7 +226,7 @@ func gasSStoreEIP2200(evm VMInterpreter, contract *Contract, stack *stack.Stack,
}
func makeGasLog(n uint64) gasFunc {
- return func(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ return func(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
requestedSize, overflow := stack.Back(1).Uint64WithOverflow()
if overflow {
return 0, ErrGasUintOverflow
@@ -255,7 +255,7 @@ func makeGasLog(n uint64) gasFunc {
}
}
-func gasKeccak256(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasKeccak256(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -276,7 +276,7 @@ func gasKeccak256(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *
// pureMemoryGascost is used by several operations, which aside from their
// static cost have a dynamic cost which is solely based on the memory
// expansion
-func pureMemoryGascost(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func pureMemoryGascost(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
return memoryGasCost(mem, memorySize)
}
@@ -289,7 +289,7 @@ var (
gasCreate = pureMemoryGascost
)
-func gasCreate2(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCreate2(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -310,7 +310,7 @@ func gasCreate2(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Me
return gas, nil
}
-func gasCreateEip3860(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCreateEip3860(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -329,7 +329,7 @@ func gasCreateEip3860(_ VMInterpreter, contract *Contract, stack *stack.Stack, m
return gas, nil
}
-func gasCreate2Eip3860(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCreate2Eip3860(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -348,7 +348,7 @@ func gasCreate2Eip3860(_ VMInterpreter, contract *Contract, stack *stack.Stack,
return gas, nil
}
-func gasExpFrontier(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasExpFrontier(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
expByteLen := uint64(libcommon.BitLenToByteLen(stack.Data[stack.Len()-2].BitLen()))
var (
@@ -361,7 +361,7 @@ func gasExpFrontier(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem
return gas, nil
}
-func gasExpEIP160(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasExpEIP160(_ *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
expByteLen := uint64(libcommon.BitLenToByteLen(stack.Data[stack.Len()-2].BitLen()))
var (
@@ -374,7 +374,7 @@ func gasExpEIP160(_ VMInterpreter, contract *Contract, stack *stack.Stack, mem *
return gas, nil
}
-func gasCall(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCall(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
transfersValue = !stack.Back(2).IsZero()
@@ -412,7 +412,7 @@ func gasCall(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Mem
return gas, nil
}
-func gasCallCode(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCallCode(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
memoryGas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -440,7 +440,7 @@ func gasCallCode(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem
return gas, nil
}
-func gasDelegateCall(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasDelegateCall(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -460,7 +460,7 @@ func gasDelegateCall(evm VMInterpreter, contract *Contract, stack *stack.Stack,
return gas, nil
}
-func gasStaticCall(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasStaticCall(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -480,7 +480,7 @@ func gasStaticCall(evm VMInterpreter, contract *Contract, stack *stack.Stack, me
return gas, nil
}
-func gasSelfdestruct(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasSelfdestruct(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
var gas uint64
// TangerineWhistle (EIP150) gas reprice fork:
if evm.ChainRules().IsTangerineWhistle {
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 7e3300a37ea..63c4546bd09 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -23,12 +23,13 @@ import (
"strconv"
"testing"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
+
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/core/vm/evmtypes"
@@ -37,6 +38,7 @@ import (
)
func TestMemoryGasCost(t *testing.T) {
+ t.Parallel()
tests := []struct {
size uint64
cost uint64
@@ -92,6 +94,7 @@ func TestEIP2200(t *testing.T) {
i := i
t.Run(strconv.Itoa(i), func(t *testing.T) {
+ t.Parallel()
address := libcommon.BytesToAddress([]byte("contract"))
_, tx := memdb.NewTestTx(t)
@@ -137,6 +140,7 @@ var createGasTests = []struct {
}
func TestCreateGas(t *testing.T) {
+ t.Parallel()
_, db, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil)
for i, tt := range createGasTests {
address := libcommon.BytesToAddress([]byte("contract"))
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index fa6d937a605..7871e9fd87a 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -294,7 +294,7 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
}
func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.TxContext().Origin.Bytes()))
+ scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes()))
return nil, nil
}
func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
@@ -459,7 +459,7 @@ func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.Push(interpreter.evm.TxContext().GasPrice)
+ scope.Stack.Push(interpreter.evm.GasPrice)
return nil, nil
}
@@ -471,14 +471,14 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
return nil, nil
}
var upper, lower uint64
- upper = interpreter.evm.Context().BlockNumber
+ upper = interpreter.evm.Context.BlockNumber
if upper < 257 {
lower = 0
} else {
lower = upper - 256
}
if num64 >= lower && num64 < upper {
- num.SetBytes(interpreter.evm.Context().GetHash(num64).Bytes())
+ num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes())
} else {
num.Clear()
}
@@ -486,30 +486,30 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
}
func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.Context().Coinbase.Bytes()))
+ scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes()))
return nil, nil
}
func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v := new(uint256.Int).SetUint64(interpreter.evm.Context().Time)
+ v := new(uint256.Int).SetUint64(interpreter.evm.Context.Time)
scope.Stack.Push(v)
return nil, nil
}
func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v := new(uint256.Int).SetUint64(interpreter.evm.Context().BlockNumber)
+ v := new(uint256.Int).SetUint64(interpreter.evm.Context.BlockNumber)
scope.Stack.Push(v)
return nil, nil
}
func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
var v *uint256.Int
- if interpreter.evm.Context().PrevRanDao != nil {
+ if interpreter.evm.Context.PrevRanDao != nil {
// EIP-4399: Supplant DIFFICULTY opcode with PREVRANDAO
- v = new(uint256.Int).SetBytes(interpreter.evm.Context().PrevRanDao.Bytes())
+ v = new(uint256.Int).SetBytes(interpreter.evm.Context.PrevRanDao.Bytes())
} else {
var overflow bool
- v, overflow = uint256.FromBig(interpreter.evm.Context().Difficulty)
+ v, overflow = uint256.FromBig(interpreter.evm.Context.Difficulty)
if overflow {
return nil, fmt.Errorf("interpreter.evm.Context.Difficulty higher than 2^256-1")
}
@@ -519,10 +519,10 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.evm.Context().MaxGasLimit {
+ if interpreter.evm.Context.MaxGasLimit {
scope.Stack.Push(new(uint256.Int).SetAllOne())
} else {
- scope.Stack.Push(new(uint256.Int).SetUint64(interpreter.evm.Context().GasLimit))
+ scope.Stack.Push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
}
return nil, nil
}
@@ -575,13 +575,13 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
if usedBitmap {
if interpreter.cfg.TraceJumpDest {
log.Warn("Code Bitmap used for detecting invalid jump",
- "tx", fmt.Sprintf("0x%x", interpreter.evm.TxContext().TxHash),
- "block_num", interpreter.evm.Context().BlockNumber,
+ "tx", fmt.Sprintf("0x%x", interpreter.evm.TxHash),
+ "block_num", interpreter.evm.Context.BlockNumber,
)
} else {
// This is "cheaper" version because it does not require calculation of txHash for each transaction
log.Warn("Code Bitmap used for detecting invalid jump",
- "block_num", interpreter.evm.Context().BlockNumber,
+ "block_num", interpreter.evm.Context.BlockNumber,
)
}
}
@@ -598,13 +598,13 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
if usedBitmap {
if interpreter.cfg.TraceJumpDest {
log.Warn("Code Bitmap used for detecting invalid jump",
- "tx", fmt.Sprintf("0x%x", interpreter.evm.TxContext().TxHash),
- "block_num", interpreter.evm.Context().BlockNumber,
+ "tx", fmt.Sprintf("0x%x", interpreter.evm.TxHash),
+ "block_num", interpreter.evm.Context.BlockNumber,
)
} else {
// This is "cheaper" version because it does not require calculation of txHash for each transaction
log.Warn("Code Bitmap used for detecting invalid jump",
- "block_num", interpreter.evm.Context().BlockNumber,
+ "block_num", interpreter.evm.Context.BlockNumber,
)
}
}
@@ -741,7 +741,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
}
stack.Push(&temp)
if err == nil || err == ErrExecutionReverted {
- ret = common.CopyBytes(ret)
+ ret = libcommon.CopyBytes(ret)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
@@ -775,7 +775,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
}
stack.Push(&temp)
if err == nil || err == ErrExecutionReverted {
- ret = common.CopyBytes(ret)
+ ret = libcommon.CopyBytes(ret)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
@@ -805,7 +805,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
}
stack.Push(&temp)
if err == nil || err == ErrExecutionReverted {
- ret = common.CopyBytes(ret)
+ ret = libcommon.CopyBytes(ret)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
@@ -835,7 +835,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
stack.Push(&temp)
if err == nil || err == ErrExecutionReverted {
- ret = common.CopyBytes(ret)
+ ret = libcommon.CopyBytes(ret)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
@@ -892,15 +892,15 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
beneficiary := scope.Stack.Pop()
callerAddr := scope.Contract.Address()
beneficiaryAddr := libcommon.Address(beneficiary.Bytes20())
- balance := interpreter.evm.IntraBlockState().GetBalance(callerAddr)
+ balance := *interpreter.evm.IntraBlockState().GetBalance(callerAddr)
if interpreter.evm.Config().Debug {
if interpreter.cfg.Debug {
- interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, callerAddr, beneficiaryAddr, false /* precompile */, false /* create */, []byte{}, 0, balance, nil /* code */)
+ interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, callerAddr, beneficiaryAddr, false /* precompile */, false /* create */, []byte{}, 0, &balance, nil /* code */)
interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil)
}
}
- interpreter.evm.IntraBlockState().SubBalance(callerAddr, balance)
- interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance)
+ interpreter.evm.IntraBlockState().SubBalance(callerAddr, &balance)
+ interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, &balance)
interpreter.evm.IntraBlockState().Selfdestruct6780(callerAddr)
return nil, errStopToken
}
@@ -928,7 +928,7 @@ func makeLog(size int) executionFunc {
Data: d,
// This is a non-consensus field, but assigned here because
// core/state doesn't know the current block number.
- BlockNumber: interpreter.evm.Context().BlockNumber,
+ BlockNumber: interpreter.evm.Context.BlockNumber,
})
return nil, nil
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index c64b0f34697..62f5304e267 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -117,9 +117,9 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
)
for i, test := range tests {
- x := new(uint256.Int).SetBytes(common.Hex2Bytes(test.X))
- y := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Y))
- expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
+ x := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.X))
+ y := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.Y))
+ expected := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.Expected))
stack.Push(x)
stack.Push(y)
opFn(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
@@ -135,6 +135,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
}
func TestByteOp(t *testing.T) {
+ t.Parallel()
tests := []TwoOperandTestcase{
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", "00", "AB"},
{"ABCDEF0908070605040302010000000000000000000000000000000000000000", "01", "CD"},
@@ -149,6 +150,7 @@ func TestByteOp(t *testing.T) {
}
func TestSHL(t *testing.T) {
+ t.Parallel()
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shl-shift-left
tests := []TwoOperandTestcase{
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000002"},
@@ -166,6 +168,7 @@ func TestSHL(t *testing.T) {
}
func TestSHR(t *testing.T) {
+ t.Parallel()
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shr-logical-shift-right
tests := []TwoOperandTestcase{
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
@@ -184,6 +187,7 @@ func TestSHR(t *testing.T) {
}
func TestSAR(t *testing.T) {
+ t.Parallel()
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#sar-arithmetic-shift-right
tests := []TwoOperandTestcase{
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
@@ -208,6 +212,7 @@ func TestSAR(t *testing.T) {
}
func TestAddMod(t *testing.T) {
+ t.Parallel()
var (
env = NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, params.TestChainConfig, Config{})
stack = stack.New()
@@ -230,10 +235,10 @@ func TestAddMod(t *testing.T) {
// in 256 bit repr, fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd
for i, test := range tests {
- x := new(uint256.Int).SetBytes(common.Hex2Bytes(test.x))
- y := new(uint256.Int).SetBytes(common.Hex2Bytes(test.y))
- z := new(uint256.Int).SetBytes(common.Hex2Bytes(test.z))
- expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.expected))
+ x := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.x))
+ y := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.y))
+ z := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.z))
+ expected := new(uint256.Int).SetBytes(libcommon.Hex2Bytes(test.expected))
stack.Push(z)
stack.Push(y)
stack.Push(x)
@@ -283,6 +288,7 @@ func TestAddMod(t *testing.T) {
// TestJsonTestcases runs through all the testcases defined as json-files
func TestJsonTestcases(t *testing.T) {
+ t.Parallel()
for name := range twoOpMethods {
data, err := os.ReadFile(fmt.Sprintf("testdata/testcases_%v.json", name))
if err != nil {
@@ -305,7 +311,7 @@ func opBenchmark(b *testing.B, op executionFunc, args ...string) {
// convert args
byteArgs := make([][]byte, len(args))
for i, arg := range args {
- byteArgs[i] = common.Hex2Bytes(arg)
+ byteArgs[i] = libcommon.Hex2Bytes(arg)
}
pc := uint64(0)
b.ResetTimer()
@@ -529,6 +535,7 @@ func BenchmarkOpIsZero(b *testing.B) {
}
func TestOpMstore(t *testing.T) {
+ t.Parallel()
var (
env = NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, params.TestChainConfig, Config{})
stack = stack.New()
@@ -540,7 +547,7 @@ func TestOpMstore(t *testing.T) {
mem.Resize(64)
pc := uint64(0)
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
- stack.PushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int))
+ stack.PushN(*new(uint256.Int).SetBytes(libcommon.Hex2Bytes(v)), *new(uint256.Int))
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
@@ -574,6 +581,7 @@ func BenchmarkOpMstore(bench *testing.B) {
}
func TestOpTstore(t *testing.T) {
+ t.Parallel()
var (
state = state.New(nil)
env = NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, state, params.TestChainConfig, Config{})
@@ -583,9 +591,9 @@ func TestOpTstore(t *testing.T) {
caller = libcommon.Address{}
to = libcommon.Address{1}
contractRef = contractRef{caller}
- contract = NewContract(contractRef, AccountRef(to), u256.Num0, 0, false)
+ contract = NewContract(contractRef, to, u256.Num0, 0, false)
scopeContext = ScopeContext{mem, stack, contract}
- value = common.Hex2Bytes("abcdef00000000000000abba000000000deaf000000c0de00100000000133700")
+ value = libcommon.Hex2Bytes("abcdef00000000000000abba000000000deaf000000c0de00100000000133700")
)
env.interpreter = evmInterpreter
@@ -632,6 +640,7 @@ func BenchmarkOpKeccak256(bench *testing.B) {
}
func TestCreate2Addreses(t *testing.T) {
+ t.Parallel()
type testcase struct {
origin string
salt string
@@ -706,6 +715,7 @@ func TestCreate2Addreses(t *testing.T) {
}
func TestOpMCopy(t *testing.T) {
+ t.Parallel()
// Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
for i, tc := range []struct {
dst, src, len string
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index f26e272bbd8..161068229bf 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -99,7 +99,7 @@ type EVMInterpreter struct {
//
//nolint:structcheck
type VM struct {
- evm VMInterpreter
+ evm *EVM
cfg Config
hasher keccakState // Keccak256 hasher instance shared across opcodes
@@ -121,7 +121,7 @@ func copyJumpTable(jt *JumpTable) *JumpTable {
}
// NewEVMInterpreter returns a new instance of the Interpreter.
-func NewEVMInterpreter(evm VMInterpreter, cfg Config) *EVMInterpreter {
+func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
var jt *JumpTable
switch {
case evm.ChainRules().IsPrague:
@@ -183,17 +183,6 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
return nil, nil
}
- // Increment the call depth which is restricted to 1024
- in.depth++
- defer in.decrementDepth()
-
- // Make sure the readOnly is only set if we aren't in readOnly yet.
- // This makes also sure that the readOnly flag isn't removed for child calls.
- if readOnly && !in.readOnly {
- in.readOnly = true
- defer func() { in.readOnly = false }()
- }
-
// Reset the previous call's return data. It's unimportant to preserve the old buffer
// as every returning call will return new data anyway.
in.returnData = nil
@@ -219,25 +208,37 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
logged bool // deferred Tracer should ignore already logged steps
res []byte // result of the opcode execution function
)
- // Don't move this deferrred function, it's placed before the capturestate-deferred method,
- // so that it get's executed _after_: the capturestate needs the stacks before
- // they are returned to the pools
+
mem.Reset()
- defer pool.Put(mem)
- defer stack.ReturnNormalStack(locStack)
+
contract.Input = input
- if in.cfg.Debug {
- defer func() {
- if err != nil {
- if !logged {
- in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck
- } else {
- in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.depth, err)
- }
- }
- }()
+ // Make sure the readOnly is only set if we aren't in readOnly yet.
+ // This makes also sure that the readOnly flag isn't removed for child calls.
+ restoreReadonly := readOnly && !in.readOnly
+ if restoreReadonly {
+ in.readOnly = true
}
+ // Increment the call depth which is restricted to 1024
+ in.depth++
+ defer func() {
+ // first: capture data/memory/state/depth/etc... then clenup them
+ if in.cfg.Debug && err != nil {
+ if !logged {
+ in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck
+ } else {
+ in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.depth, err)
+ }
+ }
+ // this function must execute _after_: the `CaptureState` needs the stacks before
+ pool.Put(mem)
+ stack.ReturnNormalStack(locStack)
+ if restoreReadonly {
+ in.readOnly = false
+ }
+ in.depth--
+ }()
+
// The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
// the execution of one of the operations or until the done flag is set by the
@@ -292,11 +293,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err != nil || !contract.UseGas(dynamicCost) {
return nil, ErrOutOfGas
}
+ // Do tracing before memory expansion
+ if in.cfg.Debug {
+ in.cfg.Tracer.CaptureState(_pc, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck
+ logged = true
+ }
if memorySize > 0 {
mem.Resize(memorySize)
}
- }
- if in.cfg.Debug {
+ } else if in.cfg.Debug {
in.cfg.Tracer.CaptureState(_pc, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck
logged = true
}
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 67e636f14f2..047c9f53845 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -25,7 +25,7 @@ import (
type (
executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error)
- gasFunc func(VMInterpreter, *Contract, *stack.Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
+ gasFunc func(*EVM, *Contract, *stack.Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*stack.Stack) (size uint64, overflow bool)
)
@@ -104,6 +104,7 @@ func newCancunInstructionSet() JumpTable {
enable4844(&instructionSet) // BLOBHASH opcode
enable5656(&instructionSet) // MCOPY opcode
enable6780(&instructionSet) // SELFDESTRUCT only in same transaction
+ enable7516(&instructionSet) // BLOBBASEFEE opcode
validateAndFillMaxStack(&instructionSet)
return instructionSet
}
diff --git a/core/vm/logger.go b/core/vm/logger.go
index ff76ae71efb..5677233f97a 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -33,7 +33,7 @@ type EVMLogger interface {
CaptureTxStart(gasLimit uint64)
CaptureTxEnd(restGas uint64)
// Top call frame
- CaptureStart(env VMInterface, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte)
+ CaptureStart(env *EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte)
CaptureEnd(output []byte, usedGas uint64, err error)
// Rest of the frames
CaptureEnter(typ OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte)
diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go
index 5418bbbc305..f304e9eb256 100644
--- a/core/vm/memory_test.go
+++ b/core/vm/memory_test.go
@@ -9,6 +9,7 @@ import (
)
func TestMemoryCopy(t *testing.T) {
+ t.Parallel()
// Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases
for i, tc := range []struct {
dst, src, len uint64
diff --git a/core/vm/mock_vm.go b/core/vm/mock_vm.go
index f2a99b66f0a..de0a632f454 100644
--- a/core/vm/mock_vm.go
+++ b/core/vm/mock_vm.go
@@ -51,7 +51,7 @@ func (evm *testVM) Run(_ *Contract, _ []byte, readOnly bool) (ret []byte, err er
if *evm.currentIdx < len(evm.readOnlySliceTest) {
res, err := run(evm.env, NewContract(
&dummyContractRef{},
- &dummyContractRef{},
+ libcommon.Address{},
new(uint256.Int),
0,
false,
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index f01c01170ce..97fef071075 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -105,6 +105,7 @@ const (
SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48
BLOBHASH OpCode = 0x49
+ BLOBBASEFEE OpCode = 0x4a
)
// 0x50 range - 'storage' and execution.
@@ -282,6 +283,7 @@ var opCodeToString = map[OpCode]string{
SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE",
BLOBHASH: "BLOBHASH",
+ BLOBBASEFEE: "BLOBBASEFEE",
// 0x50 range - 'storage' and execution.
POP: "POP",
@@ -437,6 +439,7 @@ var stringToOp = map[string]OpCode{
"CHAINID": CHAINID,
"BASEFEE": BASEFEE,
"BLOBHASH": BLOBHASH,
+ "BLOBBASEFEE": BLOBBASEFEE,
"DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL,
"CODESIZE": CODESIZE,
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 10ace257499..6256ae5740b 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -28,7 +28,7 @@ import (
)
func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
- return func(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
// If we fail the minimum gas availability invariant, fail (0)
if contract.Gas <= params.SstoreSentryGasEIP2200 {
return 0, errors.New("not enough gas for reentrancy sentry")
@@ -40,18 +40,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
current uint256.Int
cost = uint64(0)
)
+
evm.IntraBlockState().GetState(contract.Address(), &slot, ¤t)
- // Check slot presence in the access list
- if addrPresent, slotPresent := evm.IntraBlockState().SlotInAccessList(contract.Address(), slot); !slotPresent {
+ // If the caller cannot afford the cost, this change will be rolled back
+ if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(contract.Address(), slot); slotMod {
cost = params.ColdSloadCostEIP2929
- // If the caller cannot afford the cost, this change will be rolled back
- evm.IntraBlockState().AddSlotToAccessList(contract.Address(), slot)
- if !addrPresent {
- // Once we're done with YOLOv2 and schedule this for mainnet, might
- // be good to remove this panic here, which is just really a
- // canary to have during testing
- panic("impossible case: address was not present in access list during sstore op")
- }
}
var value uint256.Int
value.Set(y)
@@ -107,14 +100,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
// whose storage is being read) is not yet in accessed_storage_keys,
// charge 2100 gas and add the pair to accessed_storage_keys.
// If the pair is already in accessed_storage_keys, charge 100 gas.
-func gasSLoadEIP2929(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
loc := stack.Peek()
- slot := libcommon.Hash(loc.Bytes32())
- // Check slot presence in the access list
- if _, slotPresent := evm.IntraBlockState().SlotInAccessList(contract.Address(), slot); !slotPresent {
- // If the caller cannot afford the cost, this change will be rolled back
- // If he does afford it, we can skip checking the same thing later on, during execution
- evm.IntraBlockState().AddSlotToAccessList(contract.Address(), slot)
+ // If the caller cannot afford the cost, this change will be rolled back
+ // If he does afford it, we can skip checking the same thing later on, during execution
+ if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(contract.Address(), loc.Bytes32()); slotMod {
return params.ColdSloadCostEIP2929, nil
}
return params.WarmStorageReadCostEIP2929, nil
@@ -125,7 +115,7 @@ func gasSLoadEIP2929(evm VMInterpreter, contract *Contract, stack *stack.Stack,
// > If the target is not in accessed_addresses,
// > charge COLD_ACCOUNT_ACCESS_COST gas, and add the address to accessed_addresses.
// > Otherwise, charge WARM_STORAGE_READ_COST gas.
-func gasExtCodeCopyEIP2929(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasExtCodeCopyEIP2929(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
// memory expansion first (dynamic part of pre-2929 implementation)
gas, err := gasExtCodeCopy(evm, contract, stack, mem, memorySize)
if err != nil {
@@ -133,8 +123,7 @@ func gasExtCodeCopyEIP2929(evm VMInterpreter, contract *Contract, stack *stack.S
}
addr := libcommon.Address(stack.Peek().Bytes20())
// Check slot presence in the access list
- if !evm.IntraBlockState().AddressInAccessList(addr) {
- evm.IntraBlockState().AddAddressToAccessList(addr)
+ if evm.IntraBlockState().AddAddressToAccessList(addr) {
var overflow bool
// We charge (cold-warm), since 'warm' is already charged as constantGas
if gas, overflow = math.SafeAdd(gas, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow {
@@ -152,12 +141,10 @@ func gasExtCodeCopyEIP2929(evm VMInterpreter, contract *Contract, stack *stack.S
// - extcodehash,
// - extcodesize,
// - (ext) balance
-func gasEip2929AccountCheck(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
addr := libcommon.Address(stack.Peek().Bytes20())
- // Check slot presence in the access list
- if !evm.IntraBlockState().AddressInAccessList(addr) {
- // If the caller cannot afford the cost, this change will be rolled back
- evm.IntraBlockState().AddAddressToAccessList(addr)
+ // If the caller cannot afford the cost, this change will be rolled back
+ if evm.IntraBlockState().AddAddressToAccessList(addr) {
// The warm storage read cost is already charged as constantGas
return params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929, nil
}
@@ -165,15 +152,15 @@ func gasEip2929AccountCheck(evm VMInterpreter, contract *Contract, stack *stack.
}
func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
- return func(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
addr := libcommon.Address(stack.Back(1).Bytes20())
- // Check slot presence in the access list
- warmAccess := evm.IntraBlockState().AddressInAccessList(addr)
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
- if !warmAccess {
- evm.IntraBlockState().AddAddressToAccessList(addr)
+
+ addrMod := evm.IntraBlockState().AddAddressToAccessList(addr)
+ warmAccess := !addrMod
+ if addrMod {
// Charge the remaining difference here already, to correctly calculate available
// gas for call
if !contract.UseGas(coldCost) {
@@ -228,14 +215,13 @@ var (
// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-2539
func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
- gasFunc := func(evm VMInterpreter, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ gasFunc := func(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
address = libcommon.Address(stack.Peek().Bytes20())
)
- if !evm.IntraBlockState().AddressInAccessList(address) {
- // If the caller cannot afford the cost, this change will be rolled back
- evm.IntraBlockState().AddAddressToAccessList(address)
+ // If the caller cannot afford the cost, this change will be rolled back
+ if evm.IntraBlockState().AddAddressToAccessList(address) {
gas = params.ColdAccountAccessCostEIP2929
}
// if empty and transfers value
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 8dd6706448b..df2bb97b7ec 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -132,7 +132,7 @@ func Execute(code, input []byte, cfg *Config, bn uint64) ([]byte, *state.IntraBl
address = libcommon.BytesToAddress([]byte("contract"))
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
- rules = cfg.ChainConfig.Rules(vmenv.Context().BlockNumber, vmenv.Context().Time)
+ rules = vmenv.ChainRules()
)
cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil)
cfg.State.CreateAccount(address, true)
@@ -176,7 +176,7 @@ func Create(input []byte, cfg *Config, blockNr uint64) ([]byte, libcommon.Addres
var (
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
- rules = cfg.ChainConfig.Rules(vmenv.Context().BlockNumber, vmenv.Context().Time)
+ rules = vmenv.ChainRules()
)
cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, nil, vm.ActivePrecompiles(rules), nil)
@@ -202,7 +202,7 @@ func Call(address libcommon.Address, input []byte, cfg *Config) ([]byte, uint64,
sender := cfg.State.GetOrNewStateObject(cfg.Origin)
statedb := cfg.State
- rules := cfg.ChainConfig.Rules(vmenv.Context().BlockNumber, vmenv.Context().Time)
+ rules := vmenv.ChainRules()
statedb.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil)
// Call the code with the given configuration.
diff --git a/core/vm/runtime/runtime_example_test.go b/core/vm/runtime/runtime_example_test.go
index c558d17d4fa..753e3f97f4c 100644
--- a/core/vm/runtime/runtime_example_test.go
+++ b/core/vm/runtime/runtime_example_test.go
@@ -18,8 +18,8 @@ package runtime_test
import (
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common"
- "github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/vm/runtime"
)
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 40d334748e4..1e326eea237 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -39,6 +39,7 @@ import (
)
func TestDefaults(t *testing.T) {
+ t.Parallel()
cfg := new(Config)
setDefaults(cfg)
@@ -67,6 +68,7 @@ func TestDefaults(t *testing.T) {
}
func TestEVM(t *testing.T) {
+ t.Parallel()
defer func() {
if r := recover(); r != nil {
t.Fatalf("crashed with: %v", r)
@@ -87,6 +89,7 @@ func TestEVM(t *testing.T) {
}
func TestExecute(t *testing.T) {
+ t.Parallel()
ret, _, err := Execute([]byte{
byte(vm.PUSH1), 10,
byte(vm.PUSH1), 0,
@@ -106,6 +109,7 @@ func TestExecute(t *testing.T) {
}
func TestCall(t *testing.T) {
+ t.Parallel()
_, tx := memdb.NewTestTx(t)
state := state.New(state.NewDbStateReader(tx))
address := libcommon.HexToAddress("0xaa")
@@ -132,7 +136,7 @@ func TestCall(t *testing.T) {
func BenchmarkCall(b *testing.B) {
var definition = `[{"constant":true,"inputs":[],"name":"seller","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[],"name":"abort","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"value","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[],"name":"refund","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"buyer","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[],"name":"confirmReceived","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"state","outputs":[{"name":"","type":"uint8"}],"type":"function"},{"constant":false,"inputs":[],"name":"confirmPurchase","outputs":[],"type":"function"},{"inputs":[],"type":"constructor"},{"anonymous":false,"inputs":[],"name":"Aborted","type":"event"},{"anonymous":false,"inputs":[],"name":"PurchaseConfirmed","type":"event"},{"anonymous":false,"inputs":[],"name":"ItemReceived","type":"event"},{"anonymous":false,"inputs":[],"name":"Refunded","type":"event"}]`
- var code = common.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
+ var code = libcommon.Hex2Bytes("6060604052361561006c5760e060020a600035046308551a53811461007457806335a063b4146100865780633fa4f245146100a6578063590e1ae3146100af5780637150d8ae146100cf57806373fac6f0146100e1578063c19d93fb146100fe578063d696069714610112575b610131610002565b610133600154600160a060020a031681565b610131600154600160a060020a0390811633919091161461015057610002565b61014660005481565b610131600154600160a060020a039081163391909116146102d557610002565b610133600254600160a060020a031681565b610131600254600160a060020a0333811691161461023757610002565b61014660025460ff60a060020a9091041681565b61013160025460009060ff60a060020a9091041681146101cc57610002565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60025460009060a060020a900460ff16811461016b57610002565b600154600160a060020a03908116908290301631606082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f72c874aeff0b183a56e2b79c71b46e1aed4dee5e09862134b8821ba2fddbf8bf9250a150565b80546002023414806101dd57610002565b6002805460a060020a60ff021973ffffffffffffffffffffffffffffffffffffffff1990911633171660a060020a1790557fd5d55c8a68912e9a110618df8d5e2e83b8d83211c57a8ddd1203df92885dc881826060a15050565b60025460019060a060020a900460ff16811461025257610002565b60025460008054600160a060020a0390921691606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517fe89152acd703c9d8c7d28829d443260b411454d45394e7995815140c8cbcbcf79250a150565b60025460019060a060020a900460ff1681146102f057610002565b6002805460008054600160a060020a0390921692909102606082818181858883f150508354604051600160a060020a0391821694503090911631915082818181858883f150506002805460a060020a60ff02191660a160020a179055506040517f8616bbbbad963e4e65b1366f1d75dfb63f9e9704bbbf91fb01bec70849906cf79250a15056")
abi, err := abi.JSON(strings.NewReader(definition))
if err != nil {
@@ -264,6 +268,7 @@ func (d *dummyChain) GetHeader(h libcommon.Hash, n uint64) *types.Header {
// TestBlockhash tests the blockhash operation. It's a bit special, since it internally
// requires access to a chain reader.
func TestBlockhash(t *testing.T) {
+ t.Parallel()
// Current head
n := uint64(1000)
parentHash := libcommon.Hash{}
@@ -304,9 +309,9 @@ func TestBlockhash(t *testing.T) {
*/
// The contract above
- data := common.Hex2Bytes("6080604052348015600f57600080fd5b50600436106045576000357c010000000000000000000000000000000000000000000000000000000090048063f8a8fd6d14604a575b600080fd5b60506074565b60405180848152602001838152602001828152602001935050505060405180910390f35b600080600080439050600080600083409050600184034092506000600290505b61010481101560c35760008186034090506000816001900414151560b6578093505b5080806001019150506094565b508083839650965096505050505090919256fea165627a7a72305820462d71b510c1725ff35946c20b415b0d50b468ea157c8c77dff9466c9cb85f560029")
+ data := libcommon.Hex2Bytes("6080604052348015600f57600080fd5b50600436106045576000357c010000000000000000000000000000000000000000000000000000000090048063f8a8fd6d14604a575b600080fd5b60506074565b60405180848152602001838152602001828152602001935050505060405180910390f35b600080600080439050600080600083409050600184034092506000600290505b61010481101560c35760008186034090506000816001900414151560b6578093505b5080806001019150506094565b508083839650965096505050505090919256fea165627a7a72305820462d71b510c1725ff35946c20b415b0d50b468ea157c8c77dff9466c9cb85f560029")
// The method call to 'test()'
- input := common.Hex2Bytes("f8a8fd6d")
+ input := libcommon.Hex2Bytes("f8a8fd6d")
chain := &dummyChain{}
ret, _, err := Execute(data, input, &Config{
GetHashFn: core.GetHashFn(header, chain.GetHeader),
diff --git a/core/vm/stack/stack.go b/core/vm/stack/stack.go
index 9b6e291201e..119d8b61f16 100644
--- a/core/vm/stack/stack.go
+++ b/core/vm/stack/stack.go
@@ -66,20 +66,20 @@ func (st *Stack) Cap() int {
}
func (st *Stack) Swap(n int) {
- st.Data[st.Len()-n], st.Data[st.Len()-1] = st.Data[st.Len()-1], st.Data[st.Len()-n]
+ st.Data[len(st.Data)-n], st.Data[len(st.Data)-1] = st.Data[len(st.Data)-1], st.Data[len(st.Data)-n]
}
func (st *Stack) Dup(n int) {
- st.Push(&st.Data[st.Len()-n])
+ st.Data = append(st.Data, st.Data[len(st.Data)-n])
}
func (st *Stack) Peek() *uint256.Int {
- return &st.Data[st.Len()-1]
+ return &st.Data[len(st.Data)-1]
}
// Back returns the n'th item in stack
func (st *Stack) Back(n int) *uint256.Int {
- return &st.Data[st.Len()-n-1]
+ return &st.Data[len(st.Data)-n-1]
}
func (st *Stack) Reset() {
@@ -107,42 +107,3 @@ func ReturnNormalStack(s *Stack) {
s.Data = s.Data[:0]
stackPool.Put(s)
}
-
-var rStackPool = sync.Pool{
- New: func() interface{} {
- return &ReturnStack{data: make([]uint32, 0, 10)}
- },
-}
-
-func ReturnRStack(rs *ReturnStack) {
- rs.data = rs.data[:0]
- rStackPool.Put(rs)
-}
-
-// ReturnStack is an object for basic return stack operations.
-type ReturnStack struct {
- data []uint32
-}
-
-func NewReturnStack() *ReturnStack {
- rStack, ok := rStackPool.Get().(*ReturnStack)
- if !ok {
- log.Error("Type assertion failure", "err", "cannot get ReturnStack pointer from rStackPool")
- }
- return rStack
-}
-
-func (st *ReturnStack) Push(d uint32) {
- st.data = append(st.data, d)
-}
-
-// A uint32 is sufficient as for code below 4.2G
-func (st *ReturnStack) Pop() (ret uint32) {
- ret = st.data[len(st.data)-1]
- st.data = st.data[:len(st.data)-1]
- return
-}
-
-func (st *ReturnStack) Data() []uint32 {
- return st.data
-}
diff --git a/core/vm/testdata/precompiles/pointEvaluation.json b/core/vm/testdata/precompiles/pointEvaluation.json
index bef9fc57713..ecee94aa7d0 100644
--- a/core/vm/testdata/precompiles/pointEvaluation.json
+++ b/core/vm/testdata/precompiles/pointEvaluation.json
@@ -1,6 +1,6 @@
[
{
- "Input": "013c03613f6fc558fb7e61e75602241ed9a2f04e36d8670aadd286e71b5ca9cc00000000000000000000000000000000000000000000000000000000000000423c8e9f367d9c417c78ca1700993dae1987f44bd5e8ea33a7f62ebc6c35a2e53183fac17c3f237fc51f90e2c660eb202a438bc2025baded5cd193c1a018c5885bc9281ba704d5566082e851235c7be763b2a99adff965e0a121ee972ebc472d02944a74f5c6243e14052e105124b70bf65faf85ad3a494325e269fad097842cba",
+ "Input": "014edfed8547661f6cb416eba53061a2f6dce872c0497e6dd485a876fe2567f1564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d363066d928e13fe443e957d82e3e71d48cb65d51028eb4483e719bf8efcdf12f7c321a421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06a444d6bb5aadc3ceb615b50d6606bd54bfe529f59247987cd1ab848d19de599a9052f1835fb0d0d44cf70183e19a68c9",
"Expected": "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
"Name": "pointEvaluation1",
"Gas": 50000,
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 17e592ba6a8..6d385075d92 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -24,6 +24,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"hash"
"io"
"math/big"
@@ -35,7 +36,6 @@ import (
"github.com/ledgerwatch/erigon/crypto/cryptopool"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/common/u256"
"github.com/ledgerwatch/erigon/rlp"
diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go
index 8f33c95f81d..b9a979bf8c2 100644
--- a/crypto/crypto_test.go
+++ b/crypto/crypto_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"os"
"reflect"
"testing"
@@ -30,7 +31,6 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/common/u256"
)
diff --git a/crypto/signature_test.go b/crypto/signature_test.go
index 2aef1113c14..1a033a96766 100644
--- a/crypto/signature_test.go
+++ b/crypto/signature_test.go
@@ -19,11 +19,11 @@ package crypto
import (
"bytes"
"crypto/ecdsa"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutil"
"reflect"
"testing"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/common/math"
)
diff --git a/diagnostics/block_body_download.go b/diagnostics/block_body_download_stats.go
similarity index 82%
rename from diagnostics/block_body_download.go
rename to diagnostics/block_body_download_stats.go
index c60fa1b9bab..4903e1a8c99 100644
--- a/diagnostics/block_body_download.go
+++ b/diagnostics/block_body_download_stats.go
@@ -9,8 +9,8 @@ import (
"github.com/ledgerwatch/erigon/dataflow"
)
-func SetupBlockBodyDownload() {
- http.HandleFunc("/debug/metrics/block_body_download", func(w http.ResponseWriter, r *http.Request) {
+func SetupBlockBodyDownload(metricsMux *http.ServeMux) {
+ metricsMux.HandleFunc("/block_body_download", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
writeBlockBodyDownload(w, r)
})
diff --git a/diagnostics/bootnodes.go b/diagnostics/bootnodes.go
new file mode 100644
index 00000000000..fba0982881e
--- /dev/null
+++ b/diagnostics/bootnodes.go
@@ -0,0 +1,25 @@
+package diagnostics
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/ledgerwatch/erigon/turbo/node"
+)
+
+func SetupBootnodesAccess(metricsMux *http.ServeMux, node *node.ErigonNode) {
+ metricsMux.HandleFunc("/bootnodes", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Content-Type", "application/json")
+
+ bootnodes := node.Node().Config().P2P.BootstrapNodesV5
+
+ btNodes := make([]string, 0, len(bootnodes))
+
+ for _, bootnode := range bootnodes {
+ btNodes = append(btNodes, bootnode.String())
+ }
+
+ json.NewEncoder(w).Encode(btNodes)
+ })
+}
diff --git a/diagnostics/cmd_line.go b/diagnostics/cmd_line.go
index 067861d7ba7..db4d9dcfdf5 100644
--- a/diagnostics/cmd_line.go
+++ b/diagnostics/cmd_line.go
@@ -1,22 +1,29 @@
package diagnostics
import (
- "fmt"
- "io"
"net/http"
"os"
+ "strconv"
+ "strings"
)
-func SetupCmdLineAccess() {
- http.HandleFunc("/debug/metrics/cmdline", func(w http.ResponseWriter, r *http.Request) {
+func SetupCmdLineAccess(metricsMux *http.ServeMux) {
+ metricsMux.HandleFunc("/cmdline", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
- writeCmdLine(w)
- })
-}
+ w.Header().Set("Content-Type", "application/json")
+
+ var space []byte
-func writeCmdLine(w io.Writer) {
- fmt.Fprintf(w, "SUCCESS\n")
- for _, arg := range os.Args {
- fmt.Fprintf(w, "%s\n", arg)
- }
+ w.Write([]byte{'"'})
+ for _, arg := range os.Args {
+ if len(space) > 0 {
+ w.Write(space)
+ } else {
+ space = []byte(" ")
+ }
+
+ w.Write([]byte(strings.Trim(strconv.Quote(arg), `"`)))
+ }
+ w.Write([]byte{'"'})
+ })
}
diff --git a/diagnostics/db.go b/diagnostics/db.go
new file mode 100644
index 00000000000..6769b29425e
--- /dev/null
+++ b/diagnostics/db.go
@@ -0,0 +1,258 @@
+package diagnostics
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path/filepath"
+ "strings"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon/common/paths"
+ "github.com/urfave/cli/v2"
+)
+
+func SetupDbAccess(ctx *cli.Context, metricsMux *http.ServeMux) {
+ var dataDir string
+ if ctx.IsSet("datadir") {
+ dataDir = ctx.String("datadir")
+ } else {
+ dataDir = paths.DataDirForNetwork(paths.DefaultDataDir(), ctx.String("chain"))
+ }
+ metricsMux.HandleFunc("/dbs", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Content-Type", "application/json")
+ writeDbList(w, dataDir)
+ })
+ metricsMux.HandleFunc("/dbs/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+
+ urlPath := r.URL.Path
+
+ if !strings.HasPrefix(urlPath, "/dbs/") {
+ http.Error(w, fmt.Sprintf(`Unexpected path prefix: expected: "/dbs/..." got: "%s"`, urlPath), http.StatusNotFound)
+ return
+ }
+
+ pathParts := strings.Split(urlPath[5:], "/")
+
+ if len(pathParts) < 1 {
+ http.Error(w, fmt.Sprintf(`Unexpected path len: expected: "{db}/tables" got: "%s"`, urlPath), http.StatusNotFound)
+ return
+ }
+
+ var dbname string
+ var sep string
+
+ for len(pathParts) > 0 {
+ dbname += sep + pathParts[0]
+
+ if sep == "" {
+ sep = "/"
+ }
+
+ pathParts = pathParts[1:]
+
+ if pathParts[0] == "tables" {
+ break
+ }
+
+ if len(pathParts) < 2 {
+ http.Error(w, fmt.Sprintf(`Unexpected path part: expected: "tables" got: "%s"`, pathParts[0]), http.StatusNotFound)
+ return
+ }
+ }
+
+ switch len(pathParts) {
+ case 1:
+ writeDbTables(w, r, dataDir, dbname)
+ case 2:
+ offset, err := offsetValue(r.URL.Query())
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ limit, err := limitValue(r.URL.Query(), 0)
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ writeDbRead(w, r, dataDir, dbname, pathParts[1], nil, offset, limit)
+ case 3:
+ key, err := base64.URLEncoding.DecodeString(pathParts[2])
+
+ if err != nil {
+ http.Error(w, fmt.Sprintf(`key "%s" argument should be base64url encoded: %v`, pathParts[2], err), http.StatusBadRequest)
+ return
+ }
+
+ offset, err := offsetValue(r.URL.Query())
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ limit, err := limitValue(r.URL.Query(), 0)
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ writeDbRead(w, r, dataDir, dbname, pathParts[1], key, offset, limit)
+
+ default:
+ http.Error(w, fmt.Sprintf(`Unexpected path parts: "%s"`, strings.Join(pathParts[2:], "/")), http.StatusNotFound)
+ }
+ })
+}
+
+func writeDbList(w http.ResponseWriter, dataDir string) {
+ w.Header().Set("Content-Type", "application/json")
+ m := mdbx.PathDbMap()
+ dbs := make([]string, 0, len(m))
+ for path := range m {
+ dbs = append(dbs, strings.ReplaceAll(strings.TrimPrefix(path, dataDir)[1:], "\\", "/"))
+ }
+
+ json.NewEncoder(w).Encode(dbs)
+}
+
+func writeDbTables(w http.ResponseWriter, r *http.Request, dataDir string, dbname string) {
+ m := mdbx.PathDbMap()
+ db, ok := m[filepath.Join(dataDir, dbname)]
+ if !ok {
+ http.Error(w, fmt.Sprintf(`"%s" is not in the list of allowed dbs`, dbname), http.StatusNotFound)
+ return
+ }
+ type table struct {
+ Name string `json:"name"`
+ Count uint64 `json:"count"`
+ Size uint64 `json:"size"`
+ }
+
+ var tables []table
+
+ if err := db.View(context.Background(), func(tx kv.Tx) error {
+ var e error
+ buckets, e := tx.ListBuckets()
+ if e != nil {
+ return e
+ }
+
+ for _, bucket := range buckets {
+ size, e := tx.BucketSize(bucket)
+ if e != nil {
+ return e
+ }
+
+ var count uint64
+
+ if e := db.View(context.Background(), func(tx kv.Tx) error {
+ c, e := tx.Cursor(bucket)
+ if e != nil {
+ return e
+ }
+ defer c.Close()
+ count, e = c.Count()
+ if e != nil {
+ return e
+ }
+
+ return nil
+ }); e != nil {
+ return e
+ }
+
+ tables = append(tables, table{bucket, count, size})
+ }
+
+ return nil
+ }); err != nil {
+ http.Error(w, fmt.Sprintf(`failed to list tables in "%s": %v`, dbname, err), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(tables)
+}
+
+func writeDbRead(w http.ResponseWriter, r *http.Request, dataDir string, dbname string, table string, key []byte, offset int64, limit int64) {
+ m := mdbx.PathDbMap()
+ db, ok := m[filepath.Join(dataDir, dbname)]
+ if !ok {
+ fmt.Fprintf(w, "ERROR: path %s is not in the list of allowed paths", dbname)
+ return
+ }
+
+ var results [][2][]byte
+ var count uint64
+
+ if err := db.View(context.Background(), func(tx kv.Tx) error {
+ c, e := tx.Cursor(table)
+ if e != nil {
+ return e
+ }
+ defer c.Close()
+
+ count, e = c.Count()
+
+ if e != nil {
+ return e
+ }
+
+ var k, v []byte
+ if key == nil {
+ if k, v, e = c.First(); e != nil {
+ return e
+ }
+ } else if k, v, e = c.Seek(key); e != nil {
+ return e
+ }
+
+ var pos int64
+
+ for e == nil && k != nil && pos < offset {
+ //TODO - not sure if this is a good idea it may be slooooow
+ k, _, e = c.Next()
+ pos++
+ }
+
+ for e == nil && k != nil && (limit == 0 || int64(len(results)) < limit) {
+ results = append(results, [2][]byte{k, v})
+ k, v, e = c.Next()
+ }
+ return nil
+ }); err != nil {
+ fmt.Fprintf(w, "ERROR: reading table %s in %s: %v\n", table, dbname, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte("{"))
+ fmt.Fprintf(w, `"offset":%d`, offset)
+ if limit > 0 {
+ fmt.Fprintf(w, `,"limit":%d`, limit)
+ }
+ fmt.Fprintf(w, `,"count":%d`, count)
+ if len(results) > 0 {
+ var comma string
+ w.Write([]byte(`,"results":{`))
+ for _, result := range results {
+ fmt.Fprintf(w, `%s"%s":"%s"`, comma, base64.URLEncoding.EncodeToString(result[0]), base64.URLEncoding.EncodeToString(result[1]))
+
+ if comma == "" {
+ comma = ","
+ }
+ }
+ w.Write([]byte("}"))
+ }
+ w.Write([]byte("}"))
+}
diff --git a/diagnostics/db_access.go b/diagnostics/db_access.go
deleted file mode 100644
index 586cae00ee1..00000000000
--- a/diagnostics/db_access.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package diagnostics
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "io"
- "net/http"
-
- "github.com/ledgerwatch/erigon-lib/kv"
- "github.com/ledgerwatch/erigon-lib/kv/mdbx"
- "github.com/ledgerwatch/erigon/common/paths"
- "github.com/urfave/cli/v2"
-)
-
-func SetupDbAccess(ctx *cli.Context) {
- var dataDir string
- if ctx.IsSet("datadir") {
- dataDir = ctx.String("datadir")
- } else {
- dataDir = paths.DataDirForNetwork(paths.DefaultDataDir(), ctx.String("chain"))
- }
- http.HandleFunc("/debug/metrics/db/list", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- writeDbList(w, dataDir)
- })
- http.HandleFunc("/debug/metrics/db/tables", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- writeDbTables(w, r, dataDir)
- })
- http.HandleFunc("/debug/metrics/db/read", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- writeDbRead(w, r, dataDir)
- })
-}
-
-func writeDbList(w io.Writer, dataDir string) {
- fmt.Fprintf(w, "SUCCESS\n")
- m := mdbx.PathDbMap()
- for path := range m {
- fmt.Fprintf(w, "%s\n", path)
- }
-}
-
-func writeDbTables(w io.Writer, r *http.Request, dataDir string) {
- if err := r.ParseForm(); err != nil {
- fmt.Fprintf(w, "ERROR: parsing arguments: %v\n", err)
- return
- }
- path := r.Form.Get("path")
- if path == "" {
- fmt.Fprintf(w, "ERROR: path argument is required - specify the relative path to an MDBX database directory")
- return
- }
- m := mdbx.PathDbMap()
- db, ok := m[path]
- if !ok {
- fmt.Fprintf(w, "ERROR: path %s is not in the list of allowed paths", path)
- return
- }
- var tables []string
- if err := db.View(context.Background(), func(tx kv.Tx) error {
- var e error
- tables, e = tx.ListBuckets()
- if e != nil {
- return e
- }
- return nil
- }); err != nil {
- fmt.Fprintf(w, "ERROR: listing tables in %s: %v\n", path, err)
- return
- }
- fmt.Fprintf(w, "SUCCESS\n")
- for _, table := range tables {
- fmt.Fprintf(w, "%s\n", table)
- }
-}
-
-func writeDbRead(w io.Writer, r *http.Request, dataDir string) {
- if err := r.ParseForm(); err != nil {
- fmt.Fprintf(w, "ERROR: parsing arguments: %v\n", err)
- return
- }
- path := r.Form.Get("path")
- if path == "" {
- fmt.Fprintf(w, "ERROR: path argument is required - specify the relative path to an MDBX database directory")
- return
- }
- m := mdbx.PathDbMap()
- db, ok := m[path]
- if !ok {
- fmt.Fprintf(w, "ERROR: path %s is not in the list of allowed paths", path)
- return
- }
- table := r.Form.Get("table")
- if table == "" {
- fmt.Fprintf(w, "ERROR: table argument is required - specify the table to read from")
- return
- }
- var key []byte
- var err error
- keyHex := r.Form.Get("key")
- if keyHex != "" {
- if key, err = hex.DecodeString(keyHex); err != nil {
- fmt.Fprintf(w, "ERROR: key [%s] argument may only contain hexadecimal digits: %v\n", keyHex, err)
- return
- }
- }
- var results []string
- if err := db.View(context.Background(), func(tx kv.Tx) error {
- c, e := tx.Cursor(table)
- if e != nil {
- return e
- }
- defer c.Close()
- var k, v []byte
- if key == nil {
- if k, v, e = c.First(); err != nil {
- return e
- }
- } else if k, v, e = c.Seek(key); e != nil {
- return e
- }
- count := 0
- for e == nil && k != nil && count < 256 {
- results = append(results, fmt.Sprintf("%x | %x", k, v))
- count++
- k, v, e = c.Next()
- }
- return nil
- }); err != nil {
- fmt.Fprintf(w, "ERROR: reading table %s in %s: %v\n", table, path, err)
- return
- }
- fmt.Fprintf(w, "SUCCESS\n")
- for _, result := range results {
- fmt.Fprintf(w, "%s\n", result)
- }
-}
diff --git a/diagnostics/diagnostic.go b/diagnostics/diagnostic.go
new file mode 100644
index 00000000000..c045057d451
--- /dev/null
+++ b/diagnostics/diagnostic.go
@@ -0,0 +1,93 @@
+package diagnostics
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ diaglib "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/erigon/turbo/node"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/urfave/cli/v2"
+)
+
+type DiagnosticClient struct {
+ ctx *cli.Context
+ metricsMux *http.ServeMux
+ node *node.ErigonNode
+
+ snapshotDownload diaglib.SnapshotDownloadStatistics
+}
+
+func NewDiagnosticClient(ctx *cli.Context, metricsMux *http.ServeMux, node *node.ErigonNode) *DiagnosticClient {
+ return &DiagnosticClient{ctx: ctx, metricsMux: metricsMux, node: node, snapshotDownload: diaglib.SnapshotDownloadStatistics{}}
+}
+
+func (d *DiagnosticClient) Setup() {
+ d.runSnapshotListener()
+ d.runTorrentListener()
+}
+
+func (d *DiagnosticClient) runSnapshotListener() {
+ go func() {
+ ctx, ch, cancel := diaglib.Context[diaglib.SnapshotDownloadStatistics](context.Background(), 1)
+ defer cancel()
+
+ rootCtx, _ := common.RootContext()
+
+ diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.SnapshotDownloadStatistics{}), log.Root())
+ for {
+ select {
+ case <-rootCtx.Done():
+ cancel()
+ return
+ case info := <-ch:
+ d.snapshotDownload.Downloaded = info.Downloaded
+ d.snapshotDownload.Total = info.Total
+ d.snapshotDownload.TotalTime = info.TotalTime
+ d.snapshotDownload.DownloadRate = info.DownloadRate
+ d.snapshotDownload.UploadRate = info.UploadRate
+ d.snapshotDownload.Peers = info.Peers
+ d.snapshotDownload.Files = info.Files
+ d.snapshotDownload.Connections = info.Connections
+ d.snapshotDownload.Alloc = info.Alloc
+ d.snapshotDownload.Sys = info.Sys
+ d.snapshotDownload.DownloadFinished = info.DownloadFinished
+ d.snapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady
+
+ if info.DownloadFinished {
+ return
+ }
+ }
+ }
+
+ }()
+}
+
+func (d *DiagnosticClient) SnapshotDownload() diaglib.SnapshotDownloadStatistics {
+ return d.snapshotDownload
+}
+
+func (d *DiagnosticClient) runTorrentListener() {
+ go func() {
+ ctx, ch, cancel := diaglib.Context[diaglib.SegmentDownloadStatistics](context.Background(), 1)
+ defer cancel()
+
+ rootCtx, _ := common.RootContext()
+
+ diaglib.StartProviders(ctx, diaglib.TypeOf(diaglib.SegmentDownloadStatistics{}), log.Root())
+ for {
+ select {
+ case <-rootCtx.Done():
+ cancel()
+ return
+ case info := <-ch:
+ if d.snapshotDownload.Segments == nil {
+ d.snapshotDownload.Segments = map[string]diaglib.SegmentDownloadStatistics{}
+ }
+
+ d.snapshotDownload.Segments[info.Name] = info
+ }
+ }
+ }()
+}
diff --git a/diagnostics/flags.go b/diagnostics/flags.go
index df6c9eaaf24..9cdf0267031 100644
--- a/diagnostics/flags.go
+++ b/diagnostics/flags.go
@@ -1,23 +1,55 @@
package diagnostics
import (
- "fmt"
- "io"
+ "encoding/json"
"net/http"
"github.com/urfave/cli/v2"
)
-func SetupFlagsAccess(ctx *cli.Context) {
- http.HandleFunc("/debug/metrics/flags", func(w http.ResponseWriter, r *http.Request) {
+func SetupFlagsAccess(ctx *cli.Context, metricsMux *http.ServeMux) {
+ metricsMux.HandleFunc("/flags", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
- writeFlags(w, ctx)
- })
-}
+ w.Header().Set("Content-Type", "application/json")
+ flags := map[string]interface{}{}
+
+ ctxFlags := map[string]struct{}{}
+
+ for _, flagName := range ctx.FlagNames() {
+ ctxFlags[flagName] = struct{}{}
+ }
+
+ for _, flag := range ctx.App.Flags {
+ name := flag.Names()[0]
+ value := ctx.Value(name)
-func writeFlags(w io.Writer, ctx *cli.Context) {
- fmt.Fprintf(w, "SUCCESS\n")
- for _, flagName := range ctx.FlagNames() {
- fmt.Fprintf(w, "%s=%v\n", flagName, ctx.Value(flagName))
- }
+ switch typed := value.(type) {
+ case string:
+ if typed == "" {
+ continue
+ }
+ case cli.UintSlice:
+ value = typed.Value()
+ }
+
+ var usage string
+
+ if docFlag, ok := flag.(cli.DocGenerationFlag); ok {
+ usage = docFlag.GetUsage()
+ }
+
+ _, inCtx := ctxFlags[name]
+
+ flags[name] = struct {
+ Value interface{} `json:"value,omitempty"`
+ Usage string `json:"usage,omitempty"`
+ Default bool `json:"default"`
+ }{
+ Value: value,
+ Usage: usage,
+ Default: !inCtx,
+ }
+ }
+ json.NewEncoder(w).Encode(flags)
+ })
}
diff --git a/diagnostics/header_downloader_stats.go b/diagnostics/header_downloader_stats.go
index 946e9d53f73..a388d6fb4ae 100644
--- a/diagnostics/header_downloader_stats.go
+++ b/diagnostics/header_downloader_stats.go
@@ -9,8 +9,8 @@ import (
"github.com/ledgerwatch/erigon/dataflow"
)
-func SetupHeaderDownloadStats() {
- http.HandleFunc("/debug/metrics/headers_download", func(w http.ResponseWriter, r *http.Request) {
+func SetupHeaderDownloadStats(metricsMux *http.ServeMux) {
+ metricsMux.HandleFunc("/headers_download", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
writeHeaderDownload(w, r)
})
diff --git a/diagnostics/logs.go b/diagnostics/logs.go
new file mode 100644
index 00000000000..72196aa79a4
--- /dev/null
+++ b/diagnostics/logs.go
@@ -0,0 +1,190 @@
+package diagnostics
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/ledgerwatch/erigon/turbo/logging"
+)
+
+func SetupLogsAccess(ctx *cli.Context, metricsMux *http.ServeMux) {
+ dirPath := ctx.String(logging.LogDirPathFlag.Name)
+ if dirPath == "" {
+ datadir := ctx.String("datadir")
+ if datadir != "" {
+ dirPath = filepath.Join(datadir, "logs")
+ }
+ }
+ if dirPath == "" {
+ return
+ }
+ metricsMux.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ writeLogsList(w, dirPath)
+ })
+ metricsMux.HandleFunc("/logs/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ writeLogsRead(w, r, dirPath)
+ })
+}
+
+func writeLogsList(w http.ResponseWriter, dirPath string) {
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed to list directory %s: %v", dirPath, err), http.StatusInternalServerError)
+ return
+ }
+
+ infos := make([]fs.FileInfo, 0, len(entries))
+
+ for _, entry := range entries {
+ fileInfo, err := os.Stat(filepath.Join(dirPath, entry.Name()))
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Can't stat file %s: %v", entry.Name(), err), http.StatusInternalServerError)
+ return
+ }
+ if fileInfo.IsDir() {
+ continue
+ }
+ infos = append(infos, fileInfo)
+ }
+
+ type file struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ }
+
+ files := make([]file, len(infos))
+
+ for _, fileInfo := range infos {
+ files = append(files, file{Name: fileInfo.Name(), Size: fileInfo.Size()})
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(files)
+}
+
+func writeLogsRead(w http.ResponseWriter, r *http.Request, dirPath string) {
+ file := path.Base(r.URL.Path)
+
+ if file == "/" || file == "." {
+ http.Error(w, "file is required - specify the name of log file to read", http.StatusBadRequest)
+ return
+ }
+
+ offset, err := offsetValue(r.URL.Query())
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ fileInfo, err := os.Stat(filepath.Join(dirPath, file))
+
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Can't stat file %s: %v", file, err), http.StatusInternalServerError)
+ return
+ }
+
+ if fileInfo.IsDir() {
+ http.Error(w, fmt.Sprintf("%s is a directory, needs to be a file", file), http.StatusInternalServerError)
+ return
+ }
+
+ if offset > fileInfo.Size() {
+ http.Error(w, fmt.Sprintf("offset %d must not be greater than this file size %d", offset, fileInfo.Size()), http.StatusBadRequest)
+ return
+ }
+
+ f, err := os.Open(filepath.Join(dirPath, file))
+
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Can't opening file %s: %v\n", file, err), http.StatusInternalServerError)
+ return
+ }
+
+ limit, err := limitValue(r.URL.Query(), fileInfo.Size())
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ buf := make([]byte, limit)
+
+ if _, err := f.Seek(offset, 0); err != nil {
+ http.Error(w, fmt.Sprintf("seek failed for file: %s to %d: %v", file, offset, err), http.StatusInternalServerError)
+ return
+ }
+
+ var n int
+ var readTotal int
+
+ for n, err = f.Read(buf[readTotal:]); err == nil && readTotal < len(buf); n, err = f.Read(buf[readTotal:]) {
+ readTotal += n
+ }
+
+ if err != nil && !errors.Is(err, io.EOF) {
+ http.Error(w, fmt.Sprintf("Reading failed for: %s at %d: %v\n", file, readTotal, err), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.FormatInt(int64(readTotal), 10))
+ w.Header().Set("X-Offset", strconv.FormatInt(offset, 10))
+ w.Header().Set("X-Limit", strconv.FormatInt(limit, 10))
+ w.Header().Set("X-Size", strconv.FormatInt(fileInfo.Size(), 10))
+ w.Write(buf[:readTotal])
+}
+
+func limitValue(values url.Values, def int64) (int64, error) {
+ limitStr := values.Get("limit")
+
+ var limit int64
+ var err error
+
+ if limitStr == "" {
+ limit = def
+ } else {
+ limit, err = strconv.ParseInt(limitStr, 10, 64)
+ }
+
+ if err != nil {
+ return 0, fmt.Errorf("limit %s is not a int64 number: %v", limitStr, err)
+ }
+
+ return limit, nil
+}
+
+func offsetValue(values url.Values) (int64, error) {
+
+ offsetStr := values.Get("offset")
+
+ var offset int64
+ var err error
+
+ if offsetStr != "" {
+ offset, err = strconv.ParseInt(offsetStr, 10, 64)
+
+ if err != nil {
+ return 0, fmt.Errorf("offset %s is not a int64 number: %v", offsetStr, err)
+ }
+ }
+
+ if offset < 0 {
+ return 0, fmt.Errorf("offset %d must be non-negative", offset)
+ }
+
+ return offset, nil
+}
diff --git a/diagnostics/logs_access.go b/diagnostics/logs_access.go
deleted file mode 100644
index 58bfe69b15b..00000000000
--- a/diagnostics/logs_access.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package diagnostics
-
-import (
- "errors"
- "fmt"
- "io"
- "io/fs"
- "net/http"
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/urfave/cli/v2"
-
- "github.com/ledgerwatch/erigon/turbo/logging"
-)
-
-func SetupLogsAccess(ctx *cli.Context) {
- dirPath := ctx.String(logging.LogDirPathFlag.Name)
- if dirPath == "" {
- datadir := ctx.String("datadir")
- if datadir != "" {
- dirPath = filepath.Join(datadir, "logs")
- }
- }
- if dirPath == "" {
- return
- }
- http.HandleFunc("/debug/metrics/logs/list", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- writeLogsList(w, dirPath)
- })
- http.HandleFunc("/debug/metrics/logs/read", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- writeLogsRead(w, r, dirPath)
- })
-}
-
-func writeLogsList(w io.Writer, dirPath string) {
- entries, err := os.ReadDir(dirPath)
- if err != nil {
- fmt.Fprintf(w, "ERROR: listing directory %s: %v\n", dirPath, err)
- return
- }
- //nolint: prealloc
- var infos []fs.FileInfo
- for _, entry := range entries {
- fileInfo, err := os.Stat(filepath.Join(dirPath, entry.Name()))
- if err != nil {
- fmt.Fprintf(w, "ERROR: stat file %s: %v\n", entry.Name(), err)
- return
- }
- if fileInfo.IsDir() {
- continue
- }
- infos = append(infos, fileInfo)
- }
- fmt.Fprintf(w, "SUCCESS\n")
- for _, fileInfo := range infos {
- fmt.Fprintf(w, "%s | %d\n", fileInfo.Name(), fileInfo.Size())
- }
-}
-
-func writeLogsRead(w io.Writer, r *http.Request, dirPath string) {
- if err := r.ParseForm(); err != nil {
- fmt.Fprintf(w, "ERROR: parsing arguments: %v\n", err)
- return
- }
- file := r.Form.Get("file")
- if file == "" {
- fmt.Fprintf(w, "ERROR: file argument is required - specify the name of log file to read")
- return
- }
- fileInfo, err := os.Stat(filepath.Join(dirPath, file))
- if err != nil {
- fmt.Fprintf(w, "ERROR: stat file %s: %v\n", file, err)
- return
- }
- if fileInfo.IsDir() {
- fmt.Fprintf(w, "ERROR: %s is a directory, needs to be a file", file)
- return
- }
- offsetStr := r.Form.Get("offset")
- if offsetStr == "" {
- fmt.Fprintf(w, "ERROR: offset argument is required - specify where to start reading in the file")
- return
- }
- offset, err := strconv.ParseInt(offsetStr, 10, 64)
- if err != nil {
- fmt.Fprintf(w, "ERROR: offset %s is not a Uint64 number: %v\n", offsetStr, err)
- return
- }
- if offset < 0 {
- fmt.Fprintf(w, "ERROR: offset %d must be non-negative\n", offset)
- return
- }
- if offset > fileInfo.Size() {
- fmt.Fprintf(w, "ERROR: offset %d must be no greater than file size %d\n", offset, fileInfo.Size())
- return
- }
- f, err := os.Open(filepath.Join(dirPath, file))
- if err != nil {
- fmt.Fprintf(w, "ERROR: opening file %s: %v\n", file, err)
- return
- }
- var buf [16 * 1024]byte
- if _, err := f.Seek(offset, 0); err != nil {
- fmt.Fprintf(w, "ERROR: seeking in file: %s to %d: %v\n", file, offset, err)
- return
- }
- var n int
- var readTotal int
- for n, err = f.Read(buf[readTotal:]); err == nil && readTotal < len(buf); n, err = f.Read(buf[readTotal:]) {
- readTotal += n
- }
- if err != nil && !errors.Is(err, io.EOF) {
- fmt.Fprintf(w, "ERROR: reading in file: %s at %d: %v\n", file, readTotal, err)
- return
- }
- fmt.Fprintf(w, "SUCCESS: %d-%d/%d\n", offset, offset+int64(readTotal), fileInfo.Size())
- w.Write(buf[:readTotal])
-}
diff --git a/diagnostics/nodeinfo.go b/diagnostics/nodeinfo.go
new file mode 100644
index 00000000000..198aa77d7d2
--- /dev/null
+++ b/diagnostics/nodeinfo.go
@@ -0,0 +1,26 @@
+package diagnostics
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/ledgerwatch/erigon/turbo/node"
+)
+
+func SetupNodeInfoAccess(metricsMux *http.ServeMux, node *node.ErigonNode) {
+ metricsMux.HandleFunc("/nodeinfo", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ writeNodeInfo(w, node)
+ })
+}
+
+func writeNodeInfo(w http.ResponseWriter, node *node.ErigonNode) {
+ reply, err := node.Backend().NodesInfo(0)
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ json.NewEncoder(w).Encode(reply)
+}
diff --git a/diagnostics/peers.go b/diagnostics/peers.go
new file mode 100644
index 00000000000..e65e3713d2f
--- /dev/null
+++ b/diagnostics/peers.go
@@ -0,0 +1,150 @@
+package diagnostics
+
+import (
+ "encoding/json"
+ "net/http"
+
+ diagnint "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/erigon/turbo/node"
+ "github.com/urfave/cli/v2"
+)
+
+type PeerNetworkInfo struct {
+ LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
+ RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
+ Inbound bool `json:"inbound"`
+ Trusted bool `json:"trusted"`
+ Static bool `json:"static"`
+ BytesIn uint64 `json:"bytesIn"`
+ BytesOut uint64 `json:"bytesOut"`
+ CapBytesIn map[string]uint64 `json:"capBytesIn"`
+ CapBytesOut map[string]uint64 `json:"capBytesOut"`
+ TypeBytesIn map[string]uint64 `json:"typeBytesIn"`
+ TypeBytesOut map[string]uint64 `json:"typeBytesOut"`
+}
+
+type PeerResponse struct {
+ ENR string `json:"enr,omitempty"` // Ethereum Node Record
+ Enode string `json:"enode"` // Node URL
+ ID string `json:"id"` // Unique node identifier
+ Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
+ ErrorCount int `json:"errorCount"` // Number of errors
+ LastSeenError string `json:"lastSeenError"` // Last seen error
+ Type string `json:"type"` // Type of connection
+ Caps []string `json:"caps"` // Protocols advertised by this peer
+ Network PeerNetworkInfo `json:"network"`
+ Protocols map[string]interface{} `json:"protocols"` // Sub-protocol specific metadata fields
+}
+
+func SetupPeersAccess(ctxclient *cli.Context, metricsMux *http.ServeMux, node *node.ErigonNode) {
+ metricsMux.HandleFunc("/peers", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Content-Type", "application/json")
+ writePeers(w, ctxclient, node)
+ })
+}
+
+func writePeers(w http.ResponseWriter, ctx *cli.Context, node *node.ErigonNode) {
+ sentinelPeers, err := sentinelPeers(node)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ sentryPeers, err := sentryPeers(node)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ allPeers := append(sentryPeers, sentinelPeers...)
+
+ json.NewEncoder(w).Encode(allPeers)
+}
+
+func sentinelPeers(node *node.ErigonNode) ([]*PeerResponse, error) {
+ if diag, ok := node.Backend().Sentinel().(diagnint.PeerStatisticsGetter); ok {
+
+ statisticsArray := diag.GetPeersStatistics()
+ peers := make([]*PeerResponse, 0, len(statisticsArray))
+
+ for key, value := range statisticsArray {
+ peer := PeerResponse{
+ ENR: "", //TODO: find a way how to get missing data
+ Enode: "",
+ ID: key,
+ Name: "",
+ Type: "Sentinel",
+ Caps: []string{},
+ Network: PeerNetworkInfo{
+ LocalAddress: "",
+ RemoteAddress: "",
+ Inbound: false,
+ Trusted: false,
+ Static: false,
+ BytesIn: value.BytesIn,
+ BytesOut: value.BytesOut,
+ CapBytesIn: value.CapBytesIn,
+ CapBytesOut: value.CapBytesOut,
+ TypeBytesIn: value.TypeBytesIn,
+ TypeBytesOut: value.TypeBytesOut,
+ },
+ Protocols: nil,
+ }
+
+ peers = append(peers, &peer)
+ }
+
+ return peers, nil
+ } else {
+ return []*PeerResponse{}, nil
+ }
+}
+
+func sentryPeers(node *node.ErigonNode) ([]*PeerResponse, error) {
+
+ statisticsArray := node.Backend().DiagnosticsPeersData()
+
+ peers := make([]*PeerResponse, 0, len(statisticsArray))
+
+ for key, value := range statisticsArray {
+ peer := PeerResponse{
+ ENR: "", //TODO: find a way how to get missing data
+ Enode: "",
+ ID: key,
+ Name: "",
+ Type: "Sentry",
+ Caps: []string{},
+ Network: PeerNetworkInfo{
+ LocalAddress: "",
+ RemoteAddress: "",
+ Inbound: false,
+ Trusted: false,
+ Static: false,
+ BytesIn: value.BytesIn,
+ BytesOut: value.BytesOut,
+ CapBytesIn: value.CapBytesIn,
+ CapBytesOut: value.CapBytesOut,
+ TypeBytesIn: value.TypeBytesIn,
+ TypeBytesOut: value.TypeBytesOut,
+ },
+ Protocols: nil,
+ }
+
+ peers = append(peers, &peer)
+ }
+
+ return filterPeersWithoutBytesIn(peers), nil
+}
+
+func filterPeersWithoutBytesIn(peers []*PeerResponse) []*PeerResponse {
+ filteredPeers := make([]*PeerResponse, 0, len(peers))
+
+ for _, peer := range peers {
+ if peer.Network.BytesIn > 0 {
+ filteredPeers = append(filteredPeers, peer)
+ }
+ }
+
+ return filteredPeers
+}
diff --git a/diagnostics/setup.go b/diagnostics/setup.go
new file mode 100644
index 00000000000..44fc74570fc
--- /dev/null
+++ b/diagnostics/setup.go
@@ -0,0 +1,35 @@
+package diagnostics
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/ledgerwatch/erigon/turbo/node"
+ "github.com/urfave/cli/v2"
+)
+
+func Setup(ctx *cli.Context, metricsMux *http.ServeMux, node *node.ErigonNode) {
+ debugMux := http.NewServeMux()
+
+ diagnostic := NewDiagnosticClient(ctx, debugMux, node)
+ diagnostic.Setup()
+
+ metricsMux.HandleFunc("/debug/", func(w http.ResponseWriter, r *http.Request) {
+ r.URL.Path = strings.TrimPrefix(r.URL.Path, "/debug")
+ r.URL.RawPath = strings.TrimPrefix(r.URL.RawPath, "/debug")
+ debugMux.ServeHTTP(w, r)
+ })
+
+ SetupLogsAccess(ctx, debugMux)
+ SetupDbAccess(ctx, debugMux)
+ SetupCmdLineAccess(debugMux)
+ SetupFlagsAccess(ctx, debugMux)
+ SetupVersionAccess(debugMux)
+ SetupBlockBodyDownload(debugMux)
+ SetupHeaderDownloadStats(debugMux)
+ SetupNodeInfoAccess(debugMux, node)
+ SetupPeersAccess(ctx, debugMux, node)
+ SetupBootnodesAccess(debugMux, node)
+ SetupStagesAccess(debugMux, diagnostic)
+
+}
diff --git a/diagnostics/snapshot_sync.go b/diagnostics/snapshot_sync.go
new file mode 100644
index 00000000000..66bb2a8a392
--- /dev/null
+++ b/diagnostics/snapshot_sync.go
@@ -0,0 +1,18 @@
+package diagnostics
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+func SetupStagesAccess(metricsMux *http.ServeMux, diag *DiagnosticClient) {
+ metricsMux.HandleFunc("/snapshot-sync", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Content-Type", "application/json")
+ writeStages(w, diag)
+ })
+}
+
+func writeStages(w http.ResponseWriter, diag *DiagnosticClient) {
+ json.NewEncoder(w).Encode(diag.SnapshotDownload())
+}
diff --git a/diagnostics/version.go b/diagnostics/version.go
index 384b5ac0b77..f54bfa73b64 100644
--- a/diagnostics/version.go
+++ b/diagnostics/version.go
@@ -1,8 +1,7 @@
package diagnostics
import (
- "fmt"
- "io"
+ "encoding/json"
"net/http"
"github.com/ledgerwatch/erigon/params"
@@ -10,16 +9,18 @@ import (
const Version = 3
-func SetupVersionAccess() {
- http.HandleFunc("/debug/metrics/version", func(w http.ResponseWriter, r *http.Request) {
+func SetupVersionAccess(metricsMux *http.ServeMux) {
+ metricsMux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
- writeVersion(w)
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(struct {
+ Node int `json:"nodeVersion"`
+ Code string `json:"codeVersion"`
+ Git string `json:"gitCommit"`
+ }{
+ Node: Version,
+ Code: params.VersionWithMeta,
+ Git: params.GitCommit,
+ })
})
}
-
-func writeVersion(w io.Writer) {
- fmt.Fprintf(w, "SUCCESS\n")
- fmt.Fprintf(w, "%d\n", Version)
- fmt.Fprintf(w, "%s\n", params.VersionWithMeta)
- fmt.Fprintf(w, "%s\n", params.GitCommit)
-}
diff --git a/docker-compose.yml b/docker-compose.yml
index 636cd17344b..e1a5be919d3 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -41,6 +41,7 @@ services:
- ${XDG_DATA_HOME:-~/.local/share}/erigon:/home/erigon/.local/share/erigon
restart: unless-stopped
mem_swappiness: 0
+ user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000}
sentry:
<<: *default-erigon-service
@@ -71,7 +72,7 @@ services:
prometheus:
- image: prom/prometheus:v2.45.0
+ image: prom/prometheus:v2.47.2
user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} # Uses erigon user from Dockerfile
command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles
ports: [ "9090:9090" ]
@@ -81,7 +82,7 @@ services:
restart: unless-stopped
grafana:
- image: grafana/grafana:10.0.1
+ image: grafana/grafana:10.2.1
user: "472:0" # required for grafana version >= 7.3
ports: [ "3000:3000" ]
volumes:
diff --git a/docs/examples/single-process.md b/docs/examples/single-process.md
index 6465128fba7..aaed061b0be 100644
--- a/docs/examples/single-process.md
+++ b/docs/examples/single-process.md
@@ -27,7 +27,7 @@ This runs Erigon with RPCDaemon, TxPool etc. all in one single process. This is
## Flags of Interest
-- `--chain` dictactes the chain (goerli/mainnet etc.) - https://chainlist.org/ is a helpful resource
+- `--chain` dictates the chain (goerli/mainnet etc.) - https://chainlist.org/ is a helpful resource
- `--log.dir.path` dictates where logs will be output - useful for sending reports to the Erigon team when issues occur
- `--http.api` defines the set of APIs which are enabled, the above example is a pretty comprehensive list - what these do is beyond the scope of this example
- `--authrpc.port` is the port which the consensus layer (PoS) uses to talk to Erigon
diff --git a/docs/mining.md b/docs/mining.md
index c85333f857a..c84a4013fb2 100644
--- a/docs/mining.md
+++ b/docs/mining.md
@@ -24,10 +24,3 @@ to `integration state_stages` command:
```
./build/bin/integration state_stages --datadir= --unwind=1 --unwind.every=2 --integrity.fast=false --integrity.slow=false --mine --miner.etherbase=
```
-
-* TODO:
- + we don't broadcast mined blocks to p2p-network yet, [but it's easy to accomplish](https://github.com/ledgerwatch/erigon/blob/9b8cdc0f2289a7cef78218a15043de5bdff4465e/eth/downloader/downloader.go#L673)
- + eth_newPendingTransactionFilter
- + eth_newBlockFilter
- + eth_newFilter
- + websocket Logs
diff --git a/docs/programmers_guide/db_faq.md b/docs/programmers_guide/db_faq.md
index e411996b049..a7f84a1e440 100644
--- a/docs/programmers_guide/db_faq.md
+++ b/docs/programmers_guide/db_faq.md
@@ -28,7 +28,7 @@ We have Go, Rust and C++ implementations of `RoKV` interface.
Rationale and Architecture of DB interface: [./../../ethdb/Readme.md](../../ethdb/Readme.md)
MDBX: [docs](https://libmdbx.website.yandexcloud.net)
-and [mdbx.h](https://github.com/torquem-ch/libmdbx/blob/master/mdbx.h)
+and [mdbx.h](https://github.com/erigontech/libmdbx/blob/master/mdbx.h)
### How RAM used
diff --git a/docs/programmers_guide/dupsort.md b/docs/programmers_guide/dupsort.md
index e6c14c42469..d652d6f82c4 100644
--- a/docs/programmers_guide/dupsort.md
+++ b/docs/programmers_guide/dupsort.md
@@ -145,7 +145,7 @@ feature DupFixed (can add this flag to table configuration).
It means in 1 db call you can Get/Put up to 4Kb of sub-table keys.
-[see mdbx.h](https://github.com/torquem-ch/libmdbx/blob/master/mdbx.h)
+[see mdbx.h](https://github.com/erigontech/libmdbx/blob/master/mdbx.h)
Erigon
---------
diff --git a/docs/programmers_guide/guide.md b/docs/programmers_guide/guide.md
index 43ddef2e6b4..19606453f65 100644
--- a/docs/programmers_guide/guide.md
+++ b/docs/programmers_guide/guide.md
@@ -450,7 +450,7 @@ also pushes the hash of the byte code onto the hash stack.
the node stack.
`ACCOUNTLEAF` opcode is similar to `LEAF`. It consumes the next item from the key tape. The rest of the semantics
-depends on the value of the `field-set`. Field set can be respresented by a bitmask. In that case, bit 0 would
+depends on the value of the `field-set`. Field set can be represented by a bitmask. In that case, bit 0 would
correspond to field 0, bit 1 (number 2) - to field 1, bit 2 (number 4) - to field 2. Currently, field 0 means account
nonce, field 1 means account balance, field 2 means contract storage, field 3 means contract code.
diff --git a/docs/readthedocs/source/rpc/tutorial.rst b/docs/readthedocs/source/rpc/tutorial.rst
index 75f78f28a59..55e19ba5234 100644
--- a/docs/readthedocs/source/rpc/tutorial.rst
+++ b/docs/readthedocs/source/rpc/tutorial.rst
@@ -120,6 +120,6 @@ now it should be all set and we can test it with:
curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"myNamespace_getBlockNumberByHash","params":["ANYHASH"],"id":1}' localhost:8545
-another example of custom daemon can be found at https://github.com/torquem-ch/project-1/blob/master/api.go.
+another example of custom daemon can be found at https://github.com/erigontech/project-1/blob/master/api.go.
Happy Building ~~~.
diff --git a/erigon-lib/.github/workflows/ci.yml b/erigon-lib/.github/workflows/ci.yml
new file mode 100644
index 00000000000..bf30c413573
--- /dev/null
+++ b/erigon-lib/.github/workflows/ci.yml
@@ -0,0 +1,63 @@
+name: Continuous integration
+on:
+ push:
+ branches:
+ - main
+ - stable
+ - alpha
+ pull_request:
+ branches:
+ - main
+ - stable
+ - alpha
+env:
+ CGO_ENABLED: "1"
+ CGO_CXXFLAGS: "-g -O2"
+jobs:
+ tests:
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04, macos-11, windows-2022 ] # list of os: https://github.com/actions/virtual-environments
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: configure Pagefile
+ if: matrix.os == 'windows-2022'
+ uses: al-cheb/configure-pagefile-action@v1.3
+ with:
+ minimum-size: 8GB
+ - uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ fetch-depth: 0 # fetch git tags for "git describe"
+ - uses: actions/setup-go@v4
+ with:
+ go-version: '1.20'
+
+ - name: Install deps
+ if: matrix.os == 'ubuntu-20.04'
+ run: sudo apt update && sudo apt install build-essential
+ shell: bash
+ - name: Install deps
+ if: matrix.os == 'windows-2022'
+ run: |
+ choco upgrade mingw -y --no-progress --version 13.2.0
+ choco install cmake -y --no-progress --version 3.27.8
+
+ - name: Lint
+ if: matrix.os == 'ubuntu-20.04'
+ uses: golangci/golangci-lint-action@v3
+ with:
+ version: v1.54
+ skip-build-cache: true
+
+ - name: Lint source code licenses
+ if: matrix.os == 'ubuntu-20.04'
+ run: make lint-licenses-deps lint-licenses
+
+ - name: Test win
+ if: matrix.os == 'windows-2022'
+ run: make test-no-fuzz
+ - name: Test
+ if: matrix.os != 'windows-2022'
+ run: make test
diff --git a/erigon-lib/.gitignore b/erigon-lib/.gitignore
new file mode 100644
index 00000000000..1155933ce5d
--- /dev/null
+++ b/erigon-lib/.gitignore
@@ -0,0 +1,75 @@
+# See http://help.github.com/ignore-files/ for more about ignoring files.
+#
+# If you find yourself ignoring temporary files generated by your text editor
+# or operating system, you probably want to add a global ignore instead:
+# git config --global core.excludesfile ~/.gitignore_global
+
+/tmp
+*/**/*un~
+*/**/*.test
+*un~
+.DS_Store
+*/**/.DS_Store
+.ethtest
+*/**/*tx_database*
+*/**/*dapps*
+build/_vendor/pkg
+/*.a
+docs/readthedocs/build
+
+#*
+.#*
+*#
+*~
+.project
+.settings
+
+# Used by mdbx Makefile
+/ethdb/mdbx/dist/CMakeFiles/*
+/ethdb/mdbx/dist/CMakeCache*
+/ethdb/mdbx/dist/*.cmake
+/ethdb/mdbx/dist/*.dll
+/ethdb/mdbx/dist/*.exe
+/ethdb/mdbx/dist/Makefile
+
+# used by the Makefile
+/build/_workspace/
+/build/cache/
+/build/bin/
+/geth*.zip
+
+# travis
+profile.tmp
+profile.cov
+
+# IdeaIDE
+.idea
+
+# VS Code
+.vscode
+*.code-workspace
+
+# dashboard
+/dashboard/assets/flow-typed
+/dashboard/assets/node_modules
+/dashboard/assets/stats.json
+/dashboard/assets/bundle.js
+/dashboard/assets/bundle.js.map
+/dashboard/assets/package-lock.json
+
+**/yarn-error.log
+/timings.txt
+right_*.txt
+root_*.txt
+
+__pycache__
+docker-compose.dev.yml
+/build
+*.tmp
+
+/ethdb/*.fail
+
+libmdbx/build/*
+tests/testdata/*
+
+go.work*
diff --git a/erigon-lib/.golangci.yml b/erigon-lib/.golangci.yml
new file mode 100644
index 00000000000..4e45c12cb03
--- /dev/null
+++ b/erigon-lib/.golangci.yml
@@ -0,0 +1,139 @@
+run:
+ deadline: 10m
+ build-tags:
+ - nosqlite
+ - noboltdb
+
+linters:
+ presets:
+ - bugs
+ - error
+ - unused
+ - performance
+ disable:
+ - gosec
+ - exhaustive
+ - musttag
+ - contextcheck
+ - wrapcheck
+ - goerr113
+ - unparam
+ - makezero
+ - testifylint #TODO: enable me
+ - protogetter
+ enable:
+ - unconvert
+ - predeclared
+ - wastedassign
+ - thelper
+ - gofmt
+ - gocritic
+# - revive
+# - forcetypeassert
+# - stylecheck
+
+linters-settings:
+ gocritic:
+ # Which checks should be enabled; can't be combined with 'disabled-checks';
+ # See https://go-critic.github.io/overview#checks-overview
+ # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
+ # By default list of stable checks is used.
+ enabled-checks:
+ - ruleguard
+ - truncateCmp
+ # - defaultCaseOrder
+
+ # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
+ disabled-checks:
+ - regexpMust
+ # - hugeParam
+ - rangeValCopy
+ - exitAfterDefer
+ - elseif
+ - dupBranchBody
+ - assignOp
+ - singleCaseSwitch
+ - unlambda
+ - captLocal
+ - commentFormatting
+ - ifElseChain
+ - appendAssign
+
+ # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
+ # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
+ enabled-tags:
+ - performance
+ - diagnostic
+ # - style
+ # - experimental
+ # - opinionated
+ disabled-tags:
+ - experimental
+ ruleguard:
+ rules: "rules.go"
+ settings:
+ hugeParam:
+ # size in bytes that makes the warning trigger (default 80)
+ sizeThreshold: 1000
+ rangeExprCopy:
+ # size in bytes that makes the warning trigger (default 512)
+ sizeThreshold: 512
+ # whether to check test functions (default true)
+ skipTestFuncs: true
+ truncateCmp:
+ # whether to skip int/uint/uintptr types (default true)
+ skipArchDependent: true
+ underef:
+ # whether to skip (*x).method() calls where x is a pointer receiver (default true)
+ skipRecvDeref: true
+
+ govet:
+ disable:
+ - deepequalerrors
+ - shadow
+ - unsafeptr
+ goconst:
+ min-len: 2
+ min-occurrences: 2
+ gofmt:
+ auto-fix: false
+
+issues:
+ exclude-rules:
+ - linters:
+ - golint
+ text: "should be"
+ - linters:
+ - errcheck
+ text: "not checked"
+ - linters:
+ - staticcheck
+ text: "SA(1019|1029|5011)"
+ # Exclude some linters from running on tests files.
+ - path: test\.go
+ linters:
+ - gosec
+ - unused
+ - deadcode
+ - gocritic
+ - perfsprint
+ - path: hack\.go
+ linters:
+ - gosec
+ - unused
+ - deadcode
+ - gocritic
+ - path: cmd/devp2p
+ linters:
+ - gosec
+ - unused
+ - deadcode
+ - gocritic
+ - path: metrics/sample\.go
+ linters:
+ - gosec
+ - gocritic
+ - path: p2p/simulations
+ linters:
+ - gosec
+ - gocritic
diff --git a/erigon-lib/LICENSE b/erigon-lib/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/erigon-lib/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/erigon-lib/Makefile b/erigon-lib/Makefile
new file mode 100644
index 00000000000..078cac4aa07
--- /dev/null
+++ b/erigon-lib/Makefile
@@ -0,0 +1,103 @@
+GOBINREL = build/bin
+GOBIN = $(CURDIR)/$(GOBINREL)
+
+BUILD_TAGS = nosqlite,noboltdb,disable_libutp
+
+CGO_CXXFLAGS ?= $(shell go env CGO_CXXFLAGS 2>/dev/null)
+ifeq ($(CGO_CXXFLAGS),)
+ CGO_CXXFLAGS += -g
+ CGO_CXXFLAGS += -O2
+endif
+
+GOBUILD = CGO_CXXFLAGS="$(CGO_CXXFLAGS)" go build -trimpath -tags $(BUILD_TAGS)
+GOTEST = CGO_CXXFLAGS="$(CGO_CXXFLAGS)" go test -trimpath -tags $(BUILD_TAGS)
+GOTEST_NOFUZZ = CGO_CXXFLAGS="$(CGO_CXXFLAGS)" go test -trimpath --tags=$(BUILD_TAGS),nofuzz
+
+OS = $(shell uname -s)
+ARCH = $(shell uname -m)
+
+ifeq ($(OS),Darwin)
+PROTOC_OS := osx
+ifeq ($(ARCH),arm64)
+ARCH = aarch_64
+endif
+endif
+ifeq ($(OS),Linux)
+PROTOC_OS = linux
+endif
+
+PROTOC_INCLUDE = build/include/google
+
+
+default: gen
+
+gen: grpc mocks
+
+$(GOBINREL):
+ mkdir -p "$(GOBIN)"
+
+$(GOBINREL)/protoc: | $(GOBINREL)
+ $(eval PROTOC_TMP := $(shell mktemp -d))
+ curl -sSL https://github.com/protocolbuffers/protobuf/releases/download/v24.2/protoc-24.2-$(PROTOC_OS)-$(ARCH).zip -o "$(PROTOC_TMP)/protoc.zip"
+ cd "$(PROTOC_TMP)" && unzip protoc.zip
+ cp "$(PROTOC_TMP)/bin/protoc" "$(GOBIN)"
+ mkdir -p "$(PROTOC_INCLUDE)"
+ cp -R "$(PROTOC_TMP)/include/google/" "$(PROTOC_INCLUDE)"
+ rm -rf "$(PROTOC_TMP)"
+
+# 'protoc-gen-go' tool generates proto messages
+$(GOBINREL)/protoc-gen-go: | $(GOBINREL)
+ $(GOBUILD) -o "$(GOBIN)/protoc-gen-go" google.golang.org/protobuf/cmd/protoc-gen-go
+
+# 'protoc-gen-go-grpc' tool generates grpc services
+$(GOBINREL)/protoc-gen-go-grpc: | $(GOBINREL)
+ $(GOBUILD) -o "$(GOBIN)/protoc-gen-go-grpc" google.golang.org/grpc/cmd/protoc-gen-go-grpc
+
+protoc-all: $(GOBINREL)/protoc $(PROTOC_INCLUDE) $(GOBINREL)/protoc-gen-go $(GOBINREL)/protoc-gen-go-grpc
+
+protoc-clean:
+ rm -f "$(GOBIN)/protoc"*
+ rm -rf "$(PROTOC_INCLUDE)"
+
+grpc: protoc-all
+ go mod vendor
+ PATH="$(GOBIN):$(PATH)" protoc --proto_path=vendor/github.com/ledgerwatch/interfaces --go_out=gointerfaces -I=$(PROTOC_INCLUDE) \
+ types/types.proto
+ PATH="$(GOBIN):$(PATH)" protoc --proto_path=vendor/github.com/ledgerwatch/interfaces --go_out=gointerfaces --go-grpc_out=gointerfaces -I=$(PROTOC_INCLUDE) \
+ --go_opt=Mtypes/types.proto=github.com/ledgerwatch/erigon-lib/gointerfaces/types \
+ --go-grpc_opt=Mtypes/types.proto=github.com/ledgerwatch/erigon-lib/gointerfaces/types \
+ p2psentry/sentry.proto p2psentinel/sentinel.proto \
+ remote/kv.proto remote/ethbackend.proto \
+ downloader/downloader.proto execution/execution.proto \
+ txpool/txpool.proto txpool/mining.proto
+ rm -rf vendor
+
+$(GOBINREL)/moq: | $(GOBINREL)
+ $(GOBUILD) -o "$(GOBIN)/moq" github.com/matryer/moq
+
+mocks: $(GOBINREL)/moq
+ rm -f gointerfaces/remote/mocks.go
+ rm -f gointerfaces/sentry/mocks.go
+ PATH="$(GOBIN):$(PATH)" go generate ./...
+
+lintci-deps:
+ @./tools/golangci_lint.sh --install-deps
+lintci:
+ @CGO_CXXFLAGS="$(CGO_CXXFLAGS)" ./tools/golangci_lint.sh
+
+lint-licenses-deps:
+ @./tools/licenses_check.sh --install-deps
+lint-licenses:
+ @./tools/licenses_check.sh
+
+lint-mod-tidy:
+ @./tools/mod_tidy_check.sh
+
+lint-deps: lintci-deps lint-licenses-deps
+lint: lintci lint-licenses lint-mod-tidy
+
+test:
+ $(GOTEST) --count 1 -p 2 ./...
+
+test-no-fuzz:
+ $(GOTEST_NOFUZZ) --count 1 -p 2 ./...
diff --git a/erigon-lib/README.md b/erigon-lib/README.md
new file mode 100644
index 00000000000..85866c38e63
--- /dev/null
+++ b/erigon-lib/README.md
@@ -0,0 +1,18 @@
+# erigon-lib
+
+Parts of Erigon codebase, written from scratch and licensed under Apache 2.0.
+
+## License requirements
+
+erigon-lib dependencies use various open source licenses compatible with Apache 2.0. This is checked on CI using `make lint-licenses`.
+
+In order to keep license purity it is not allowed to refer to the code in the erigon root module from erigon-lib. This is ensured by the `go.mod` separation.
+
+It is not allowed to copy or move code from erigon to erigon-lib unless all original authors agree to relief the code license from GPL to Apache 2.0.
+
+## Code migration policy
+
+It is encouraged to write new erigon code inside erigon-lib.
+
+It is encouraged to move and relicense parts of the code from erigon to erigon-lib
+that are safe and easy to move. For example, code written from scratch by erigon core contributors that has no significant external contributions could be refactored and moved.
diff --git a/erigon-lib/bptree/binary_file.go b/erigon-lib/bptree/binary_file.go
new file mode 100644
index 00000000000..21babdab931
--- /dev/null
+++ b/erigon-lib/bptree/binary_file.go
@@ -0,0 +1,156 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "bufio"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+ "os"
+ "strconv"
+)
+
+// Size in bytes of data blocks read/written from/to the file system.
+const BLOCKSIZE int64 = 4096
+
+// BinaryFile type represents an open binary file.
+type BinaryFile struct {
+ file *os.File
+ path string
+ blockSize int64
+ size int64
+ opened bool
+}
+
+// RandomBinaryReader reads data chuncks randomly from a binary file.
+type RandomBinaryReader struct {
+ sourceFile *BinaryFile
+ chunckSize int
+}
+
+func (r RandomBinaryReader) Read(b []byte) (n int, err error) {
+ numKeys := len(b) / r.chunckSize
+ for i := 0; i < numKeys; i++ {
+ bytesRead, err := r.readAtRandomOffset(b[i*r.chunckSize : i*r.chunckSize+r.chunckSize])
+ if err != nil {
+ return i*r.chunckSize + bytesRead, fmt.Errorf("cannot random read at iteration %d: %w", i, err)
+ }
+ n += bytesRead
+ }
+ remainderSize := len(b) % r.chunckSize
+ bytesRead, err := r.readAtRandomOffset(b[numKeys*r.chunckSize : numKeys*r.chunckSize+remainderSize])
+ if err != nil {
+ return numKeys*r.chunckSize + bytesRead, fmt.Errorf("cannot random read remainder %d: %w", remainderSize, err)
+ }
+ n += bytesRead
+ return n, nil
+}
+
+func (r RandomBinaryReader) readAtRandomOffset(b []byte) (n int, err error) {
+ randomValue, err := rand.Int(rand.Reader, big.NewInt(r.sourceFile.size-int64(len(b))))
+ if err != nil {
+ return 0, fmt.Errorf("cannot generate random offset: %w", err)
+ }
+ randomOffset := randomValue.Int64()
+ _, err = r.sourceFile.file.Seek(randomOffset, io.SeekStart)
+ if err != nil {
+ return 0, fmt.Errorf("cannot seek to offset %d: %w", randomOffset, err)
+ }
+ bytesRead, err := r.sourceFile.file.Read(b)
+ if err != nil {
+ return 0, fmt.Errorf("cannot read from source file: %w", err)
+ }
+ return bytesRead, nil
+}
+
+func CreateBinaryFileByRandomSampling(path string, size int64, sourceFile *BinaryFile, keySize int) *BinaryFile {
+ return CreateBinaryFileFromReader(path, "_onlyexisting", size, RandomBinaryReader{sourceFile, keySize})
+}
+
+func CreateBinaryFileByPRNG(path string, size int64) *BinaryFile {
+ return CreateBinaryFileFromReader(path, "", size, rand.Reader)
+}
+
+func CreateBinaryFileFromReader(path, suffix string, size int64, reader io.Reader) *BinaryFile {
+ file, err := os.OpenFile(path+strconv.FormatInt(size, 10)+suffix, os.O_RDWR|os.O_CREATE, 0644)
+ ensure(err == nil, fmt.Sprintf("CreateBinaryFileFromReader: cannot create file %s, error %s\n", file.Name(), err))
+
+ err = file.Truncate(size)
+ ensure(err == nil, fmt.Sprintf("CreateBinaryFileFromReader: cannot truncate file %s to %d, error %s\n", file.Name(), size, err))
+
+ bufferedFile := bufio.NewWriter(file)
+ numBlocks := size / BLOCKSIZE
+ remainderSize := size % BLOCKSIZE
+ buffer := make([]byte, BLOCKSIZE)
+ for i := int64(0); i <= numBlocks; i++ {
+ if i == numBlocks {
+ buffer = make([]byte, remainderSize)
+ }
+ bytesRead, err := io.ReadFull(reader, buffer)
+ ensure(bytesRead == len(buffer), fmt.Sprintf("CreateBinaryFileFromReader: insufficient bytes read %d, error %s\n", bytesRead, err))
+ bytesWritten, err := bufferedFile.Write(buffer)
+ ensure(bytesWritten == len(buffer), fmt.Sprintf("CreateBinaryFileFromReader: insufficient bytes written %d, error %s\n", bytesWritten, err))
+ }
+
+ err = bufferedFile.Flush()
+ ensure(err == nil, fmt.Sprintf("CreateBinaryFileFromReader: error during flushing %s\n", err))
+
+ binaryFile := &BinaryFile{path: file.Name(), blockSize: BLOCKSIZE, size: size, file: file, opened: true}
+ binaryFile.rewind()
+ return binaryFile
+}
+
+func OpenBinaryFile(path string) *BinaryFile {
+ file, err := os.Open(path)
+ ensure(err == nil, fmt.Sprintf("OpenBinaryFile: cannot open file %s, error %s\n", path, err))
+
+ info, err := file.Stat()
+ ensure(err == nil, fmt.Sprintf("OpenBinaryFile: cannot stat file %s error %s\n", path, err))
+ ensure(info.Size() >= 0, fmt.Sprintf("OpenBinaryFile: negative size %d file %s\n", info.Size(), path))
+
+ binaryFile := &BinaryFile{path: path, blockSize: BLOCKSIZE, size: info.Size(), file: file, opened: true}
+ return binaryFile
+}
+
+func (f *BinaryFile) rewind() {
+ offset, err := f.file.Seek(0, io.SeekStart)
+ ensure(err == nil, fmt.Sprintf("rewind: error during seeking %s\n", err))
+ ensure(offset == 0, fmt.Sprintf("rewind: unexpected offset after seeking: %d\n", offset))
+}
+
+func (f *BinaryFile) Name() string {
+ return f.path
+}
+
+func (f *BinaryFile) Size() int64 {
+ return f.size
+}
+
+func (f *BinaryFile) NewReader() *bufio.Reader {
+ ensure(f.opened, fmt.Sprintf("NewReader: file %s is not opened\n", f.path))
+ f.rewind()
+ return bufio.NewReader(f.file)
+}
+
+func (f *BinaryFile) Close() {
+ ensure(f.opened, fmt.Sprintf("Close: file %s is not opened\n", f.path))
+ err := f.file.Close()
+ ensure(err == nil, fmt.Sprintf("Close: cannot close file %s, error %s\n", f.path, err))
+ f.opened = false
+}
diff --git a/erigon-lib/bptree/bulk.go b/erigon-lib/bptree/bulk.go
new file mode 100644
index 00000000000..31709a8ba3d
--- /dev/null
+++ b/erigon-lib/bptree/bulk.go
@@ -0,0 +1,812 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "fmt"
+ "sort"
+)
+
+func upsert(n *Node23, kvItems KeyValues, stats *Stats) (nodes []*Node23, newFirstKey *Felt, intermediateKeys []*Felt) {
+ ensure(sort.IsSorted(kvItems), "kvItems are not sorted by key")
+
+ if kvItems.Len() == 0 && n == nil {
+ return []*Node23{n}, nil, []*Felt{}
+ }
+ if n == nil {
+ n = makeEmptyLeafNode()
+ }
+ if n.isLeaf {
+ return upsertLeaf(n, kvItems, stats)
+ } else {
+ return upsertInternal(n, kvItems, stats)
+ }
+}
+
+func upsertLeaf(n *Node23, kvItems KeyValues, stats *Stats) (nodes []*Node23, newFirstKey *Felt, intermediateKeys []*Felt) {
+ ensure(n.isLeaf, "node is not leaf")
+
+ if kvItems.Len() == 0 {
+ if n.nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.nextKey())
+ }
+ return []*Node23{n}, nil, intermediateKeys
+ }
+
+ if !n.exposed {
+ n.exposed = true
+ stats.ExposedCount++
+ stats.OpeningHashes += n.howManyHashes()
+ }
+
+ currentFirstKey := n.firstKey()
+ addOrReplaceLeaf(n, kvItems, stats)
+ if n.firstKey() != currentFirstKey {
+ newFirstKey = n.firstKey()
+ } else {
+ newFirstKey = nil
+ }
+
+ if n.keyCount() > 3 {
+ for n.keyCount() > 3 {
+ newLeaf := makeLeafNode(n.keys[:3], n.values[:3], stats)
+ intermediateKeys = append(intermediateKeys, n.keys[2])
+ nodes = append(nodes, newLeaf)
+ n.keys, n.values = n.keys[2:], n.values[2:]
+ }
+ newLeaf := makeLeafNode(n.keys, n.values, stats)
+ if n.nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.nextKey())
+ }
+ nodes = append(nodes, newLeaf)
+ return nodes, newFirstKey, intermediateKeys
+ } else {
+ if n.nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.nextKey())
+ }
+ return []*Node23{n}, newFirstKey, intermediateKeys
+ }
+}
+
+func upsertInternal(n *Node23, kvItems KeyValues, stats *Stats) (nodes []*Node23, newFirstKey *Felt, intermediateKeys []*Felt) {
+ ensure(!n.isLeaf, "node is not internal")
+
+ if kvItems.Len() == 0 {
+ if n.lastLeaf().nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.lastLeaf().nextKey())
+ }
+ return []*Node23{n}, nil, intermediateKeys
+ }
+
+ if !n.exposed {
+ n.exposed = true
+ stats.ExposedCount++
+ stats.OpeningHashes += n.howManyHashes()
+ }
+
+ itemSubsets := splitItems(n, kvItems)
+
+ newChildren := make([]*Node23, 0)
+ newKeys := make([]*Felt, 0)
+ for i := len(n.children) - 1; i >= 0; i-- {
+ child := n.children[i]
+ childNodes, childNewFirstKey, childIntermediateKeys := upsert(child, itemSubsets[i], stats)
+ newChildren = append(childNodes, newChildren...)
+ newKeys = append(childIntermediateKeys, newKeys...)
+ if childNewFirstKey != nil {
+ if i > 0 {
+ // Handle newFirstKey here
+ previousChild := n.children[i-1]
+ if previousChild.isLeaf {
+ ensure(len(previousChild.keys) > 0, "upsertInternal: previousChild has no keys")
+ if previousChild.nextKey() != childNewFirstKey {
+ previousChild.setNextKey(childNewFirstKey, stats)
+ }
+ } else {
+ ensure(len(previousChild.children) > 0, "upsertInternal: previousChild has no children")
+ lastLeaf := previousChild.lastLeaf()
+ if lastLeaf.nextKey() != childNewFirstKey {
+ lastLeaf.setNextKey(childNewFirstKey, stats)
+ }
+ }
+ // TODO(canepat): previousChild/previousLastLeaf changed instead of making new node
+ } else {
+ // Propagate newFirstKey up
+ newFirstKey = childNewFirstKey
+ }
+ }
+ }
+
+ n.children = newChildren
+ if n.childrenCount() > 3 {
+ ensure(len(newKeys) >= n.childrenCount()-1 || n.childrenCount()%2 == 0 && n.childrenCount()%len(newKeys) == 0, "upsertInternal: inconsistent #children vs #newKeys")
+ var hasIntermediateKeys bool
+ if len(newKeys) == n.childrenCount()-1 || len(newKeys) == n.childrenCount() {
+ /* Groups are: 2,2...2 or 3 */
+ hasIntermediateKeys = true
+ } else {
+ /* Groups are: 2,2...2 */
+ hasIntermediateKeys = false
+ }
+ for n.childrenCount() > 3 {
+ nodes = append(nodes, makeInternalNode(n.children[:2], newKeys[:1], stats))
+ n.children = n.children[2:]
+ if hasIntermediateKeys {
+ intermediateKeys = append(intermediateKeys, newKeys[1])
+ newKeys = newKeys[2:]
+ } else {
+ newKeys = newKeys[1:]
+ }
+ }
+ ensure(n.childrenCount() > 0 && len(newKeys) > 0, "upsertInternal: inconsistent #children vs #newKeys")
+ if n.childrenCount() == 2 {
+ ensure(len(newKeys) > 0, "upsertInternal: inconsistent #newKeys")
+ nodes = append(nodes, makeInternalNode(n.children, newKeys[:1], stats))
+ intermediateKeys = append(intermediateKeys, newKeys[1:]...)
+ } else if n.childrenCount() == 3 {
+ ensure(len(newKeys) > 1, "upsertInternal: inconsistent #newKeys")
+ nodes = append(nodes, makeInternalNode(n.children, newKeys[:2], stats))
+ intermediateKeys = append(intermediateKeys, newKeys[2:]...)
+ } else {
+ ensure(false, fmt.Sprintf("upsertInternal: inconsistent #children=%d #newKeys=%d\n", n.childrenCount(), len(newKeys)))
+ }
+ return nodes, newFirstKey, intermediateKeys
+ } else { // n.childrenCount() is 2 or 3
+ ensure(len(newKeys) > 0, "upsertInternal: newKeys count is zero")
+ if len(newKeys) == len(n.children) {
+ n.keys = newKeys[:len(newKeys)-1]
+ intermediateKeys = append(intermediateKeys, newKeys[len(newKeys)-1])
+ } else {
+ n.keys = newKeys
+ }
+ // TODO(canepat): n.keys changed instead of making new node
+ n.updated = true
+ stats.UpdatedCount++
+ return []*Node23{n}, newFirstKey, intermediateKeys
+ }
+}
+
+func addOrReplaceLeaf(n *Node23, kvItems KeyValues, stats *Stats) {
+ ensure(n.isLeaf, "addOrReplaceLeaf: node is not leaf")
+ ensure(len(n.keys) > 0 && len(n.values) > 0, "addOrReplaceLeaf: node keys/values are empty")
+ ensure(len(kvItems.keys) > 0 && len(kvItems.keys) == len(kvItems.values), "addOrReplaceLeaf: invalid kvItems")
+
+ // Temporarily remove next key/value
+ nextKey, nextValue := n.nextKey(), n.nextValue()
+
+ n.keys = n.keys[:len(n.keys)-1]
+ n.values = n.values[:len(n.values)-1]
+
+ // kvItems are ordered by key: search there using n.keys that here are 1 or 2 by design (0 just for empty tree)
+ switch n.keyCount() {
+ case 0:
+ n.keys = append(n.keys, kvItems.keys...)
+ n.values = append(n.values, kvItems.values...)
+ case 1:
+ addOrReplaceLeaf1(n, kvItems, stats)
+ case 2:
+ addOrReplaceLeaf2(n, kvItems, stats)
+ default:
+ ensure(false, fmt.Sprintf("addOrReplaceLeaf: invalid key count %d", n.keyCount()))
+ }
+
+ // Restore next key/value
+ n.keys = append(n.keys, nextKey)
+ n.values = append(n.values, nextValue)
+}
+
+func addOrReplaceLeaf1(n *Node23, kvItems KeyValues, stats *Stats) {
+ ensure(n.isLeaf, "addOrReplaceLeaf1: node is not leaf")
+ ensure(n.keyCount() == 1, "addOrReplaceLeaf1: leaf has not 1 *canonical* key")
+
+ key0, value0 := n.keys[0], n.values[0]
+ index0 := sort.Search(kvItems.Len(), func(i int) bool { return *kvItems.keys[i] >= *key0 })
+ if index0 < kvItems.Len() {
+ // Insert keys/values concatenating new ones around key0
+ n.keys = append(make([]*Felt, 0), kvItems.keys[:index0]...)
+ n.values = append(make([]*Felt, 0), kvItems.values[:index0]...)
+ n.keys = append(n.keys, key0)
+ n.values = append(n.values, value0)
+ if *kvItems.keys[index0] == *key0 {
+ // Incoming key matches an existing key: update
+ n.keys = append(n.keys, kvItems.keys[index0+1:]...)
+ n.values = append(n.values, kvItems.values[index0+1:]...)
+ n.updated = true
+ stats.UpdatedCount++
+ } else {
+ n.keys = append(n.keys, kvItems.keys[index0:]...)
+ n.values = append(n.values, kvItems.values[index0:]...)
+ }
+ } else {
+ // key0 greater than any input key
+ n.keys = append(kvItems.keys, key0)
+ n.values = append(kvItems.values, value0)
+ }
+}
+
+func addOrReplaceLeaf2(n *Node23, kvItems KeyValues, stats *Stats) {
+ ensure(n.isLeaf, "addOrReplaceLeaf2: node is not leaf")
+ ensure(n.keyCount() == 2, "addOrReplaceLeaf2: leaf has not 2 *canonical* keys")
+
+ key0, value0, key1, value1 := n.keys[0], n.values[0], n.keys[1], n.values[1]
+ index0 := sort.Search(kvItems.Len(), func(i int) bool { return *kvItems.keys[i] >= *key0 })
+ index1 := sort.Search(kvItems.Len(), func(i int) bool { return *kvItems.keys[i] >= *key1 })
+ ensure(index1 >= index0, "addOrReplaceLeaf2: keys not ordered")
+ if index0 < kvItems.Len() {
+ if index1 < kvItems.Len() {
+ // Insert keys/values concatenating new ones around key0 and key1
+ n.keys = append(make([]*Felt, 0), kvItems.keys[:index0]...)
+ n.values = append(make([]*Felt, 0), kvItems.values[:index0]...)
+ n.keys = append(n.keys, key0)
+ n.values = append(n.values, value0)
+ if *kvItems.keys[index0] == *key0 {
+ // Incoming key matches an existing key: update
+ n.keys = append(n.keys, kvItems.keys[index0+1:index1]...)
+ n.values = append(n.values, kvItems.values[index0+1:index1]...)
+ n.updated = true
+ stats.UpdatedCount++
+ } else {
+ n.keys = append(n.keys, kvItems.keys[index0:index1]...)
+ n.values = append(n.values, kvItems.values[index0:index1]...)
+ }
+ n.keys = append(n.keys, key1)
+ n.values = append(n.values, value1)
+ if *kvItems.keys[index1] == *key1 {
+ // Incoming key matches an existing key: update
+ n.keys = append(n.keys, kvItems.keys[index1+1:]...)
+ n.values = append(n.values, kvItems.values[index1+1:]...)
+ if !n.updated {
+ n.updated = true
+ stats.UpdatedCount++
+ }
+ } else {
+ n.keys = append(n.keys, kvItems.keys[index1:]...)
+ n.values = append(n.values, kvItems.values[index1:]...)
+ }
+ } else {
+ // Insert keys/values concatenating new ones around key0, then add key1
+ n.keys = append(make([]*Felt, 0), kvItems.keys[:index0]...)
+ n.values = append(make([]*Felt, 0), kvItems.values[:index0]...)
+ n.keys = append(n.keys, key0)
+ n.values = append(n.values, value0)
+ if *kvItems.keys[index0] == *key0 {
+ // Incoming key matches an existing key: update
+ n.keys = append(n.keys, kvItems.keys[index0+1:]...)
+ n.values = append(n.values, kvItems.values[index0+1:]...)
+ n.updated = true
+ stats.UpdatedCount++
+ } else {
+ n.keys = append(n.keys, kvItems.keys[index0:]...)
+ n.values = append(n.values, kvItems.values[index0:]...)
+ }
+ n.keys = append(n.keys, key1)
+ n.values = append(n.values, value1)
+ }
+ } else {
+ ensure(index1 == index0, "addOrReplaceLeaf2: keys not ordered")
+ // Both key0 and key1 greater than any input key
+ n.keys = append(kvItems.keys, key0, key1)
+ n.values = append(kvItems.values, value0, value1)
+ }
+}
+
+func splitItems(n *Node23, kvItems KeyValues) []KeyValues {
+ ensure(!n.isLeaf, "splitItems: node is not internal")
+ ensure(len(n.keys) > 0, "splitItems: internal node has no keys")
+
+ itemSubsets := make([]KeyValues, 0)
+ for i, key := range n.keys {
+ splitIndex := sort.Search(kvItems.Len(), func(i int) bool { return *kvItems.keys[i] >= *key })
+ itemSubsets = append(itemSubsets, KeyValues{kvItems.keys[:splitIndex], kvItems.values[:splitIndex]})
+ kvItems = KeyValues{kvItems.keys[splitIndex:], kvItems.values[splitIndex:]}
+ if i == len(n.keys)-1 {
+ itemSubsets = append(itemSubsets, kvItems)
+ }
+ }
+ ensure(len(itemSubsets) == len(n.children), "item subsets and children have different cardinality")
+ return itemSubsets
+}
+
+func del(n *Node23, keysToDelete []Felt, stats *Stats) (deleted *Node23, nextKey *Felt, intermediateKeys []*Felt) {
+ ensure(sort.IsSorted(Keys(keysToDelete)), "keysToDelete are not sorted")
+
+ if n == nil {
+ return n, nil, intermediateKeys
+ }
+ if n.isLeaf {
+ return deleteLeaf(n, keysToDelete, stats)
+ } else {
+ return deleteInternal(n, keysToDelete, stats)
+ }
+}
+
+func deleteLeaf(n *Node23, keysToDelete []Felt, stats *Stats) (deleted *Node23, nextKey *Felt, intermediateKeys []*Felt) {
+ ensure(n.isLeaf, fmt.Sprintf("node %s is not leaf", n))
+
+ if len(keysToDelete) == 0 {
+ if n.nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.nextKey())
+ }
+ return n, nil, intermediateKeys
+ }
+
+ if !n.exposed {
+ n.exposed = true
+ stats.ExposedCount++
+ stats.OpeningHashes += n.howManyHashes()
+ }
+
+ currentFirstKey := n.firstKey()
+ deleteLeafKeys(n, keysToDelete, stats)
+ if n.keyCount() == 1 {
+ return nil, n.nextKey(), intermediateKeys
+ } else {
+ if n.nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.nextKey())
+ }
+ if n.firstKey() != currentFirstKey {
+ return n, n.firstKey(), intermediateKeys
+ } else {
+ return n, nil, intermediateKeys
+ }
+ }
+}
+
+func deleteLeafKeys(n *Node23, keysToDelete []Felt, stats *Stats) (deleted KeyValues) {
+ ensure(n.isLeaf, "deleteLeafKeys: node is not leaf")
+ switch n.keyCount() {
+ case 2:
+ if Keys(keysToDelete).Contains(*n.keys[0]) {
+ deleted.keys = n.keys[:1]
+ deleted.values = n.values[:1]
+ n.keys = n.keys[1:]
+ n.values = n.values[1:]
+ stats.DeletedCount++
+ }
+ case 3:
+ if Keys(keysToDelete).Contains(*n.keys[0]) {
+ if Keys(keysToDelete).Contains(*n.keys[1]) {
+ deleted.keys = n.keys[:2]
+ deleted.values = n.values[:2]
+ n.keys = n.keys[2:]
+ n.values = n.values[2:]
+ stats.DeletedCount++
+ } else {
+ deleted.keys = n.keys[:1]
+ deleted.values = n.values[:1]
+ n.keys = n.keys[1:]
+ n.values = n.values[1:]
+ n.updated = true
+ stats.UpdatedCount++
+ }
+ } else {
+ if Keys(keysToDelete).Contains(*n.keys[1]) {
+ deleted.keys = n.keys[1:2]
+ deleted.values = n.values[1:2]
+ n.keys = append(n.keys[:1], n.keys[2])
+ n.values = append(n.values[:1], n.values[2])
+ n.updated = true
+ stats.UpdatedCount++
+ }
+ }
+ default:
+ ensure(false, fmt.Sprintf("unexpected number of keys in %s", n))
+ }
+ return deleted
+}
+
+func deleteInternal(n *Node23, keysToDelete []Felt, stats *Stats) (deleted *Node23, nextKey *Felt, intermediateKeys []*Felt) {
+ ensure(!n.isLeaf, fmt.Sprintf("node %s is not internal", n))
+
+ if len(keysToDelete) == 0 {
+ if n.lastLeaf().nextKey() != nil {
+ intermediateKeys = append(intermediateKeys, n.lastLeaf().nextKey())
+ }
+ return n, nil, intermediateKeys
+ }
+
+ if !n.exposed {
+ n.exposed = true
+ stats.ExposedCount++
+ stats.OpeningHashes += n.howManyHashes()
+ }
+
+ keySubsets := splitKeys(n, keysToDelete)
+
+ newKeys := make([]*Felt, 0)
+ for i := len(n.children) - 1; i >= 0; i-- {
+ child, childNextKey, childIntermediateKeys := del(n.children[i], keySubsets[i], stats)
+ newKeys = append(childIntermediateKeys, newKeys...)
+ if i > 0 {
+ previousIndex := i - 1
+ previousChild := n.children[previousIndex]
+ for previousChild.isEmpty() && previousIndex-1 >= 0 {
+ previousChild = n.children[previousIndex-1]
+ previousIndex = previousIndex - 1
+ }
+ if child == nil || childNextKey != nil {
+ if previousChild.isLeaf {
+ ensure(len(previousChild.keys) > 0, "delete: previousChild has no keys")
+ if previousChild.nextKey() != childNextKey {
+ previousChild.setNextKey(childNextKey, stats)
+ }
+ } else {
+ ensure(len(previousChild.children) > 0, "delete: previousChild has no children")
+ lastLeaf := previousChild.lastLeaf()
+ if lastLeaf.nextKey() != childNextKey {
+ lastLeaf.setNextKey(childNextKey, stats)
+ }
+ }
+ }
+ if !previousChild.isEmpty() && child != nil && child.childrenCount() == 1 {
+ child.keys = child.keys[:0]
+ newLeft, newRight := mergeRight2Left(previousChild, child, stats)
+ n.children = append(n.children[:previousIndex], append([]*Node23{newLeft, newRight}, n.children[i+1:]...)...)
+ }
+ } else {
+ nextIndex := i + 1
+ nextChild := n.children[nextIndex]
+ for nextChild.isEmpty() && nextIndex+1 < n.childrenCount() {
+ nextChild = n.children[nextIndex+1]
+ nextIndex = nextIndex + 1
+ }
+ if !nextChild.isEmpty() && child != nil && child.childrenCount() == 1 {
+ child.keys = child.keys[:0]
+ newLeft, newRight := mergeLeft2Right(child, nextChild, stats)
+ n.children = append([]*Node23{newLeft, newRight}, n.children[nextIndex+1:]...)
+ }
+ if childNextKey != nil {
+ nextKey = childNextKey
+ }
+ }
+ }
+ switch len(n.children) {
+ case 2:
+ nextKey, intermediateKeys = update2Node(n, newKeys, nextKey, intermediateKeys, stats)
+ case 3:
+ nextKey, intermediateKeys = update3Node(n, newKeys, nextKey, intermediateKeys, stats)
+ default:
+ ensure(false, fmt.Sprintf("unexpected number of children in %s", n))
+ }
+
+ for _, child := range n.children {
+ if child.updated {
+ n.updated = true
+ stats.UpdatedCount++
+ break
+ }
+ }
+
+ if n.keyCount() == 0 {
+ return nil, nextKey, intermediateKeys
+ } else {
+ return n, nextKey, intermediateKeys
+ }
+}
+
+func mergeLeft2Right(left, right *Node23, stats *Stats) (newLeft, newRight *Node23) {
+ ensure(!left.isLeaf, "mergeLeft2Right: left is leaf")
+ ensure(left.childrenCount() > 0, "mergeLeft2Right: left has no children")
+
+ if left.firstChild().childrenCount() == 1 {
+ newLeftFirstChild, newRightFirstChild := mergeLeft2Right(left.firstChild(), right.firstChild(), stats)
+ left = makeInternalNode(
+ []*Node23{newLeftFirstChild},
+ left.keys,
+ stats,
+ )
+ right = makeInternalNode(
+ append([]*Node23{newRightFirstChild}, right.children[1:]...),
+ right.keys,
+ stats,
+ )
+ }
+
+ if right.childrenCount() >= 3 {
+ return mergeRight2Left(left, right, stats)
+ }
+ if left.firstChild().isEmpty() {
+ newRight = right
+ newLeft = makeInternalNode([]*Node23{}, []*Felt{}, stats)
+ return newLeft, newRight
+ }
+ if right.childrenCount() == 1 {
+ if right.firstChild().isEmpty() {
+ newLeft = left
+ newRight = makeInternalNode([]*Node23{}, []*Felt{}, stats)
+ } else {
+ newRight = makeInternalNode(
+ append([]*Node23{left.firstChild()}, right.children...),
+ []*Felt{left.lastLeaf().nextKey()},
+ stats,
+ )
+ if left.keyCount() > 1 {
+ newLeft = makeInternalNode(left.children[1:], left.keys[1:], stats)
+ } else {
+ newLeft = makeInternalNode(left.children[1:], left.keys, stats)
+ }
+ }
+ } else {
+ newRight = makeInternalNode(
+ append([]*Node23{left.firstChild()}, right.children...),
+ append([]*Felt{left.lastLeaf().nextKey()}, right.keys...),
+ stats,
+ )
+ if left.keyCount() > 1 {
+ newLeft = makeInternalNode(left.children[1:], left.keys[1:], stats)
+ } else {
+ newLeft = makeInternalNode(left.children[1:], left.keys, stats)
+ }
+ }
+ return newLeft, newRight
+}
+
+func mergeRight2Left(left, right *Node23, stats *Stats) (newLeft, newRight *Node23) {
+ ensure(!right.isLeaf, "mergeRight2Left: right is leaf")
+ ensure(right.childrenCount() > 0, "mergeRight2Left: right has no children")
+
+ if right.firstChild().childrenCount() == 1 {
+ newLeftLastChild, newRightFirstChild := mergeRight2Left(left.lastChild(), right.firstChild(), stats)
+ left = makeInternalNode(
+ append(left.children[:len(left.children)-1], newLeftLastChild),
+ left.keys,
+ stats,
+ )
+ right = makeInternalNode(
+ []*Node23{newRightFirstChild},
+ right.keys,
+ stats,
+ )
+ }
+
+ if left.childrenCount() < 3 {
+ if !right.firstChild().isEmpty() {
+ if left.childrenCount() == 1 {
+ if left.firstChild().isEmpty() {
+ newLeft = makeInternalNode([]*Node23{}, []*Felt{}, stats)
+ newRight = right
+ } else {
+ newLeft = makeInternalNode(
+ append(left.children, right.firstChild()),
+ []*Felt{right.firstLeaf().firstKey()},
+ stats,
+ )
+ if right.keyCount() > 1 {
+ newRight = makeInternalNode(right.children[1:], right.keys[1:], stats)
+ } else {
+ newRight = makeInternalNode(right.children[1:], right.keys, stats)
+ }
+ }
+ } else {
+ newLeft = makeInternalNode(
+ append(left.children, right.firstChild()),
+ append(left.keys, right.firstLeaf().firstKey()),
+ stats,
+ )
+ if right.keyCount() > 1 {
+ newRight = makeInternalNode(right.children[1:], right.keys[1:], stats)
+ } else {
+ newRight = makeInternalNode(right.children[1:], right.keys, stats)
+ }
+ }
+ } else {
+ newLeft = left
+ newRight = makeInternalNode([]*Node23{}, []*Felt{}, stats)
+ }
+ } else {
+ newLeft, newRight = mergeLeft2Right(left, right, stats)
+ }
+ return newLeft, newRight
+}
+
+func splitKeys(n *Node23, keysToDelete []Felt) [][]Felt {
+ ensure(!n.isLeaf, "splitKeys: node is not internal")
+ ensure(len(n.keys) > 0, fmt.Sprintf("splitKeys: internal node %s has no keys", n))
+
+ keySubsets := make([][]Felt, 0)
+ for i, key := range n.keys {
+ splitIndex := sort.Search(len(keysToDelete), func(i int) bool { return keysToDelete[i] >= *key })
+ keySubsets = append(keySubsets, keysToDelete[:splitIndex])
+ keysToDelete = keysToDelete[splitIndex:]
+ if i == len(n.keys)-1 {
+ keySubsets = append(keySubsets, keysToDelete)
+ }
+ }
+ ensure(len(keySubsets) == len(n.children), "key subsets and children have different cardinality")
+ return keySubsets
+}
+
+func update2Node(n *Node23, newKeys []*Felt, nextKey *Felt, intermediateKeys []*Felt, stats *Stats) (*Felt, []*Felt) {
+ ensure(len(n.children) == 2, "update2Node: wrong number of children")
+
+ switch len(newKeys) {
+ case 0:
+ break
+ case 1:
+ n.keys = newKeys
+ case 2:
+ n.keys = newKeys[:1]
+ intermediateKeys = append(intermediateKeys, newKeys[1])
+ default:
+ ensure(false, fmt.Sprintf("update2Node: wrong number of newKeys=%d", len(newKeys)))
+ }
+ nodeA, nodeC := n.children[0], n.children[1]
+ if nodeA.isEmpty() {
+ if nodeC.isEmpty() {
+ /* A is empty, a_next is the "next key"; C is empty, c_next is the "next key" */
+ n.children = n.children[:0]
+ n.keys = n.keys[:0]
+ if nodeC.isLeaf {
+ return nodeC.nextKey(), intermediateKeys
+ }
+ return nextKey, intermediateKeys
+ } else {
+ /* A is empty, a_next is the "next key"; C is not empty */
+ n.children = n.children[1:]
+ /// n.keys = []*Felt{nodeC.lastLeaf().nextKey()}
+ if nodeA.isLeaf {
+ return nodeA.nextKey(), intermediateKeys
+ }
+ return nextKey, intermediateKeys
+ }
+ } else {
+ if nodeC.isEmpty() {
+ /* A is not empty; C is empty, c_next is the "next key" */
+ n.children = n.children[:1]
+ /// n.keys = []*Felt{nodeA.lastLeaf().nextKey()}
+ if nodeC.isLeaf {
+ nodeA.setNextKey(nodeC.nextKey(), stats)
+ }
+ return nextKey, intermediateKeys
+ } else {
+ /* A is not empty; C is not empty */
+ n.keys = []*Felt{nodeA.lastLeaf().nextKey()}
+ return nextKey, intermediateKeys
+ }
+ }
+}
+
+func update3Node(n *Node23, newKeys []*Felt, nextKey *Felt, intermediateKeys []*Felt, stats *Stats) (*Felt, []*Felt) {
+ ensure(len(n.children) == 3, "update3Node: wrong number of children")
+
+ switch len(newKeys) {
+ case 0:
+ break
+ case 1:
+ n.keys = newKeys
+ case 2:
+ n.keys = newKeys
+ case 3:
+ n.keys = newKeys[:2]
+ intermediateKeys = append(intermediateKeys, newKeys[2])
+ default:
+ ensure(false, fmt.Sprintf("update3Node: wrong number of newKeys=%d", len(newKeys)))
+ }
+ nodeA, nodeB, nodeC := n.children[0], n.children[1], n.children[2]
+ if nodeA.isEmpty() {
+ if nodeB.isEmpty() {
+ if nodeC.isEmpty() {
+ /* A is empty, a_next is the "next key"; B is empty, b_next is the "next key"; C is empty, c_next is the "next key" */
+ n.children = n.children[:0]
+ n.keys = n.keys[:0]
+ if nodeA.isLeaf {
+ return nodeC.nextKey(), intermediateKeys
+ }
+ return nextKey, intermediateKeys
+ } else {
+ /* A is empty, a_next is the "next key"; B is empty, b_next is the "next key"; C is not empty */
+ n.children = n.children[2:]
+ /// n.keys = []*Felt{nodeC.lastLeaf().nextKey()}
+ if nodeA.isLeaf {
+ return nodeB.nextKey(), intermediateKeys
+ }
+ return nextKey, intermediateKeys
+ }
+ } else {
+ if nodeC.isEmpty() {
+ /* A is empty, a_next is the "next key"; B is not empty; C is empty, c_next is the "next key" */
+ n.children = n.children[1:2]
+ /// n.keys = []*Felt{nodeB.lastLeaf().nextKey()}
+ if nodeA.isLeaf {
+ nodeB.setNextKey(nodeC.nextKey(), stats)
+ return nodeA.nextKey(), intermediateKeys
+ }
+ return nextKey, intermediateKeys
+ } else {
+ /* A is empty, a_next is the "next key"; B is not empty; C is not empty */
+ n.children = n.children[1:]
+ if nodeA.isLeaf {
+ n.keys = []*Felt{nodeB.nextKey()}
+ return nodeA.nextKey(), intermediateKeys
+ }
+ n.keys = []*Felt{nodeB.lastLeaf().nextKey()}
+ return nextKey, intermediateKeys
+ }
+ }
+ } else {
+ if nodeB.isEmpty() {
+ if nodeC.isEmpty() {
+ /* A is not empty; B is empty, b_next is the "next key"; C is empty, c_next is the "next key" */
+ n.children = n.children[:1]
+ if nodeA.isLeaf {
+ nodeA.setNextKey(nodeC.nextKey(), stats)
+ }
+ /// n.keys = []*Felt{nodeA.lastLeaf().nextKey()}
+ return nextKey, intermediateKeys
+ } else {
+ /* A is not empty; B is empty, b_next is the "next key"; C is not empty */
+ n.children = append(n.children[:1], n.children[2])
+ if nodeA.isLeaf {
+ n.keys = []*Felt{nodeB.nextKey()}
+ nodeA.setNextKey(nodeB.nextKey(), stats)
+ } else {
+ n.keys = []*Felt{nodeA.lastLeaf().nextKey()}
+ }
+ return nextKey, intermediateKeys
+ }
+ } else {
+ if nodeC.isEmpty() {
+ /* A is not empty; B is not empty; C is empty, c_next is the "next key" */
+ n.children = n.children[:2]
+ if nodeA.isLeaf {
+ n.keys = []*Felt{nodeA.nextKey()}
+ nodeB.setNextKey(nodeC.nextKey(), stats)
+ } else {
+ n.keys = []*Felt{nodeA.lastLeaf().nextKey()}
+ }
+ return nextKey, intermediateKeys
+ } else {
+ /* A is not empty; B is not empty; C is not empty */
+ ///n.keys = []*Felt{nodeA.lastLeaf().nextKey(), nodeB.lastLeaf().nextKey()}
+ return nextKey, intermediateKeys
+ }
+ }
+ }
+}
+
+func demote(node *Node23, nextKey *Felt, intermediateKeys []*Felt, stats *Stats) (*Node23, *Felt) {
+ if node == nil {
+ return nil, nextKey
+ } else if len(node.children) == 0 {
+ if len(node.keys) == 0 {
+ return nil, nextKey
+ } else {
+ return node, nextKey
+ }
+ } else if len(node.children) == 1 {
+ return demote(node.children[0], nextKey, intermediateKeys, stats)
+ } else if len(node.children) == 2 {
+ firstChild, secondChild := node.children[0], node.children[1]
+ if firstChild.keyCount() == 0 && secondChild.keyCount() == 0 {
+ return nil, nextKey
+ }
+ if firstChild.keyCount() == 0 && secondChild.keyCount() > 0 {
+ return secondChild, nextKey
+ }
+ if firstChild.keyCount() > 0 && secondChild.keyCount() == 0 {
+ return firstChild, nextKey
+ }
+ if firstChild.keyCount() == 2 && secondChild.keyCount() == 2 {
+ if firstChild.isLeaf {
+ keys := []*Felt{firstChild.firstKey(), secondChild.firstKey(), secondChild.nextKey()}
+ values := []*Felt{firstChild.firstValue(), secondChild.firstValue(), secondChild.nextValue()}
+ return makeLeafNode(keys, values, stats), nextKey
+ }
+ }
+ }
+ return node, nextKey
+}
diff --git a/erigon-lib/bptree/bulk_test.go b/erigon-lib/bptree/bulk_test.go
new file mode 100644
index 00000000000..adbefd0141c
--- /dev/null
+++ b/erigon-lib/bptree/bulk_test.go
@@ -0,0 +1,168 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func assertNodeEqual(t *testing.T, expected, actual *Node23) {
+ t.Helper()
+ assert.Equal(t, expected.keysInLevelOrder(), actual.keysInLevelOrder(), "different keys by level")
+}
+
+type MergeTest struct {
+ left *Node23
+ right *Node23
+ final *Node23
+}
+
+func KV(keys []Felt, values []Felt) KeyValues {
+ keyPointers := make([]*Felt, len(keys))
+ valuePointers := make([]*Felt, len(values))
+ for i := 0; i < len(keyPointers); i++ {
+ keyPointers[i] = &keys[i]
+ valuePointers[i] = &values[i]
+ }
+ return KeyValues{keyPointers, valuePointers}
+}
+
+func K2K(keys []Felt) []*Felt {
+ kv := KV(keys, keys)
+ return kv.keys
+}
+
+func K2KV(keys []Felt) ([]*Felt, []*Felt) {
+ values := make([]Felt, len(keys))
+ copy(values, keys)
+ kv := KV(keys, values)
+ return kv.keys, kv.values
+}
+
+func newInternalNode(children []*Node23, keys []*Felt) *Node23 {
+ return makeInternalNode(children, keys, &Stats{})
+}
+
+func newLeafNode(keys, values []*Felt) *Node23 {
+ return makeLeafNode(keys, values, &Stats{})
+}
+
+var mergeLeft2RightTestTable = []MergeTest{
+ {
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{12, 127})),
+ }, K2K([]Felt{127})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{12, 127})),
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{127, 128})),
+ },
+ {
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{12, 127})),
+ }, K2K([]Felt{127})),
+ }, K2K([]Felt{44})),
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{173, 237})),
+ newLeafNode(K2KV([]Felt{237, 1000})),
+ }, K2K([]Felt{237})),
+ }, K2K([]Felt{173})),
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{12, 127})),
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{127, 128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{173, 237})),
+ newLeafNode(K2KV([]Felt{237, 1000})),
+ }, K2K([]Felt{237})),
+ }, K2K([]Felt{173})),
+ },
+}
+
+var mergeRight2LeftTestTable = []MergeTest{
+ {
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{173, 190})),
+ }, K2K([]Felt{190})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ newLeafNode(K2KV([]Felt{173, 190})),
+ }, K2K([]Felt{128, 173})),
+ },
+ {
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{173, 237})),
+ newLeafNode(K2KV([]Felt{237, 1000})),
+ }, K2K([]Felt{237})),
+ }, K2K([]Felt{173})),
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{1000, 1002})),
+ }, K2K([]Felt{1002})),
+ }, K2K([]Felt{1100})),
+ newInternalNode([]*Node23{
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{127, 128})),
+ newLeafNode(K2KV([]Felt{128, 135, 173})),
+ }, K2K([]Felt{128})),
+ newInternalNode([]*Node23{
+ newLeafNode(K2KV([]Felt{173, 237})),
+ newLeafNode(K2KV([]Felt{237, 1000})),
+ newLeafNode(K2KV([]Felt{1000, 1002})),
+ }, K2K([]Felt{237, 1000})),
+ }, K2K([]Felt{173})),
+ },
+}
+
+func TestMergeLeft2Right(t *testing.T) {
+ for _, data := range mergeLeft2RightTestTable {
+ _, merged := mergeLeft2Right(data.left, data.right, &Stats{})
+ assertNodeEqual(t, data.final, merged)
+ }
+}
+
+func TestMergeRight2Left(t *testing.T) {
+ for _, data := range mergeRight2LeftTestTable {
+ merged, _ := mergeRight2Left(data.left, data.right, &Stats{})
+ assertNodeEqual(t, data.final, merged)
+ }
+}
diff --git a/erigon-lib/bptree/felt.go b/erigon-lib/bptree/felt.go
new file mode 100644
index 00000000000..f9b1ee5e3d2
--- /dev/null
+++ b/erigon-lib/bptree/felt.go
@@ -0,0 +1,51 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "crypto/sha256"
+ "encoding/binary"
+)
+
+type Felt uint64
+
+func (v *Felt) Binary() []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(*v))
+ return b
+}
+
+func hash2(bytes1, bytes2 []byte) []byte {
+ hashBuilder := sha256.New()
+ bytes1Written, _ := hashBuilder.Write(bytes1)
+ ensure(bytes1Written == len(bytes1), "hash2: invalid number of bytes1 written")
+ bytes2Written, _ := hashBuilder.Write(bytes2)
+ ensure(bytes2Written == len(bytes2), "hash2: invalid number of bytes2 written")
+ return hashBuilder.Sum(nil)
+}
+
+func deref(pointers []*Felt) []Felt {
+ pointees := make([]Felt, 0)
+ for _, ptr := range pointers {
+ if ptr != nil {
+ pointees = append(pointees, *ptr)
+ } else {
+ break
+ }
+ }
+ return pointees
+}
diff --git a/erigon-lib/bptree/graph.go b/erigon-lib/bptree/graph.go
new file mode 100644
index 00000000000..cf6032c8b43
--- /dev/null
+++ b/erigon-lib/bptree/graph.go
@@ -0,0 +1,167 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strconv"
+)
+
+type Node23Graph struct {
+ node *Node23
+}
+
+func NewGraph(node *Node23) *Node23Graph {
+ return &Node23Graph{node}
+}
+
+func (g *Node23Graph) saveDot(filename string, debug bool) {
+ palette := []string{"#FDF3D0", "#DCE8FA", "#D9E7D6", "#F1CFCD", "#F5F5F5", "#E1D5E7", "#FFE6CC", "white"}
+ const unexposedIndex = 0
+ const exposedIndex = 1
+ const updatedIndex = 2
+
+ f, err := os.OpenFile(filename+".dot", os.O_RDWR|os.O_CREATE, 0755)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }()
+ if g.node == nil {
+ if _, err := f.WriteString("strict digraph {\nnode [shape=record];}\n"); err != nil {
+ log.Fatal(err)
+ }
+ return
+ }
+ if _, err := f.WriteString("strict digraph {\nnode [shape=record];\n"); err != nil {
+ log.Fatal(err)
+ }
+ for _, n := range g.node.walkNodesPostOrder() {
+ left, down, right := "", "", ""
+ switch n.childrenCount() {
+ case 1:
+ left = "L"
+ case 2:
+ left = "L"
+ right = "R"
+ case 3:
+ left = "L"
+ down = "D"
+ right = "R"
+ }
+ var nodeID string
+ if n.isLeaf {
+ var next string
+ if n.keyCount() > 0 {
+ if n.nextKey() == nil {
+ next = "nil"
+ } else {
+ next = strconv.FormatUint(uint64(*n.nextKey()), 10)
+ }
+ if debug {
+ nodeID = fmt.Sprintf("k=%v %s-%v", deref(n.keys[:len(n.keys)-1]), next, n.keys)
+ } else {
+ nodeID = fmt.Sprintf("k=%v %s", deref(n.keys[:len(n.keys)-1]), next)
+ }
+ } else {
+ nodeID = "k=[]"
+ }
+ } else {
+ if debug {
+ nodeID = fmt.Sprintf("k=%v-%v", deref(n.keys), n.keys)
+ } else {
+ nodeID = fmt.Sprintf("k=%v", deref(n.keys))
+ }
+ }
+ var color string
+ if n.exposed {
+ if n.updated {
+ color = palette[updatedIndex]
+ } else {
+ color = palette[exposedIndex]
+ }
+ } else {
+ ensure(!n.updated, fmt.Sprintf("saveDot: node %v is not exposed but updated", n))
+ color = palette[unexposedIndex]
+ }
+ s := fmt.Sprintf("%d [label=\"%s|{%s|%s}|%s\" style=filled fillcolor=\"%s\"];\n", n.rawPointer(), left, nodeID, down, right, color)
+ if _, err := f.WriteString(s); err != nil {
+ log.Fatal(err)
+ }
+ }
+ for _, n := range g.node.walkNodesPostOrder() {
+ var treeLeft, treeDown, treeRight *Node23
+ switch n.childrenCount() {
+ case 1:
+ treeLeft = n.children[0]
+ case 2:
+ treeLeft = n.children[0]
+ treeRight = n.children[1]
+ case 3:
+ treeLeft = n.children[0]
+ treeDown = n.children[1]
+ treeRight = n.children[2]
+ }
+ if treeLeft != nil {
+ //if _, err := f.WriteString(fmt.Sprintln(n.rawPointer(), ":L -> ", treeLeft.rawPointer(), ":C;")); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf("%d:L -> %d:C;\n", n.rawPointer(), treeLeft.rawPointer())); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if treeDown != nil {
+ //if _, err := f.WriteString(fmt.Sprintln(n.rawPointer(), ":D -> ", treeDown.rawPointer(), ":C;")); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf("%d:D -> %d:C;\n", n.rawPointer(), treeDown.rawPointer())); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if treeRight != nil {
+ //if _, err := f.WriteString(fmt.Sprintln(n.rawPointer(), ":R -> ", treeRight.rawPointer(), ":C;")); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf("%d:R -> %d:C;\n", n.rawPointer(), treeRight.rawPointer())); err != nil {
+ log.Fatal(err)
+ }
+ }
+ }
+ if _, err := f.WriteString("}\n"); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func (g *Node23Graph) saveDotAndPicture(filename string, debug bool) error {
+ graphDir := "testdata/graph/"
+ _ = os.MkdirAll(graphDir, os.ModePerm)
+ filepath := graphDir + filename
+ _ = os.Remove(filepath + ".dot")
+ _ = os.Remove(filepath + ".png")
+ g.saveDot(filepath, debug)
+ dotExecutable, _ := exec.LookPath("dot")
+ cmdDot := &exec.Cmd{
+ Path: dotExecutable,
+ Args: []string{dotExecutable, "-Tpng", filepath + ".dot", "-o", filepath + ".png"},
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+ }
+ if err := cmdDot.Run(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/erigon-lib/bptree/key_factory.go b/erigon-lib/bptree/key_factory.go
new file mode 100644
index 00000000000..6b96ab8734d
--- /dev/null
+++ b/erigon-lib/bptree/key_factory.go
@@ -0,0 +1,115 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+)
+
+type KeyFactory interface {
+ NewUniqueKeyValues(reader *bufio.Reader) KeyValues
+ NewUniqueKeys(reader *bufio.Reader) Keys
+}
+
+type KeyBinaryFactory struct {
+ keySize int
+}
+
+func NewKeyBinaryFactory(keySize int) KeyFactory {
+ return &KeyBinaryFactory{keySize: keySize}
+}
+
+func (factory *KeyBinaryFactory) NewUniqueKeyValues(reader *bufio.Reader) KeyValues {
+ kvPairs := factory.readUniqueKeyValues(reader)
+ sort.Sort(kvPairs)
+ return kvPairs
+}
+
+func (factory *KeyBinaryFactory) NewUniqueKeys(reader *bufio.Reader) Keys {
+ keys := factory.readUniqueKeys(reader)
+ sort.Sort(keys)
+ return keys
+}
+
+func (factory *KeyBinaryFactory) readUniqueKeyValues(reader *bufio.Reader) KeyValues {
+ kvPairs := KeyValues{make([]*Felt, 0), make([]*Felt, 0)}
+ keyRegistry := make(map[Felt]bool)
+ buffer := make([]byte, BufferSize)
+ for {
+ bytesRead, err := reader.Read(buffer)
+ ensure(err == nil || err == io.EOF, fmt.Sprintf("readUniqueKeyValues: read error %s\n", err))
+ if err == io.EOF {
+ break
+ }
+ keyBytesCount := factory.keySize * (bytesRead / factory.keySize)
+ duplicatedKeys := 0
+ for i := 0; i < keyBytesCount; i += factory.keySize {
+ key := factory.readKey(buffer, i)
+ if _, duplicated := keyRegistry[key]; duplicated {
+ duplicatedKeys++
+ continue
+ }
+ keyRegistry[key] = true
+ value := key // Shortcut: value equal to key
+ kvPairs.keys = append(kvPairs.keys, &key)
+ kvPairs.values = append(kvPairs.values, &value)
+ }
+ }
+ return kvPairs
+}
+
+func (factory *KeyBinaryFactory) readUniqueKeys(reader *bufio.Reader) Keys {
+ keys := make(Keys, 0)
+ keyRegistry := make(map[Felt]bool)
+ buffer := make([]byte, BufferSize)
+ for {
+ bytesRead, err := reader.Read(buffer)
+ if err == io.EOF {
+ break
+ }
+ keyBytesCount := factory.keySize * (bytesRead / factory.keySize)
+ duplicatedKeys := 0
+ for i := 0; i < keyBytesCount; i += factory.keySize {
+ key := factory.readKey(buffer, i)
+ if _, duplicated := keyRegistry[key]; duplicated {
+ duplicatedKeys++
+ continue
+ }
+ keyRegistry[key] = true
+ keys = append(keys, key)
+ }
+ }
+ return keys
+}
+
+func (factory *KeyBinaryFactory) readKey(buffer []byte, offset int) Felt {
+ keySlice := buffer[offset : offset+factory.keySize]
+ switch factory.keySize {
+ case 1:
+ return Felt(keySlice[0])
+ case 2:
+ return Felt(binary.BigEndian.Uint16(keySlice))
+ case 4:
+ return Felt(binary.BigEndian.Uint32(keySlice))
+ default:
+ return Felt(binary.BigEndian.Uint64(keySlice))
+ }
+}
diff --git a/erigon-lib/bptree/node.go b/erigon-lib/bptree/node.go
new file mode 100644
index 00000000000..0b579db853a
--- /dev/null
+++ b/erigon-lib/bptree/node.go
@@ -0,0 +1,481 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+)
+
+type Keys []Felt
+
+func (keys Keys) Len() int { return len(keys) }
+
+func (keys Keys) Less(i, j int) bool { return keys[i] < keys[j] }
+
+func (keys Keys) Swap(i, j int) { keys[i], keys[j] = keys[j], keys[i] }
+
+func (keys Keys) Contains(key Felt) bool {
+ for _, k := range keys {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+func (keys Keys) String() string {
+ b := strings.Builder{}
+ for i, k := range keys {
+ fmt.Fprintf(&b, "%v", k)
+ if i != len(keys)-1 {
+ fmt.Fprintf(&b, " ")
+ }
+ }
+ return b.String()
+}
+
+type KeyValues struct {
+ keys []*Felt
+ values []*Felt
+}
+
+func (kv KeyValues) Len() int { return len(kv.keys) }
+
+func (kv KeyValues) Less(i, j int) bool { return *kv.keys[i] < *kv.keys[j] }
+
+func (kv KeyValues) Swap(i, j int) {
+ kv.keys[i], kv.keys[j] = kv.keys[j], kv.keys[i]
+ kv.values[i], kv.values[j] = kv.values[j], kv.values[i]
+}
+
+func (kv KeyValues) String() string {
+ b := strings.Builder{}
+ for i, k := range kv.keys {
+ v := kv.values[i]
+ fmt.Fprintf(&b, "{%v, %v}", *k, *v)
+ if i != len(kv.keys)-1 {
+ fmt.Fprintf(&b, " ")
+ }
+ }
+ return b.String()
+}
+
+type Node23 struct {
+ children []*Node23
+ keys []*Felt
+ values []*Felt
+ isLeaf bool
+ exposed bool
+ updated bool
+}
+
+func (n *Node23) String() string {
+ s := fmt.Sprintf("{%p isLeaf=%t keys=%v-%v children=[", n, n.isLeaf, deref(n.keys), n.keys)
+ for i, child := range n.children {
+ s += fmt.Sprintf("%p", child)
+ if i != len(n.children)-1 {
+ s += " "
+ }
+ }
+ s += "]}"
+ return s
+}
+
+func makeInternalNode(children []*Node23, keys []*Felt, stats *Stats) *Node23 {
+ stats.CreatedCount++
+ n := &Node23{isLeaf: false, children: children, keys: keys, values: make([]*Felt, 0), exposed: true, updated: true}
+ return n
+}
+
+func makeLeafNode(keys, values []*Felt, stats *Stats) *Node23 {
+ ensure(len(keys) > 0, "number of keys is zero")
+ ensure(len(keys) == len(values), "keys and values have different cardinality")
+ stats.CreatedCount++
+ n := &Node23{isLeaf: true, children: make([]*Node23, 0), keys: keys, values: values, exposed: true, updated: true}
+ return n
+}
+
+func makeEmptyLeafNode() *Node23 {
+ // At least nil next key is always present
+ return makeLeafNode(make([]*Felt, 1), make([]*Felt, 1), &Stats{}) // do not count it into stats
+}
+
+func promote(nodes []*Node23, intermediateKeys []*Felt, stats *Stats) *Node23 {
+ if len(nodes) > 3 {
+ promotedNodes := make([]*Node23, 0)
+ promotedKeys := make([]*Felt, 0)
+ for len(nodes) > 3 {
+ promotedNodes = append(promotedNodes, makeInternalNode(nodes[:2], intermediateKeys[:1], stats))
+ nodes = nodes[2:]
+ promotedKeys = append(promotedKeys, intermediateKeys[1])
+ intermediateKeys = intermediateKeys[2:]
+ }
+ promotedNodes = append(promotedNodes, makeInternalNode(nodes, intermediateKeys, stats))
+ return promote(promotedNodes, promotedKeys, stats)
+ }
+ promotedRoot := makeInternalNode(nodes, intermediateKeys, stats)
+ return promotedRoot
+}
+
+func (n *Node23) reset() {
+ n.exposed = false
+ n.updated = false
+ if !n.isLeaf {
+ for _, child := range n.children {
+ child.reset()
+ }
+ }
+}
+
+func (n *Node23) isValid() (bool, error) {
+ ensure(n.exposed || !n.updated, "isValid: node is not exposed but updated")
+ if n.isLeaf {
+ return n.isValidLeaf()
+ }
+ return n.isValidInternal()
+}
+
+func (n *Node23) isValidLeaf() (bool, error) {
+ ensure(n.isLeaf, "isValidLeaf: node is not leaf")
+
+ /* Any leaf node shall have no children */
+ if n.childrenCount() != 0 {
+ return false, fmt.Errorf("invalid %d children in %v", n.childrenCount(), n)
+ }
+ /* Any leaf node can have either 1 or 2 keys (plus next key) */
+ return n.keyCount() == 1+1 || n.keyCount() == 2+1, fmt.Errorf("invalid %d keys in %v", n.keyCount(), n)
+}
+
+func (n *Node23) isValidInternal() (bool, error) {
+ ensure(!n.isLeaf, "isValidInternal: node is leaf")
+
+ /* Any internal node can have either 1 keys and 2 children or 2 keys and 3 children */
+ if n.keyCount() != 1 && n.keyCount() != 2 {
+ return false, fmt.Errorf("invalid %d keys in %v", n.keyCount(), n)
+ }
+ if n.keyCount() == 1 && n.childrenCount() != 2 {
+ return false, fmt.Errorf("invalid %d keys %d children in %v", n.keyCount(), n.childrenCount(), n)
+ }
+ if n.keyCount() == 2 && n.childrenCount() != 3 {
+ return false, fmt.Errorf("invalid %d children in %v", n.keyCount(), n)
+ }
+ subtree := n.walkNodesPostOrder()
+ // Check that each internal node has unique keys corresponding to leaf next keys
+ for _, key := range n.keys {
+ hasNextKey := false
+ for _, node := range subtree {
+ if !node.isLeaf {
+ if node != n && node.hasKey(key) {
+ return false, fmt.Errorf("internal key %d not unique", *key)
+ }
+ continue
+ }
+ leafNextKey := node.nextKey()
+ if leafNextKey != nil && *key == *leafNextKey {
+ hasNextKey = true
+ }
+ }
+ if !hasNextKey {
+ return false, fmt.Errorf("internal key %d not present in next keys", *key)
+ }
+ }
+ // Check that leaves in subtree are chained together (next key -> first key)
+ for i, node := range subtree {
+ if !node.isLeaf {
+ // Post-order walk => previous and next nodes are contiguous leaves except last
+ if i == len(subtree)-1 {
+ continue
+ }
+ previous, next := subtree[i], subtree[i+1]
+ if previous.isLeaf && next.isLeaf {
+ // Previous node's next key must be equal to next node's first key
+ if previous.nextKey() != next.firstKey() {
+ return false, fmt.Errorf("nodes %v and %v not chained by next key", previous, next)
+ }
+ }
+ continue
+ }
+ }
+ for i := len(n.children) - 1; i >= 0; i-- {
+ child := n.children[i]
+ // Check that each child subtree is a 2-3 tree
+ childValid, err := child.isValid()
+ if !childValid {
+ return false, fmt.Errorf("invalid child %v in %v, error: %w", child, n, err)
+ }
+ }
+ return true, nil
+}
+
+func (n *Node23) keyCount() int {
+ return len(n.keys)
+}
+
+func (n *Node23) childrenCount() int {
+ return len(n.children)
+}
+
+func (n *Node23) valueCount() int {
+ return len(n.values)
+}
+
+func (n *Node23) firstKey() *Felt {
+ ensure(len(n.keys) > 0, "firstKey: node has no key")
+ return n.keys[0]
+}
+
+func (n *Node23) firstValue() *Felt {
+ ensure(len(n.values) > 0, "firstValue: node has no value")
+ return n.values[0]
+}
+
+func (n *Node23) firstChild() *Node23 {
+ ensure(len(n.children) > 0, "firstChild: node has no children")
+ return n.children[0]
+}
+
+func (n *Node23) firstLeaf() *Node23 {
+ if n.isLeaf {
+ return n
+ }
+ firstLeaf := n.firstChild()
+ for !firstLeaf.isLeaf {
+ firstLeaf = firstLeaf.firstChild()
+ }
+ ensure(firstLeaf.isLeaf, "firstLeaf: last is not leaf")
+ return firstLeaf
+}
+
+func (n *Node23) lastChild() *Node23 {
+ ensure(len(n.children) > 0, "lastChild: node has no children")
+ return n.children[len(n.children)-1]
+}
+
+func (n *Node23) lastLeaf() *Node23 {
+ if n.isLeaf {
+ return n
+ }
+ lastLeaf := n.lastChild()
+ for !lastLeaf.isLeaf {
+ lastLeaf = lastLeaf.lastChild()
+ }
+ ensure(lastLeaf.isLeaf, "lastLeaf: last is not leaf")
+ return lastLeaf
+}
+
+func (n *Node23) nextKey() *Felt {
+ ensure(len(n.keys) > 0, "nextKey: node has no key")
+ return n.keys[len(n.keys)-1]
+}
+
+func (n *Node23) nextValue() *Felt {
+ ensure(len(n.values) > 0, "nextValue: node has no value")
+ return n.values[len(n.values)-1]
+}
+
+func (n *Node23) rawPointer() uintptr {
+ return uintptr(unsafe.Pointer(n))
+}
+
+func (n *Node23) setNextKey(nextKey *Felt, stats *Stats) {
+ ensure(len(n.keys) > 0, "setNextKey: node has no key")
+ n.keys[len(n.keys)-1] = nextKey
+ if !n.exposed {
+ n.exposed = true
+ stats.ExposedCount++
+ stats.OpeningHashes += n.howManyHashes()
+ }
+ n.updated = true
+ stats.UpdatedCount++
+}
+
+func (n *Node23) canonicalKeys() []Felt {
+ if n.isLeaf {
+ ensure(len(n.keys) > 0, "canonicalKeys: node has no key")
+ return deref(n.keys[:len(n.keys)-1])
+ } else {
+ return deref(n.keys)
+ }
+}
+
+func (n *Node23) hasKey(targetKey *Felt) bool {
+ var keys []*Felt
+ if n.isLeaf {
+ ensure(len(n.keys) > 0, "hasKey: node has no key")
+ keys = n.keys[:len(n.keys)-1]
+ } else {
+ keys = n.keys
+ }
+ for _, key := range keys {
+ if *key == *targetKey {
+ return true
+ }
+ }
+ return false
+}
+
+func (n *Node23) isEmpty() bool {
+ if n.isLeaf {
+ // At least next key is always present
+ return n.keyCount() == 1
+ } else {
+ return n.childrenCount() == 0
+ }
+}
+
+func (n *Node23) height() int {
+ if n.isLeaf {
+ return 1
+ } else {
+ ensure(len(n.children) > 0, "height: internal node has zero children")
+ return n.children[0].height() + 1
+ }
+}
+
+func (n *Node23) keysInLevelOrder() []Felt {
+ keysByLevel := make([]Felt, 0)
+ for i := 0; i < n.height(); i++ {
+ keysByLevel = append(keysByLevel, n.keysByLevel(i)...)
+ }
+ return keysByLevel
+}
+
+func (n *Node23) keysByLevel(level int) []Felt {
+ if level == 0 {
+ return n.canonicalKeys()
+ } else {
+ levelKeys := make([]Felt, 0)
+ for _, child := range n.children {
+ childLevelKeys := child.keysByLevel(level - 1)
+ levelKeys = append(levelKeys, childLevelKeys...)
+ }
+ return levelKeys
+ }
+}
+
+type Walker func(*Node23) interface{}
+
+func (n *Node23) walkPostOrder(w Walker) []interface{} {
+ items := make([]interface{}, 0)
+ if !n.isLeaf {
+ for _, child := range n.children {
+ childItems := child.walkPostOrder(w)
+ items = append(items, childItems...)
+ }
+ }
+ items = append(items, w(n))
+ return items
+}
+
+func (n *Node23) walkNodesPostOrder() []*Node23 {
+ nodeItems := n.walkPostOrder(func(n *Node23) interface{} { return n })
+ nodes := make([]*Node23, len(nodeItems))
+ for i := range nodeItems {
+ nodes[i] = nodeItems[i].(*Node23)
+ }
+ return nodes
+}
+
+func (n *Node23) howManyHashes() uint {
+ if n.isLeaf {
+ // all leaves except last one: 2 or 3 keys + 1 or 2 values => 3 or 5 data => 2 or 4 hashes
+ // last leaf: 1 or 2 keys + 1 or 2 values => 2 or 4 data => 1 or 3 hashes
+ switch n.keyCount() {
+ case 2:
+ nextKey := n.keys[1]
+ if nextKey == nil {
+ return 1
+ } else {
+ return 2
+ }
+ case 3:
+ nextKey := n.keys[2]
+ if nextKey == nil {
+ return 3
+ } else {
+ return 4
+ }
+ default:
+ ensure(false, fmt.Sprintf("howManyHashes: unexpected keyCount=%d\n", n.keyCount()))
+ return 0
+ }
+ } else {
+ // internal node: 2 or 3 children => 1 or 2 hashes
+ switch n.childrenCount() {
+ case 2:
+ return 1
+ case 3:
+ return 2
+ default:
+ ensure(false, fmt.Sprintf("howManyHashes: unexpected childrenCount=%d\n", n.childrenCount()))
+ return 0
+ }
+ }
+}
+
+func (n *Node23) hashNode() []byte {
+ if n.isLeaf {
+ return n.hashLeaf()
+ } else {
+ return n.hashInternal()
+ }
+}
+
+func (n *Node23) hashLeaf() []byte {
+ ensure(n.isLeaf, "hashLeaf: node is not leaf")
+ ensure(n.valueCount() == n.keyCount(), "hashLeaf: insufficient number of values")
+ switch n.keyCount() {
+ case 2:
+ k, nextKey, v := *n.keys[0], n.keys[1], *n.values[0]
+ h := hash2(k.Binary(), v.Binary())
+ if nextKey == nil {
+ return h
+ } else {
+ return hash2(h, (*nextKey).Binary())
+ }
+ case 3:
+ k1, k2, nextKey, v1, v2 := *n.keys[0], *n.keys[1], n.keys[2], *n.values[0], *n.values[1]
+ h1 := hash2(k1.Binary(), v1.Binary())
+ h2 := hash2(k2.Binary(), v2.Binary())
+ h12 := hash2(h1, h2)
+ if nextKey == nil {
+ return h12
+ } else {
+ return hash2(h12, (*nextKey).Binary())
+ }
+ default:
+ ensure(false, fmt.Sprintf("hashLeaf: unexpected keyCount=%d\n", n.keyCount()))
+ return []byte{}
+ }
+}
+
+func (n *Node23) hashInternal() []byte {
+ ensure(!n.isLeaf, "hashInternal: node is not internal")
+ switch n.childrenCount() {
+ case 2:
+ child1, child2 := n.children[0], n.children[1]
+ return hash2(child1.hashNode(), child2.hashNode())
+ case 3:
+ child1, child2, child3 := n.children[0], n.children[1], n.children[2]
+ return hash2(hash2(child1.hashNode(), child2.hashNode()), child3.hashNode())
+ default:
+ ensure(false, fmt.Sprintf("hashInternal: unexpected childrenCount=%d\n", n.childrenCount()))
+ return []byte{}
+ }
+}
diff --git a/erigon-lib/bptree/tree.go b/erigon-lib/bptree/tree.go
new file mode 100644
index 00000000000..3acb5a9fbcc
--- /dev/null
+++ b/erigon-lib/bptree/tree.go
@@ -0,0 +1,177 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "fmt"
+)
+
+type Stats struct {
+ ExposedCount uint
+ RehashedCount uint
+ CreatedCount uint
+ UpdatedCount uint
+ DeletedCount uint
+ OpeningHashes uint
+ ClosingHashes uint
+}
+
+type Tree23 struct {
+ root *Node23
+}
+
+func NewEmptyTree23() *Tree23 {
+ return &Tree23{}
+}
+
+func NewTree23(kvItems KeyValues) *Tree23 {
+ tree := new(Tree23).Upsert(kvItems)
+ tree.reset()
+ return tree
+}
+
+func (t *Tree23) String() string {
+ return fmt.Sprintf("root={keys=%v #children=%d} size=%d", deref(t.root.keys), t.root.childrenCount(), t.Size())
+}
+
+func (t *Tree23) Size() int {
+ count := 0
+ t.WalkPostOrder(func(n *Node23) interface{} { count++; return nil })
+ return count
+}
+
+func (t *Tree23) RootHash() []byte {
+ if t.root == nil {
+ return []byte{}
+ }
+ return t.root.hashNode()
+}
+
+func (t *Tree23) IsValid() (bool, error) {
+ if t.root == nil {
+ return true, nil
+ }
+ // Last leaf must have sentinel next key
+ if lastLeaf := t.root.lastLeaf(); lastLeaf.keyCount() > 0 && lastLeaf.nextKey() != nil {
+ return false, fmt.Errorf("no sentinel next key in last leaf %d", &lastLeaf)
+ }
+ return t.root.isValid()
+}
+
+func (t *Tree23) Graph(filename string, debug bool) {
+ graph := NewGraph(t.root)
+ graph.saveDot(filename, debug)
+}
+
+func (t *Tree23) GraphAndPicture(filename string) error {
+ graph := NewGraph(t.root)
+ return graph.saveDotAndPicture(filename, false)
+}
+
+func (t *Tree23) GraphAndPictureDebug(filename string) error {
+ graph := NewGraph(t.root)
+ return graph.saveDotAndPicture(filename, true)
+}
+
+func (t *Tree23) Height() int {
+ if t.root == nil {
+ return 0
+ }
+ return t.root.height()
+}
+
+func (t *Tree23) KeysInLevelOrder() []Felt {
+ if t.root == nil {
+ return []Felt{}
+ }
+ return t.root.keysInLevelOrder()
+}
+
+func (t *Tree23) WalkPostOrder(w Walker) []interface{} {
+ if t.root == nil {
+ return make([]interface{}, 0)
+ }
+ return t.root.walkPostOrder(w)
+}
+
+func (t *Tree23) WalkKeysPostOrder() []Felt {
+ keyPointers := make([]*Felt, 0)
+ t.WalkPostOrder(func(n *Node23) interface{} {
+ if n.isLeaf && n.keyCount() > 0 {
+ keyPointers = append(keyPointers, n.keys[:len(n.keys)-1]...)
+ }
+ return nil
+ })
+ keys := deref(keyPointers)
+ return keys
+}
+
+func (t *Tree23) Upsert(kvItems KeyValues) *Tree23 {
+ return t.UpsertWithStats(kvItems, &Stats{})
+}
+
+func (t *Tree23) UpsertWithStats(kvItems KeyValues, stats *Stats) *Tree23 {
+ promoted, _, intermediateKeys := upsert(t.root, kvItems, stats)
+ ensure(len(promoted) > 0, "nodes length is zero")
+ if len(promoted) == 1 {
+ t.root = promoted[0]
+ } else {
+ t.root = promote(promoted, intermediateKeys, stats)
+ }
+ stats.RehashedCount, stats.ClosingHashes = t.countUpsertRehashedNodes()
+ return t
+}
+
+func (t *Tree23) Delete(keyToDelete []Felt) *Tree23 {
+ return t.DeleteWithStats(keyToDelete, &Stats{})
+}
+
+func (t *Tree23) DeleteWithStats(keysToDelete []Felt, stats *Stats) *Tree23 {
+ newRoot, nextKey, intermediateKeys := del(t.root, keysToDelete, stats)
+ t.root, _ = demote(newRoot, nextKey, intermediateKeys, stats)
+ stats.RehashedCount, stats.ClosingHashes = t.countDeleteRehashedNodes()
+ return t
+}
+
+func (t *Tree23) countUpsertRehashedNodes() (rehashedCount uint, closingHashes uint) {
+ t.WalkPostOrder(func(n *Node23) interface{} {
+ if n.exposed {
+ rehashedCount++
+ closingHashes += n.howManyHashes()
+ }
+ return nil
+ })
+ return rehashedCount, closingHashes
+}
+
+func (t *Tree23) countDeleteRehashedNodes() (rehashedCount uint, closingHashes uint) {
+ t.WalkPostOrder(func(n *Node23) interface{} {
+ if n.updated {
+ rehashedCount++
+ closingHashes += n.howManyHashes()
+ }
+ return nil
+ })
+ return rehashedCount, closingHashes
+}
+
+func (t *Tree23) reset() {
+ if t.root == nil {
+ return
+ }
+ t.root.reset()
+}
diff --git a/erigon-lib/bptree/tree_test.go b/erigon-lib/bptree/tree_test.go
new file mode 100644
index 00000000000..6d0bc52a4cf
--- /dev/null
+++ b/erigon-lib/bptree/tree_test.go
@@ -0,0 +1,427 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func assertTwoThreeTree(t *testing.T, tree *Tree23, expectedKeysLevelOrder []Felt) {
+ t.Helper()
+ treeValid, err := tree.IsValid()
+ assert.True(t, treeValid, "2-3-tree properties do not hold for tree: %v, error: %v", tree.KeysInLevelOrder(), err)
+ if expectedKeysLevelOrder != nil {
+ assert.Equal(t, expectedKeysLevelOrder, tree.KeysInLevelOrder(), "different keys by level")
+ }
+}
+
+func require23Tree(t *testing.T, tree *Tree23, expectedKeysLevelOrder []Felt, input1, input2 []byte) {
+ t.Helper()
+ treeValid, err := tree.IsValid()
+ require.True(t, treeValid, "2-3-tree properties do not hold: input [%v %v] [%+q %+q], error: %v",
+ input1, input2, string(input1), string(input2), err)
+ if expectedKeysLevelOrder != nil {
+ assert.Equal(t, expectedKeysLevelOrder, tree.KeysInLevelOrder(), "different keys by level")
+ }
+}
+
+type HeightTest struct {
+ initialItems KeyValues
+ expectedHeight int
+}
+
+type IsTree23Test struct {
+ initialItems KeyValues
+ expectedKeysLevelOrder []Felt
+}
+
+type RootHashTest struct {
+ expectedHash string
+ initialItems KeyValues
+}
+
+type UpsertTest struct {
+ initialItems KeyValues
+ initialKeysLevelOrder []Felt
+ deltaItems KeyValues
+ finalKeysLevelOrder []Felt
+}
+
+type DeleteTest struct {
+ initialItems KeyValues
+ initialKeysLevelOrder []Felt
+ keysToDelete []Felt
+ finalKeysLevelOrder []Felt
+}
+
+func K(keys []Felt) KeyValues {
+ values := make([]Felt, len(keys))
+ copy(values, keys)
+ return KV(keys, values)
+}
+
+var heightTestTable = []HeightTest{
+ {K([]Felt{}), 0},
+ {K([]Felt{1}), 1},
+ {K([]Felt{1, 2}), 1},
+ {K([]Felt{1, 2, 3}), 2},
+ {K([]Felt{1, 2, 3, 4}), 2},
+ {K([]Felt{1, 2, 3, 4, 5}), 2},
+ {K([]Felt{1, 2, 3, 4, 5, 6}), 2},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7}), 3},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8}), 3},
+}
+
+var isTree23TestTable = []IsTree23Test{
+ {K([]Felt{}), []Felt{}},
+ {K([]Felt{1}), []Felt{1}},
+ {K([]Felt{1, 2}), []Felt{1, 2}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}},
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}},
+ {K([]Felt{1, 2, 3, 4, 5, 6}), []Felt{3, 5, 1, 2, 3, 4, 5, 6}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7}), []Felt{5, 3, 7, 1, 2, 3, 4, 5, 6, 7}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8}), []Felt{5, 3, 7, 1, 2, 3, 4, 5, 6, 7, 8}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9}), []Felt{5, 3, 7, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), []Felt{5, 3, 7, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}), []Felt{5, 9, 3, 7, 11, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), []Felt{5, 9, 3, 7, 11, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}), []Felt{9, 5, 13, 3, 7, 11, 15, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}), []Felt{9, 5, 13, 3, 7, 11, 15, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}},
+}
+
+var rootHashTestTable = []RootHashTest{
+ {"", K([]Felt{})},
+ {"532deabf88729cb43995ab5a9cd49bf9b90a079904dc0645ecda9e47ce7345a9", K([]Felt{1})},
+ {"d3782c59c224da5b6344108ef3431ba4e01d2c30b6570137a91b8b383908c361", K([]Felt{1, 2})},
+}
+
+var insertTestTable = []UpsertTest{
+ {K([]Felt{}), []Felt{}, K([]Felt{1}), []Felt{1}},
+ {K([]Felt{}), []Felt{}, K([]Felt{1, 2}), []Felt{1, 2}},
+ {K([]Felt{}), []Felt{}, K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}},
+ {K([]Felt{}), []Felt{}, K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}},
+
+ {K([]Felt{1}), []Felt{1}, K([]Felt{0}), []Felt{0, 1}},
+ {K([]Felt{1}), []Felt{1}, K([]Felt{2}), []Felt{1, 2}},
+ {K([]Felt{1}), []Felt{1}, K([]Felt{0, 2}), []Felt{2, 0, 1, 2}},
+ {K([]Felt{1}), []Felt{1}, K([]Felt{0, 2, 3}), []Felt{2, 0, 1, 2, 3}},
+ {K([]Felt{1}), []Felt{1}, K([]Felt{0, 2, 3, 4}), []Felt{2, 4, 0, 1, 2, 3, 4}},
+ {K([]Felt{2}), []Felt{2}, K([]Felt{0, 1, 3, 4}), []Felt{2, 4, 0, 1, 2, 3, 4}},
+ {K([]Felt{3}), []Felt{3}, K([]Felt{0, 1, 2, 4}), []Felt{2, 4, 0, 1, 2, 3, 4}},
+ {K([]Felt{4}), []Felt{4}, K([]Felt{0, 1, 2, 3}), []Felt{2, 4, 0, 1, 2, 3, 4}},
+
+ {K([]Felt{1, 2}), []Felt{1, 2}, K([]Felt{0}), []Felt{2, 0, 1, 2}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, K([]Felt{0, 3}), []Felt{2, 0, 1, 2, 3}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, K([]Felt{0, 3, 4}), []Felt{2, 4, 0, 1, 2, 3, 4}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, K([]Felt{0, 3, 4, 5}), []Felt{2, 4, 0, 1, 2, 3, 4, 5}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{0}), []Felt{3, 0, 2, 3}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{0, 1}), []Felt{2, 0, 1, 2, 3}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{5}), []Felt{5, 2, 3, 5}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{4, 5}), []Felt{4, 2, 3, 4, 5}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{0, 4, 5}), []Felt{3, 5, 0, 2, 3, 4, 5}},
+ {K([]Felt{2, 3}), []Felt{2, 3}, K([]Felt{0, 1, 4, 5}), []Felt{2, 4, 0, 1, 2, 3, 4, 5}},
+ {K([]Felt{4, 5}), []Felt{4, 5}, K([]Felt{0}), []Felt{5, 0, 4, 5}},
+ {K([]Felt{4, 5}), []Felt{4, 5}, K([]Felt{0, 1}), []Felt{4, 0, 1, 4, 5}},
+ {K([]Felt{4, 5}), []Felt{4, 5}, K([]Felt{0, 1, 2}), []Felt{2, 5, 0, 1, 2, 4, 5}},
+ {K([]Felt{4, 5}), []Felt{4, 5}, K([]Felt{0, 1, 2, 3}), []Felt{2, 4, 0, 1, 2, 3, 4, 5}},
+ {K([]Felt{1, 4}), []Felt{1, 4}, K([]Felt{0}), []Felt{4, 0, 1, 4}},
+ {K([]Felt{1, 4}), []Felt{1, 4}, K([]Felt{0, 2}), []Felt{2, 0, 1, 2, 4}},
+ {K([]Felt{1, 4}), []Felt{1, 4}, K([]Felt{0, 2, 5}), []Felt{2, 5, 0, 1, 2, 4, 5}},
+ {K([]Felt{1, 4}), []Felt{1, 4}, K([]Felt{0, 2, 3, 5}), []Felt{2, 4, 0, 1, 2, 3, 4, 5}},
+
+ {K([]Felt{1, 3, 5}), []Felt{5, 1, 3, 5}, K([]Felt{0}), []Felt{3, 5, 0, 1, 3, 5}},
+ {K([]Felt{1, 3, 5}), []Felt{5, 1, 3, 5}, K([]Felt{0, 2, 4}), []Felt{4, 2, 5, 0, 1, 2, 3, 4, 5}},
+ {K([]Felt{1, 3, 5}), []Felt{5, 1, 3, 5}, K([]Felt{6, 7, 8}), []Felt{5, 7, 1, 3, 5, 6, 7, 8}},
+ {K([]Felt{1, 3, 5}), []Felt{5, 1, 3, 5}, K([]Felt{6, 7, 8, 9}), []Felt{7, 5, 9, 1, 3, 5, 6, 7, 8, 9}},
+
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, K([]Felt{0}), []Felt{2, 3, 0, 1, 2, 3, 4}},
+ {K([]Felt{1, 3, 5, 7}), []Felt{5, 1, 3, 5, 7}, K([]Felt{0}), []Felt{3, 5, 0, 1, 3, 5, 7}},
+
+ {K([]Felt{1, 3, 5, 7, 9}), []Felt{5, 9, 1, 3, 5, 7, 9}, K([]Felt{0}), []Felt{5, 3, 9, 0, 1, 3, 5, 7, 9}},
+
+ // Debug
+ {K([]Felt{1, 2, 3, 5, 6, 7, 8}), []Felt{6, 3, 8, 1, 2, 3, 5, 6, 7, 8}, K([]Felt{4}), []Felt{6, 3, 5, 8, 1, 2, 3, 4, 5, 6, 7, 8}},
+
+ {
+ K([]Felt{10, 15, 20}),
+ []Felt{20, 10, 15, 20},
+ K([]Felt{1, 2, 3, 4, 5, 11, 13, 18, 19, 30, 31}),
+ []Felt{15, 5, 20, 3, 11, 19, 31, 1, 2, 3, 4, 5, 10, 11, 13, 15, 18, 19, 20, 30, 31},
+ },
+
+ {
+ K([]Felt{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20}),
+ []Felt{8, 16, 4, 12, 20, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20},
+ K([]Felt{1, 3, 5}),
+ []Felt{8, 4, 16, 2, 6, 12, 20, 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20},
+ },
+
+ {
+ K([]Felt{4, 10, 17, 85, 104, 107, 112, 115, 136, 156, 191}),
+ []Felt{104, 136, 17, 112, 191, 4, 10, 17, 85, 104, 107, 112, 115, 136, 156, 191},
+ K([]Felt{0, 96, 120, 129, 133, 164, 187, 189}),
+ nil,
+ },
+}
+
+var updateTestTable = []UpsertTest{
+ {K([]Felt{10}), []Felt{10}, KV([]Felt{10}, []Felt{100}), []Felt{10}},
+ {K([]Felt{10, 20}), []Felt{10, 20}, KV([]Felt{10, 20}, []Felt{100, 200}), []Felt{10, 20}},
+}
+
+var deleteTestTable = []DeleteTest{
+ /// POSITIVE TEST CASES
+ {K([]Felt{}), []Felt{}, []Felt{}, []Felt{}},
+
+ {K([]Felt{1}), []Felt{1}, []Felt{}, []Felt{1}},
+ {K([]Felt{1}), []Felt{1}, []Felt{1}, []Felt{}},
+
+ {K([]Felt{1, 2}), []Felt{1, 2}, []Felt{}, []Felt{1, 2}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, []Felt{1}, []Felt{2}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, []Felt{2}, []Felt{1}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, []Felt{1, 2}, []Felt{}},
+
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{}, []Felt{3, 1, 2, 3}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{1}, []Felt{2, 3}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{2}, []Felt{1, 3}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{3}, []Felt{1, 2}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{1, 2}, []Felt{3}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{1, 3}, []Felt{2}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{2, 3}, []Felt{1}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{1, 2, 3}, []Felt{}},
+
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, []Felt{1}, []Felt{3, 2, 3, 4}},
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, []Felt{2}, []Felt{3, 1, 3, 4}},
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, []Felt{3}, []Felt{4, 1, 2, 4}},
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, []Felt{4}, []Felt{3, 1, 2, 3}},
+
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{1}, []Felt{3, 5, 2, 3, 4, 5}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{2}, []Felt{3, 5, 1, 3, 4, 5}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{3}, []Felt{4, 5, 1, 2, 4, 5}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{4}, []Felt{3, 5, 1, 2, 3, 5}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{5}, []Felt{3, 1, 2, 3, 4}},
+ {K([]Felt{1, 2, 3, 4, 5, 6, 7}), []Felt{5, 3, 7, 1, 2, 3, 4, 5, 6, 7}, []Felt{7}, []Felt{3, 5, 1, 2, 3, 4, 5, 6}},
+
+ {K([]Felt{16, 25, 155, 182, 184, 210, 215}), []Felt{184, 155, 215, 16, 25, 155, 182, 184, 210, 215}, []Felt{155, 182}, []Felt{184, 215, 16, 25, 184, 210, 215}},
+
+ /// NEGATIVE TEST CASES
+ {K([]Felt{}), []Felt{}, []Felt{1}, []Felt{}},
+ {K([]Felt{1}), []Felt{1}, []Felt{2}, []Felt{1}},
+ {K([]Felt{1, 2}), []Felt{1, 2}, []Felt{3}, []Felt{1, 2}},
+ {K([]Felt{1, 2, 3}), []Felt{3, 1, 2, 3}, []Felt{4}, []Felt{3, 1, 2, 3}},
+ {K([]Felt{1, 2, 3, 4}), []Felt{3, 1, 2, 3, 4}, []Felt{5}, []Felt{3, 1, 2, 3, 4}},
+ {K([]Felt{1, 2, 3, 4, 5}), []Felt{3, 5, 1, 2, 3, 4, 5}, []Felt{6}, []Felt{3, 5, 1, 2, 3, 4, 5}},
+
+ /// MIXED TEST CASES
+ {K([]Felt{0, 46, 50, 89, 134, 218}), []Felt{50, 134, 0, 46, 50, 89, 134, 218}, []Felt{46, 50, 89, 134, 218}, []Felt{0}},
+}
+
+func TestHeight(t *testing.T) {
+ for _, data := range heightTestTable {
+ tree := NewTree23(data.initialItems)
+ assert.Equal(t, data.expectedHeight, tree.Height(), "different height")
+ }
+}
+
+func TestIs23Tree(t *testing.T) {
+ for _, data := range isTree23TestTable {
+ tree := NewTree23(data.initialItems)
+ //tree.GraphAndPicture("is23Tree")
+ assertTwoThreeTree(t, tree, data.expectedKeysLevelOrder)
+ }
+}
+
+func Test23TreeSeries(t *testing.T) {
+ maxNumberOfNodes := 100
+ for i := 0; i < maxNumberOfNodes; i++ {
+ kvPairs := KeyValues{make([]*Felt, 0), make([]*Felt, 0)}
+ for j := 0; j < i; j++ {
+ key, value := Felt(j), Felt(j)
+ kvPairs.keys = append(kvPairs.keys, &key)
+ kvPairs.values = append(kvPairs.values, &value)
+ }
+ tree := NewTree23(kvPairs)
+ assertTwoThreeTree(t, tree, nil)
+ }
+}
+
+func TestRootHash(t *testing.T) {
+ for _, data := range rootHashTestTable {
+ tree := NewTree23(data.initialItems)
+ assert.Equal(t, data.expectedHash, hex.EncodeToString(tree.RootHash()), "different root hash")
+ }
+}
+
+func TestUpsertInsert(t *testing.T) {
+ for _, data := range insertTestTable {
+ tree := NewTree23(data.initialItems)
+ assertTwoThreeTree(t, tree, data.initialKeysLevelOrder)
+ //tree.GraphAndPicture("tree_step1")
+ tree.Upsert(data.deltaItems)
+ //tree.GraphAndPicture("tree_step2")
+ assertTwoThreeTree(t, tree, data.finalKeysLevelOrder)
+ }
+}
+
+func TestUpsertUpdate(t *testing.T) {
+ for _, data := range updateTestTable {
+ tree := NewTree23(data.initialItems)
+ assertTwoThreeTree(t, tree, data.initialKeysLevelOrder)
+ // TODO: add check for old values
+ tree.Upsert(data.deltaItems)
+ assertTwoThreeTree(t, tree, data.finalKeysLevelOrder)
+ // TODO: add check for new values
+ }
+}
+
+func TestUpsertIdempotent(t *testing.T) {
+ for _, data := range isTree23TestTable {
+ tree := NewTree23(data.initialItems)
+ assertTwoThreeTree(t, tree, data.expectedKeysLevelOrder)
+ tree.Upsert(data.initialItems)
+ assertTwoThreeTree(t, tree, data.expectedKeysLevelOrder)
+ }
+}
+
+func TestUpsertNextKey(t *testing.T) {
+ dataCount := 4
+ data := KeyValues{make([]*Felt, dataCount), make([]*Felt, dataCount)}
+ for i := 0; i < dataCount; i++ {
+ key, value := Felt(i*2), Felt(i*2)
+ data.keys[i], data.values[i] = &key, &value
+ }
+ tn := NewTree23(data)
+ //tn.GraphAndPicture("tn1")
+
+ for i := 0; i < dataCount; i++ {
+ key, value := Felt(i*2+1), Felt(i*2+1)
+ data.keys[i], data.values[i] = &key, &value
+ }
+ tn = tn.Upsert(data)
+ //tn.GraphAndPicture("tn2")
+ assertTwoThreeTree(t, tn, []Felt{4, 2, 6, 0, 1, 2, 3, 4, 5, 6, 7})
+
+ data = K([]Felt{100, 101, 200, 201, 202})
+ tn = tn.Upsert(data)
+ //tn.GraphAndPicture("tn3")
+ assertTwoThreeTree(t, tn, []Felt{4, 100, 2, 6, 200, 202, 0, 1, 2, 3, 4, 5, 6, 7, 100, 101, 200, 201, 202})
+
+ data = K([]Felt{10, 150, 250, 251, 252})
+ tn = tn.Upsert(data)
+ //tn.GraphAndPicture("tn4")
+ assertTwoThreeTree(t, tn, []Felt{100, 4, 200, 2, 6, 10, 150, 202, 251, 0, 1, 2, 3, 4, 5, 6, 7, 10, 100, 101, 150, 200, 201, 202, 250, 251, 252})
+}
+
+func TestUpsertFirstKey(t *testing.T) {
+}
+
+func TestDelete(t *testing.T) {
+ for _, data := range deleteTestTable {
+ tree := NewTree23(data.initialItems)
+ assertTwoThreeTree(t, tree, data.initialKeysLevelOrder)
+ //tree.GraphAndPicture("tree_delete1")
+ tree.Delete(data.keysToDelete)
+ //tree.GraphAndPicture("tree_delete2")
+ assertTwoThreeTree(t, tree, data.finalKeysLevelOrder)
+ }
+}
+
+func FuzzUpsert(f *testing.F) {
+ f.Fuzz(func(t *testing.T, input1, input2 []byte) {
+ //t.Parallel()
+ keyFactory := NewKeyBinaryFactory(1)
+ bytesReader1 := bytes.NewReader(input1)
+ kvStatePairs := keyFactory.NewUniqueKeyValues(bufio.NewReader(bytesReader1))
+ require.True(t, sort.IsSorted(kvStatePairs), "kvStatePairs is not sorted")
+ bytesReader2 := bytes.NewReader(input2)
+ kvStateChangesPairs := keyFactory.NewUniqueKeyValues(bufio.NewReader(bytesReader2))
+ //fmt.Printf("kvStatePairs=%v kvStateChangesPairs=%v\n", kvStatePairs, kvStateChangesPairs)
+ require.True(t, sort.IsSorted(kvStateChangesPairs), "kvStateChangesPairs is not sorted")
+ tree := NewTree23(kvStatePairs)
+ //tree.GraphAndPicture("fuzz_tree_upsert1")
+ assertTwoThreeTree(t, tree, nil)
+ tree = tree.Upsert(kvStateChangesPairs)
+ //tree.GraphAndPicture("fuzz_tree_upsert2")
+ assertTwoThreeTree(t, tree, nil)
+ })
+}
+
+func FuzzDelete(f *testing.F) {
+ f.Fuzz(func(t *testing.T, input1, input2 []byte) {
+ //t.Parallel()
+ //fmt.Printf("input1=%v input2=%v\n", input1, input2)
+ keyFactory := NewKeyBinaryFactory(1)
+ bytesReader1 := bytes.NewReader(input1)
+ kvStatePairs := keyFactory.NewUniqueKeyValues(bufio.NewReader(bytesReader1))
+ require.True(t, sort.IsSorted(kvStatePairs), "kvStatePairs is not sorted")
+ bytesReader2 := bytes.NewReader(input2)
+ keysToDelete := keyFactory.NewUniqueKeys(bufio.NewReader(bytesReader2))
+ //fmt.Printf("kvStatePairs=%v keysToDelete=%v\n", kvStatePairs, keysToDelete)
+ require.True(t, sort.IsSorted(keysToDelete), "keysToDelete is not sorted")
+ tree1 := NewTree23(kvStatePairs)
+ //tree1.GraphAndPicture("fuzz_tree_delete1")
+ require23Tree(t, tree1, nil, input1, input2)
+ tree2 := tree1.Delete(keysToDelete)
+ //tree2.GraphAndPicture("fuzz_tree_delete2")
+ require23Tree(t, tree2, nil, input1, input2)
+ // TODO: check the difference properties
+ // Check that *each* T1 node is present either in Td or in T2
+ // Check that *each* T2 node is not present in Td
+ // Check that *each* Td node is present in T1 but not in T2
+ })
+}
+
+func BenchmarkNewTree23(b *testing.B) {
+ const dataCount = 1_000_000
+ data := KeyValues{make([]*Felt, dataCount), make([]*Felt, dataCount)}
+ for i := 0; i < dataCount; i++ {
+ key, value := Felt(i*2), Felt(i*2)
+ data.keys[i], data.values[i] = &key, &value
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTree23(data)
+ }
+}
+
+func BenchmarkUpsert(b *testing.B) {
+ dataCount := 5_000_000
+ data := KeyValues{make([]*Felt, dataCount), make([]*Felt, dataCount)}
+ for i := 0; i < dataCount; i++ {
+ key, value := Felt(i*2), Felt(i*2)
+ data.keys[i], data.values[i] = &key, &value
+ }
+ tree := NewTree23(data)
+ dataCount = 500_000
+ data = KeyValues{make([]*Felt, dataCount), make([]*Felt, dataCount)}
+ for i := 0; i < dataCount; i++ {
+ key, value := Felt(i*2+1), Felt(i*2+1)
+ data.keys[i], data.values[i] = &key, &value
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ tree.Upsert(data)
+ }
+}
diff --git a/erigon-lib/bptree/util.go b/erigon-lib/bptree/util.go
new file mode 100644
index 00000000000..5751008ab66
--- /dev/null
+++ b/erigon-lib/bptree/util.go
@@ -0,0 +1,25 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bptree
+
+const BufferSize uint = 4096
+
+func ensure(condition bool, message string) {
+ if !condition {
+ panic(message)
+ }
+}
diff --git a/erigon-lib/chain/aura_config.go b/erigon-lib/chain/aura_config.go
new file mode 100644
index 00000000000..a8fa90190f2
--- /dev/null
+++ b/erigon-lib/chain/aura_config.go
@@ -0,0 +1,93 @@
+/*
+ Copyright 2023 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package chain
+
+import (
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+)
+
+// Different ways of specifying validators.
+type ValidatorSetJson struct {
+ // A simple list of authorities.
+ List []common.Address `json:"list"`
+ // Address of a contract that indicates the list of authorities.
+ SafeContract *common.Address `json:"safeContract"`
+ // Address of a contract that indicates the list of authorities and enables reporting of their misbehaviour using transactions.
+ Contract *common.Address `json:"contract"`
+ // A map of starting blocks for each validator set.
+ Multi map[uint64]*ValidatorSetJson `json:"multi"`
+}
+
+// AuRaConfig is the consensus engine configs for proof-of-authority based sealing.
+type AuRaConfig struct {
+ StepDuration *uint64 `json:"stepDuration"` // Block duration, in seconds.
+ Validators *ValidatorSetJson `json:"validators"` // Valid authorities
+
+ // Starting step. Determined automatically if not specified.
+ // To be used for testing only.
+ StartStep *uint64 `json:"startStep"`
+ ValidateScoreTransition *uint64 `json:"validateScoreTransition"` // Block at which score validation should start.
+ ValidateStepTransition *uint64 `json:"validateStepTransition"` // Block from which monotonic steps start.
+ ImmediateTransitions *bool `json:"immediateTransitions"` // Whether transitions should be immediate.
+ BlockReward *uint64 `json:"blockReward"` // Reward per block in wei.
+ // Block at which the block reward contract should start being used. This option allows one to
+ // add a single block reward contract transition and is compatible with the multiple address
+ // option `block_reward_contract_transitions` below.
+ BlockRewardContractTransition *uint64 `json:"blockRewardContractTransition"`
+ /// Block reward contract address which overrides the `block_reward` setting. This option allows
+ /// one to add a single block reward contract address and is compatible with the multiple
+ /// address option `block_reward_contract_transitions` below.
+ BlockRewardContractAddress *common.Address `json:"blockRewardContractAddress"`
+ // Block reward contract addresses with their associated starting block numbers.
+ //
+ // Setting the block reward contract overrides `block_reward`. If the single block reward
+ // contract address is also present then it is added into the map at the block number stored in
+ // `block_reward_contract_transition` or 0 if that block number is not provided. Therefore both
+ // a single block reward contract transition and a map of reward contract transitions can be
+ // used simultaneously in the same configuration. In such a case the code requires that the
+ // block number of the single transition is strictly less than any of the block numbers in the
+ // map.
+ BlockRewardContractTransitions map[uint]common.Address `json:"blockRewardContractTransitions"`
+ // Block at which maximum uncle count should be considered.
+ MaximumUncleCountTransition *uint64 `json:"maximumUncleCountTransition"`
+ // Maximum number of accepted uncles.
+ MaximumUncleCount *uint `json:"maximumUncleCount"`
+ // Strict validation of empty steps transition block.
+ StrictEmptyStepsTransition *uint `json:"strictEmptyStepsTransition"`
+ // The random number contract's address, or a map of contract transitions.
+ RandomnessContractAddress map[uint64]common.Address `json:"randomnessContractAddress"`
+ // The addresses of contracts that determine the block gas limit starting from the block number
+ // associated with each of those contracts.
+ BlockGasLimitContractTransitions map[uint64]common.Address `json:"blockGasLimitContractTransitions"`
+ // The block number at which the consensus engine switches from AuRa to AuRa with POSDAO
+ // modifications.
+ PosdaoTransition *uint64 `json:"PosdaoTransition"`
+ // Stores human-readable keys associated with addresses, like DNS information.
+ // This contract is primarily required to store the address of the Certifier contract.
+ Registrar *common.Address `json:"registrar"`
+
+ // See https://github.com/gnosischain/specs/blob/master/execution/withdrawals.md
+ WithdrawalContractAddress *common.Address `json:"withdrawalContractAddress"`
+
+ RewriteBytecode map[uint64]map[common.Address]hexutility.Bytes `json:"rewriteBytecode"`
+}
+
+// String implements the stringer interface, returning the consensus engine details.
+func (c *AuRaConfig) String() string {
+ return "aura"
+}
diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go
new file mode 100644
index 00000000000..6e93c59ff90
--- /dev/null
+++ b/erigon-lib/chain/chain_config.go
@@ -0,0 +1,679 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package chain
+
+import (
+ "fmt"
+ "math/big"
+ "sort"
+ "strconv"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/fixedgas"
+)
+
+// Config is the core config which determines the blockchain settings.
+//
+// Config is stored in the database on a per block basis. This means
+// that any network, identified by its genesis block, can have its own
+// set of configuration options.
+type Config struct {
+ ChainName string
+ ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection
+
+ Consensus ConsensusName `json:"consensus,omitempty"` // aura, ethash or clique
+
+ // *Block fields activate the corresponding hard fork at a certain block number,
+ // while *Time fields do so based on the block's time stamp.
+ // nil means that the hard-fork is not scheduled,
+ // while 0 means that it's already activated from genesis.
+
+ // ETH mainnet upgrades
+ // See https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades
+ HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"`
+ DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"`
+ TangerineWhistleBlock *big.Int `json:"eip150Block,omitempty"`
+ SpuriousDragonBlock *big.Int `json:"eip155Block,omitempty"`
+ ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"`
+ ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"`
+ PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"`
+ IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"`
+ MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"`
+ BerlinBlock *big.Int `json:"berlinBlock,omitempty"`
+ LondonBlock *big.Int `json:"londonBlock,omitempty"`
+ ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"`
+ GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"`
+
+ // EIP-3675: Upgrade consensus to Proof-of-Stake (a.k.a. "Paris", "The Merge")
+ TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached
+ TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"` // Disable PoW sync for networks that have already passed through the Merge
+ MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter; see FORK_NEXT_VALUE in EIP-3675
+
+ // Mainnet fork scheduling switched from block numbers to timestamps after The Merge
+ ShanghaiTime *big.Int `json:"shanghaiTime,omitempty"`
+ CancunTime *big.Int `json:"cancunTime,omitempty"`
+ PragueTime *big.Int `json:"pragueTime,omitempty"`
+
+ // Optional EIP-4844 parameters
+ MinBlobGasPrice *uint64 `json:"minBlobGasPrice,omitempty"`
+ MaxBlobGasPerBlock *uint64 `json:"maxBlobGasPerBlock,omitempty"`
+ TargetBlobGasPerBlock *uint64 `json:"targetBlobGasPerBlock,omitempty"`
+ BlobGasPriceUpdateFraction *uint64 `json:"blobGasPriceUpdateFraction,omitempty"`
+
+ // (Optional) governance contract where EIP-1559 fees will be sent to that otherwise would be burnt since the London fork
+ BurntContract map[string]common.Address `json:"burntContract,omitempty"`
+
+ // Various consensus engines
+ Ethash *EthashConfig `json:"ethash,omitempty"`
+ Clique *CliqueConfig `json:"clique,omitempty"`
+ Aura *AuRaConfig `json:"aura,omitempty"`
+ Bor *BorConfig `json:"bor,omitempty"`
+}
+
+func (c *Config) String() string {
+ engine := c.getEngine()
+
+ return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Shanghai: %v, Cancun: %v, Prague: %v, Engine: %v}",
+ c.ChainID,
+ c.HomesteadBlock,
+ c.DAOForkBlock,
+ c.TangerineWhistleBlock,
+ c.SpuriousDragonBlock,
+ c.ByzantiumBlock,
+ c.ConstantinopleBlock,
+ c.PetersburgBlock,
+ c.IstanbulBlock,
+ c.MuirGlacierBlock,
+ c.BerlinBlock,
+ c.LondonBlock,
+ c.ArrowGlacierBlock,
+ c.GrayGlacierBlock,
+ c.TerminalTotalDifficulty,
+ c.MergeNetsplitBlock,
+ c.ShanghaiTime,
+ c.CancunTime,
+ c.PragueTime,
+ engine,
+ )
+}
+
+func (c *Config) getEngine() string {
+ switch {
+ case c.Ethash != nil:
+ return c.Ethash.String()
+ case c.Clique != nil:
+ return c.Clique.String()
+ case c.Bor != nil:
+ return c.Bor.String()
+ case c.Aura != nil:
+ return c.Aura.String()
+ default:
+ return "unknown"
+ }
+}
+
+// IsHomestead returns whether num is either equal to the homestead block or greater.
+func (c *Config) IsHomestead(num uint64) bool {
+ return isForked(c.HomesteadBlock, num)
+}
+
+// IsDAOFork returns whether num is either equal to the DAO fork block or greater.
+func (c *Config) IsDAOFork(num uint64) bool {
+ return isForked(c.DAOForkBlock, num)
+}
+
+// IsTangerineWhistle returns whether num is either equal to the Tangerine Whistle (EIP150) fork block or greater.
+func (c *Config) IsTangerineWhistle(num uint64) bool {
+ return isForked(c.TangerineWhistleBlock, num)
+}
+
+// IsSpuriousDragon returns whether num is either equal to the Spurious Dragon fork block or greater.
+func (c *Config) IsSpuriousDragon(num uint64) bool {
+ return isForked(c.SpuriousDragonBlock, num)
+}
+
+// IsByzantium returns whether num is either equal to the Byzantium fork block or greater.
+func (c *Config) IsByzantium(num uint64) bool {
+ return isForked(c.ByzantiumBlock, num)
+}
+
+// IsConstantinople returns whether num is either equal to the Constantinople fork block or greater.
+func (c *Config) IsConstantinople(num uint64) bool {
+ return isForked(c.ConstantinopleBlock, num)
+}
+
+// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
+func (c *Config) IsMuirGlacier(num uint64) bool {
+ return isForked(c.MuirGlacierBlock, num)
+}
+
+// IsPetersburg returns whether num is either
+// - equal to or greater than the PetersburgBlock fork block,
+// - OR is nil, and Constantinople is active
+func (c *Config) IsPetersburg(num uint64) bool {
+ return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num)
+}
+
+// IsIstanbul returns whether num is either equal to the Istanbul fork block or greater.
+func (c *Config) IsIstanbul(num uint64) bool {
+ return isForked(c.IstanbulBlock, num)
+}
+
+// IsBerlin returns whether num is either equal to the Berlin fork block or greater.
+func (c *Config) IsBerlin(num uint64) bool {
+ return isForked(c.BerlinBlock, num)
+}
+
+// IsLondon returns whether num is either equal to the London fork block or greater.
+func (c *Config) IsLondon(num uint64) bool {
+ return isForked(c.LondonBlock, num)
+}
+
+// IsArrowGlacier returns whether num is either equal to the Arrow Glacier (EIP-4345) fork block or greater.
+func (c *Config) IsArrowGlacier(num uint64) bool {
+ return isForked(c.ArrowGlacierBlock, num)
+}
+
+// IsGrayGlacier returns whether num is either equal to the Gray Glacier (EIP-5133) fork block or greater.
+func (c *Config) IsGrayGlacier(num uint64) bool {
+ return isForked(c.GrayGlacierBlock, num)
+}
+
+// IsShanghai returns whether time is either equal to the Shanghai fork time or greater.
+func (c *Config) IsShanghai(time uint64) bool {
+ return isForked(c.ShanghaiTime, time)
+}
+
+// IsAgra returns whether num is either equal to the Agra fork block or greater.
+// The Agra hard fork is based on the Shanghai hard fork, but it doesn't include withdrawals.
+// Also Agra is activated based on the block number rather than the timestamp.
+// Refer to https://forum.polygon.technology/t/pip-28-agra-hardfork
+func (c *Config) IsAgra(num uint64) bool {
+ if c == nil || c.Bor == nil {
+ return false
+ }
+ return isForked(c.Bor.AgraBlock, num)
+}
+
+// IsCancun returns whether time is either equal to the Cancun fork time or greater.
+func (c *Config) IsCancun(time uint64) bool {
+ return isForked(c.CancunTime, time)
+}
+
+// IsPrague returns whether time is either equal to the Prague fork time or greater.
+func (c *Config) IsPrague(time uint64) bool {
+ return isForked(c.PragueTime, time)
+}
+
+func (c *Config) GetBurntContract(num uint64) *common.Address {
+ if len(c.BurntContract) == 0 {
+ return nil
+ }
+ addr := borKeyValueConfigHelper(c.BurntContract, num)
+ return &addr
+}
+
+func (c *Config) GetMinBlobGasPrice() uint64 {
+ if c.MinBlobGasPrice != nil {
+ return *c.MinBlobGasPrice
+ }
+ return 1 // MIN_BLOB_GASPRICE (EIP-4844)
+}
+
+func (c *Config) GetMaxBlobGasPerBlock() uint64 {
+ if c.MaxBlobGasPerBlock != nil {
+ return *c.MaxBlobGasPerBlock
+ }
+ return 786432 // MAX_BLOB_GAS_PER_BLOCK (EIP-4844)
+}
+
+func (c *Config) GetTargetBlobGasPerBlock() uint64 {
+ if c.TargetBlobGasPerBlock != nil {
+ return *c.TargetBlobGasPerBlock
+ }
+ return 393216 // TARGET_BLOB_GAS_PER_BLOCK (EIP-4844)
+}
+
+func (c *Config) GetBlobGasPriceUpdateFraction() uint64 {
+ if c.BlobGasPriceUpdateFraction != nil {
+ return *c.BlobGasPriceUpdateFraction
+ }
+ return 3338477 // BLOB_GASPRICE_UPDATE_FRACTION (EIP-4844)
+}
+
+func (c *Config) GetMaxBlobsPerBlock() uint64 {
+ return c.GetMaxBlobGasPerBlock() / fixedgas.BlobGasPerBlob
+}
+
+// CheckCompatible checks whether scheduled fork transitions have been imported
+// with a mismatching chain configuration.
+func (c *Config) CheckCompatible(newcfg *Config, height uint64) *ConfigCompatError {
+ bhead := height
+
+ // Iterate checkCompatible to find the lowest conflict.
+ var lasterr *ConfigCompatError
+ for {
+ err := c.checkCompatible(newcfg, bhead)
+ if err == nil || (lasterr != nil && err.RewindTo == lasterr.RewindTo) {
+ break
+ }
+ lasterr = err
+ bhead = err.RewindTo
+ }
+ return lasterr
+}
+
+type forkBlockNumber struct {
+ name string
+ blockNumber *big.Int
+ optional bool // if true, the fork may be nil and next fork is still allowed
+}
+
+func (c *Config) forkBlockNumbers() []forkBlockNumber {
+ return []forkBlockNumber{
+ {name: "homesteadBlock", blockNumber: c.HomesteadBlock},
+ {name: "daoForkBlock", blockNumber: c.DAOForkBlock, optional: true},
+ {name: "eip150Block", blockNumber: c.TangerineWhistleBlock},
+ {name: "eip155Block", blockNumber: c.SpuriousDragonBlock},
+ {name: "byzantiumBlock", blockNumber: c.ByzantiumBlock},
+ {name: "constantinopleBlock", blockNumber: c.ConstantinopleBlock},
+ {name: "petersburgBlock", blockNumber: c.PetersburgBlock},
+ {name: "istanbulBlock", blockNumber: c.IstanbulBlock},
+ {name: "muirGlacierBlock", blockNumber: c.MuirGlacierBlock, optional: true},
+ {name: "berlinBlock", blockNumber: c.BerlinBlock},
+ {name: "londonBlock", blockNumber: c.LondonBlock},
+ {name: "arrowGlacierBlock", blockNumber: c.ArrowGlacierBlock, optional: true},
+ {name: "grayGlacierBlock", blockNumber: c.GrayGlacierBlock, optional: true},
+ {name: "mergeNetsplitBlock", blockNumber: c.MergeNetsplitBlock, optional: true},
+ }
+}
+
+// CheckConfigForkOrder checks that we don't "skip" any forks
+func (c *Config) CheckConfigForkOrder() error {
+ if c != nil && c.ChainID != nil && c.ChainID.Uint64() == 77 {
+ return nil
+ }
+
+ var lastFork forkBlockNumber
+
+ for _, fork := range c.forkBlockNumbers() {
+ if lastFork.name != "" {
+ // Next one must be higher number
+ if lastFork.blockNumber == nil && fork.blockNumber != nil {
+ return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v",
+ lastFork.name, fork.name, fork.blockNumber)
+ }
+ if lastFork.blockNumber != nil && fork.blockNumber != nil {
+ if lastFork.blockNumber.Cmp(fork.blockNumber) > 0 {
+ return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v",
+ lastFork.name, lastFork.blockNumber, fork.name, fork.blockNumber)
+ }
+ }
+ // If it was optional and not set, then ignore it
+ }
+ if !fork.optional || fork.blockNumber != nil {
+ lastFork = fork
+ }
+ }
+ return nil
+}
+
+func (c *Config) checkCompatible(newcfg *Config, head uint64) *ConfigCompatError {
+ // returns true if a fork scheduled at s1 cannot be rescheduled to block s2 because head is already past the fork.
+ incompatible := func(s1, s2 *big.Int, head uint64) bool {
+ return (isForked(s1, head) || isForked(s2, head)) && !numEqual(s1, s2)
+ }
+
+ // Ethereum mainnet forks
+ if incompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) {
+ return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock)
+ }
+ if incompatible(c.DAOForkBlock, newcfg.DAOForkBlock, head) {
+ return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock)
+ }
+ if incompatible(c.TangerineWhistleBlock, newcfg.TangerineWhistleBlock, head) {
+ return newCompatError("Tangerine Whistle fork block", c.TangerineWhistleBlock, newcfg.TangerineWhistleBlock)
+ }
+ if incompatible(c.SpuriousDragonBlock, newcfg.SpuriousDragonBlock, head) {
+ return newCompatError("Spurious Dragon fork block", c.SpuriousDragonBlock, newcfg.SpuriousDragonBlock)
+ }
+ if c.IsSpuriousDragon(head) && !numEqual(c.ChainID, newcfg.ChainID) {
+ return newCompatError("EIP155 chain ID", c.SpuriousDragonBlock, newcfg.SpuriousDragonBlock)
+ }
+ if incompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, head) {
+ return newCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock)
+ }
+ if incompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
+ return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
+ }
+ if incompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) {
+ // the only case where we allow Petersburg to be set in the past is if it is equal to Constantinople
+ // mainly to satisfy fork ordering requirements which state that Petersburg fork be set if Constantinople fork is set
+ if incompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, head) {
+ return newCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
+ }
+ }
+ if incompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) {
+ return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
+ }
+ if incompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
+ return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
+ }
+ if incompatible(c.BerlinBlock, newcfg.BerlinBlock, head) {
+ return newCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock)
+ }
+ if incompatible(c.LondonBlock, newcfg.LondonBlock, head) {
+ return newCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock)
+ }
+ if incompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, head) {
+ return newCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock)
+ }
+ if incompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) {
+ return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock)
+ }
+ if incompatible(c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock, head) {
+ return newCompatError("Merge netsplit block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock)
+ }
+
+ return nil
+}
+
+func numEqual(x, y *big.Int) bool {
+ if x == nil {
+ return y == nil
+ }
+ if y == nil {
+ return x == nil
+ }
+ return x.Cmp(y) == 0
+}
+
+// ConfigCompatError is raised if the locally-stored blockchain is initialised with a
+// ChainConfig that would alter the past.
+type ConfigCompatError struct {
+ What string
+ // block numbers of the stored and new configurations
+ StoredConfig, NewConfig *big.Int
+ // the block number to which the local chain must be rewound to correct the error
+ RewindTo uint64
+}
+
+func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
+ var rew *big.Int
+ switch {
+ case storedblock == nil:
+ rew = newblock
+ case newblock == nil || storedblock.Cmp(newblock) < 0:
+ rew = storedblock
+ default:
+ rew = newblock
+ }
+ err := &ConfigCompatError{what, storedblock, newblock, 0}
+ if rew != nil && rew.Sign() > 0 {
+ err.RewindTo = rew.Uint64() - 1
+ }
+ return err
+}
+
+func (err *ConfigCompatError) Error() string {
+ return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo)
+}
+
+// EthashConfig is the consensus engine configs for proof-of-work based sealing.
+type EthashConfig struct{}
+
+// String implements the stringer interface, returning the consensus engine details.
+func (c *EthashConfig) String() string {
+ return "ethash"
+}
+
+// CliqueConfig is the consensus engine configs for proof-of-authority based sealing.
+type CliqueConfig struct {
+ Period uint64 `json:"period"` // Number of seconds between blocks to enforce
+ Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint
+}
+
+// String implements the stringer interface, returning the consensus engine details.
+func (c *CliqueConfig) String() string {
+ return "clique"
+}
+
+// BorConfig is the consensus engine configs for Matic bor based sealing.
+type BorConfig struct {
+ Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce
+ ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval
+ Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer
+ BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time
+ ValidatorContract string `json:"validatorContract"` // Validator set contract
+ StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract
+
+ OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count
+ BlockAlloc map[string]interface{} `json:"blockAlloc"`
+
+ JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur)
+ DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi)
+ IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on indore)
+ AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already in agra)
+ StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to`
+
+ ParallelUniverseBlock *big.Int `json:"parallelUniverseBlock"` // TODO: update all occurrence, change name and finalize number (hardfork for block-stm related changes)
+
+ sprints sprints
+}
+
+// String implements the stringer interface, returning the consensus engine details.
+func (b *BorConfig) String() string {
+ return "bor"
+}
+
+func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 {
+ return borKeyValueConfigHelper(c.ProducerDelay, number)
+}
+
+func (c *BorConfig) CalculateSprint(number uint64) uint64 {
+ if c.sprints == nil {
+ c.sprints = asSprints(c.Sprint)
+ }
+
+ for i := 0; i < len(c.sprints)-1; i++ {
+ if number >= c.sprints[i].from && number < c.sprints[i+1].from {
+ return c.sprints[i].size
+ }
+ }
+
+ return c.sprints[len(c.sprints)-1].size
+}
+
+func (c *BorConfig) CalculateSprintCount(from, to uint64) int {
+ switch {
+ case from > to:
+ return 0
+ case from < to:
+ to--
+ }
+
+ if c.sprints == nil {
+ c.sprints = asSprints(c.Sprint)
+ }
+
+ count := uint64(0)
+ startCalc := from
+
+ zeroth := func(boundary uint64, size uint64) uint64 {
+ if boundary%size == 0 {
+ return 1
+ }
+
+ return 0
+ }
+
+ for i := 0; i < len(c.sprints)-1; i++ {
+ if startCalc >= c.sprints[i].from && startCalc < c.sprints[i+1].from {
+ if to >= c.sprints[i].from && to < c.sprints[i+1].from {
+ if startCalc == to {
+ return int(count + zeroth(startCalc, c.sprints[i].size))
+ }
+ return int(count + zeroth(startCalc, c.sprints[i].size) + (to-startCalc)/c.sprints[i].size)
+ } else {
+ endCalc := c.sprints[i+1].from - 1
+ count += zeroth(startCalc, c.sprints[i].size) + (endCalc-startCalc)/c.sprints[i].size
+ startCalc = endCalc + 1
+ }
+ }
+ }
+
+ if startCalc == to {
+ return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size))
+ }
+
+ return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size) + (to-startCalc)/c.sprints[len(c.sprints)-1].size)
+}
+
+func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 {
+ return borKeyValueConfigHelper(c.BackupMultiplier, number)
+}
+
+func (c *BorConfig) CalculatePeriod(number uint64) uint64 {
+ return borKeyValueConfigHelper(c.Period, number)
+}
+
+func (c *BorConfig) IsJaipur(number uint64) bool {
+ return isForked(c.JaipurBlock, number)
+}
+
+func (c *BorConfig) IsDelhi(number uint64) bool {
+ return isForked(c.DelhiBlock, number)
+}
+
+func (c *BorConfig) IsIndore(number uint64) bool {
+ return isForked(c.IndoreBlock, number)
+}
+
+// TODO: modify this function once the block number is finalized
+func (c *BorConfig) IsParallelUniverse(number uint64) bool {
+ if c.ParallelUniverseBlock != nil {
+ if c.ParallelUniverseBlock.Cmp(big.NewInt(0)) == 0 {
+ return false
+ }
+ }
+
+ return isForked(c.ParallelUniverseBlock, number)
+}
+
+func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 {
+ return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number)
+}
+
+func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, number uint64) T {
+ fieldUint := make(map[uint64]T)
+ for k, v := range field {
+ keyUint, err := strconv.ParseUint(k, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ fieldUint[keyUint] = v
+ }
+
+ keys := common.SortedKeys(fieldUint)
+
+ for i := 0; i < len(keys)-1; i++ {
+ if number >= keys[i] && number < keys[i+1] {
+ return fieldUint[keys[i]]
+ }
+ }
+
+ return fieldUint[keys[len(keys)-1]]
+}
+
+type sprint struct {
+ from, size uint64
+}
+
+type sprints []sprint
+
+func (s sprints) Len() int {
+ return len(s)
+}
+
+func (s sprints) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s sprints) Less(i, j int) bool {
+ return s[i].from < s[j].from
+}
+
+func asSprints(configSprints map[string]uint64) sprints {
+ sprints := make(sprints, len(configSprints))
+
+ i := 0
+ for key, value := range configSprints {
+ sprints[i].from, _ = strconv.ParseUint(key, 10, 64)
+ sprints[i].size = value
+ i++
+ }
+
+ sort.Sort(sprints)
+
+ return sprints
+}
+
+// Rules is syntactic sugar over Config. It can be used for functions
+// that do not have or require information about the block.
+//
+// Rules is a one time interface meaning that it shouldn't be used in between transition
+// phases.
+type Rules struct {
+ ChainID *big.Int
+ IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool
+ IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
+ IsBerlin, IsLondon, IsShanghai, IsCancun, IsPrague bool
+ IsAura bool
+}
+
+// Rules ensures c's ChainID is not nil and returns a new Rules instance
+func (c *Config) Rules(num uint64, time uint64) *Rules {
+ chainID := c.ChainID
+ if chainID == nil {
+ chainID = new(big.Int)
+ }
+
+ return &Rules{
+ ChainID: new(big.Int).Set(chainID),
+ IsHomestead: c.IsHomestead(num),
+ IsTangerineWhistle: c.IsTangerineWhistle(num),
+ IsSpuriousDragon: c.IsSpuriousDragon(num),
+ IsByzantium: c.IsByzantium(num),
+ IsConstantinople: c.IsConstantinople(num),
+ IsPetersburg: c.IsPetersburg(num),
+ IsIstanbul: c.IsIstanbul(num),
+ IsBerlin: c.IsBerlin(num),
+ IsLondon: c.IsLondon(num),
+ IsShanghai: c.IsShanghai(time) || c.IsAgra(num),
+ IsCancun: c.IsCancun(time),
+ IsPrague: c.IsPrague(time),
+ IsAura: c.Aura != nil,
+ }
+}
+
+// isForked returns whether a fork scheduled at block s is active at the given head block.
+func isForked(s *big.Int, head uint64) bool {
+ if s == nil {
+ return false
+ }
+ return s.Uint64() <= head
+}
diff --git a/erigon-lib/chain/chain_config_test.go b/erigon-lib/chain/chain_config_test.go
new file mode 100644
index 00000000000..990202dd1c7
--- /dev/null
+++ b/erigon-lib/chain/chain_config_test.go
@@ -0,0 +1,68 @@
+/*
+ Copyright 2023 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package chain
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+)
+
+func TestBorKeyValueConfigHelper(t *testing.T) {
+ backupMultiplier := map[string]uint64{
+ "0": 2,
+ "25275000": 5,
+ "29638656": 2,
+ }
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 0), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 1), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 25275000-1), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 25275000), uint64(5))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 25275000+1), uint64(5))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 29638656-1), uint64(5))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 29638656), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(backupMultiplier, 29638656+1), uint64(2))
+
+ config := map[string]uint64{
+ "0": 1,
+ "90000000": 2,
+ "100000000": 3,
+ }
+ assert.Equal(t, borKeyValueConfigHelper(config, 0), uint64(1))
+ assert.Equal(t, borKeyValueConfigHelper(config, 1), uint64(1))
+ assert.Equal(t, borKeyValueConfigHelper(config, 90000000-1), uint64(1))
+ assert.Equal(t, borKeyValueConfigHelper(config, 90000000), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(config, 90000000+1), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(config, 100000000-1), uint64(2))
+ assert.Equal(t, borKeyValueConfigHelper(config, 100000000), uint64(3))
+ assert.Equal(t, borKeyValueConfigHelper(config, 100000000+1), uint64(3))
+
+ address1 := common.HexToAddress("0x70bcA57F4579f58670aB2d18Ef16e02C17553C38")
+ address2 := common.HexToAddress("0x617b94CCCC2511808A3C9478ebb96f455CF167aA")
+
+ burntContract := map[string]common.Address{
+ "22640000": address1,
+ "41874000": address2,
+ }
+ assert.Equal(t, borKeyValueConfigHelper(burntContract, 22640000), address1)
+ assert.Equal(t, borKeyValueConfigHelper(burntContract, 22640000+1), address1)
+ assert.Equal(t, borKeyValueConfigHelper(burntContract, 41874000-1), address1)
+ assert.Equal(t, borKeyValueConfigHelper(burntContract, 41874000), address2)
+ assert.Equal(t, borKeyValueConfigHelper(burntContract, 41874000+1), address2)
+}
diff --git a/erigon-lib/chain/chain_db.go b/erigon-lib/chain/chain_db.go
new file mode 100644
index 00000000000..6b41d708c6a
--- /dev/null
+++ b/erigon-lib/chain/chain_db.go
@@ -0,0 +1,96 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package chain
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+// GetConfig retrieves the consensus settings based on the given genesis hash.
+func GetConfig(db kv.Getter, buf []byte) (*Config, error) {
+ hash, err := CanonicalHash(db, 0, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed ReadCanonicalHash: %w", err)
+ }
+ if hash == nil {
+ return nil, nil
+ }
+ data, err := db.GetOne(kv.ConfigTable, hash)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) == 0 {
+ return nil, nil
+ }
+ var config Config
+ if err := json.Unmarshal(data, &config); err != nil {
+ return nil, fmt.Errorf("invalid chain config JSON: %s, %w", data, err)
+ }
+ return &config, nil
+}
+
+func CanonicalHash(db kv.Getter, number uint64, buf []byte) ([]byte, error) {
+ buf = common.EnsureEnoughSize(buf, 8)
+ binary.BigEndian.PutUint64(buf, number)
+ data, err := db.GetOne(kv.HeaderCanonical, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed CanonicalHash: %w, number=%d", err, number)
+ }
+ if len(data) == 0 {
+ return nil, nil
+ }
+
+ return data, nil
+}
+
+// HeadHeaderHash retrieves the hash of the current canonical head header.
+func HeadHeaderHash(db kv.Getter) ([]byte, error) {
+ data, err := db.GetOne(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey))
+ if err != nil {
+ return nil, fmt.Errorf("ReadHeadHeaderHash failed: %w", err)
+ }
+ return data, nil
+}
+
+func CurrentBlockNumber(db kv.Getter) (*uint64, error) {
+ headHash, err := HeadHeaderHash(db)
+ if err != nil {
+ return nil, err
+ }
+ return HeaderNumber(db, headHash)
+}
+
+// HeaderNumber returns the header number assigned to a hash.
+func HeaderNumber(db kv.Getter, hash []byte) (*uint64, error) {
+ data, err := db.GetOne(kv.HeaderNumber, hash)
+ if err != nil {
+ return nil, fmt.Errorf("ReadHeaderNumber failed: %w", err)
+ }
+ if len(data) == 0 {
+ return nil, nil
+ }
+ if len(data) != 8 {
+ return nil, fmt.Errorf("ReadHeaderNumber got wrong data len: %d", len(data))
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number, nil
+}
diff --git a/erigon-lib/chain/consensus.go b/erigon-lib/chain/consensus.go
new file mode 100644
index 00000000000..bf7ec9fbacd
--- /dev/null
+++ b/erigon-lib/chain/consensus.go
@@ -0,0 +1,10 @@
+package chain
+
+type ConsensusName string
+
+const (
+ AuRaConsensus ConsensusName = "aura"
+ EtHashConsensus ConsensusName = "ethash"
+ CliqueConsensus ConsensusName = "clique"
+ BorConsensus ConsensusName = "bor"
+)
diff --git a/erigon-lib/chain/networkname/network_name.go b/erigon-lib/chain/networkname/network_name.go
new file mode 100644
index 00000000000..877b3738976
--- /dev/null
+++ b/erigon-lib/chain/networkname/network_name.go
@@ -0,0 +1,29 @@
+package networkname
+
+const (
+ MainnetChainName = "mainnet"
+ HoleskyChainName = "holesky"
+ SepoliaChainName = "sepolia"
+ GoerliChainName = "goerli"
+ DevChainName = "dev"
+ MumbaiChainName = "mumbai"
+ AmoyChainName = "amoy"
+ BorMainnetChainName = "bor-mainnet"
+ BorDevnetChainName = "bor-devnet"
+ GnosisChainName = "gnosis"
+ BorE2ETestChain2ValName = "bor-e2e-test-2Val"
+ ChiadoChainName = "chiado"
+)
+
+var All = []string{
+ MainnetChainName,
+ HoleskyChainName,
+ SepoliaChainName,
+ GoerliChainName,
+ MumbaiChainName,
+ AmoyChainName,
+ BorMainnetChainName,
+ BorDevnetChainName,
+ GnosisChainName,
+ ChiadoChainName,
+}
diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go
new file mode 100644
index 00000000000..8f5ecf2f3ae
--- /dev/null
+++ b/erigon-lib/chain/snapcfg/util.go
@@ -0,0 +1,141 @@
+package snapcfg
+
+import (
+ _ "embed"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/ledgerwatch/erigon-lib/chain/networkname"
+ snapshothashes "github.com/ledgerwatch/erigon-snapshot"
+ "github.com/ledgerwatch/erigon-snapshot/webseed"
+ "github.com/pelletier/go-toml/v2"
+ "golang.org/x/exp/slices"
+)
+
+var (
+ Mainnet = fromToml(snapshothashes.Mainnet)
+ // Holesky = fromToml(snapshothashes.Holesky)
+ Sepolia = fromToml(snapshothashes.Sepolia)
+ Goerli = fromToml(snapshothashes.Goerli)
+ Mumbai = fromToml(snapshothashes.Mumbai)
+ Amoy = fromToml(snapshothashes.Amoy)
+ BorMainnet = fromToml(snapshothashes.BorMainnet)
+ Gnosis = fromToml(snapshothashes.Gnosis)
+ Chiado = fromToml(snapshothashes.Chiado)
+)
+
+type PreverifiedItem struct {
+ Name string
+ Hash string
+}
+type Preverified []PreverifiedItem
+type preverified map[string]string
+
+func fromToml(in []byte) (out Preverified) {
+ var outMap preverified
+ if err := toml.Unmarshal(in, &outMap); err != nil {
+ panic(err)
+ }
+ return doSort(outMap)
+}
+func doSort(in preverified) Preverified {
+ out := make(Preverified, 0, len(in))
+ for k, v := range in {
+ out = append(out, PreverifiedItem{k, v})
+ }
+ slices.SortFunc(out, func(i, j PreverifiedItem) int { return strings.Compare(i.Name, j.Name) })
+ return out
+}
+
+var (
+ MainnetChainSnapshotCfg = newCfg(Mainnet)
+ // HoleskyChainSnapshotCfg = newCfg(Holesky, HoleskyHistory)
+ SepoliaChainSnapshotCfg = newCfg(Sepolia)
+ GoerliChainSnapshotCfg = newCfg(Goerli)
+ MumbaiChainSnapshotCfg = newCfg(Mumbai)
+ AmoyChainSnapshotCfg = newCfg(Amoy)
+ BorMainnetChainSnapshotCfg = newCfg(BorMainnet)
+ GnosisChainSnapshotCfg = newCfg(Gnosis)
+ ChiadoChainSnapshotCfg = newCfg(Chiado)
+)
+
+func newCfg(preverified Preverified) *Cfg {
+ return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified}
+}
+
+func maxBlockNum(preverified Preverified) uint64 {
+ max := uint64(0)
+ for _, p := range preverified {
+ _, fileName := filepath.Split(p.Name)
+ ext := filepath.Ext(fileName)
+ if ext != ".seg" {
+ continue
+ }
+ onlyName := fileName[:len(fileName)-len(ext)]
+ parts := strings.Split(onlyName, "-")
+ if parts[3] != "headers" {
+ continue
+ }
+ to, err := strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ if max < to {
+ max = to
+ }
+ }
+ if max == 0 { // to prevent underflow
+ return 0
+ }
+ return max*1_000 - 1
+}
+
+type Cfg struct {
+ ExpectBlocks uint64
+ Preverified Preverified
+}
+
+var KnownCfgs = map[string]*Cfg{
+ networkname.MainnetChainName: MainnetChainSnapshotCfg,
+ // networkname.HoleskyChainName: HoleskyChainSnapshotCfg,
+ networkname.SepoliaChainName: SepoliaChainSnapshotCfg,
+ networkname.GoerliChainName: GoerliChainSnapshotCfg,
+ networkname.MumbaiChainName: MumbaiChainSnapshotCfg,
+ networkname.AmoyChainName: AmoyChainSnapshotCfg,
+ networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg,
+ networkname.GnosisChainName: GnosisChainSnapshotCfg,
+ networkname.ChiadoChainName: ChiadoChainSnapshotCfg,
+}
+
+// KnownCfg return list of preverified hashes for given network, but apply whiteList filter if it's not empty
+func KnownCfg(networkName string) *Cfg {
+ c, ok := KnownCfgs[networkName]
+ if !ok {
+ return newCfg(Preverified{})
+ }
+ return newCfg(c.Preverified)
+}
+
+var KnownWebseeds = map[string][]string{
+ networkname.MainnetChainName: webseedsParse(webseed.Mainnet),
+ networkname.SepoliaChainName: webseedsParse(webseed.Sepolia),
+ networkname.GoerliChainName: webseedsParse(webseed.Goerli),
+ networkname.MumbaiChainName: webseedsParse(webseed.Mumbai),
+ networkname.AmoyChainName: webseedsParse(webseed.Amoy),
+ networkname.BorMainnetChainName: webseedsParse(webseed.BorMainnet),
+ networkname.GnosisChainName: webseedsParse(webseed.Gnosis),
+ networkname.ChiadoChainName: webseedsParse(webseed.Chiado),
+}
+
+func webseedsParse(in []byte) (res []string) {
+ a := map[string]string{}
+ if err := toml.Unmarshal(in, &a); err != nil {
+ panic(err)
+ }
+ for _, l := range a {
+ res = append(res, l)
+ }
+ slices.Sort(res)
+ return res
+}
diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go
new file mode 100644
index 00000000000..0c4aebdb637
--- /dev/null
+++ b/erigon-lib/commitment/bin_patricia_hashed.go
@@ -0,0 +1,1818 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package commitment
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "math/bits"
+
+ "github.com/holiman/uint256"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/crypto/sha3"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/rlp"
+)
+
+const (
+ maxKeySize = 512
+ halfKeySize = maxKeySize / 2
+ maxChild = 2
+)
+
+type bitstring []uint8
+
+// converts slice of nibbles (lowest 4 bits of each byte) to bitstring
+func hexToBin(hex []byte) bitstring {
+ bin := make([]byte, 4*len(hex))
+ for i := range bin {
+ if hex[i/4]&(1<<(3-i%4)) != 0 {
+ bin[i] = 1
+ }
+ }
+ return bin
+}
+
+// encodes bitstring to its compact representation
+func binToCompact(bin []byte) []byte {
+ compact := make([]byte, 2+common.BitLenToByteLen(len(bin)))
+ binary.BigEndian.PutUint16(compact, uint16(len(bin)))
+ for i := 0; i < len(bin); i++ {
+ if bin[i] != 0 {
+ compact[2+i/8] |= byte(1) << (i % 8)
+ }
+ }
+ return compact
+}
+
+// decodes compact bitstring representation into actual bitstring
+func compactToBin(compact []byte) []byte {
+ bin := make([]byte, binary.BigEndian.Uint16(compact))
+ for i := 0; i < len(bin); i++ {
+ if compact[2+i/8]&(byte(1)<<(i%8)) == 0 {
+ bin[i] = 0
+ } else {
+ bin[i] = 1
+ }
+ }
+ return bin
+}
+
+// BinHashed implements commitment based on patricia merkle tree with radix 16,
+// with keys pre-hashed by keccak256
+type BinPatriciaHashed struct {
+ root BinaryCell // Root cell of the tree
+ // Rows of the grid correspond to the level of depth in the patricia tree
+ // Columns of the grid correspond to pointers to the nodes further from the root
+ grid [maxKeySize][maxChild]BinaryCell // First halfKeySize rows of this grid are for account trie, and next halfKeySize rows are for storage trie
+ // How many rows (starting from row 0) are currently active and have corresponding selected columns
+ // Last active row does not have selected column
+ activeRows int
+ // Length of the key that reflects current positioning of the grid. It maybe larger than number of active rows,
+ // if a account leaf cell represents multiple nibbles in the key
+ currentKeyLen int
+ currentKey [maxKeySize]byte // For each row indicates which column is currently selected
+ depths [maxKeySize]int // For each row, the depth of cells in that row
+ rootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked
+ rootTouched bool
+ rootPresent bool
+ branchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold
+ touchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted
+ afterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification
+ keccak keccakState
+ keccak2 keccakState
+ accountKeyLen int
+ trace bool
+ hashAuxBuffer [maxKeySize]byte // buffer to compute cell hash or write hash-related things
+ auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding
+
+ // Function used to load branch node and fill up the cells
+ // For each cell, it sets the cell type, clears the modified flag, fills the hash,
+ // and for the extension, account, and leaf type, the `l` and `k`
+ branchFn func(prefix []byte) ([]byte, error)
+ // Function used to fetch account with given plain key
+ accountFn func(plainKey []byte, cell *BinaryCell) error
+ // Function used to fetch account with given plain key
+ storageFn func(plainKey []byte, cell *BinaryCell) error
+}
+
+func NewBinPatriciaHashed(accountKeyLen int,
+ branchFn func(prefix []byte) ([]byte, error),
+ accountFn func(plainKey []byte, cell *Cell) error,
+ storageFn func(plainKey []byte, cell *Cell) error,
+) *BinPatriciaHashed {
+ return &BinPatriciaHashed{
+ keccak: sha3.NewLegacyKeccak256().(keccakState),
+ keccak2: sha3.NewLegacyKeccak256().(keccakState),
+ accountKeyLen: accountKeyLen,
+ branchFn: branchFn,
+ accountFn: wrapAccountStorageFn(accountFn),
+ storageFn: wrapAccountStorageFn(storageFn),
+ auxBuffer: bytes.NewBuffer(make([]byte, 8192)),
+ }
+}
+
+type BinaryCell struct {
+ h [length.Hash]byte // cell hash
+ hl int // Length of the hash (or embedded)
+ apk [length.Addr]byte // account plain key
+ apl int // length of account plain key
+ spk [length.Addr + length.Hash]byte // storage plain key
+ spl int // length of the storage plain key
+ downHashedKey [maxKeySize]byte
+ downHashedLen int
+ extension [halfKeySize]byte
+ extLen int
+ Nonce uint64
+ Balance uint256.Int
+ CodeHash [length.Hash]byte // hash of the bytecode
+ Storage [length.Hash]byte
+ StorageLen int
+ Delete bool
+}
+
+func (cell *BinaryCell) unwrapToHexCell() (cl *Cell) {
+ cl = new(Cell)
+ cl.Balance = *cell.Balance.Clone()
+ cl.Nonce = cell.Nonce
+ cl.StorageLen = cell.StorageLen
+ cl.apl = cell.apl
+ cl.spl = cell.spl
+ cl.hl = cell.hl
+
+ copy(cl.apk[:], cell.apk[:])
+ copy(cl.spk[:], cell.spk[:])
+ copy(cl.h[:], cell.h[:])
+
+ if cell.extLen > 0 {
+ compactedExt := binToCompact(cell.extension[:cell.extLen])
+ copy(cl.extension[:], compactedExt)
+ cl.extLen = len(compactedExt)
+ }
+ if cell.downHashedLen > 0 {
+ compactedDHK := binToCompact(cell.downHashedKey[:cell.downHashedLen])
+ copy(cl.downHashedKey[:], compactedDHK)
+ cl.downHashedLen = len(compactedDHK)
+ }
+
+ copy(cl.CodeHash[:], cell.CodeHash[:])
+ copy(cl.Storage[:], cell.Storage[:])
+ cl.Delete = cell.Delete
+ return cl
+}
+
+var ( // TODO REEAVL
+ EmptyBinRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ EmptyBinCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+)
+
+func (cell *BinaryCell) fillEmpty() {
+ cell.apl = 0
+ cell.spl = 0
+ cell.downHashedLen = 0
+ cell.extLen = 0
+ cell.hl = 0
+ cell.Nonce = 0
+ cell.Balance.Clear()
+ copy(cell.CodeHash[:], EmptyCodeHash)
+ cell.StorageLen = 0
+ cell.Delete = false
+}
+
+func (cell *BinaryCell) fillFromUpperCell(upBinaryCell *BinaryCell, depth, depthIncrement int) {
+ if upBinaryCell.downHashedLen >= depthIncrement {
+ cell.downHashedLen = upBinaryCell.downHashedLen - depthIncrement
+ } else {
+ cell.downHashedLen = 0
+ }
+ if upBinaryCell.downHashedLen > depthIncrement {
+ copy(cell.downHashedKey[:], upBinaryCell.downHashedKey[depthIncrement:upBinaryCell.downHashedLen])
+ }
+ if upBinaryCell.extLen >= depthIncrement {
+ cell.extLen = upBinaryCell.extLen - depthIncrement
+ } else {
+ cell.extLen = 0
+ }
+ if upBinaryCell.extLen > depthIncrement {
+ copy(cell.extension[:], upBinaryCell.extension[depthIncrement:upBinaryCell.extLen])
+ }
+ if depth <= halfKeySize {
+ cell.apl = upBinaryCell.apl
+ if upBinaryCell.apl > 0 {
+ copy(cell.apk[:], upBinaryCell.apk[:cell.apl])
+ cell.Balance.Set(&upBinaryCell.Balance)
+ cell.Nonce = upBinaryCell.Nonce
+ copy(cell.CodeHash[:], upBinaryCell.CodeHash[:])
+ cell.extLen = upBinaryCell.extLen
+ if upBinaryCell.extLen > 0 {
+ copy(cell.extension[:], upBinaryCell.extension[:upBinaryCell.extLen])
+ }
+ }
+ } else {
+ cell.apl = 0
+ }
+ cell.spl = upBinaryCell.spl
+ if upBinaryCell.spl > 0 {
+ copy(cell.spk[:], upBinaryCell.spk[:upBinaryCell.spl])
+ cell.StorageLen = upBinaryCell.StorageLen
+ if upBinaryCell.StorageLen > 0 {
+ copy(cell.Storage[:], upBinaryCell.Storage[:upBinaryCell.StorageLen])
+ }
+ }
+ cell.hl = upBinaryCell.hl
+ if upBinaryCell.hl > 0 {
+ copy(cell.h[:], upBinaryCell.h[:upBinaryCell.hl])
+ }
+}
+
+func (cell *BinaryCell) fillFromLowerBinaryCell(lowBinaryCell *BinaryCell, lowDepth int, preExtension []byte, nibble int) {
+ if lowBinaryCell.apl > 0 || lowDepth < halfKeySize {
+ cell.apl = lowBinaryCell.apl
+ }
+ if lowBinaryCell.apl > 0 {
+ copy(cell.apk[:], lowBinaryCell.apk[:cell.apl])
+ cell.Balance.Set(&lowBinaryCell.Balance)
+ cell.Nonce = lowBinaryCell.Nonce
+ copy(cell.CodeHash[:], lowBinaryCell.CodeHash[:])
+ }
+ cell.spl = lowBinaryCell.spl
+ if lowBinaryCell.spl > 0 {
+ copy(cell.spk[:], lowBinaryCell.spk[:cell.spl])
+ cell.StorageLen = lowBinaryCell.StorageLen
+ if lowBinaryCell.StorageLen > 0 {
+ copy(cell.Storage[:], lowBinaryCell.Storage[:lowBinaryCell.StorageLen])
+ }
+ }
+ if lowBinaryCell.hl > 0 {
+ if (lowBinaryCell.apl == 0 && lowDepth < halfKeySize) || (lowBinaryCell.spl == 0 && lowDepth > halfKeySize) {
+ // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble
+ if len(preExtension) > 0 {
+ copy(cell.extension[:], preExtension)
+ }
+ cell.extension[len(preExtension)] = byte(nibble)
+ if lowBinaryCell.extLen > 0 {
+ copy(cell.extension[1+len(preExtension):], lowBinaryCell.extension[:lowBinaryCell.extLen])
+ }
+ cell.extLen = lowBinaryCell.extLen + 1 + len(preExtension)
+ } else {
+ // Extension is related to a storage branch node, so we copy it upwards as is
+ cell.extLen = lowBinaryCell.extLen
+ if lowBinaryCell.extLen > 0 {
+ copy(cell.extension[:], lowBinaryCell.extension[:lowBinaryCell.extLen])
+ }
+ }
+ }
+ cell.hl = lowBinaryCell.hl
+ if lowBinaryCell.hl > 0 {
+ copy(cell.h[:], lowBinaryCell.h[:lowBinaryCell.hl])
+ }
+}
+
+func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error {
+ extraLen := 0
+ if cell.apl > 0 {
+ if depth > halfKeySize {
+ return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > halfKeySize")
+ }
+ extraLen = halfKeySize - depth
+ }
+ if cell.spl > 0 {
+ if depth >= halfKeySize {
+ extraLen = maxKeySize - depth
+ } else {
+ extraLen += halfKeySize
+ }
+ }
+ if extraLen > 0 {
+ if cell.downHashedLen > 0 {
+ copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen])
+ }
+ cell.downHashedLen += extraLen
+ var hashedKeyOffset, downOffset int
+ if cell.apl > 0 {
+ if err := binHashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil {
+ return err
+ }
+ downOffset = halfKeySize - depth
+ }
+ if cell.spl > 0 {
+ if depth >= halfKeySize {
+ hashedKeyOffset = depth - halfKeySize
+ }
+ if err := binHashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) {
+ if fieldBits&HashedKeyPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data))
+ }
+ cell.downHashedLen = int(l)
+ cell.extLen = int(l)
+ if l > 0 {
+ copy(cell.downHashedKey[:], data[pos:pos+int(l)])
+ copy(cell.extension[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.downHashedLen = 0
+ cell.extLen = 0
+ }
+ if fieldBits&AccountPlainPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey")
+ }
+ cell.apl = int(l)
+ if l > 0 {
+ copy(cell.apk[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.apl = 0
+ }
+ if fieldBits&StoragePlainPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey")
+ }
+ cell.spl = int(l)
+ if l > 0 {
+ copy(cell.spk[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.spl = 0
+ }
+ if fieldBits&HashPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hash len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for hash len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hash")
+ }
+ cell.hl = int(l)
+ if l > 0 {
+ copy(cell.h[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.hl = 0
+ }
+ return pos, nil
+}
+
+func (cell *BinaryCell) setStorage(value []byte) {
+ cell.StorageLen = len(value)
+ if len(value) > 0 {
+ copy(cell.Storage[:], value)
+ }
+}
+
+func (cell *BinaryCell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) {
+ copy(cell.CodeHash[:], codeHash)
+
+ cell.Balance.SetBytes(balance.Bytes())
+ cell.Nonce = nonce
+}
+
+func (cell *BinaryCell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int {
+ balanceBytes := 0
+ if !cell.Balance.LtUint64(128) {
+ balanceBytes = cell.Balance.ByteLen()
+ }
+
+ var nonceBytes int
+ if cell.Nonce < 128 && cell.Nonce != 0 {
+ nonceBytes = 0
+ } else {
+ nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce))
+ }
+
+ var structLength = uint(balanceBytes + nonceBytes + 2)
+ structLength += 66 // Two 32-byte arrays + 2 prefixes
+
+ var pos int
+ if structLength < 56 {
+ buffer[0] = byte(192 + structLength)
+ pos = 1
+ } else {
+ lengthBytes := common.BitLenToByteLen(bits.Len(structLength))
+ buffer[0] = byte(247 + lengthBytes)
+
+ for i := lengthBytes; i > 0; i-- {
+ buffer[i] = byte(structLength)
+ structLength >>= 8
+ }
+
+ pos = lengthBytes + 1
+ }
+
+ // Encoding nonce
+ if cell.Nonce < 128 && cell.Nonce != 0 {
+ buffer[pos] = byte(cell.Nonce)
+ } else {
+ buffer[pos] = byte(128 + nonceBytes)
+ var nonce = cell.Nonce
+ for i := nonceBytes; i > 0; i-- {
+ buffer[pos+i] = byte(nonce)
+ nonce >>= 8
+ }
+ }
+ pos += 1 + nonceBytes
+
+ // Encoding balance
+ if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() {
+ buffer[pos] = byte(cell.Balance.Uint64())
+ pos++
+ } else {
+ buffer[pos] = byte(128 + balanceBytes)
+ pos++
+ cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes])
+ pos += balanceBytes
+ }
+
+ // Encoding Root and CodeHash
+ buffer[pos] = 128 + 32
+ pos++
+ copy(buffer[pos:], storageRootHash[:])
+ pos += 32
+ buffer[pos] = 128 + 32
+ pos++
+ copy(buffer[pos:], cell.CodeHash[:])
+ pos += 32
+ return pos
+}
+
+func (bph *BinPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) {
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ embedded := !singleton && totalLen+pt < length.Hash
+ var writer io.Writer
+ if embedded {
+ //bph.byteArrayWriter.Setup(buf)
+ bph.auxBuffer.Reset()
+ writer = bph.auxBuffer
+ } else {
+ bph.keccak.Reset()
+ writer = bph.keccak
+ }
+ if _, err := writer.Write(lenPrefix[:pt]); err != nil {
+ return nil, err
+ }
+ if _, err := writer.Write(keyPrefix[:kp]); err != nil {
+ return nil, err
+ }
+ var b [1]byte
+ b[0] = compact0
+ if _, err := writer.Write(b[:]); err != nil {
+ return nil, err
+ }
+ for i := 1; i < compactLen; i++ {
+ b[0] = key[ni]*16 + key[ni+1]
+ if _, err := writer.Write(b[:]); err != nil {
+ return nil, err
+ }
+ ni += 2
+ }
+ var prefixBuf [8]byte
+ if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil {
+ return nil, err
+ }
+ if embedded {
+ buf = bph.auxBuffer.Bytes()
+ } else {
+ var hashBuf [33]byte
+ hashBuf[0] = 0x80 + length.Hash
+ if _, err := bph.keccak.Read(hashBuf[1:]); err != nil {
+ return nil, err
+ }
+ buf = append(buf, hashBuf[:]...)
+ }
+ return buf, nil
+}
+
+func (bph *BinPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) {
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 0x20
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = 0x80 + byte(compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton)
+}
+
+func (bph *BinPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) {
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 48 + key[0] // Odd (1<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 32
+ }
+ } else {
+ compactLen = len(key)/2 + 1
+ if len(key)&1 == 1 {
+ compact0 = 16 + key[0] // Odd (1<<4) + first nibble
+ ni = 1
+ }
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = byte(128 + compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true)
+}
+
+func (bph *BinPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) {
+ var hashBuf [length.Hash]byte
+
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 0x20
+ }
+ } else {
+ compactLen = len(key)/2 + 1
+ if len(key)&1 == 1 {
+ compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble
+ ni = 1
+ }
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = 0x80 + byte(compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ totalLen := kp + kl + 33
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ bph.keccak.Reset()
+ if _, err := bph.keccak.Write(lenPrefix[:pt]); err != nil {
+ return hashBuf, err
+ }
+ if _, err := bph.keccak.Write(keyPrefix[:kp]); err != nil {
+ return hashBuf, err
+ }
+ var b [1]byte
+ b[0] = compact0
+ if _, err := bph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ for i := 1; i < compactLen; i++ {
+ b[0] = key[ni]*16 + key[ni+1]
+ if _, err := bph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ ni += 2
+ }
+ b[0] = 0x80 + length.Hash
+ if _, err := bph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ if _, err := bph.keccak.Write(hash); err != nil {
+ return hashBuf, err
+ }
+ // Replace previous hash with the new one
+ if _, err := bph.keccak.Read(hashBuf[:]); err != nil {
+ return hashBuf, err
+ }
+ return hashBuf, nil
+}
+
+func (bph *BinPatriciaHashed) computeBinaryCellHashLen(cell *BinaryCell, depth int) int {
+ if cell.spl > 0 && depth >= halfKeySize {
+ keyLen := 128 - depth + 1 // Length of hex key with terminator character
+ var kp, kl int
+ compactLen := (keyLen-1)/2 + 1
+ if compactLen > 1 {
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen])
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ if totalLen+pt < length.Hash {
+ return totalLen + pt
+ }
+ }
+ return length.Hash + 1
+}
+
+func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, buf []byte) ([]byte, error) {
+ var err error
+ var storageRootHash [length.Hash]byte
+ storageRootHashIsSet := false
+ if cell.spl > 0 {
+ var hashedKeyOffset int
+ if depth >= halfKeySize {
+ hashedKeyOffset = depth - halfKeySize
+ }
+ singleton := depth <= halfKeySize
+ if err := binHashKey(bph.keccak, cell.spk[bph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil {
+ return nil, err
+ }
+ cell.downHashedKey[halfKeySize-hashedKeyOffset] = 16 // Add terminator
+ if singleton {
+ if bph.trace {
+ fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen])
+ }
+ aux := make([]byte, 0, 33)
+ if aux, err = bph.leafHashWithKeyVal(aux, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil {
+ return nil, err
+ }
+ storageRootHash = *(*[length.Hash]byte)(aux[1:])
+ storageRootHashIsSet = true
+ } else {
+ if bph.trace {
+ fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen])
+ }
+ return bph.leafHashWithKeyVal(buf, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false)
+ }
+ }
+ if cell.apl > 0 {
+ if err := binHashKey(bph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil {
+ return nil, err
+ }
+ cell.downHashedKey[halfKeySize-depth] = 16 // Add terminator
+ if !storageRootHashIsSet {
+ if cell.extLen > 0 {
+ // Extension
+ if cell.hl > 0 {
+ if bph.trace {
+ fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl])
+ }
+ if storageRootHash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("computeBinaryCellHash extension without hash")
+ }
+ } else if cell.hl > 0 {
+ storageRootHash = cell.h
+ } else {
+ storageRootHash = *(*[length.Hash]byte)(EmptyRootHash)
+ }
+ }
+ var valBuf [128]byte
+ valLen := cell.accountForHashing(valBuf[:], storageRootHash)
+ if bph.trace {
+ fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", bph.hashAuxBuffer[:halfKeySize+1-depth], valBuf[:valLen])
+ }
+ return bph.accountLeafHashWithKey(buf, cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen]))
+ }
+ buf = append(buf, 0x80+32)
+ if cell.extLen > 0 {
+ // Extension
+ if cell.hl > 0 {
+ if bph.trace {
+ fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl])
+ }
+ var hash [length.Hash]byte
+ if hash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil {
+ return nil, err
+ }
+ buf = append(buf, hash[:]...)
+ } else {
+ return nil, fmt.Errorf("computeBinaryCellHash extension without hash")
+ }
+ } else if cell.hl > 0 {
+ buf = append(buf, cell.h[:cell.hl]...)
+ } else {
+ buf = append(buf, EmptyRootHash...)
+ }
+ return buf, nil
+}
+
+func (bph *BinPatriciaHashed) needUnfolding(hashedKey []byte) int {
+ var cell *BinaryCell
+ var depth int
+ if bph.activeRows == 0 {
+ if bph.trace {
+ fmt.Printf("needUnfolding root, rootChecked = %t\n", bph.rootChecked)
+ }
+ if bph.rootChecked && bph.root.downHashedLen == 0 && bph.root.hl == 0 {
+ // Previously checked, empty root, no unfolding needed
+ return 0
+ }
+ cell = &bph.root
+ if cell.downHashedLen == 0 && cell.hl == 0 && !bph.rootChecked {
+ // Need to attempt to unfold the root
+ return 1
+ }
+ } else {
+ col := int(hashedKey[bph.currentKeyLen])
+ cell = &bph.grid[bph.activeRows-1][col]
+ depth = bph.depths[bph.activeRows-1]
+ if bph.trace {
+ fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", bph.activeRows-1, col, bph.currentKey[:bph.currentKeyLen], depth, cell.h[:cell.hl])
+ }
+ }
+ if len(hashedKey) <= depth {
+ return 0
+ }
+ if cell.downHashedLen == 0 {
+ if cell.hl == 0 {
+ // cell is empty, no need to unfold further
+ return 0
+ }
+ // unfold branch node
+ return 1
+ }
+ cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1])
+ if bph.trace {
+ fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:])
+ }
+ unfolding := cpl + 1
+ if depth < halfKeySize && depth+unfolding > halfKeySize {
+ // This is to make sure that unfolding always breaks at the level where storage subtrees start
+ unfolding = halfKeySize - depth
+ if bph.trace {
+ fmt.Printf("adjusted unfolding=%d\n", unfolding)
+ }
+ }
+ return unfolding
+}
+
+// unfoldBranchNode returns true if unfolding has been done
+func (bph *BinPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) {
+ branchData, err := bph.branchFn(binToCompact(bph.currentKey[:bph.currentKeyLen]))
+ if err != nil {
+ return false, err
+ }
+ if !bph.rootChecked && bph.currentKeyLen == 0 && len(branchData) == 0 {
+ // Special case - empty or deleted root
+ bph.rootChecked = true
+ return false, nil
+ }
+ if len(branchData) == 0 {
+ log.Warn("got empty branch data during unfold", "row", row, "depth", depth, "deleted", deleted)
+ }
+ bph.branchBefore[row] = true
+ bitmap := binary.BigEndian.Uint16(branchData[0:])
+ pos := 2
+ if deleted {
+ // All cells come as deleted (touched but not present after)
+ bph.afterMap[row] = 0
+ bph.touchMap[row] = bitmap
+ } else {
+ bph.afterMap[row] = bitmap
+ bph.touchMap[row] = 0
+ }
+ //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, bph.afterMap[row], bph.touchMap[row])
+ // Loop iterating over the set bits of modMask
+ for bitset, j := bitmap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ cell := &bph.grid[row][nibble]
+ fieldBits := branchData[pos]
+ pos++
+ var err error
+ if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil {
+ return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", bph.currentKey[:bph.currentKeyLen], branchData, err)
+ }
+ if bph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen])
+ }
+ if cell.apl > 0 {
+ bph.accountFn(cell.apk[:cell.apl], cell)
+ if bph.trace {
+ fmt.Printf("accountFn[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:])
+ }
+ }
+ if cell.spl > 0 {
+ bph.storageFn(cell.spk[:cell.spl], cell)
+ }
+ if err = cell.deriveHashedKeys(depth, bph.keccak, bph.accountKeyLen); err != nil {
+ return false, err
+ }
+ bitset ^= bit
+ }
+ return true, nil
+}
+
+func (bph *BinPatriciaHashed) unfold(hashedKey []byte, unfolding int) error {
+ if bph.trace {
+ fmt.Printf("unfold %d: activeRows: %d\n", unfolding, bph.activeRows)
+ }
+ var upCell *BinaryCell
+ var touched, present bool
+ var col byte
+ var upDepth, depth int
+ if bph.activeRows == 0 {
+ if bph.rootChecked && bph.root.hl == 0 && bph.root.downHashedLen == 0 {
+ // No unfolding for empty root
+ return nil
+ }
+ upCell = &bph.root
+ touched = bph.rootTouched
+ present = bph.rootPresent
+ if bph.trace {
+ fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col)
+ }
+ } else {
+ upDepth = bph.depths[bph.activeRows-1]
+ col = hashedKey[upDepth-1]
+ upCell = &bph.grid[bph.activeRows-1][col]
+ touched = bph.touchMap[bph.activeRows-1]&(uint16(1)<= unfolding {
+ depth = upDepth + unfolding
+ nibble := upCell.downHashedKey[unfolding-1]
+ if touched {
+ bph.touchMap[row] = uint16(1) << nibble
+ }
+ if present {
+ bph.afterMap[row] = uint16(1) << nibble
+ }
+ cell := &bph.grid[row][nibble]
+ cell.fillFromUpperCell(upCell, depth, unfolding)
+ if bph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+ }
+ if row >= halfKeySize {
+ cell.apl = 0
+ }
+ if unfolding > 1 {
+ copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:unfolding-1])
+ }
+ bph.currentKeyLen += unfolding - 1
+ } else {
+ // upCell.downHashedLen < unfolding
+ depth = upDepth + upCell.downHashedLen
+ nibble := upCell.downHashedKey[upCell.downHashedLen-1]
+ if touched {
+ bph.touchMap[row] = uint16(1) << nibble
+ }
+ if present {
+ bph.afterMap[row] = uint16(1) << nibble
+ }
+ cell := &bph.grid[row][nibble]
+ cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen)
+ if bph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+ }
+ if row >= halfKeySize {
+ cell.apl = 0
+ }
+ if upCell.downHashedLen > 1 {
+ copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1])
+ }
+ bph.currentKeyLen += upCell.downHashedLen - 1
+ }
+ bph.depths[bph.activeRows] = depth
+ bph.activeRows++
+ return nil
+}
+
+func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool {
+ return !bytes.HasPrefix(hashedKey, bph.currentKey[:bph.currentKeyLen])
+}
+
+// The purpose of fold is to reduce hph.currentKey[:hph.currentKeyLen]. It should be invoked
+// until that current key becomes a prefix of hashedKey that we will proccess next
+// (in other words until the needFolding function returns 0)
+func (bph *BinPatriciaHashed) fold() (branchData BranchData, updateKey []byte, err error) {
+ updateKeyLen := bph.currentKeyLen
+ if bph.activeRows == 0 {
+ return nil, nil, fmt.Errorf("cannot fold - no active rows")
+ }
+ if bph.trace {
+ fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1])
+ }
+ // Move information to the row above
+ row := bph.activeRows - 1
+ var upBinaryCell *BinaryCell
+ var col int
+ var upDepth int
+ if bph.activeRows == 1 {
+ if bph.trace {
+ fmt.Printf("upcell is root\n")
+ }
+ upBinaryCell = &bph.root
+ } else {
+ upDepth = bph.depths[bph.activeRows-2]
+ col = int(bph.currentKey[upDepth-1])
+ if bph.trace {
+ fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth)
+ }
+ upBinaryCell = &bph.grid[row-1][col]
+ }
+
+ depth := bph.depths[bph.activeRows-1]
+ updateKey = binToCompact(bph.currentKey[:updateKeyLen])
+ partsCount := bits.OnesCount16(bph.afterMap[row])
+
+ if bph.trace {
+ fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, bph.touchMap[row], row, bph.afterMap[row])
+ }
+ switch partsCount {
+ case 0:
+ // Everything deleted
+ if bph.touchMap[row] != 0 {
+ if row == 0 {
+ // Root is deleted because the tree is empty
+ bph.rootTouched = true
+ bph.rootPresent = false
+ } else if upDepth == halfKeySize {
+ // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage
+ // Therefore we are not propagating deletion upwards, but turn it into a modification
+ bph.touchMap[row-1] |= uint16(1) << col
+ } else {
+ // Deletion is propagated upwards
+ bph.touchMap[row-1] |= uint16(1) << col
+ bph.afterMap[row-1] &^= uint16(1) << col
+ }
+ }
+ upBinaryCell.hl = 0
+ upBinaryCell.apl = 0
+ upBinaryCell.spl = 0
+ upBinaryCell.extLen = 0
+ upBinaryCell.downHashedLen = 0
+ if bph.branchBefore[row] {
+ branchData, _, err = EncodeBranch(0, bph.touchMap[row], 0, func(nibble int, skip bool) (*Cell, error) { return nil, nil })
+ if err != nil {
+ return nil, updateKey, fmt.Errorf("failed to encode leaf node update: %w", err)
+ }
+ }
+ bph.activeRows--
+ if upDepth > 0 {
+ bph.currentKeyLen = upDepth - 1
+ } else {
+ bph.currentKeyLen = 0
+ }
+ case 1:
+ // Leaf or extension node
+ if bph.touchMap[row] != 0 {
+ // any modifications
+ if row == 0 {
+ bph.rootTouched = true
+ } else {
+ // Modifiction is propagated upwards
+ bph.touchMap[row-1] |= uint16(1) << col
+ }
+ }
+ nibble := bits.TrailingZeros16(bph.afterMap[row])
+ cell := &bph.grid[row][nibble]
+ upBinaryCell.extLen = 0
+ upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble)
+ // Delete if it existed
+ if bph.branchBefore[row] {
+ //branchData, _, err = bph.EncodeBranchDirectAccess(0, row, depth)
+ branchData, _, err = EncodeBranch(0, bph.touchMap[row], 0, func(nibble int, skip bool) (*Cell, error) { return nil, nil })
+ if err != nil {
+ return nil, updateKey, fmt.Errorf("failed to encode leaf node update: %w", err)
+ }
+ }
+ bph.activeRows--
+ if upDepth > 0 {
+ bph.currentKeyLen = upDepth - 1
+ } else {
+ bph.currentKeyLen = 0
+ }
+ default:
+ // Branch node
+ if bph.touchMap[row] != 0 {
+ // any modifications
+ if row == 0 {
+ bph.rootTouched = true
+ } else {
+ // Modifiction is propagated upwards
+ bph.touchMap[row-1] |= uint16(1) << col
+ }
+ }
+ bitmap := bph.touchMap[row] & bph.afterMap[row]
+ if !bph.branchBefore[row] {
+ // There was no branch node before, so we need to touch even the singular child that existed
+ bph.touchMap[row] |= bph.afterMap[row]
+ bitmap |= bph.afterMap[row]
+ }
+ // Calculate total length of all hashes
+ totalBranchLen := 17 - partsCount // For every empty cell, one byte
+ for bitset, j := bph.afterMap[row], 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ cell := &bph.grid[row][nibble]
+ totalBranchLen += bph.computeBinaryCellHashLen(cell, depth)
+ bitset ^= bit
+ }
+
+ bph.keccak2.Reset()
+ pt := rlp.GenerateStructLen(bph.hashAuxBuffer[:], totalBranchLen)
+ if _, err := bph.keccak2.Write(bph.hashAuxBuffer[:pt]); err != nil {
+ return nil, nil, err
+ }
+
+ b := [...]byte{0x80}
+ cellGetter := func(nibble int, skip bool) (*Cell, error) {
+ if skip {
+ if _, err := bph.keccak2.Write(b[:]); err != nil {
+ return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err)
+ }
+ if bph.trace {
+ fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble)
+ }
+ return nil, nil
+ }
+ cell := &bph.grid[row][nibble]
+ cellHash, err := bph.computeBinaryCellHash(cell, depth, bph.hashAuxBuffer[:0])
+ if err != nil {
+ return nil, err
+ }
+ if bph.trace {
+ fmt.Printf("%x: computeBinaryCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash)
+ }
+ if _, err := bph.keccak2.Write(cellHash); err != nil {
+ return nil, err
+ }
+
+ // TODO extension and downHashedKey should be encoded to hex format and vice versa, data loss due to array sizes
+ return cell.unwrapToHexCell(), nil
+ }
+
+ var lastNibble int
+ var err error
+ _ = cellGetter
+
+ //branchData, lastNibble, err = bph.EncodeBranchDirectAccess(bitmap, row, depth, branchData)
+ branchData, lastNibble, err = EncodeBranch(bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to encode branch update: %w", err)
+ }
+ for i := lastNibble; i <= maxChild; i++ {
+ if _, err := bph.keccak2.Write(b[:]); err != nil {
+ return nil, nil, err
+ }
+ if bph.trace {
+ fmt.Printf("%x: empty(%d,%x)\n", i, row, i)
+ }
+ }
+ upBinaryCell.extLen = depth - upDepth - 1
+ upBinaryCell.downHashedLen = upBinaryCell.extLen
+ if upBinaryCell.extLen > 0 {
+ copy(upBinaryCell.extension[:], bph.currentKey[upDepth:bph.currentKeyLen])
+ copy(upBinaryCell.downHashedKey[:], bph.currentKey[upDepth:bph.currentKeyLen])
+ }
+ if depth < halfKeySize {
+ upBinaryCell.apl = 0
+ }
+ upBinaryCell.spl = 0
+ upBinaryCell.hl = 32
+ if _, err := bph.keccak2.Read(upBinaryCell.h[:]); err != nil {
+ return nil, nil, err
+ }
+ if bph.trace {
+ fmt.Printf("} [%x]\n", upBinaryCell.h[:])
+ }
+ bph.activeRows--
+ if upDepth > 0 {
+ bph.currentKeyLen = upDepth - 1
+ } else {
+ bph.currentKeyLen = 0
+ }
+ }
+ if branchData != nil {
+ if bph.trace {
+ fmt.Printf("fold: update key: %x, branchData: [%x]\n", CompactedKeyToHex(updateKey), branchData)
+ }
+ }
+ return branchData, updateKey, nil
+}
+
+func (bph *BinPatriciaHashed) deleteBinaryCell(hashedKey []byte) {
+ if bph.trace {
+ fmt.Printf("deleteBinaryCell, activeRows = %d\n", bph.activeRows)
+ }
+ var cell *BinaryCell
+ if bph.activeRows == 0 {
+ // Remove the root
+ cell = &bph.root
+ bph.rootTouched = true
+ bph.rootPresent = false
+ } else {
+ row := bph.activeRows - 1
+ if bph.depths[row] < len(hashedKey) {
+ if bph.trace {
+ fmt.Printf("deleteBinaryCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", bph.depths[row], len(hashedKey))
+ }
+ return
+ }
+ col := int(hashedKey[bph.currentKeyLen])
+ cell = &bph.grid[row][col]
+ if bph.afterMap[row]&(uint16(1)< 0; unfolding = bph.needUnfolding(hashedKey) {
+ if err := bph.unfold(hashedKey, unfolding); err != nil {
+ return nil, nil, fmt.Errorf("unfold: %w", err)
+ }
+ }
+
+ // Update the cell
+ stagedBinaryCell.fillEmpty()
+ if len(plainKey) == bph.accountKeyLen {
+ if err := bph.accountFn(plainKey, stagedBinaryCell); err != nil {
+ return nil, nil, fmt.Errorf("accountFn for key %x failed: %w", plainKey, err)
+ }
+ if !stagedBinaryCell.Delete {
+ cell := bph.updateBinaryCell(plainKey, hashedKey)
+ cell.setAccountFields(stagedBinaryCell.CodeHash[:], &stagedBinaryCell.Balance, stagedBinaryCell.Nonce)
+
+ if bph.trace {
+ fmt.Printf("accountFn reading key %x => balance=%v nonce=%v codeHash=%x\n", cell.apk, cell.Balance.Uint64(), cell.Nonce, cell.CodeHash)
+ }
+ }
+ } else {
+ if err = bph.storageFn(plainKey, stagedBinaryCell); err != nil {
+ return nil, nil, fmt.Errorf("storageFn for key %x failed: %w", plainKey, err)
+ }
+ if !stagedBinaryCell.Delete {
+ bph.updateBinaryCell(plainKey, hashedKey).setStorage(stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen])
+ if bph.trace {
+ fmt.Printf("storageFn reading key %x => %x\n", plainKey, stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen])
+ }
+ }
+ }
+
+ if stagedBinaryCell.Delete {
+ if bph.trace {
+ fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey)
+ }
+ bph.deleteBinaryCell(hashedKey)
+ }
+ }
+ // Folding everything up to the root
+ for bph.activeRows > 0 {
+ if branchData, updateKey, err := bph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("final fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+
+ rootHash, err = bph.RootHash()
+ if err != nil {
+ return nil, branchNodeUpdates, fmt.Errorf("root hash evaluation failed: %w", err)
+ }
+ return rootHash, branchNodeUpdates, nil
+}
+
+func (bph *BinPatriciaHashed) SetTrace(trace bool) { bph.trace = trace }
+
+func (bph *BinPatriciaHashed) Variant() TrieVariant { return VariantBinPatriciaTrie }
+
+// Reset allows BinPatriciaHashed instance to be reused for the new commitment calculation
+func (bph *BinPatriciaHashed) Reset() {
+ bph.rootChecked = false
+ bph.root.hl = 0
+ bph.root.downHashedLen = 0
+ bph.root.apl = 0
+ bph.root.spl = 0
+ bph.root.extLen = 0
+ copy(bph.root.CodeHash[:], EmptyCodeHash)
+ bph.root.StorageLen = 0
+ bph.root.Balance.Clear()
+ bph.root.Nonce = 0
+ bph.rootTouched = false
+ bph.rootPresent = true
+}
+
+func (bph *BinPatriciaHashed) ResetFns(
+ branchFn func(prefix []byte) ([]byte, error),
+ accountFn func(plainKey []byte, cell *Cell) error,
+ storageFn func(plainKey []byte, cell *Cell) error,
+) {
+ bph.branchFn = branchFn
+ bph.accountFn = wrapAccountStorageFn(accountFn)
+ bph.storageFn = wrapAccountStorageFn(storageFn)
+}
+
+func (c *BinaryCell) bytes() []byte {
+ var pos = 1
+ size := 1 + c.hl + 1 + c.apl + c.spl + 1 + c.downHashedLen + 1 + c.extLen + 1 // max size
+ buf := make([]byte, size)
+
+ var flags uint8
+ if c.hl != 0 {
+ flags |= 1
+ buf[pos] = byte(c.hl)
+ pos++
+ copy(buf[pos:pos+c.hl], c.h[:])
+ pos += c.hl
+ }
+ if c.apl != 0 {
+ flags |= 2
+ buf[pos] = byte(c.hl)
+ pos++
+ copy(buf[pos:pos+c.apl], c.apk[:])
+ pos += c.apl
+ }
+ if c.spl != 0 {
+ flags |= 4
+ buf[pos] = byte(c.spl)
+ pos++
+ copy(buf[pos:pos+c.spl], c.spk[:])
+ pos += c.spl
+ }
+ if c.downHashedLen != 0 {
+ flags |= 8
+ buf[pos] = byte(c.downHashedLen)
+ pos++
+ copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:])
+ pos += c.downHashedLen
+ }
+ if c.extLen != 0 {
+ flags |= 16
+ buf[pos] = byte(c.extLen)
+ pos++
+ copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:])
+ //pos += c.downHashedLen
+ }
+ buf[0] = flags
+ return buf
+}
+
+func (c *BinaryCell) decodeBytes(buf []byte) error {
+ if len(buf) < 1 {
+ return fmt.Errorf("invalid buffer size to contain BinaryCell (at least 1 byte expected)")
+ }
+ c.fillEmpty()
+
+ var pos int
+ flags := buf[pos]
+ pos++
+
+ if flags&1 != 0 {
+ c.hl = int(buf[pos])
+ pos++
+ copy(c.h[:], buf[pos:pos+c.hl])
+ pos += c.hl
+ }
+ if flags&2 != 0 {
+ c.apl = int(buf[pos])
+ pos++
+ copy(c.apk[:], buf[pos:pos+c.apl])
+ pos += c.apl
+ }
+ if flags&4 != 0 {
+ c.spl = int(buf[pos])
+ pos++
+ copy(c.spk[:], buf[pos:pos+c.spl])
+ pos += c.spl
+ }
+ if flags&8 != 0 {
+ c.downHashedLen = int(buf[pos])
+ pos++
+ copy(c.downHashedKey[:], buf[pos:pos+c.downHashedLen])
+ pos += c.downHashedLen
+ }
+ if flags&16 != 0 {
+ c.extLen = int(buf[pos])
+ pos++
+ copy(c.extension[:], buf[pos:pos+c.extLen])
+ //pos += c.extLen
+ }
+ return nil
+}
+
+// Encode current state of hph into bytes
+func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) {
+ s := binState{
+ CurrentKeyLen: int16(bph.currentKeyLen),
+ RootChecked: bph.rootChecked,
+ RootTouched: bph.rootTouched,
+ RootPresent: bph.rootPresent,
+ Root: make([]byte, 0),
+ }
+
+ s.Root = bph.root.bytes()
+ copy(s.CurrentKey[:], bph.currentKey[:])
+ copy(s.Depths[:], bph.depths[:])
+ copy(s.BranchBefore[:], bph.branchBefore[:])
+ copy(s.TouchMap[:], bph.touchMap[:])
+ copy(s.AfterMap[:], bph.afterMap[:])
+
+ return s.Encode(buf)
+}
+
+// buf expected to be encoded hph state. Decode state and set up hph to that state.
+func (bph *BinPatriciaHashed) SetState(buf []byte) error {
+ if bph.activeRows != 0 {
+ return fmt.Errorf("has active rows, could not reset state")
+ }
+
+ var s state
+ if err := s.Decode(buf); err != nil {
+ return err
+ }
+
+ bph.Reset()
+
+ if err := bph.root.decodeBytes(s.Root); err != nil {
+ return err
+ }
+
+ bph.currentKeyLen = int(s.CurrentKeyLen)
+ bph.rootChecked = s.RootChecked
+ bph.rootTouched = s.RootTouched
+ bph.rootPresent = s.RootPresent
+
+ copy(bph.currentKey[:], s.CurrentKey[:])
+ copy(bph.depths[:], s.Depths[:])
+ copy(bph.branchBefore[:], s.BranchBefore[:])
+ copy(bph.touchMap[:], s.TouchMap[:])
+ copy(bph.afterMap[:], s.AfterMap[:])
+
+ return nil
+}
+
+func (bph *BinPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, updates []Update) (rootHash []byte, branchNodeUpdates map[string]BranchData, err error) {
+ branchNodeUpdates = make(map[string]BranchData)
+
+ for i, plainKey := range plainKeys {
+ hashedKey := hashedKeys[i]
+ if bph.trace {
+ fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", plainKey, hashedKey, bph.currentKey[:bph.currentKeyLen])
+ }
+ // Keep folding until the currentKey is the prefix of the key we modify
+ for bph.needFolding(hashedKey) {
+ if branchData, updateKey, err := bph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+ // Now unfold until we step on an empty cell
+ for unfolding := bph.needUnfolding(hashedKey); unfolding > 0; unfolding = bph.needUnfolding(hashedKey) {
+ if err := bph.unfold(hashedKey, unfolding); err != nil {
+ return nil, nil, fmt.Errorf("unfold: %w", err)
+ }
+ }
+
+ update := updates[i]
+ // Update the cell
+ if update.Flags == DeleteUpdate {
+ bph.deleteBinaryCell(hashedKey)
+ if bph.trace {
+ fmt.Printf("key %x deleted\n", plainKey)
+ }
+ } else {
+ cell := bph.updateBinaryCell(plainKey, hashedKey)
+ if bph.trace {
+ fmt.Printf("accountFn updated key %x =>", plainKey)
+ }
+ if update.Flags&BalanceUpdate != 0 {
+ if bph.trace {
+ fmt.Printf(" balance=%d", update.Balance.Uint64())
+ }
+ cell.Balance.Set(&update.Balance)
+ }
+ if update.Flags&NonceUpdate != 0 {
+ if bph.trace {
+ fmt.Printf(" nonce=%d", update.Nonce)
+ }
+ cell.Nonce = update.Nonce
+ }
+ if update.Flags&CodeUpdate != 0 {
+ if bph.trace {
+ fmt.Printf(" codeHash=%x", update.CodeHashOrStorage)
+ }
+ copy(cell.CodeHash[:], update.CodeHashOrStorage[:])
+ }
+ if bph.trace {
+ fmt.Printf("\n")
+ }
+ if update.Flags&StorageUpdate != 0 {
+ cell.setStorage(update.CodeHashOrStorage[:update.ValLength])
+ if bph.trace {
+ fmt.Printf("\rstorageFn filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength])
+ }
+ }
+ }
+ }
+ // Folding everything up to the root
+ for bph.activeRows > 0 {
+ if branchData, updateKey, err := bph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("final fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+
+ rootHash, err = bph.RootHash()
+ if err != nil {
+ return nil, branchNodeUpdates, fmt.Errorf("root hash evaluation failed: %w", err)
+ }
+ return rootHash, branchNodeUpdates, nil
+}
+
+// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits)
+func (bph *BinPatriciaHashed) hashAndNibblizeKey2(key []byte) []byte { //nolint
+ hashedKey := make([]byte, length.Hash)
+
+ bph.keccak.Reset()
+ bph.keccak.Write(key[:length.Addr])
+ copy(hashedKey[:length.Hash], bph.keccak.Sum(nil))
+
+ if len(key[length.Addr:]) > 0 {
+ hashedKey = append(hashedKey, make([]byte, length.Hash)...)
+ bph.keccak.Reset()
+ bph.keccak.Write(key[length.Addr:])
+ copy(hashedKey[length.Hash:], bph.keccak.Sum(nil))
+ }
+
+ nibblized := make([]byte, len(hashedKey)*2)
+ for i, b := range hashedKey {
+ nibblized[i*2] = (b >> 4) & 0xf
+ nibblized[i*2+1] = b & 0xf
+ }
+ return nibblized
+}
+
+func binHashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error {
+ keccak.Reset()
+ var hashBufBack [length.Hash]byte
+ hashBuf := hashBufBack[:]
+ if _, err := keccak.Write(plainKey); err != nil {
+ return err
+ }
+ if _, err := keccak.Read(hashBuf); err != nil {
+ return err
+ }
+ for k := hashedKeyOffset; k < 256; k++ {
+ if hashBuf[k/8]&(1<<(7-k%8)) == 0 {
+ dest[k-hashedKeyOffset] = 0
+ } else {
+ dest[k-hashedKeyOffset] = 1
+ }
+ }
+ return nil
+}
+
+func wrapAccountStorageFn(fn func([]byte, *Cell) error) func(pk []byte, bc *BinaryCell) error {
+ return func(pk []byte, bc *BinaryCell) error {
+ cl := bc.unwrapToHexCell()
+
+ if err := fn(pk, cl); err != nil {
+ return err
+ }
+
+ bc.Balance = *cl.Balance.Clone()
+ bc.Nonce = cl.Nonce
+ bc.StorageLen = cl.StorageLen
+ bc.apl = cl.apl
+ bc.spl = cl.spl
+ bc.hl = cl.hl
+ copy(bc.apk[:], cl.apk[:])
+ copy(bc.spk[:], cl.spk[:])
+ copy(bc.h[:], cl.h[:])
+
+ if cl.extLen > 0 {
+ binExt := compactToBin(cl.extension[:cl.extLen])
+ copy(bc.extension[:], binExt)
+ bc.extLen = len(binExt)
+ }
+ if cl.downHashedLen > 0 {
+ bindhk := compactToBin(cl.downHashedKey[:cl.downHashedLen])
+ copy(bc.downHashedKey[:], bindhk)
+ bc.downHashedLen = len(bindhk)
+ }
+
+ copy(bc.CodeHash[:], cl.CodeHash[:])
+ copy(bc.Storage[:], cl.Storage[:])
+ bc.Delete = cl.Delete
+ return nil
+ }
+}
+
+// represents state of the tree
+type binState struct {
+ TouchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted
+ AfterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification
+ CurrentKeyLen int16
+ Root []byte // encoded root cell
+ RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked
+ RootTouched bool
+ RootPresent bool
+ BranchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold
+ CurrentKey [maxKeySize]byte // For each row indicates which column is currently selected
+ Depths [maxKeySize]int // For each row, the depth of cells in that row
+}
+
+func (s *binState) Encode(buf []byte) ([]byte, error) {
+ var rootFlags stateRootFlag
+ if s.RootPresent {
+ rootFlags |= stateRootPresent
+ }
+ if s.RootChecked {
+ rootFlags |= stateRootChecked
+ }
+ if s.RootTouched {
+ rootFlags |= stateRootTouched
+ }
+
+ ee := bytes.NewBuffer(buf)
+ if err := binary.Write(ee, binary.BigEndian, s.CurrentKeyLen); err != nil {
+ return nil, fmt.Errorf("encode currentKeyLen: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil {
+ return nil, fmt.Errorf("encode rootFlags: %w", err)
+ }
+ if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) {
+ return nil, fmt.Errorf("encode currentKey: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil {
+ return nil, fmt.Errorf("encode root len: %w", err)
+ }
+ if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) {
+ return nil, fmt.Errorf("encode root: %w", err)
+ }
+ d := make([]byte, len(s.Depths))
+ for i := 0; i < len(s.Depths); i++ {
+ d[i] = byte(s.Depths[i])
+ }
+ if n, err := ee.Write(d); err != nil || n != len(s.Depths) {
+ return nil, fmt.Errorf("encode depths: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil {
+ return nil, fmt.Errorf("encode touchMap: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil {
+ return nil, fmt.Errorf("encode afterMap: %w", err)
+ }
+
+ var before1, before2 uint64
+ for i := 0; i < halfKeySize; i++ {
+ if s.BranchBefore[i] {
+ before1 |= 1 << i
+ }
+ }
+ for i, j := halfKeySize, 0; i < maxKeySize; i, j = i+1, j+1 {
+ if s.BranchBefore[i] {
+ before2 |= 1 << j
+ }
+ }
+ if err := binary.Write(ee, binary.BigEndian, before1); err != nil {
+ return nil, fmt.Errorf("encode branchBefore_1: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, before2); err != nil {
+ return nil, fmt.Errorf("encode branchBefore_2: %w", err)
+ }
+ return ee.Bytes(), nil
+}
+
+func (s *binState) Decode(buf []byte) error {
+ aux := bytes.NewBuffer(buf)
+ if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil {
+ return fmt.Errorf("currentKeyLen: %w", err)
+ }
+ var rootFlags stateRootFlag
+ if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil {
+ return fmt.Errorf("rootFlags: %w", err)
+ }
+
+ if rootFlags&stateRootPresent != 0 {
+ s.RootPresent = true
+ }
+ if rootFlags&stateRootTouched != 0 {
+ s.RootTouched = true
+ }
+ if rootFlags&stateRootChecked != 0 {
+ s.RootChecked = true
+ }
+ if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != maxKeySize {
+ return fmt.Errorf("currentKey: %w", err)
+ }
+ var rootSize uint16
+ if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil {
+ return fmt.Errorf("root size: %w", err)
+ }
+ s.Root = make([]byte, rootSize)
+ if _, err := aux.Read(s.Root); err != nil {
+ return fmt.Errorf("root: %w", err)
+ }
+ d := make([]byte, len(s.Depths))
+ if err := binary.Read(aux, binary.BigEndian, &d); err != nil {
+ return fmt.Errorf("depths: %w", err)
+ }
+ for i := 0; i < len(s.Depths); i++ {
+ s.Depths[i] = int(d[i])
+ }
+ if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil {
+ return fmt.Errorf("touchMap: %w", err)
+ }
+ if err := binary.Read(aux, binary.BigEndian, &s.AfterMap); err != nil {
+ return fmt.Errorf("afterMap: %w", err)
+ }
+ var branch1, branch2 uint64
+ if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil {
+ return fmt.Errorf("branchBefore1: %w", err)
+ }
+ if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil {
+ return fmt.Errorf("branchBefore2: %w", err)
+ }
+
+ // TODO invalid branch encode
+ for i := 0; i < halfKeySize; i++ {
+ if branch1&(1< %s\n", CompactedKeyToHex([]byte(key)), branchNodeUpdate.String())
+ }
+}
+
+func Test_BinPatriciaHashed_UniqueRepresentation(t *testing.T) {
+ t.Skip()
+
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("f5", 4).
+ Balance("ff", 900234).
+ Balance("04", 1233).
+ Storage("04", "01", "0401").
+ Balance("ba", 065606).
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Storage("03", "56", "050505").
+ Balance("05", 9).
+ Storage("03", "87", "060606").
+ Balance("b9", 6).
+ Nonce("ff", 169356).
+ Storage("05", "02", "8989").
+ Storage("f5", "04", "9898").
+ Build()
+
+ trieOne := NewBinPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ trieTwo := NewBinPatriciaHashed(1, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ trieOne.SetTrace(true)
+ trieTwo.SetTrace(true)
+
+ // single sequential update
+ roots := make([][]byte, 0)
+ // branchNodeUpdatesOne := make(map[string]BranchData)
+ fmt.Printf("1. Trie sequential update generated following branch updates\n")
+ for i := 0; i < len(updates); i++ {
+ if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil {
+ t.Fatal(err)
+ }
+
+ sequentialRoot, branchNodeUpdates, err := trieOne.ReviewKeys(plainKeys[i:i+1], hashedKeys[i:i+1])
+ require.NoError(t, err)
+ roots = append(roots, sequentialRoot)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ renderUpdates(branchNodeUpdates)
+ }
+
+ err := ms2.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ fmt.Printf("\n2. Trie batch update generated following branch updates\n")
+ // batch update
+ batchRoot, branchNodeUpdatesTwo, err := trieTwo.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ renderUpdates(branchNodeUpdatesTwo)
+
+ fmt.Printf("\n sequential roots:\n")
+ for i, rh := range roots {
+ fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh))
+ }
+
+ ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo)
+
+ require.EqualValues(t, batchRoot, roots[len(roots)-1],
+ "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot))
+ require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes")
+}
+func Test_BinPatriciaHashed_EmptyState(t *testing.T) {
+ ms := NewMockState(t)
+ hph := NewBinPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Balance("04", 8).
+ Storage("04", "01", "0401").
+ Storage("03", "56", "050505").
+ Storage("03", "57", "060606").
+ Balance("05", 9).
+ Storage("05", "02", "8989").
+ Storage("05", "04", "9898").
+ Build()
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ firstRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ t.Logf("root hash %x\n", firstRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ fmt.Printf("1. Generated updates\n")
+ renderUpdates(branchNodeUpdates)
+
+ // More updates
+ hph.Reset()
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().
+ Storage("03", "58", "050505").
+ Build()
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ secondRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEqualValues(t, firstRootHash, secondRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Printf("2. Generated single update\n")
+ renderUpdates(branchNodeUpdates)
+
+ // More updates
+ hph.Reset()
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().
+ Storage("03", "58", "070807").
+ Build()
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ thirdRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEqualValues(t, secondRootHash, thirdRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Printf("3. Generated single update\n")
+ renderUpdates(branchNodeUpdates)
+}
+
+func Test_BinPatriciaHashed_EmptyUpdateState(t *testing.T) {
+ ms := NewMockState(t)
+ hph := NewBinPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("00", 4).
+ Nonce("00", 246462653).
+ Balance("01", 5).
+ CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870").
+ Delete("00").
+ Storage("04", "01", "0401").
+ Storage("03", "56", "050505").
+ Build()
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ hashBeforeEmptyUpdate, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEmpty(t, hashBeforeEmptyUpdate)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ fmt.Println("1. Updates applied")
+ renderUpdates(branchNodeUpdates)
+
+ // generate empty updates and do NOT reset tree
+ hph.SetTrace(true)
+
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().Build()
+
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ hashAfterEmptyUpdate, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Println("2. Empty updates applied without state reset")
+
+ require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate)
+}
diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go
new file mode 100644
index 00000000000..a51cfcb59ce
--- /dev/null
+++ b/erigon-lib/commitment/commitment.go
@@ -0,0 +1,586 @@
+package commitment
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "math/bits"
+ "strings"
+
+ "golang.org/x/crypto/sha3"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+// Trie represents commitment variant.
+type Trie interface {
+ // RootHash produces root hash of the trie
+ RootHash() (hash []byte, err error)
+
+ // Variant returns commitment trie variant
+ Variant() TrieVariant
+
+ // Reset Drops everything from the trie
+ Reset()
+
+ ReviewKeys(pk, hk [][]byte) (rootHash []byte, branchNodeUpdates map[string]BranchData, err error)
+
+ ProcessUpdates(pk, hk [][]byte, updates []Update) (rootHash []byte, branchNodeUpdates map[string]BranchData, err error)
+
+ ResetFns(
+ branchFn func(prefix []byte) ([]byte, error),
+ accountFn func(plainKey []byte, cell *Cell) error,
+ storageFn func(plainKey []byte, cell *Cell) error,
+ )
+
+ // Makes trie more verbose
+ SetTrace(bool)
+}
+
+type TrieVariant string
+
+const (
+ // VariantHexPatriciaTrie used as default commitment approach
+ VariantHexPatriciaTrie TrieVariant = "hex-patricia-hashed"
+ // VariantBinPatriciaTrie - Experimental mode with binary key representation
+ VariantBinPatriciaTrie TrieVariant = "bin-patricia-hashed"
+)
+
+func InitializeTrie(tv TrieVariant) Trie {
+ switch tv {
+ case VariantBinPatriciaTrie:
+ return NewBinPatriciaHashed(length.Addr, nil, nil, nil)
+ case VariantHexPatriciaTrie:
+ fallthrough
+ default:
+ return NewHexPatriciaHashed(length.Addr, nil, nil, nil)
+ }
+}
+
+type PartFlags uint8
+
+const (
+ HashedKeyPart PartFlags = 1
+ AccountPlainPart PartFlags = 2
+ StoragePlainPart PartFlags = 4
+ HashPart PartFlags = 8
+)
+
+type BranchData []byte
+
+func (branchData BranchData) String() string {
+ touchMap := binary.BigEndian.Uint16(branchData[0:])
+ afterMap := binary.BigEndian.Uint16(branchData[2:])
+ pos := 4
+ var sb strings.Builder
+ var cell Cell
+ fmt.Fprintf(&sb, "touchMap %016b, afterMap %016b\n", touchMap, afterMap)
+ for bitset, j := touchMap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ fmt.Fprintf(&sb, " %x => ", nibble)
+ if afterMap&bit == 0 {
+ sb.WriteString("{DELETED}\n")
+ } else {
+ fieldBits := PartFlags(branchData[pos])
+ pos++
+ var err error
+ if pos, err = cell.fillFromFields(branchData, pos, fieldBits); err != nil {
+ // This is used for test output, so ok to panic
+ panic(err)
+ }
+ sb.WriteString("{")
+ var comma string
+ if cell.downHashedLen > 0 {
+ fmt.Fprintf(&sb, "hashedKey=[%x]", cell.downHashedKey[:cell.downHashedLen])
+ comma = ","
+ }
+ if cell.apl > 0 {
+ fmt.Fprintf(&sb, "%saccountPlainKey=[%x]", comma, cell.apk[:cell.apl])
+ comma = ","
+ }
+ if cell.spl > 0 {
+ fmt.Fprintf(&sb, "%sstoragePlainKey=[%x]", comma, cell.spk[:cell.spl])
+ comma = ","
+ }
+ if cell.hl > 0 {
+ fmt.Fprintf(&sb, "%shash=[%x]", comma, cell.h[:cell.hl])
+ }
+ sb.WriteString("}\n")
+ }
+ bitset ^= bit
+ }
+ return sb.String()
+}
+
+func EncodeBranch(bitmap, touchMap, afterMap uint16, retriveCell func(nibble int, skip bool) (*Cell, error)) (branchData BranchData, lastNibble int, err error) {
+ branchData = make(BranchData, 0, 32)
+ var bitmapBuf [binary.MaxVarintLen64]byte
+
+ binary.BigEndian.PutUint16(bitmapBuf[0:], touchMap)
+ binary.BigEndian.PutUint16(bitmapBuf[2:], afterMap)
+
+ branchData = append(branchData, bitmapBuf[:4]...)
+
+ for bitset, j := afterMap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ for i := lastNibble; i < nibble; i++ {
+ if _, err := retriveCell(i, true /* skip */); err != nil {
+ return nil, 0, err
+ } // only writes 0x80 into hasher
+ }
+ lastNibble = nibble + 1
+
+ cell, err := retriveCell(nibble, false)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if bitmap&bit != 0 {
+ var fieldBits PartFlags
+ if cell.extLen > 0 && cell.spl == 0 {
+ fieldBits |= HashedKeyPart
+ }
+ if cell.apl > 0 {
+ fieldBits |= AccountPlainPart
+ }
+ if cell.spl > 0 {
+ fieldBits |= StoragePlainPart
+ }
+ if cell.hl > 0 {
+ fieldBits |= HashPart
+ }
+ branchData = append(branchData, byte(fieldBits))
+ if cell.extLen > 0 && cell.spl == 0 {
+ n := binary.PutUvarint(bitmapBuf[:], uint64(cell.extLen))
+ branchData = append(branchData, bitmapBuf[:n]...)
+ branchData = append(branchData, cell.extension[:cell.extLen]...)
+ }
+ if cell.apl > 0 {
+ n := binary.PutUvarint(bitmapBuf[:], uint64(cell.apl))
+ branchData = append(branchData, bitmapBuf[:n]...)
+ branchData = append(branchData, cell.apk[:cell.apl]...)
+ }
+ if cell.spl > 0 {
+ n := binary.PutUvarint(bitmapBuf[:], uint64(cell.spl))
+ branchData = append(branchData, bitmapBuf[:n]...)
+ branchData = append(branchData, cell.spk[:cell.spl]...)
+ }
+ if cell.hl > 0 {
+ n := binary.PutUvarint(bitmapBuf[:], uint64(cell.hl))
+ branchData = append(branchData, bitmapBuf[:n]...)
+ branchData = append(branchData, cell.h[:cell.hl]...)
+ }
+ }
+ bitset ^= bit
+ }
+ return branchData, lastNibble, nil
+}
+
+// ExtractPlainKeys parses branchData and extract the plain keys for accounts and storage in the same order
+// they appear witjin the branchData
+func (branchData BranchData) ExtractPlainKeys() (accountPlainKeys [][]byte, storagePlainKeys [][]byte, err error) {
+ touchMap := binary.BigEndian.Uint16(branchData[0:])
+ afterMap := binary.BigEndian.Uint16(branchData[2:])
+ pos := 4
+ for bitset, j := touchMap&afterMap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ fieldBits := PartFlags(branchData[pos])
+ pos++
+ if fieldBits&HashedKeyPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for hashedKey len")
+ } else if n < 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys value overflow for hashedKey len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for hashedKey")
+ }
+ if l > 0 {
+ pos += int(l)
+ }
+ }
+ if fieldBits&AccountPlainPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for accountPlainKey len")
+ } else if n < 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys value overflow for accountPlainKey len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for accountPlainKey")
+ }
+ accountPlainKeys = append(accountPlainKeys, branchData[pos:pos+int(l)])
+ if l > 0 {
+ pos += int(l)
+ }
+ }
+ if fieldBits&StoragePlainPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for storagePlainKey len")
+ } else if n < 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys value overflow for storagePlainKey len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for storagePlainKey")
+ }
+ storagePlainKeys = append(storagePlainKeys, branchData[pos:pos+int(l)])
+ if l > 0 {
+ pos += int(l)
+ }
+ }
+ if fieldBits&HashPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for hash len")
+ } else if n < 0 {
+ return nil, nil, fmt.Errorf("extractPlainKeys value overflow for hash len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, nil, fmt.Errorf("extractPlainKeys buffer too small for hash")
+ }
+ if l > 0 {
+ pos += int(l)
+ }
+ }
+ bitset ^= bit
+ }
+ return
+}
+
+func (branchData BranchData) ReplacePlainKeys(accountPlainKeys [][]byte, storagePlainKeys [][]byte, newData []byte) (BranchData, error) {
+ var numBuf [binary.MaxVarintLen64]byte
+ touchMap := binary.BigEndian.Uint16(branchData[0:])
+ afterMap := binary.BigEndian.Uint16(branchData[2:])
+ pos := 4
+ newData = append(newData, branchData[:4]...)
+ var accountI, storageI int
+ for bitset, j := touchMap&afterMap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ fieldBits := PartFlags(branchData[pos])
+ newData = append(newData, byte(fieldBits))
+ pos++
+ if fieldBits&HashedKeyPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey len")
+ } else if n < 0 {
+ return nil, fmt.Errorf("replacePlainKeys value overflow for hashedKey len")
+ }
+ newData = append(newData, branchData[pos:pos+n]...)
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey")
+ }
+ if l > 0 {
+ newData = append(newData, branchData[pos:pos+int(l)]...)
+ pos += int(l)
+ }
+ }
+ if fieldBits&AccountPlainPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey len")
+ } else if n < 0 {
+ return nil, fmt.Errorf("replacePlainKeys value overflow for accountPlainKey len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey")
+ }
+ if l > 0 {
+ pos += int(l)
+ }
+ n = binary.PutUvarint(numBuf[:], uint64(len(accountPlainKeys[accountI])))
+ newData = append(newData, numBuf[:n]...)
+ newData = append(newData, accountPlainKeys[accountI]...)
+ accountI++
+ }
+ if fieldBits&StoragePlainPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey len")
+ } else if n < 0 {
+ return nil, fmt.Errorf("replacePlainKeys value overflow for storagePlainKey len")
+ }
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey")
+ }
+ if l > 0 {
+ pos += int(l)
+ }
+ n = binary.PutUvarint(numBuf[:], uint64(len(storagePlainKeys[storageI])))
+ newData = append(newData, numBuf[:n]...)
+ newData = append(newData, storagePlainKeys[storageI]...)
+ storageI++
+ }
+ if fieldBits&HashPart != 0 {
+ l, n := binary.Uvarint(branchData[pos:])
+ if n == 0 {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for hash len")
+ } else if n < 0 {
+ return nil, fmt.Errorf("replacePlainKeys value overflow for hash len")
+ }
+ newData = append(newData, branchData[pos:pos+n]...)
+ pos += n
+ if len(branchData) < pos+int(l) {
+ return nil, fmt.Errorf("replacePlainKeys buffer too small for hash")
+ }
+ if l > 0 {
+ newData = append(newData, branchData[pos:pos+int(l)]...)
+ pos += int(l)
+ }
+ }
+ bitset ^= bit
+ }
+ return newData, nil
+}
+
+// IsComplete determines whether given branch data is complete, meaning that all information about all the children is present
+// Each of 16 children of a branch node have two attributes
+// touch - whether this child has been modified or deleted in this branchData (corresponding bit in touchMap is set)
+// after - whether after this branchData application, the child is present in the tree or not (corresponding bit in afterMap is set)
+func (branchData BranchData) IsComplete() bool {
+ touchMap := binary.BigEndian.Uint16(branchData[0:])
+ afterMap := binary.BigEndian.Uint16(branchData[2:])
+ return ^touchMap&afterMap == 0
+}
+
+// MergeHexBranches combines two branchData, number 2 coming after (and potentially shadowing) number 1
+func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData []byte) (BranchData, error) {
+ if branchData2 == nil {
+ return branchData, nil
+ }
+ if branchData == nil {
+ return branchData2, nil
+ }
+
+ touchMap1 := binary.BigEndian.Uint16(branchData[0:])
+ afterMap1 := binary.BigEndian.Uint16(branchData[2:])
+ bitmap1 := touchMap1 & afterMap1
+ pos1 := 4
+ touchMap2 := binary.BigEndian.Uint16(branchData2[0:])
+ afterMap2 := binary.BigEndian.Uint16(branchData2[2:])
+ bitmap2 := touchMap2 & afterMap2
+ pos2 := 4
+ var bitmapBuf [4]byte
+ binary.BigEndian.PutUint16(bitmapBuf[0:], touchMap1|touchMap2)
+ binary.BigEndian.PutUint16(bitmapBuf[2:], afterMap2)
+ newData = append(newData, bitmapBuf[:]...)
+ for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ if bitmap2&bit != 0 {
+ // Add fields from branchData2
+ fieldBits := PartFlags(branchData2[pos2])
+ newData = append(newData, byte(fieldBits))
+ pos2++
+ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ {
+ l, n := binary.Uvarint(branchData2[pos2:])
+ if n == 0 {
+ return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field")
+ } else if n < 0 {
+ return nil, fmt.Errorf("MergeHexBranches value2 overflow for field")
+ }
+ newData = append(newData, branchData2[pos2:pos2+n]...)
+ pos2 += n
+ if len(branchData2) < pos2+int(l) {
+ return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field")
+ }
+ if l > 0 {
+ newData = append(newData, branchData2[pos2:pos2+int(l)]...)
+ pos2 += int(l)
+ }
+ }
+ }
+ if bitmap1&bit != 0 {
+ add := (touchMap2&bit == 0) && (afterMap2&bit != 0) // Add fields from branchData1
+ fieldBits := PartFlags(branchData[pos1])
+ if add {
+ newData = append(newData, byte(fieldBits))
+ }
+ pos1++
+ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ {
+ l, n := binary.Uvarint(branchData[pos1:])
+ if n == 0 {
+ return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field")
+ } else if n < 0 {
+ return nil, fmt.Errorf("MergeHexBranches value1 overflow for field")
+ }
+ if add {
+ newData = append(newData, branchData[pos1:pos1+n]...)
+ }
+ pos1 += n
+ if len(branchData) < pos1+int(l) {
+ return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field")
+ }
+ if l > 0 {
+ if add {
+ newData = append(newData, branchData[pos1:pos1+int(l)]...)
+ }
+ pos1 += int(l)
+ }
+ }
+ }
+ bitset ^= bit
+ }
+ return newData, nil
+}
+
+func (branchData BranchData) DecodeCells() (touchMap, afterMap uint16, row [16]*Cell, err error) {
+ touchMap = binary.BigEndian.Uint16(branchData[0:])
+ afterMap = binary.BigEndian.Uint16(branchData[2:])
+ pos := 4
+ for bitset, j := touchMap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ if afterMap&bit != 0 {
+ fieldBits := PartFlags(branchData[pos])
+ pos++
+ row[nibble] = new(Cell)
+ if pos, err = row[nibble].fillFromFields(branchData, pos, fieldBits); err != nil {
+ err = fmt.Errorf("faield to fill cell at nibble %x: %w", nibble, err)
+ return
+ }
+ }
+ bitset ^= bit
+ }
+ return
+}
+
+type BranchMerger struct {
+ buf *bytes.Buffer
+ num [4]byte
+ keccak hash.Hash
+}
+
+func NewHexBranchMerger(capacity uint64) *BranchMerger {
+ return &BranchMerger{buf: bytes.NewBuffer(make([]byte, capacity)), keccak: sha3.NewLegacyKeccak256()}
+}
+
+// MergeHexBranches combines two branchData, number 2 coming after (and potentially shadowing) number 1
+func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData, error) {
+ if branch2 == nil {
+ return branch1, nil
+ }
+ if branch1 == nil {
+ return branch2, nil
+ }
+
+ touchMap1 := binary.BigEndian.Uint16(branch1[0:])
+ afterMap1 := binary.BigEndian.Uint16(branch1[2:])
+ bitmap1 := touchMap1 & afterMap1
+ pos1 := 4
+
+ touchMap2 := binary.BigEndian.Uint16(branch2[0:])
+ afterMap2 := binary.BigEndian.Uint16(branch2[2:])
+ bitmap2 := touchMap2 & afterMap2
+ pos2 := 4
+
+ binary.BigEndian.PutUint16(m.num[0:], touchMap1|touchMap2)
+ binary.BigEndian.PutUint16(m.num[2:], afterMap2)
+ dataPos := 4
+
+ m.buf.Reset()
+ if _, err := m.buf.Write(m.num[:]); err != nil {
+ return nil, err
+ }
+
+ for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ if bitmap2&bit != 0 {
+ // Add fields from branch2
+ fieldBits := PartFlags(branch2[pos2])
+ if err := m.buf.WriteByte(byte(fieldBits)); err != nil {
+ return nil, err
+ }
+ pos2++
+
+ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ {
+ l, n := binary.Uvarint(branch2[pos2:])
+ if n == 0 {
+ return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected node info size")
+ } else if n < 0 {
+ return nil, fmt.Errorf("MergeHexBranches branch2: size overflow for length")
+ }
+
+ _, err := m.buf.Write(branch2[pos2 : pos2+n])
+ if err != nil {
+ return nil, err
+ }
+ pos2 += n
+ dataPos += n
+ if len(branch2) < pos2+int(l) {
+ return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected at least %d got %d bytes", pos2+int(l), len(branch2))
+ }
+ if l > 0 {
+ if _, err := m.buf.Write(branch2[pos2 : pos2+int(l)]); err != nil {
+ return nil, err
+ }
+ pos2 += int(l)
+ dataPos += int(l)
+ }
+ }
+ }
+ if bitmap1&bit != 0 {
+ add := (touchMap2&bit == 0) && (afterMap2&bit != 0) // Add fields from branchData1
+ fieldBits := PartFlags(branch1[pos1])
+ if add {
+ if err := m.buf.WriteByte(byte(fieldBits)); err != nil {
+ return nil, err
+ }
+ }
+ pos1++
+ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ {
+ l, n := binary.Uvarint(branch1[pos1:])
+ if n == 0 {
+ return nil, fmt.Errorf("MergeHexBranches branch1 is too small: expected node info size")
+ } else if n < 0 {
+ return nil, fmt.Errorf("MergeHexBranches branch1: size overflow for length")
+ }
+ if add {
+ if _, err := m.buf.Write(branch1[pos1 : pos1+n]); err != nil {
+ return nil, err
+ }
+ }
+ pos1 += n
+ if len(branch1) < pos1+int(l) {
+ return nil, fmt.Errorf("MergeHexBranches branch1 is too small: expected at least %d got %d bytes", pos1+int(l), len(branch1))
+ }
+ if l > 0 {
+ if add {
+ if _, err := m.buf.Write(branch1[pos1 : pos1+int(l)]); err != nil {
+ return nil, err
+ }
+ }
+ pos1 += int(l)
+ }
+ }
+ }
+ bitset ^= bit
+ }
+ target := make([]byte, m.buf.Len())
+ copy(target, m.buf.Bytes())
+ return target, nil
+}
+
+func ParseTrieVariant(s string) TrieVariant {
+ var trieVariant TrieVariant
+ switch s {
+ case "bin":
+ trieVariant = VariantBinPatriciaTrie
+ case "hex":
+ fallthrough
+ default:
+ trieVariant = VariantHexPatriciaTrie
+ }
+ return trieVariant
+}
diff --git a/erigon-lib/commitment/commitment_test.go b/erigon-lib/commitment/commitment_test.go
new file mode 100644
index 00000000000..848385412b7
--- /dev/null
+++ b/erigon-lib/commitment/commitment_test.go
@@ -0,0 +1,201 @@
+package commitment
+
+import (
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func generateCellRow(t *testing.T, size int) (row []*Cell, bitmap uint16) {
+ t.Helper()
+
+ row = make([]*Cell, size)
+ var bm uint16
+ for i := 0; i < len(row); i++ {
+ row[i] = new(Cell)
+ row[i].hl = 32
+ n, err := rand.Read(row[i].h[:])
+ require.NoError(t, err)
+ require.EqualValues(t, row[i].hl, n)
+
+ th := rand.Intn(120)
+ switch {
+ case th > 70:
+ n, err = rand.Read(row[i].apk[:])
+ require.NoError(t, err)
+ row[i].apl = n
+ case th > 20 && th <= 70:
+ n, err = rand.Read(row[i].spk[:])
+ require.NoError(t, err)
+ row[i].spl = n
+ case th <= 20:
+ n, err = rand.Read(row[i].extension[:th])
+ row[i].extLen = n
+ require.NoError(t, err)
+ require.EqualValues(t, th, n)
+ }
+ bm |= uint16(1 << i)
+ }
+ return row, bm
+}
+
+func TestBranchData_MergeHexBranches2(t *testing.T) {
+ row, bm := generateCellRow(t, 16)
+
+ enc, _, err := EncodeBranch(bm, bm, bm, func(i int, skip bool) (*Cell, error) {
+ return row[i], nil
+ })
+
+ require.NoError(t, err)
+ require.NotEmpty(t, enc)
+ t.Logf("enc [%d] %x\n", len(enc), enc)
+
+ bmg := NewHexBranchMerger(8192)
+ res, err := bmg.Merge(enc, enc)
+ require.NoError(t, err)
+ require.EqualValues(t, enc, res)
+
+ tm, am, origins, err := res.DecodeCells()
+ require.NoError(t, err)
+ require.EqualValues(t, tm, am)
+ require.EqualValues(t, bm, am)
+
+ i := 0
+ for _, c := range origins {
+ if c == nil {
+ continue
+ }
+ require.EqualValues(t, row[i].extLen, c.extLen)
+ require.EqualValues(t, row[i].extension, c.extension)
+ require.EqualValues(t, row[i].apl, c.apl)
+ require.EqualValues(t, row[i].apk, c.apk)
+ require.EqualValues(t, row[i].spl, c.spl)
+ require.EqualValues(t, row[i].spk, c.spk)
+ i++
+ }
+}
+
+func TestBranchData_MergeHexBranches3(t *testing.T) {
+ encs := "0405040b04080f0b080d030204050b0502090805050d01060e060d070f0903090c04070a0d0a000e090b060b0c040c0700020e0b0c060b0106020c0607050a0b0209070d06040808"
+ enc, err := hex.DecodeString(encs)
+ require.NoError(t, err)
+
+ //tm, am, origins, err := BranchData(enc).DecodeCells()
+ require.NoError(t, err)
+ t.Logf("%s", BranchData(enc).String())
+ //require.EqualValues(t, tm, am)
+ //_, _ = tm, am
+}
+
+// helper to decode row of cells from string
+func Test_UTIL_UnfoldBranchDataFromString(t *testing.T) {
+ t.Skip()
+
+ //encs := "0405040b04080f0b080d030204050b0502090805050d01060e060d070f0903090c04070a0d0a000e090b060b0c040c0700020e0b0c060b0106020c0607050a0b0209070d06040808"
+ encs := "37ad10eb75ea0fc1c363db0dda0cd2250426ee2c72787155101ca0e50804349a94b649deadcc5cddc0d2fd9fb358c2edc4e7912d165f88877b1e48c69efacf418e923124506fbb2fd64823fd41cbc10427c423"
+ enc, err := hex.DecodeString(encs)
+ require.NoError(t, err)
+
+ bfn := func(pref []byte) ([]byte, error) {
+ return enc, nil
+ }
+ sfn := func(pref []byte, c *Cell) error {
+ return nil
+ }
+
+ hph := NewHexPatriciaHashed(20, bfn, nil, sfn)
+ hph.unfoldBranchNode(1, false, 0)
+ tm, am, origins, err := BranchData(enc).DecodeCells()
+ require.NoError(t, err)
+ t.Logf("%s", BranchData(enc).String())
+ //require.EqualValues(t, tm, am)
+ _, _ = tm, am
+
+ i := 0
+ for _, c := range origins {
+ if c == nil {
+ continue
+ }
+ fmt.Printf("i %d, c %#+v\n", i, c)
+ i++
+ }
+}
+
+func TestBranchData_ExtractPlainKeys(t *testing.T) {
+ row, bm := generateCellRow(t, 16)
+
+ cg := func(nibble int, skip bool) (*Cell, error) {
+ return row[nibble], nil
+ }
+
+ enc, _, err := EncodeBranch(bm, bm, bm, cg)
+ require.NoError(t, err)
+
+ extAPK, extSPK, err := enc.ExtractPlainKeys()
+ require.NoError(t, err)
+
+ for i, c := range row {
+ if c == nil {
+ continue
+ }
+ switch {
+ case c.apl != 0:
+ require.Containsf(t, extAPK, c.apk[:], "at pos %d expected %x..", i, c.apk[:8])
+ case c.spl != 0:
+ require.Containsf(t, extSPK, c.spk[:], "at pos %d expected %x..", i, c.spk[:8])
+ default:
+ continue
+ }
+ }
+}
+
+func TestBranchData_ReplacePlainKeys(t *testing.T) {
+ row, bm := generateCellRow(t, 16)
+
+ cg := func(nibble int, skip bool) (*Cell, error) {
+ return row[nibble], nil
+ }
+
+ enc, _, err := EncodeBranch(bm, bm, bm, cg)
+ require.NoError(t, err)
+
+ extAPK, extSPK, err := enc.ExtractPlainKeys()
+ require.NoError(t, err)
+
+ shortApk, shortSpk := make([][]byte, 0), make([][]byte, 0)
+ for i, c := range row {
+ if c == nil {
+ continue
+ }
+ switch {
+ case c.apl != 0:
+ shortApk = append(shortApk, c.apk[:8])
+ require.Containsf(t, extAPK, c.apk[:], "at pos %d expected %x..", i, c.apk[:8])
+ case c.spl != 0:
+ shortSpk = append(shortSpk, c.spk[:8])
+ require.Containsf(t, extSPK, c.spk[:], "at pos %d expected %x..", i, c.spk[:8])
+ default:
+ continue
+ }
+ }
+
+ target := make([]byte, 0, len(enc))
+ replaced, err := enc.ReplacePlainKeys(shortApk, shortSpk, target)
+ require.NoError(t, err)
+ require.Truef(t, len(replaced) < len(enc), "replaced expected to be shorter than original enc")
+
+ rextA, rextS, err := replaced.ExtractPlainKeys()
+ require.NoError(t, err)
+
+ for _, apk := range shortApk {
+ require.Containsf(t, rextA, apk, "expected %x to be in replaced account keys", apk)
+ }
+ for _, spk := range shortSpk {
+ require.Containsf(t, rextS, spk, "expected %x to be in replaced storage keys", spk)
+ }
+ require.True(t, len(shortApk) == len(rextA))
+ require.True(t, len(shortSpk) == len(rextS))
+}
diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go
new file mode 100644
index 00000000000..3ba53a41759
--- /dev/null
+++ b/erigon-lib/commitment/hex_patricia_hashed.go
@@ -0,0 +1,1982 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package commitment
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "math/bits"
+ "strings"
+
+ "github.com/holiman/uint256"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/crypto/sha3"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/rlp"
+)
+
+// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
+// Read to get a variable amount of data from the hash state. Read is faster than Sum
+// because it doesn't copy the internal state, but also modifies the internal state.
+type keccakState interface {
+ hash.Hash
+ Read([]byte) (int, error)
+}
+
+// HexPatriciaHashed implements commitment based on patricia merkle tree with radix 16,
+// with keys pre-hashed by keccak256
+type HexPatriciaHashed struct {
+ root Cell // Root cell of the tree
+ // How many rows (starting from row 0) are currently active and have corresponding selected columns
+ // Last active row does not have selected column
+ activeRows int
+ // Length of the key that reflects current positioning of the grid. It maybe larger than number of active rows,
+ // if an account leaf cell represents multiple nibbles in the key
+ currentKeyLen int
+ accountKeyLen int
+ // Rows of the grid correspond to the level of depth in the patricia tree
+ // Columns of the grid correspond to pointers to the nodes further from the root
+ grid [128][16]Cell // First 64 rows of this grid are for account trie, and next 64 rows are for storage trie
+ currentKey [128]byte // For each row indicates which column is currently selected
+ depths [128]int // For each row, the depth of cells in that row
+ branchBefore [128]bool // For each row, whether there was a branch node in the database loaded in unfold
+ touchMap [128]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted
+ afterMap [128]uint16 // For each row, bitmap of cells that were present after modification
+ keccak keccakState
+ keccak2 keccakState
+ rootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked
+ rootTouched bool
+ rootPresent bool
+ trace bool
+ // Function used to load branch node and fill up the cells
+ // For each cell, it sets the cell type, clears the modified flag, fills the hash,
+ // and for the extension, account, and leaf type, the `l` and `k`
+ branchFn func(prefix []byte) ([]byte, error)
+ // Function used to fetch account with given plain key
+ accountFn func(plainKey []byte, cell *Cell) error
+ // Function used to fetch storage with given plain key
+ storageFn func(plainKey []byte, cell *Cell) error
+
+ hashAuxBuffer [128]byte // buffer to compute cell hash or write hash-related things
+ auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding
+}
+
+// represents state of the tree
+type state struct {
+ Root []byte // encoded root cell
+ Depths [128]int // For each row, the depth of cells in that row
+ TouchMap [128]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted
+ AfterMap [128]uint16 // For each row, bitmap of cells that were present after modification
+ BranchBefore [128]bool // For each row, whether there was a branch node in the database loaded in unfold
+ CurrentKey [128]byte // For each row indicates which column is currently selected
+ CurrentKeyLen int8
+ RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked
+ RootTouched bool
+ RootPresent bool
+}
+
+func NewHexPatriciaHashed(accountKeyLen int,
+ branchFn func(prefix []byte) ([]byte, error),
+ accountFn func(plainKey []byte, cell *Cell) error,
+ storageFn func(plainKey []byte, cell *Cell) error,
+) *HexPatriciaHashed {
+ return &HexPatriciaHashed{
+ keccak: sha3.NewLegacyKeccak256().(keccakState),
+ keccak2: sha3.NewLegacyKeccak256().(keccakState),
+ accountKeyLen: accountKeyLen,
+ branchFn: branchFn,
+ accountFn: accountFn,
+ storageFn: storageFn,
+ auxBuffer: bytes.NewBuffer(make([]byte, 8192)),
+ }
+}
+
+type Cell struct {
+ Balance uint256.Int
+ Nonce uint64
+ hl int // Length of the hash (or embedded)
+ StorageLen int
+ apl int // length of account plain key
+ spl int // length of the storage plain key
+ downHashedLen int
+ extLen int
+ downHashedKey [128]byte
+ extension [64]byte
+ spk [length.Addr + length.Hash]byte // storage plain key
+ h [length.Hash]byte // cell hash
+ CodeHash [length.Hash]byte // hash of the bytecode
+ Storage [length.Hash]byte
+ apk [length.Addr]byte // account plain key
+ Delete bool
+}
+
+var (
+ EmptyRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ EmptyCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+)
+
+func (cell *Cell) fillEmpty() {
+ cell.apl = 0
+ cell.spl = 0
+ cell.downHashedLen = 0
+ cell.extLen = 0
+ cell.hl = 0
+ cell.Nonce = 0
+ cell.Balance.Clear()
+ copy(cell.CodeHash[:], EmptyCodeHash)
+ cell.StorageLen = 0
+ cell.Delete = false
+}
+
+func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) {
+ if upCell.downHashedLen >= depthIncrement {
+ cell.downHashedLen = upCell.downHashedLen - depthIncrement
+ } else {
+ cell.downHashedLen = 0
+ }
+ if upCell.downHashedLen > depthIncrement {
+ copy(cell.downHashedKey[:], upCell.downHashedKey[depthIncrement:upCell.downHashedLen])
+ }
+ if upCell.extLen >= depthIncrement {
+ cell.extLen = upCell.extLen - depthIncrement
+ } else {
+ cell.extLen = 0
+ }
+ if upCell.extLen > depthIncrement {
+ copy(cell.extension[:], upCell.extension[depthIncrement:upCell.extLen])
+ }
+ if depth <= 64 {
+ cell.apl = upCell.apl
+ if upCell.apl > 0 {
+ copy(cell.apk[:], upCell.apk[:cell.apl])
+ cell.Balance.Set(&upCell.Balance)
+ cell.Nonce = upCell.Nonce
+ copy(cell.CodeHash[:], upCell.CodeHash[:])
+ cell.extLen = upCell.extLen
+ if upCell.extLen > 0 {
+ copy(cell.extension[:], upCell.extension[:upCell.extLen])
+ }
+ }
+ } else {
+ cell.apl = 0
+ }
+ cell.spl = upCell.spl
+ if upCell.spl > 0 {
+ copy(cell.spk[:], upCell.spk[:upCell.spl])
+ cell.StorageLen = upCell.StorageLen
+ if upCell.StorageLen > 0 {
+ copy(cell.Storage[:], upCell.Storage[:upCell.StorageLen])
+ }
+ }
+ cell.hl = upCell.hl
+ if upCell.hl > 0 {
+ copy(cell.h[:], upCell.h[:upCell.hl])
+ }
+}
+
+func (cell *Cell) fillFromLowerCell(lowCell *Cell, lowDepth int, preExtension []byte, nibble int) {
+ if lowCell.apl > 0 || lowDepth < 64 {
+ cell.apl = lowCell.apl
+ }
+ if lowCell.apl > 0 {
+ copy(cell.apk[:], lowCell.apk[:cell.apl])
+ cell.Balance.Set(&lowCell.Balance)
+ cell.Nonce = lowCell.Nonce
+ copy(cell.CodeHash[:], lowCell.CodeHash[:])
+ }
+ cell.spl = lowCell.spl
+ if lowCell.spl > 0 {
+ copy(cell.spk[:], lowCell.spk[:cell.spl])
+ cell.StorageLen = lowCell.StorageLen
+ if lowCell.StorageLen > 0 {
+ copy(cell.Storage[:], lowCell.Storage[:lowCell.StorageLen])
+ }
+ }
+ if lowCell.hl > 0 {
+ if (lowCell.apl == 0 && lowDepth < 64) || (lowCell.spl == 0 && lowDepth > 64) {
+ // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble
+ if len(preExtension) > 0 {
+ copy(cell.extension[:], preExtension)
+ }
+ cell.extension[len(preExtension)] = byte(nibble)
+ if lowCell.extLen > 0 {
+ copy(cell.extension[1+len(preExtension):], lowCell.extension[:lowCell.extLen])
+ }
+ cell.extLen = lowCell.extLen + 1 + len(preExtension)
+ } else {
+ // Extension is related to a storage branch node, so we copy it upwards as is
+ cell.extLen = lowCell.extLen
+ if lowCell.extLen > 0 {
+ copy(cell.extension[:], lowCell.extension[:lowCell.extLen])
+ }
+ }
+ }
+ cell.hl = lowCell.hl
+ if lowCell.hl > 0 {
+ copy(cell.h[:], lowCell.h[:lowCell.hl])
+ }
+}
+
+func hashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error {
+ keccak.Reset()
+ var hashBufBack [length.Hash]byte
+ hashBuf := hashBufBack[:]
+ if _, err := keccak.Write(plainKey); err != nil {
+ return err
+ }
+ if _, err := keccak.Read(hashBuf); err != nil {
+ return err
+ }
+ hashBuf = hashBuf[hashedKeyOffset/2:]
+ var k int
+ if hashedKeyOffset%2 == 1 {
+ dest[0] = hashBuf[0] & 0xf
+ k++
+ hashBuf = hashBuf[1:]
+ }
+ for _, c := range hashBuf {
+ dest[k] = (c >> 4) & 0xf
+ k++
+ dest[k] = c & 0xf
+ k++
+ }
+ return nil
+}
+
+func (cell *Cell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error {
+ extraLen := 0
+ if cell.apl > 0 {
+ if depth > 64 {
+ return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > 64")
+ }
+ extraLen = 64 - depth
+ }
+ if cell.spl > 0 {
+ if depth >= 64 {
+ extraLen = 128 - depth
+ } else {
+ extraLen += 64
+ }
+ }
+ if extraLen > 0 {
+ if cell.downHashedLen > 0 {
+ copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen])
+ }
+ cell.downHashedLen += extraLen
+ var hashedKeyOffset, downOffset int
+ if cell.apl > 0 {
+ if err := hashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil {
+ return err
+ }
+ downOffset = 64 - depth
+ }
+ if cell.spl > 0 {
+ if depth >= 64 {
+ hashedKeyOffset = depth - 64
+ }
+ if err := hashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) {
+ if fieldBits&HashedKeyPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data))
+ }
+ cell.downHashedLen = int(l)
+ cell.extLen = int(l)
+ if l > 0 {
+ copy(cell.downHashedKey[:], data[pos:pos+int(l)])
+ copy(cell.extension[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.downHashedLen = 0
+ cell.extLen = 0
+ }
+ if fieldBits&AccountPlainPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey")
+ }
+ cell.apl = int(l)
+ if l > 0 {
+ copy(cell.apk[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.apl = 0
+ }
+ if fieldBits&StoragePlainPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey")
+ }
+ cell.spl = int(l)
+ if l > 0 {
+ copy(cell.spk[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.spl = 0
+ }
+ if fieldBits&HashPart != 0 {
+ l, n := binary.Uvarint(data[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hash len")
+ } else if n < 0 {
+ return 0, fmt.Errorf("fillFromFields value overflow for hash len")
+ }
+ pos += n
+ if len(data) < pos+int(l) {
+ return 0, fmt.Errorf("fillFromFields buffer too small for hash")
+ }
+ cell.hl = int(l)
+ if l > 0 {
+ copy(cell.h[:], data[pos:pos+int(l)])
+ pos += int(l)
+ }
+ } else {
+ cell.hl = 0
+ }
+ return pos, nil
+}
+
+func (cell *Cell) setStorage(value []byte) {
+ cell.StorageLen = len(value)
+ if len(value) > 0 {
+ copy(cell.Storage[:], value)
+ }
+}
+
+func (cell *Cell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) {
+ copy(cell.CodeHash[:], codeHash)
+
+ cell.Balance.SetBytes(balance.Bytes())
+ cell.Nonce = nonce
+}
+
+func (cell *Cell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int {
+ balanceBytes := 0
+ if !cell.Balance.LtUint64(128) {
+ balanceBytes = cell.Balance.ByteLen()
+ }
+
+ var nonceBytes int
+ if cell.Nonce < 128 && cell.Nonce != 0 {
+ nonceBytes = 0
+ } else {
+ nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce))
+ }
+
+ var structLength = uint(balanceBytes + nonceBytes + 2)
+ structLength += 66 // Two 32-byte arrays + 2 prefixes
+
+ var pos int
+ if structLength < 56 {
+ buffer[0] = byte(192 + structLength)
+ pos = 1
+ } else {
+ lengthBytes := common.BitLenToByteLen(bits.Len(structLength))
+ buffer[0] = byte(247 + lengthBytes)
+
+ for i := lengthBytes; i > 0; i-- {
+ buffer[i] = byte(structLength)
+ structLength >>= 8
+ }
+
+ pos = lengthBytes + 1
+ }
+
+ // Encoding nonce
+ if cell.Nonce < 128 && cell.Nonce != 0 {
+ buffer[pos] = byte(cell.Nonce)
+ } else {
+ buffer[pos] = byte(128 + nonceBytes)
+ var nonce = cell.Nonce
+ for i := nonceBytes; i > 0; i-- {
+ buffer[pos+i] = byte(nonce)
+ nonce >>= 8
+ }
+ }
+ pos += 1 + nonceBytes
+
+ // Encoding balance
+ if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() {
+ buffer[pos] = byte(cell.Balance.Uint64())
+ pos++
+ } else {
+ buffer[pos] = byte(128 + balanceBytes)
+ pos++
+ cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes])
+ pos += balanceBytes
+ }
+
+ // Encoding Root and CodeHash
+ buffer[pos] = 128 + 32
+ pos++
+ copy(buffer[pos:], storageRootHash[:])
+ pos += 32
+ buffer[pos] = 128 + 32
+ pos++
+ copy(buffer[pos:], cell.CodeHash[:])
+ pos += 32
+ return pos
+}
+
+func (hph *HexPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) {
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ embedded := !singleton && totalLen+pt < length.Hash
+ var writer io.Writer
+ if embedded {
+ //hph.byteArrayWriter.Setup(buf)
+ hph.auxBuffer.Reset()
+ writer = hph.auxBuffer
+ } else {
+ hph.keccak.Reset()
+ writer = hph.keccak
+ }
+ if _, err := writer.Write(lenPrefix[:pt]); err != nil {
+ return nil, err
+ }
+ if _, err := writer.Write(keyPrefix[:kp]); err != nil {
+ return nil, err
+ }
+ var b [1]byte
+ b[0] = compact0
+ if _, err := writer.Write(b[:]); err != nil {
+ return nil, err
+ }
+ for i := 1; i < compactLen; i++ {
+ b[0] = key[ni]*16 + key[ni+1]
+ if _, err := writer.Write(b[:]); err != nil {
+ return nil, err
+ }
+ ni += 2
+ }
+ var prefixBuf [8]byte
+ if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil {
+ return nil, err
+ }
+ if embedded {
+ buf = hph.auxBuffer.Bytes()
+ } else {
+ var hashBuf [33]byte
+ hashBuf[0] = 0x80 + length.Hash
+ if _, err := hph.keccak.Read(hashBuf[1:]); err != nil {
+ return nil, err
+ }
+ buf = append(buf, hashBuf[:]...)
+ }
+ return buf, nil
+}
+
+func (hph *HexPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) {
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 0x20
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = 0x80 + byte(compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ return hph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton)
+}
+
+func (hph *HexPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) {
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 48 + key[0] // Odd (1<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 32
+ }
+ } else {
+ compactLen = len(key)/2 + 1
+ if len(key)&1 == 1 {
+ compact0 = 16 + key[0] // Odd (1<<4) + first nibble
+ ni = 1
+ }
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = byte(128 + compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ return hph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true)
+}
+
+func (hph *HexPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) {
+ var hashBuf [length.Hash]byte
+
+ // Compute the total length of binary representation
+ var kp, kl int
+ // Write key
+ var compactLen int
+ var ni int
+ var compact0 byte
+ if hasTerm(key) {
+ compactLen = (len(key)-1)/2 + 1
+ if len(key)&1 == 0 {
+ compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble
+ ni = 1
+ } else {
+ compact0 = 0x20
+ }
+ } else {
+ compactLen = len(key)/2 + 1
+ if len(key)&1 == 1 {
+ compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble
+ ni = 1
+ }
+ }
+ var keyPrefix [1]byte
+ if compactLen > 1 {
+ keyPrefix[0] = 0x80 + byte(compactLen)
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ totalLen := kp + kl + 33
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ hph.keccak.Reset()
+ if _, err := hph.keccak.Write(lenPrefix[:pt]); err != nil {
+ return hashBuf, err
+ }
+ if _, err := hph.keccak.Write(keyPrefix[:kp]); err != nil {
+ return hashBuf, err
+ }
+ var b [1]byte
+ b[0] = compact0
+ if _, err := hph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ for i := 1; i < compactLen; i++ {
+ b[0] = key[ni]*16 + key[ni+1]
+ if _, err := hph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ ni += 2
+ }
+ b[0] = 0x80 + length.Hash
+ if _, err := hph.keccak.Write(b[:]); err != nil {
+ return hashBuf, err
+ }
+ if _, err := hph.keccak.Write(hash); err != nil {
+ return hashBuf, err
+ }
+ // Replace previous hash with the new one
+ if _, err := hph.keccak.Read(hashBuf[:]); err != nil {
+ return hashBuf, err
+ }
+ return hashBuf, nil
+}
+
+func (hph *HexPatriciaHashed) computeCellHashLen(cell *Cell, depth int) int {
+ if cell.spl > 0 && depth >= 64 {
+ keyLen := 128 - depth + 1 // Length of hex key with terminator character
+ var kp, kl int
+ compactLen := (keyLen-1)/2 + 1
+ if compactLen > 1 {
+ kp = 1
+ kl = compactLen
+ } else {
+ kl = 1
+ }
+ val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen])
+ totalLen := kp + kl + val.DoubleRLPLen()
+ var lenPrefix [4]byte
+ pt := rlp.GenerateStructLen(lenPrefix[:], totalLen)
+ if totalLen+pt < length.Hash {
+ return totalLen + pt
+ }
+ }
+ return length.Hash + 1
+}
+
+func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) ([]byte, error) {
+ var err error
+ var storageRootHash [length.Hash]byte
+ storageRootHashIsSet := false
+ if cell.spl > 0 {
+ var hashedKeyOffset int
+ if depth >= 64 {
+ hashedKeyOffset = depth - 64
+ }
+ singleton := depth <= 64
+ if err := hashKey(hph.keccak, cell.spk[hph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil {
+ return nil, err
+ }
+ cell.downHashedKey[64-hashedKeyOffset] = 16 // Add terminator
+ if singleton {
+ if hph.trace {
+ fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:64-hashedKeyOffset+1], cell.Storage[:cell.StorageLen])
+ }
+ aux := make([]byte, 0, 33)
+ if aux, err = hph.leafHashWithKeyVal(aux, cell.downHashedKey[:64-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil {
+ return nil, err
+ }
+ storageRootHash = *(*[length.Hash]byte)(aux[1:])
+ storageRootHashIsSet = true
+ } else {
+ if hph.trace {
+ fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:64-hashedKeyOffset+1], cell.Storage[:cell.StorageLen])
+ }
+ return hph.leafHashWithKeyVal(buf, cell.downHashedKey[:64-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false)
+ }
+ }
+ if cell.apl > 0 {
+ if err := hashKey(hph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil {
+ return nil, err
+ }
+ cell.downHashedKey[64-depth] = 16 // Add terminator
+ if !storageRootHashIsSet {
+ if cell.extLen > 0 {
+ // Extension
+ if cell.hl > 0 {
+ if hph.trace {
+ fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl])
+ }
+ if storageRootHash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("computeCellHash extension without hash")
+ }
+ } else if cell.hl > 0 {
+ storageRootHash = cell.h
+ } else {
+ storageRootHash = *(*[length.Hash]byte)(EmptyRootHash)
+ }
+ }
+ var valBuf [128]byte
+ valLen := cell.accountForHashing(valBuf[:], storageRootHash)
+ if hph.trace {
+ fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", hph.hashAuxBuffer[:65-depth], valBuf[:valLen])
+ }
+ return hph.accountLeafHashWithKey(buf, cell.downHashedKey[:65-depth], rlp.RlpEncodedBytes(valBuf[:valLen]))
+ }
+ buf = append(buf, 0x80+32)
+ if cell.extLen > 0 {
+ // Extension
+ if cell.hl > 0 {
+ if hph.trace {
+ fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl])
+ }
+ var hash [length.Hash]byte
+ if hash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil {
+ return nil, err
+ }
+ buf = append(buf, hash[:]...)
+ } else {
+ return nil, fmt.Errorf("computeCellHash extension without hash")
+ }
+ } else if cell.hl > 0 {
+ buf = append(buf, cell.h[:cell.hl]...)
+ } else {
+ buf = append(buf, EmptyRootHash...)
+ }
+ return buf, nil
+}
+
+func (hph *HexPatriciaHashed) needUnfolding(hashedKey []byte) int {
+ var cell *Cell
+ var depth int
+ if hph.activeRows == 0 {
+ if hph.trace {
+ fmt.Printf("needUnfolding root, rootChecked = %t\n", hph.rootChecked)
+ }
+ if hph.rootChecked && hph.root.downHashedLen == 0 && hph.root.hl == 0 {
+ // Previously checked, empty root, no unfolding needed
+ return 0
+ }
+ cell = &hph.root
+ if cell.downHashedLen == 0 && cell.hl == 0 && !hph.rootChecked {
+ // Need to attempt to unfold the root
+ return 1
+ }
+ } else {
+ col := int(hashedKey[hph.currentKeyLen])
+ cell = &hph.grid[hph.activeRows-1][col]
+ depth = hph.depths[hph.activeRows-1]
+ if hph.trace {
+ fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", hph.activeRows-1, col, hph.currentKey[:hph.currentKeyLen], depth, cell.h[:cell.hl])
+ }
+ }
+ if len(hashedKey) <= depth {
+ return 0
+ }
+ if cell.downHashedLen == 0 {
+ if cell.hl == 0 {
+ // cell is empty, no need to unfold further
+ return 0
+ }
+ // unfold branch node
+ return 1
+ }
+ cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1])
+ if hph.trace {
+ fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:])
+ }
+ unfolding := cpl + 1
+ if depth < 64 && depth+unfolding > 64 {
+ // This is to make sure that unfolding always breaks at the level where storage subtrees start
+ unfolding = 64 - depth
+ if hph.trace {
+ fmt.Printf("adjusted unfolding=%d\n", unfolding)
+ }
+ }
+ return unfolding
+}
+
+// unfoldBranchNode returns true if unfolding has been done
+func (hph *HexPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) {
+ branchData, err := hph.branchFn(hexToCompact(hph.currentKey[:hph.currentKeyLen]))
+ if err != nil {
+ return false, err
+ }
+ if !hph.rootChecked && hph.currentKeyLen == 0 && len(branchData) == 0 {
+ // Special case - empty or deleted root
+ hph.rootChecked = true
+ return false, nil
+ }
+ if len(branchData) == 0 {
+ log.Warn("got empty branch data during unfold", "key", hex.EncodeToString(hexToCompact(hph.currentKey[:hph.currentKeyLen])), "row", row, "depth", depth, "deleted", deleted)
+ }
+ hph.branchBefore[row] = true
+ bitmap := binary.BigEndian.Uint16(branchData[0:])
+ pos := 2
+ if deleted {
+ // All cells come as deleted (touched but not present after)
+ hph.afterMap[row] = 0
+ hph.touchMap[row] = bitmap
+ } else {
+ hph.afterMap[row] = bitmap
+ hph.touchMap[row] = 0
+ }
+ //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, hph.afterMap[row], hph.touchMap[row])
+ // Loop iterating over the set bits of modMask
+ for bitset, j := bitmap, 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ cell := &hph.grid[row][nibble]
+ fieldBits := branchData[pos]
+ pos++
+ var err error
+ if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil {
+ return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", hph.currentKey[:hph.currentKeyLen], branchData, err)
+ }
+ if hph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen])
+ }
+ if cell.apl > 0 {
+ hph.accountFn(cell.apk[:cell.apl], cell)
+ if hph.trace {
+ fmt.Printf("accountFn[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:])
+ }
+ }
+ if cell.spl > 0 {
+ hph.storageFn(cell.spk[:cell.spl], cell)
+ }
+ if err = cell.deriveHashedKeys(depth, hph.keccak, hph.accountKeyLen); err != nil {
+ return false, err
+ }
+ bitset ^= bit
+ }
+ return true, nil
+}
+
+func (hph *HexPatriciaHashed) unfold(hashedKey []byte, unfolding int) error {
+ if hph.trace {
+ fmt.Printf("unfold %d: activeRows: %d\n", unfolding, hph.activeRows)
+ }
+ var upCell *Cell
+ var touched, present bool
+ var col byte
+ var upDepth, depth int
+ if hph.activeRows == 0 {
+ if hph.rootChecked && hph.root.hl == 0 && hph.root.downHashedLen == 0 {
+ // No unfolding for empty root
+ return nil
+ }
+ upCell = &hph.root
+ touched = hph.rootTouched
+ present = hph.rootPresent
+ if hph.trace {
+ fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col)
+ }
+ } else {
+ upDepth = hph.depths[hph.activeRows-1]
+ col = hashedKey[upDepth-1]
+ upCell = &hph.grid[hph.activeRows-1][col]
+ touched = hph.touchMap[hph.activeRows-1]&(uint16(1)<= unfolding {
+ depth = upDepth + unfolding
+ nibble := upCell.downHashedKey[unfolding-1]
+ if touched {
+ hph.touchMap[row] = uint16(1) << nibble
+ }
+ if present {
+ hph.afterMap[row] = uint16(1) << nibble
+ }
+ cell := &hph.grid[row][nibble]
+ cell.fillFromUpperCell(upCell, depth, unfolding)
+ if hph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+ }
+ if row >= 64 {
+ cell.apl = 0
+ }
+ if unfolding > 1 {
+ copy(hph.currentKey[hph.currentKeyLen:], upCell.downHashedKey[:unfolding-1])
+ }
+ hph.currentKeyLen += unfolding - 1
+ } else {
+ // upCell.downHashedLen < unfolding
+ depth = upDepth + upCell.downHashedLen
+ nibble := upCell.downHashedKey[upCell.downHashedLen-1]
+ if touched {
+ hph.touchMap[row] = uint16(1) << nibble
+ }
+ if present {
+ hph.afterMap[row] = uint16(1) << nibble
+ }
+ cell := &hph.grid[row][nibble]
+ cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen)
+ if hph.trace {
+ fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+ }
+ if row >= 64 {
+ cell.apl = 0
+ }
+ if upCell.downHashedLen > 1 {
+ copy(hph.currentKey[hph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1])
+ }
+ hph.currentKeyLen += upCell.downHashedLen - 1
+ }
+ hph.depths[hph.activeRows] = depth
+ hph.activeRows++
+ return nil
+}
+
+func (hph *HexPatriciaHashed) needFolding(hashedKey []byte) bool {
+ return !bytes.HasPrefix(hashedKey, hph.currentKey[:hph.currentKeyLen])
+}
+
+// The purpose of fold is to reduce hph.currentKey[:hph.currentKeyLen]. It should be invoked
+// until that current key becomes a prefix of hashedKey that we will proccess next
+// (in other words until the needFolding function returns 0)
+func (hph *HexPatriciaHashed) fold() (branchData BranchData, updateKey []byte, err error) {
+ updateKeyLen := hph.currentKeyLen
+ if hph.activeRows == 0 {
+ return nil, nil, fmt.Errorf("cannot fold - no active rows")
+ }
+ if hph.trace {
+ fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", hph.activeRows, hph.currentKey[:hph.currentKeyLen], hph.touchMap[hph.activeRows-1], hph.afterMap[hph.activeRows-1])
+ }
+ // Move information to the row above
+ row := hph.activeRows - 1
+ var upCell *Cell
+ var col int
+ var upDepth int
+ if hph.activeRows == 1 {
+ if hph.trace {
+ fmt.Printf("upcell is root\n")
+ }
+ upCell = &hph.root
+ } else {
+ upDepth = hph.depths[hph.activeRows-2]
+ col = int(hph.currentKey[upDepth-1])
+ if hph.trace {
+ fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth)
+ }
+ upCell = &hph.grid[row-1][col]
+ }
+
+ depth := hph.depths[hph.activeRows-1]
+ updateKey = hexToCompact(hph.currentKey[:updateKeyLen])
+ partsCount := bits.OnesCount16(hph.afterMap[row])
+
+ if hph.trace {
+ fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, hph.touchMap[row], row, hph.afterMap[row])
+ }
+ switch partsCount {
+ case 0:
+ // Everything deleted
+ if hph.touchMap[row] != 0 {
+ if row == 0 {
+ // Root is deleted because the tree is empty
+ hph.rootTouched = true
+ hph.rootPresent = false
+ } else if upDepth == 64 {
+ // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage
+ // Therefore we are not propagating deletion upwards, but turn it into a modification
+ hph.touchMap[row-1] |= (uint16(1) << col)
+ } else {
+ // Deletion is propagated upwards
+ hph.touchMap[row-1] |= (uint16(1) << col)
+ hph.afterMap[row-1] &^= (uint16(1) << col)
+ }
+ }
+ upCell.hl = 0
+ upCell.apl = 0
+ upCell.spl = 0
+ upCell.extLen = 0
+ upCell.downHashedLen = 0
+ if hph.branchBefore[row] {
+ branchData, _, err = EncodeBranch(0, hph.touchMap[row], 0, func(nibble int, skip bool) (*Cell, error) { return nil, nil })
+ if err != nil {
+ return nil, updateKey, fmt.Errorf("failed to encode leaf node update: %w", err)
+ }
+ }
+ hph.activeRows--
+ if upDepth > 0 {
+ hph.currentKeyLen = upDepth - 1
+ } else {
+ hph.currentKeyLen = 0
+ }
+ case 1:
+ // Leaf or extension node
+ if hph.touchMap[row] != 0 {
+ // any modifications
+ if row == 0 {
+ hph.rootTouched = true
+ } else {
+ // Modifiction is propagated upwards
+ hph.touchMap[row-1] |= (uint16(1) << col)
+ }
+ }
+ nibble := bits.TrailingZeros16(hph.afterMap[row])
+ cell := &hph.grid[row][nibble]
+ upCell.extLen = 0
+ upCell.fillFromLowerCell(cell, depth, hph.currentKey[upDepth:hph.currentKeyLen], nibble)
+ // Delete if it existed
+ if hph.branchBefore[row] {
+ //branchData, _, err = hph.EncodeBranchDirectAccess(0, row, depth)
+ branchData, _, err = EncodeBranch(0, hph.touchMap[row], 0, func(nibble int, skip bool) (*Cell, error) { return nil, nil })
+ if err != nil {
+ return nil, updateKey, fmt.Errorf("failed to encode leaf node update: %w", err)
+ }
+ }
+ hph.activeRows--
+ if upDepth > 0 {
+ hph.currentKeyLen = upDepth - 1
+ } else {
+ hph.currentKeyLen = 0
+ }
+ default:
+ // Branch node
+ if hph.touchMap[row] != 0 {
+ // any modifications
+ if row == 0 {
+ hph.rootTouched = true
+ } else {
+ // Modifiction is propagated upwards
+ hph.touchMap[row-1] |= (uint16(1) << col)
+ }
+ }
+ bitmap := hph.touchMap[row] & hph.afterMap[row]
+ if !hph.branchBefore[row] {
+ // There was no branch node before, so we need to touch even the singular child that existed
+ hph.touchMap[row] |= hph.afterMap[row]
+ bitmap |= hph.afterMap[row]
+ }
+ // Calculate total length of all hashes
+ totalBranchLen := 17 - partsCount // For every empty cell, one byte
+ for bitset, j := hph.afterMap[row], 0; bitset != 0; j++ {
+ bit := bitset & -bitset
+ nibble := bits.TrailingZeros16(bit)
+ cell := &hph.grid[row][nibble]
+ totalBranchLen += hph.computeCellHashLen(cell, depth)
+ bitset ^= bit
+ }
+
+ hph.keccak2.Reset()
+ pt := rlp.GenerateStructLen(hph.hashAuxBuffer[:], totalBranchLen)
+ if _, err := hph.keccak2.Write(hph.hashAuxBuffer[:pt]); err != nil {
+ return nil, nil, err
+ }
+
+ b := [...]byte{0x80}
+ cellGetter := func(nibble int, skip bool) (*Cell, error) {
+ if skip {
+ if _, err := hph.keccak2.Write(b[:]); err != nil {
+ return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err)
+ }
+ if hph.trace {
+ fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble)
+ }
+ return nil, nil
+ }
+ cell := &hph.grid[row][nibble]
+ cellHash, err := hph.computeCellHash(cell, depth, hph.hashAuxBuffer[:0])
+ if err != nil {
+ return nil, err
+ }
+ if hph.trace {
+ fmt.Printf("%x: computeCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash)
+ }
+ if _, err := hph.keccak2.Write(cellHash); err != nil {
+ return nil, err
+ }
+
+ return cell, nil
+ }
+
+ var lastNibble int
+ var err error
+ _ = cellGetter
+
+ //branchData, lastNibble, err = hph.EncodeBranchDirectAccess(bitmap, row, depth, branchData)
+ branchData, lastNibble, err = EncodeBranch(bitmap, hph.touchMap[row], hph.afterMap[row], cellGetter)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to encode branch update: %w", err)
+ }
+ for i := lastNibble; i < 17; i++ {
+ if _, err := hph.keccak2.Write(b[:]); err != nil {
+ return nil, nil, err
+ }
+ if hph.trace {
+ fmt.Printf("%x: empty(%d,%x)\n", i, row, i)
+ }
+ }
+ upCell.extLen = depth - upDepth - 1
+ upCell.downHashedLen = upCell.extLen
+ if upCell.extLen > 0 {
+ copy(upCell.extension[:], hph.currentKey[upDepth:hph.currentKeyLen])
+ copy(upCell.downHashedKey[:], hph.currentKey[upDepth:hph.currentKeyLen])
+ }
+ if depth < 64 {
+ upCell.apl = 0
+ }
+ upCell.spl = 0
+ upCell.hl = 32
+ if _, err := hph.keccak2.Read(upCell.h[:]); err != nil {
+ return nil, nil, err
+ }
+ if hph.trace {
+ fmt.Printf("} [%x]\n", upCell.h[:])
+ }
+ hph.activeRows--
+ if upDepth > 0 {
+ hph.currentKeyLen = upDepth - 1
+ } else {
+ hph.currentKeyLen = 0
+ }
+ }
+ if branchData != nil {
+ if hph.trace {
+ fmt.Printf("fold: update key: %x, branchData: [%x]\n", CompactedKeyToHex(updateKey), branchData)
+ }
+ }
+ return branchData, updateKey, nil
+}
+
+func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) {
+ if hph.trace {
+ fmt.Printf("deleteCell, activeRows = %d\n", hph.activeRows)
+ }
+ var cell *Cell
+ if hph.activeRows == 0 {
+ // Remove the root
+ cell = &hph.root
+ hph.rootTouched = true
+ hph.rootPresent = false
+ } else {
+ row := hph.activeRows - 1
+ if hph.depths[row] < len(hashedKey) {
+ if hph.trace {
+ fmt.Printf("deleteCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", hph.depths[row], len(hashedKey))
+ }
+ return
+ }
+ col := int(hashedKey[hph.currentKeyLen])
+ cell = &hph.grid[row][col]
+ if hph.afterMap[row]&(uint16(1)< 0; unfolding = hph.needUnfolding(hashedKey) {
+ if err := hph.unfold(hashedKey, unfolding); err != nil {
+ return nil, nil, fmt.Errorf("unfold: %w", err)
+ }
+ }
+
+ // Update the cell
+ stagedCell.fillEmpty()
+ if len(plainKey) == hph.accountKeyLen {
+ if err := hph.accountFn(plainKey, stagedCell); err != nil {
+ return nil, nil, fmt.Errorf("accountFn for key %x failed: %w", plainKey, err)
+ }
+ if !stagedCell.Delete {
+ cell := hph.updateCell(plainKey, hashedKey)
+ cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce)
+
+ if hph.trace {
+ fmt.Printf("accountFn reading key %x => balance=%v nonce=%v codeHash=%x\n", cell.apk, cell.Balance.Uint64(), cell.Nonce, cell.CodeHash)
+ }
+ }
+ } else {
+ if err = hph.storageFn(plainKey, stagedCell); err != nil {
+ return nil, nil, fmt.Errorf("storageFn for key %x failed: %w", plainKey, err)
+ }
+ if !stagedCell.Delete {
+ hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen])
+ if hph.trace {
+ fmt.Printf("storageFn reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen])
+ }
+ }
+ }
+
+ if stagedCell.Delete {
+ if hph.trace {
+ fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey)
+ }
+ hph.deleteCell(hashedKey)
+ }
+ }
+ // Folding everything up to the root
+ for hph.activeRows > 0 {
+ if branchData, updateKey, err := hph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("final fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+
+ rootHash, err = hph.RootHash()
+ if err != nil {
+ return nil, branchNodeUpdates, fmt.Errorf("root hash evaluation failed: %w", err)
+ }
+ return rootHash, branchNodeUpdates, nil
+}
+
+func (hph *HexPatriciaHashed) SetTrace(trace bool) { hph.trace = trace }
+
+func (hph *HexPatriciaHashed) Variant() TrieVariant { return VariantHexPatriciaTrie }
+
+// Reset allows HexPatriciaHashed instance to be reused for the new commitment calculation
+func (hph *HexPatriciaHashed) Reset() {
+ hph.rootChecked = false
+ hph.root.hl = 0
+ hph.root.downHashedLen = 0
+ hph.root.apl = 0
+ hph.root.spl = 0
+ hph.root.extLen = 0
+ copy(hph.root.CodeHash[:], EmptyCodeHash)
+ hph.root.StorageLen = 0
+ hph.root.Balance.Clear()
+ hph.root.Nonce = 0
+ hph.rootTouched = false
+ hph.rootPresent = true
+}
+
+func (hph *HexPatriciaHashed) ResetFns(
+ branchFn func(prefix []byte) ([]byte, error),
+ accountFn func(plainKey []byte, cell *Cell) error,
+ storageFn func(plainKey []byte, cell *Cell) error,
+) {
+ hph.branchFn = branchFn
+ hph.accountFn = accountFn
+ hph.storageFn = storageFn
+}
+
+type stateRootFlag int8
+
+var (
+ stateRootPresent stateRootFlag = 1
+ stateRootChecked stateRootFlag = 2
+ stateRootTouched stateRootFlag = 4
+)
+
+func (s *state) Encode(buf []byte) ([]byte, error) {
+ var rootFlags stateRootFlag
+ if s.RootPresent {
+ rootFlags |= stateRootPresent
+ }
+ if s.RootChecked {
+ rootFlags |= stateRootChecked
+ }
+ if s.RootTouched {
+ rootFlags |= stateRootTouched
+ }
+
+ ee := bytes.NewBuffer(buf)
+ if err := binary.Write(ee, binary.BigEndian, s.CurrentKeyLen); err != nil {
+ return nil, fmt.Errorf("encode currentKeyLen: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil {
+ return nil, fmt.Errorf("encode rootFlags: %w", err)
+ }
+ if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) {
+ return nil, fmt.Errorf("encode currentKey: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil {
+ return nil, fmt.Errorf("encode root len: %w", err)
+ }
+ if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) {
+ return nil, fmt.Errorf("encode root: %w", err)
+ }
+ d := make([]byte, len(s.Depths))
+ for i := 0; i < len(s.Depths); i++ {
+ d[i] = byte(s.Depths[i])
+ }
+ if n, err := ee.Write(d); err != nil || n != len(s.Depths) {
+ return nil, fmt.Errorf("encode depths: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil {
+ return nil, fmt.Errorf("encode touchMap: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil {
+ return nil, fmt.Errorf("encode afterMap: %w", err)
+ }
+
+ var before1, before2 uint64
+ for i := 0; i < 64; i++ {
+ if s.BranchBefore[i] {
+ before1 |= 1 << i
+ }
+ }
+ for i, j := 64, 0; i < 128; i, j = i+1, j+1 {
+ if s.BranchBefore[i] {
+ before2 |= 1 << j
+ }
+ }
+ if err := binary.Write(ee, binary.BigEndian, before1); err != nil {
+ return nil, fmt.Errorf("encode branchBefore_1: %w", err)
+ }
+ if err := binary.Write(ee, binary.BigEndian, before2); err != nil {
+ return nil, fmt.Errorf("encode branchBefore_2: %w", err)
+ }
+ return ee.Bytes(), nil
+}
+
+func (s *state) Decode(buf []byte) error {
+ aux := bytes.NewBuffer(buf)
+ if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil {
+ return fmt.Errorf("currentKeyLen: %w", err)
+ }
+ var rootFlags stateRootFlag
+ if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil {
+ return fmt.Errorf("rootFlags: %w", err)
+ }
+
+ if rootFlags&stateRootPresent != 0 {
+ s.RootPresent = true
+ }
+ if rootFlags&stateRootTouched != 0 {
+ s.RootTouched = true
+ }
+ if rootFlags&stateRootChecked != 0 {
+ s.RootChecked = true
+ }
+ if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != 128 {
+ return fmt.Errorf("currentKey: %w", err)
+ }
+ var rootSize uint16
+ if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil {
+ return fmt.Errorf("root size: %w", err)
+ }
+ s.Root = make([]byte, rootSize)
+ if _, err := aux.Read(s.Root); err != nil {
+ return fmt.Errorf("root: %w", err)
+ }
+ d := make([]byte, len(s.Depths))
+ if err := binary.Read(aux, binary.BigEndian, &d); err != nil {
+ return fmt.Errorf("depths: %w", err)
+ }
+ for i := 0; i < len(s.Depths); i++ {
+ s.Depths[i] = int(d[i])
+ }
+ if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil {
+ return fmt.Errorf("touchMap: %w", err)
+ }
+ if err := binary.Read(aux, binary.BigEndian, &s.AfterMap); err != nil {
+ return fmt.Errorf("afterMap: %w", err)
+ }
+ var branch1, branch2 uint64
+ if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil {
+ return fmt.Errorf("branchBefore1: %w", err)
+ }
+ if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil {
+ return fmt.Errorf("branchBefore2: %w", err)
+ }
+
+ for i := 0; i < 64; i++ {
+ if branch1&(1< 0
+ buf := make([]byte, bufLen)
+ buf[0] = zeroByte
+ return decodeKey(key[keyPos:], buf)
+}
+
+func makeCompactZeroByte(key []byte) (compactZeroByte byte, keyPos, keyLen int) {
+ keyLen = len(key)
+ if hasTerm(key) {
+ keyLen--
+ compactZeroByte = 0x20
+ }
+ var firstNibble byte
+ if len(key) > 0 {
+ firstNibble = key[0]
+ }
+ if keyLen&1 == 1 {
+ compactZeroByte |= 0x10 | firstNibble // Odd: (1<<4) + first nibble
+ keyPos++
+ }
+
+ return
+}
+
+func decodeKey(key, buf []byte) []byte {
+ keyLen := len(key)
+ if hasTerm(key) {
+ keyLen--
+ }
+ for keyIndex, bufIndex := 0, 1; keyIndex < keyLen; keyIndex, bufIndex = keyIndex+2, bufIndex+1 {
+ if keyIndex == keyLen-1 {
+ buf[bufIndex] = buf[bufIndex] & 0x0f
+ } else {
+ buf[bufIndex] = key[keyIndex+1]
+ }
+ buf[bufIndex] |= key[keyIndex] << 4
+ }
+ return buf
+}
+
+func CompactedKeyToHex(compact []byte) []byte {
+ if len(compact) == 0 {
+ return compact
+ }
+ base := keybytesToHexNibbles(compact)
+ // delete terminator flag
+ if base[0] < 2 {
+ base = base[:len(base)-1]
+ }
+ // apply odd flag
+ chop := 2 - base[0]&1
+ return base[chop:]
+}
+
+func keybytesToHexNibbles(str []byte) []byte {
+ l := len(str)*2 + 1
+ var nibbles = make([]byte, l)
+ for i, b := range str {
+ nibbles[i*2] = b / 16
+ nibbles[i*2+1] = b % 16
+ }
+ nibbles[l-1] = 16
+ return nibbles
+}
+
+// hasTerm returns whether a hex key has the terminator flag.
+func hasTerm(s []byte) bool {
+ return len(s) > 0 && s[len(s)-1] == 16
+}
+
+func commonPrefixLen(b1, b2 []byte) int {
+ var i int
+ for i = 0; i < len(b1) && i < len(b2); i++ {
+ if b1[i] != b2[i] {
+ break
+ }
+ }
+ return i
+}
+
+func (hph *HexPatriciaHashed) ProcessUpdates(plainKeys, hashedKeys [][]byte, updates []Update) (rootHash []byte, branchNodeUpdates map[string]BranchData, err error) {
+ branchNodeUpdates = make(map[string]BranchData)
+
+ for i, plainKey := range plainKeys {
+ hashedKey := hashedKeys[i]
+ if hph.trace {
+ fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", plainKey, hashedKey, hph.currentKey[:hph.currentKeyLen])
+ }
+ // Keep folding until the currentKey is the prefix of the key we modify
+ for hph.needFolding(hashedKey) {
+ if branchData, updateKey, err := hph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+ // Now unfold until we step on an empty cell
+ for unfolding := hph.needUnfolding(hashedKey); unfolding > 0; unfolding = hph.needUnfolding(hashedKey) {
+ if err := hph.unfold(hashedKey, unfolding); err != nil {
+ return nil, nil, fmt.Errorf("unfold: %w", err)
+ }
+ }
+
+ update := updates[i]
+ // Update the cell
+ if update.Flags == DeleteUpdate {
+ hph.deleteCell(hashedKey)
+ if hph.trace {
+ fmt.Printf("key %x deleted\n", plainKey)
+ }
+ } else {
+ cell := hph.updateCell(plainKey, hashedKey)
+ if hph.trace {
+ fmt.Printf("accountFn updated key %x =>", plainKey)
+ }
+ if update.Flags&BalanceUpdate != 0 {
+ if hph.trace {
+ fmt.Printf(" balance=%d", update.Balance.Uint64())
+ }
+ cell.Balance.Set(&update.Balance)
+ }
+ if update.Flags&NonceUpdate != 0 {
+ if hph.trace {
+ fmt.Printf(" nonce=%d", update.Nonce)
+ }
+ cell.Nonce = update.Nonce
+ }
+ if update.Flags&CodeUpdate != 0 {
+ if hph.trace {
+ fmt.Printf(" codeHash=%x", update.CodeHashOrStorage)
+ }
+ copy(cell.CodeHash[:], update.CodeHashOrStorage[:])
+ }
+ if hph.trace {
+ fmt.Printf("\n")
+ }
+ if update.Flags&StorageUpdate != 0 {
+ cell.setStorage(update.CodeHashOrStorage[:update.ValLength])
+ if hph.trace {
+ fmt.Printf("\rstorageFn filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength])
+ }
+ }
+ }
+ }
+ // Folding everything up to the root
+ for hph.activeRows > 0 {
+ if branchData, updateKey, err := hph.fold(); err != nil {
+ return nil, nil, fmt.Errorf("final fold: %w", err)
+ } else if branchData != nil {
+ branchNodeUpdates[string(updateKey)] = branchData
+ }
+ }
+
+ rootHash, err = hph.RootHash()
+ if err != nil {
+ return nil, branchNodeUpdates, fmt.Errorf("root hash evaluation failed: %w", err)
+ }
+ return rootHash, branchNodeUpdates, nil
+}
+
+// nolint
+// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits)
+func (hph *HexPatriciaHashed) hashAndNibblizeKey(key []byte) []byte {
+ hashedKey := make([]byte, length.Hash)
+
+ hph.keccak.Reset()
+ hph.keccak.Write(key[:length.Addr])
+ copy(hashedKey[:length.Hash], hph.keccak.Sum(nil))
+
+ if len(key[length.Addr:]) > 0 {
+ hashedKey = append(hashedKey, make([]byte, length.Hash)...)
+ hph.keccak.Reset()
+ hph.keccak.Write(key[length.Addr:])
+ copy(hashedKey[length.Hash:], hph.keccak.Sum(nil))
+ }
+
+ nibblized := make([]byte, len(hashedKey)*2)
+ for i, b := range hashedKey {
+ nibblized[i*2] = (b >> 4) & 0xf
+ nibblized[i*2+1] = b & 0xf
+ }
+ return nibblized
+}
+
+type UpdateFlags uint8
+
+const (
+ CodeUpdate UpdateFlags = 1
+ DeleteUpdate UpdateFlags = 2
+ BalanceUpdate UpdateFlags = 4
+ NonceUpdate UpdateFlags = 8
+ StorageUpdate UpdateFlags = 16
+)
+
+func (uf UpdateFlags) String() string {
+ var sb strings.Builder
+ if uf == DeleteUpdate {
+ sb.WriteString("Delete")
+ } else {
+ if uf&BalanceUpdate != 0 {
+ sb.WriteString("+Balance")
+ }
+ if uf&NonceUpdate != 0 {
+ sb.WriteString("+Nonce")
+ }
+ if uf&CodeUpdate != 0 {
+ sb.WriteString("+Code")
+ }
+ if uf&StorageUpdate != 0 {
+ sb.WriteString("+Storage")
+ }
+ }
+ return sb.String()
+}
+
+type Update struct {
+ Flags UpdateFlags
+ Balance uint256.Int
+ Nonce uint64
+ CodeHashOrStorage [length.Hash]byte
+ ValLength int
+}
+
+func (u *Update) DecodeForStorage(enc []byte) {
+ u.Nonce = 0
+ u.Balance.Clear()
+ copy(u.CodeHashOrStorage[:], EmptyCodeHash)
+
+ pos := 0
+ nonceBytes := int(enc[pos])
+ pos++
+ if nonceBytes > 0 {
+ u.Nonce = bytesToUint64(enc[pos : pos+nonceBytes])
+ pos += nonceBytes
+ }
+ balanceBytes := int(enc[pos])
+ pos++
+ if balanceBytes > 0 {
+ u.Balance.SetBytes(enc[pos : pos+balanceBytes])
+ pos += balanceBytes
+ }
+ codeHashBytes := int(enc[pos])
+ pos++
+ if codeHashBytes > 0 {
+ copy(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes])
+ }
+}
+
+func (u *Update) Encode(buf []byte, numBuf []byte) []byte {
+ buf = append(buf, byte(u.Flags))
+ if u.Flags&BalanceUpdate != 0 {
+ buf = append(buf, byte(u.Balance.ByteLen()))
+ buf = append(buf, u.Balance.Bytes()...)
+ }
+ if u.Flags&NonceUpdate != 0 {
+ n := binary.PutUvarint(numBuf, u.Nonce)
+ buf = append(buf, numBuf[:n]...)
+ }
+ if u.Flags&CodeUpdate != 0 {
+ buf = append(buf, u.CodeHashOrStorage[:]...)
+ }
+ if u.Flags&StorageUpdate != 0 {
+ n := binary.PutUvarint(numBuf, uint64(u.ValLength))
+ buf = append(buf, numBuf[:n]...)
+ if u.ValLength > 0 {
+ buf = append(buf, u.CodeHashOrStorage[:u.ValLength]...)
+ }
+ }
+ return buf
+}
+
+func (u *Update) Decode(buf []byte, pos int) (int, error) {
+ if len(buf) < pos+1 {
+ return 0, fmt.Errorf("decode Update: buffer too small for flags")
+ }
+ u.Flags = UpdateFlags(buf[pos])
+ pos++
+ if u.Flags&BalanceUpdate != 0 {
+ if len(buf) < pos+1 {
+ return 0, fmt.Errorf("decode Update: buffer too small for balance len")
+ }
+ balanceLen := int(buf[pos])
+ pos++
+ if len(buf) < pos+balanceLen {
+ return 0, fmt.Errorf("decode Update: buffer too small for balance")
+ }
+ u.Balance.SetBytes(buf[pos : pos+balanceLen])
+ pos += balanceLen
+ }
+ if u.Flags&NonceUpdate != 0 {
+ var n int
+ u.Nonce, n = binary.Uvarint(buf[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("decode Update: buffer too small for nonce")
+ }
+ if n < 0 {
+ return 0, fmt.Errorf("decode Update: nonce overflow")
+ }
+ pos += n
+ }
+ if u.Flags&CodeUpdate != 0 {
+ if len(buf) < pos+32 {
+ return 0, fmt.Errorf("decode Update: buffer too small for codeHash")
+ }
+ copy(u.CodeHashOrStorage[:], buf[pos:pos+32])
+ pos += 32
+ }
+ if u.Flags&StorageUpdate != 0 {
+ l, n := binary.Uvarint(buf[pos:])
+ if n == 0 {
+ return 0, fmt.Errorf("decode Update: buffer too small for storage len")
+ }
+ if n < 0 {
+ return 0, fmt.Errorf("decode Update: storage lee overflow")
+ }
+ pos += n
+ if len(buf) < pos+int(l) {
+ return 0, fmt.Errorf("decode Update: buffer too small for storage")
+ }
+ u.ValLength = int(l)
+ copy(u.CodeHashOrStorage[:], buf[pos:pos+int(l)])
+ pos += int(l)
+ }
+ return pos, nil
+}
+
+func (u *Update) String() string {
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("Flags: [%s]", u.Flags))
+ if u.Flags&BalanceUpdate != 0 {
+ sb.WriteString(fmt.Sprintf(", Balance: [%d]", &u.Balance))
+ }
+ if u.Flags&NonceUpdate != 0 {
+ sb.WriteString(fmt.Sprintf(", Nonce: [%d]", u.Nonce))
+ }
+ if u.Flags&CodeUpdate != 0 {
+ sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHashOrStorage))
+ }
+ if u.Flags&StorageUpdate != 0 {
+ sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.CodeHashOrStorage[:u.ValLength]))
+ }
+ return sb.String()
+}
diff --git a/erigon-lib/commitment/hex_patricia_hashed_bench_test.go b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go
new file mode 100644
index 00000000000..a44d4e7c865
--- /dev/null
+++ b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go
@@ -0,0 +1,42 @@
+package commitment
+
+import (
+ "encoding/hex"
+ "math/rand"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+func Benchmark_HexPatriciaHahsed_ReviewKeys(b *testing.B) {
+ ms := NewMockState(&testing.T{})
+ hph := NewHexPatriciaHashed(length.Addr, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(false)
+
+ builder := NewUpdateBuilder()
+
+ rnd := rand.New(rand.NewSource(133777))
+ keysCount := rnd.Int31n(10_000_0)
+
+ // generate updates
+ for i := int32(0); i < keysCount; i++ {
+ key := make([]byte, length.Addr)
+
+ for j := 0; j < len(key); j++ {
+ key[j] = byte(rnd.Intn(256))
+ }
+ builder.Balance(hex.EncodeToString(key), rnd.Uint64())
+ }
+
+ pk, hk, _ := builder.Build()
+
+ b.Run("review_keys", func(b *testing.B) {
+ for i, j := 0, 0; i < b.N; i, j = i+1, j+1 {
+ if j >= len(pk) {
+ j = 0
+ }
+
+ hph.ReviewKeys(pk[j:j+1], hk[j:j+1])
+ }
+ })
+}
diff --git a/erigon-lib/commitment/hex_patricia_hashed_fuzz_test.go b/erigon-lib/commitment/hex_patricia_hashed_fuzz_test.go
new file mode 100644
index 00000000000..e1e772b8385
--- /dev/null
+++ b/erigon-lib/commitment/hex_patricia_hashed_fuzz_test.go
@@ -0,0 +1,214 @@
+//go:build !nofuzz
+
+package commitment
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/sha3"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+// go test -trimpath -v -fuzz=Fuzz_ProcessUpdate$ -fuzztime=300s ./commitment
+
+func Fuzz_ProcessUpdate(f *testing.F) {
+ ha, _ := hex.DecodeString("13ccfe8074645cab4cb42b423625e055f0293c87")
+ hb, _ := hex.DecodeString("73f822e709a0016bfaed8b5e81b5f86de31d6895")
+
+ f.Add(uint64(2), ha, uint64(1235105), hb)
+
+ f.Fuzz(func(t *testing.T, balanceA uint64, accountA []byte, balanceB uint64, accountB []byte) {
+ if len(accountA) == 0 || len(accountA) > 20 || len(accountB) == 0 || len(accountB) > 20 {
+ t.Skip()
+ }
+
+ builder := NewUpdateBuilder().
+ Balance(hex.EncodeToString(accountA), balanceA).
+ Balance(hex.EncodeToString(accountB), balanceB)
+
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+ hph := NewHexPatriciaHashed(20, ms.branchFn, ms.accountFn, ms.storageFn)
+ hphAnother := NewHexPatriciaHashed(20, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ hph.SetTrace(false)
+ hphAnother.SetTrace(false)
+
+ plainKeys, hashedKeys, updates := builder.Build()
+ if err := ms.applyPlainUpdates(plainKeys, updates); err != nil {
+ t.Fatal(err)
+ }
+ if err := ms2.applyPlainUpdates(plainKeys, updates); err != nil {
+ t.Fatal(err)
+ }
+
+ rootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ if len(rootHash) != 32 {
+ t.Fatalf("invalid root hash length: expected 32 bytes, got %v", len(rootHash))
+ }
+
+ rootHashAnother, branchNodeUpdates, err := hphAnother.ReviewKeys(plainKeys, hashedKeys)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ms2.applyBranchNodeUpdates(branchNodeUpdates)
+
+ if len(rootHashAnother) > 32 {
+ t.Fatalf("invalid root hash length: expected 32 bytes, got %v", len(rootHash))
+ }
+ if !bytes.Equal(rootHash, rootHashAnother) {
+ t.Fatalf("invalid second root hash with same updates: [%v] != [%v]", hex.EncodeToString(rootHash), hex.EncodeToString(rootHashAnother))
+ }
+ })
+}
+
+// go test -trimpath -v -fuzz=Fuzz_ProcessUpdates_ArbitraryUpdateCount -fuzztime=300s ./commitment
+
+func Fuzz_ProcessUpdates_ArbitraryUpdateCount(f *testing.F) {
+ ha, _ := hex.DecodeString("0008852883b2850c7a48f4b0eea3ccc4c04e6cb6025e9e8f7db2589c7dae81517c514790cfd6f668903161349e")
+
+ f.Add(ha)
+
+ f.Fuzz(func(t *testing.T, build []byte) {
+ if len(build) < 12 {
+ t.Skip()
+ }
+ i := 0
+ keysCount := binary.BigEndian.Uint32(build[i : i+4])
+ i += 4
+ ks := binary.BigEndian.Uint32(build[i : i+4])
+ keysSeed := rand.New(rand.NewSource(int64(ks)))
+ i += 4
+ us := binary.BigEndian.Uint32(build[i : i+4])
+ updateSeed := rand.New(rand.NewSource(int64(us)))
+
+ t.Logf("fuzzing %d keys keysSeed=%d updateSeed=%d", keysCount, ks, us)
+
+ builder := NewUpdateBuilder()
+ for k := uint32(0); k < keysCount; k++ {
+ var key [length.Addr]byte
+ n, err := keysSeed.Read(key[:])
+ pkey := hex.EncodeToString(key[:])
+ require.NoError(t, err)
+ require.EqualValues(t, length.Addr, n)
+
+ aux := make([]byte, 32)
+
+ flg := UpdateFlags(updateSeed.Intn(int(CodeUpdate | DeleteUpdate | StorageUpdate | NonceUpdate | BalanceUpdate)))
+ switch {
+ case flg&BalanceUpdate != 0:
+ builder.Balance(pkey, updateSeed.Uint64()).Nonce(pkey, updateSeed.Uint64())
+ continue
+ case flg&CodeUpdate != 0:
+ keccak := sha3.NewLegacyKeccak256().(keccakState)
+ var s [8]byte
+ n, err := updateSeed.Read(s[:])
+ require.NoError(t, err)
+ require.EqualValues(t, len(s), n)
+ keccak.Write(s[:])
+ keccak.Read(aux)
+
+ builder.CodeHash(pkey, hex.EncodeToString(aux))
+ continue
+ case flg&StorageUpdate != 0:
+ sz := updateSeed.Intn(length.Hash)
+ n, err = updateSeed.Read(aux[:sz])
+ require.NoError(t, err)
+ require.EqualValues(t, sz, n)
+
+ loc := make([]byte, updateSeed.Intn(length.Hash-1)+1)
+ keysSeed.Read(loc)
+ builder.Storage(pkey, hex.EncodeToString(loc), hex.EncodeToString(aux[:sz]))
+ continue
+ case flg&DeleteUpdate != 0:
+ continue
+ default:
+ continue
+ }
+ }
+
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+ hph := NewHexPatriciaHashed(20, ms.branchFn, ms.accountFn, ms.storageFn)
+ hphAnother := NewHexPatriciaHashed(20, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ plainKeys, hashedKeys, updates := builder.Build()
+
+ hph.SetTrace(false)
+ hphAnother.SetTrace(false)
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ rootHashReview, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ require.Len(t, rootHashReview, length.Hash, "invalid root hash length")
+
+ err = ms2.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ rootHashAnother, branchUpdatesAnother, err := hphAnother.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ ms2.applyBranchNodeUpdates(branchUpdatesAnother)
+
+ require.Len(t, rootHashAnother, length.Hash, "invalid root hash length")
+ require.EqualValues(t, rootHashReview, rootHashAnother, "storage-based and update-based rootHash mismatch")
+ })
+}
+
+func Fuzz_HexPatriciaHashed_ReviewKeys(f *testing.F) {
+ var (
+ keysCount uint64 = 100
+ seed int64 = 1234123415
+ )
+
+ f.Add(keysCount, seed)
+
+ f.Fuzz(func(t *testing.T, keysCount uint64, seed int64) {
+ if keysCount > 10e9 {
+ return
+ }
+
+ rnd := rand.New(rand.NewSource(seed))
+ builder := NewUpdateBuilder()
+
+ // generate updates
+ for i := 0; i < int(keysCount); i++ {
+ key := make([]byte, length.Addr)
+
+ for j := 0; j < len(key); j++ {
+ key[j] = byte(rnd.Intn(256))
+ }
+ builder.Balance(hex.EncodeToString(key), rnd.Uint64())
+ }
+
+ ms := NewMockState(t)
+ hph := NewHexPatriciaHashed(length.Addr, ms.branchFn, ms.accountFn, ms.storageFn)
+
+ hph.SetTrace(false)
+
+ plainKeys, hashedKeys, updates := builder.Build()
+ if err := ms.applyPlainUpdates(plainKeys, updates); err != nil {
+ t.Fatal(err)
+ }
+
+ rootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ require.Lenf(t, rootHash, length.Hash, "invalid root hash length")
+ })
+}
diff --git a/erigon-lib/commitment/hex_patricia_hashed_test.go b/erigon-lib/commitment/hex_patricia_hashed_test.go
new file mode 100644
index 00000000000..3798701c7c5
--- /dev/null
+++ b/erigon-lib/commitment/hex_patricia_hashed_test.go
@@ -0,0 +1,523 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package commitment
+
+import (
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+func Test_HexPatriciaHashed_ResetThenSingularUpdates(t *testing.T) {
+ ms := NewMockState(t)
+ hph := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Balance("04", 8).
+ Storage("04", "01", "0401").
+ Storage("03", "56", "050505").
+ Storage("03", "57", "060606").
+ Balance("05", 9).
+ Storage("05", "02", "8989").
+ Storage("05", "04", "9898").
+ Build()
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ firstRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ t.Logf("root hash %x\n", firstRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ fmt.Printf("1. Generated updates\n")
+ renderUpdates(branchNodeUpdates)
+
+ // More updates
+ hph.Reset()
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().
+ Storage("03", "58", "050505").
+ Build()
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ secondRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEqualValues(t, firstRootHash, secondRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Printf("2. Generated single update\n")
+ renderUpdates(branchNodeUpdates)
+
+ // More updates
+ hph.Reset()
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().
+ Storage("03", "58", "070807").
+ Build()
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ thirdRootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEqualValues(t, secondRootHash, thirdRootHash)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Printf("3. Generated single update\n")
+ renderUpdates(branchNodeUpdates)
+}
+
+func Test_HexPatriciaHashed_EmptyUpdate(t *testing.T) {
+ ms := NewMockState(t)
+ hph := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(false)
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("00", 4).
+ Nonce("00", 246462653).
+ Balance("01", 5).
+ CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870").
+ Delete("00").
+ Storage("04", "01", "0401").
+ Storage("03", "56", "050505").
+ Build()
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ hashBeforeEmptyUpdate, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ require.NotEmpty(t, hashBeforeEmptyUpdate)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ fmt.Println("1. Updates applied")
+ renderUpdates(branchNodeUpdates)
+
+ // generate empty updates and do NOT reset tree
+ hph.SetTrace(true)
+
+ plainKeys, hashedKeys, updates = NewUpdateBuilder().Build()
+
+ err = ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ hashAfterEmptyUpdate, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ fmt.Println("2. Empty updates applied without state reset")
+
+ require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate)
+}
+
+func Test_HexPatriciaHashed_UniqueRepresentation(t *testing.T) {
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("f5", 4).
+ Balance("ff", 900234).
+ Balance("04", 1233).
+ Storage("04", "01", "0401").
+ Balance("ba", 065606).
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Storage("03", "56", "050505").
+ Balance("05", 9).
+ Storage("03", "87", "060606").
+ Balance("b9", 6).
+ Nonce("ff", 169356).
+ Storage("05", "02", "8989").
+ Storage("f5", "04", "9898").
+ Build()
+
+ trieOne := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ trieTwo := NewHexPatriciaHashed(1, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ trieOne.SetTrace(true)
+ trieTwo.SetTrace(true)
+
+ // single sequential update
+ roots := make([][]byte, 0)
+ // branchNodeUpdatesOne := make(map[string]BranchData)
+ fmt.Printf("1. Trie sequential update generated following branch updates\n")
+ for i := 0; i < len(updates); i++ {
+ if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil {
+ t.Fatal(err)
+ }
+
+ sequentialRoot, branchNodeUpdates, err := trieOne.ReviewKeys(plainKeys[i:i+1], hashedKeys[i:i+1])
+ require.NoError(t, err)
+ roots = append(roots, sequentialRoot)
+
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+ renderUpdates(branchNodeUpdates)
+ }
+
+ err := ms2.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ fmt.Printf("\n2. Trie batch update generated following branch updates\n")
+ // batch update
+ batchRoot, branchNodeUpdatesTwo, err := trieTwo.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ renderUpdates(branchNodeUpdatesTwo)
+
+ fmt.Printf("\n sequential roots:\n")
+ for i, rh := range roots {
+ fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh))
+ }
+
+ ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo)
+
+ require.EqualValues(t, batchRoot, roots[len(roots)-1],
+ "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot))
+ require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes")
+}
+
+func Test_Sepolia(t *testing.T) {
+ ms := NewMockState(t)
+
+ type TestData struct {
+ balances map[string][]byte
+ expectedRoot string
+ }
+
+ tests := []TestData{
+ {
+ expectedRoot: "5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494",
+ balances: map[string][]byte{
+ "a2a6d93439144ffe4d27c9e088dcd8b783946263": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "bc11295936aa79d594139de1b2e12629414f3bdb": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "7cf5b79bfe291a67ab02b393e456ccc4c266f753": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "aaec86394441f915bce3e6ab399977e9906f3b69": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "f47cae1cf79ca6758bfc787dbd21e6bdbe7112b8": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "d7eddb78ed295b3c9629240e8924fb8d8874ddd8": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "8b7f0977bb4f0fbe7076fa22bc24aca043583f5e": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "e2e2659028143784d557bcec6ff3a0721048880a": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "d9a5179f091d85051d3c982785efd1455cec8699": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "beef32ca5b9a198d27b4e02f4c70439fe60356cf": {0xd3, 0xc2, 0x1b, 0xce, 0xcc, 0xed, 0xa1, 0x00, 0x00, 0x00},
+ "0000006916a87b82333f4245046623b23794c65c": {0x08, 0x45, 0x95, 0x16, 0x14, 0x01, 0x48, 0x4a, 0x00, 0x00, 0x00},
+ "b21c33de1fab3fa15499c62b59fe0cc3250020d1": {0x52, 0xb7, 0xd2, 0xdc, 0xc8, 0x0c, 0xd2, 0xe4, 0x00, 0x00, 0x00},
+ "10f5d45854e038071485ac9e402308cf80d2d2fe": {0x52, 0xb7, 0xd2, 0xdc, 0xc8, 0x0c, 0xd2, 0xe4, 0x00, 0x00, 0x00},
+ "d7d76c58b3a519e9fa6cc4d22dc017259bc49f1e": {0x52, 0xb7, 0xd2, 0xdc, 0xc8, 0x0c, 0xd2, 0xe4, 0x00, 0x00, 0x00},
+ "799d329e5f583419167cd722962485926e338f4a": {0x0d, 0xe0, 0xb6, 0xb3, 0xa7, 0x64, 0x00, 0x00},
+ },
+ },
+ {
+ expectedRoot: "c91d4ecd59dce3067d340b3aadfc0542974b4fb4db98af39f980a91ea00db9dc",
+ balances: map[string][]byte{
+ "2f14582947e292a2ecd20c430b46f2d27cfe213c": {0x1B, 0xC1, 0x6D, 0x67, 0x4E, 0xC8, 0x00, 0x00},
+ },
+ },
+ {
+ expectedRoot: "c91d4ecd59dce3067d340b3aadfc0542974b4fb4db98af39f980a91ea00db9dc",
+ balances: map[string][]byte{},
+ },
+ }
+
+ hph := NewHexPatriciaHashed(length.Addr, ms.branchFn, ms.accountFn, ms.storageFn)
+ hph.SetTrace(true)
+
+ for _, testData := range tests {
+ builder := NewUpdateBuilder()
+
+ for address, balance := range testData.balances {
+ builder.IncrementBalance(address, balance)
+ }
+ plainKeys, hashedKeys, updates := builder.Build()
+
+ if err := ms.applyPlainUpdates(plainKeys, updates); err != nil {
+ t.Fatal(err)
+ }
+
+ rootHash, branchNodeUpdates, err := hph.ReviewKeys(plainKeys, hashedKeys)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ require.EqualValues(t, testData.expectedRoot, fmt.Sprintf("%x", rootHash))
+ }
+}
+
+func Test_HexPatriciaHashed_StateEncode(t *testing.T) {
+ //trie := NewHexPatriciaHashed(length.Hash, nil, nil, nil)
+ var s state
+ s.Root = make([]byte, 128)
+ rnd := rand.New(rand.NewSource(42))
+ n, err := rnd.Read(s.CurrentKey[:])
+ require.NoError(t, err)
+ require.EqualValues(t, 128, n)
+ n, err = rnd.Read(s.Root[:])
+ require.NoError(t, err)
+ require.EqualValues(t, len(s.Root), n)
+ s.RootPresent = true
+ s.RootTouched = true
+ s.RootChecked = true
+
+ s.CurrentKeyLen = int8(rnd.Intn(129))
+ for i := 0; i < len(s.Depths); i++ {
+ s.Depths[i] = rnd.Intn(256)
+ }
+ for i := 0; i < len(s.TouchMap); i++ {
+ s.TouchMap[i] = uint16(rnd.Intn(1<<16 - 1))
+ }
+ for i := 0; i < len(s.AfterMap); i++ {
+ s.AfterMap[i] = uint16(rnd.Intn(1<<16 - 1))
+ }
+ for i := 0; i < len(s.BranchBefore); i++ {
+ if rnd.Intn(100) > 49 {
+ s.BranchBefore[i] = true
+ }
+ }
+
+ enc, err := s.Encode(nil)
+ require.NoError(t, err)
+ require.NotEmpty(t, enc)
+
+ var s1 state
+ err = s1.Decode(enc)
+ require.NoError(t, err)
+
+ require.EqualValues(t, s.Root[:], s1.Root[:])
+ require.EqualValues(t, s.Depths[:], s1.Depths[:])
+ require.EqualValues(t, s.CurrentKeyLen, s1.CurrentKeyLen)
+ require.EqualValues(t, s.CurrentKey[:], s1.CurrentKey[:])
+ require.EqualValues(t, s.AfterMap[:], s1.AfterMap[:])
+ require.EqualValues(t, s.TouchMap[:], s1.TouchMap[:])
+ require.EqualValues(t, s.BranchBefore[:], s1.BranchBefore[:])
+ require.EqualValues(t, s.RootTouched, s1.RootTouched)
+ require.EqualValues(t, s.RootPresent, s1.RootPresent)
+ require.EqualValues(t, s.RootChecked, s1.RootChecked)
+}
+
+func Test_HexPatriciaHashed_StateEncodeDecodeSetup(t *testing.T) {
+ ms := NewMockState(t)
+
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("f5", 4).
+ Balance("ff", 900234).
+ Balance("03", 7).
+ Storage("03", "56", "050505").
+ Balance("05", 9).
+ Storage("03", "87", "060606").
+ Balance("b9", 6).
+ Nonce("ff", 169356).
+ Storage("05", "02", "8989").
+ Storage("f5", "04", "9898").
+ Build()
+
+ before := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ after := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+
+ err := ms.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ rhBefore, branchUpdates, err := before.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ ms.applyBranchNodeUpdates(branchUpdates)
+
+ state, err := before.EncodeCurrentState(nil)
+ require.NoError(t, err)
+
+ err = after.SetState(state)
+ require.NoError(t, err)
+
+ rhAfter, err := after.RootHash()
+ require.NoError(t, err)
+ require.EqualValues(t, rhBefore, rhAfter)
+
+ // create new update and apply it to both tries
+ nextPK, nextHashed, nextUpdates := NewUpdateBuilder().
+ Nonce("ff", 4).
+ Balance("b9", 6000000000).
+ Balance("ad", 8000000000).
+ Build()
+
+ err = ms.applyPlainUpdates(nextPK, nextUpdates)
+ require.NoError(t, err)
+
+ rh2Before, branchUpdates, err := before.ReviewKeys(nextPK, nextHashed)
+ require.NoError(t, err)
+ ms.applyBranchNodeUpdates(branchUpdates)
+
+ rh2After, branchUpdates, err := after.ReviewKeys(nextPK, nextHashed)
+ require.NoError(t, err)
+
+ _ = branchUpdates
+
+ require.EqualValues(t, rh2Before, rh2After)
+}
+
+func Test_HexPatriciaHashed_RestoreAndContinue(t *testing.T) {
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("f5", 4).
+ Balance("ff", 900234).
+ Balance("04", 1233).
+ Storage("04", "01", "0401").
+ Balance("ba", 065606).
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Storage("03", "56", "050505").
+ Balance("05", 9).
+ Storage("03", "87", "060606").
+ Balance("b9", 6).
+ Nonce("ff", 169356).
+ Storage("05", "02", "8989").
+ Storage("f5", "04", "9898").
+ Build()
+
+ trieOne := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ trieTwo := NewHexPatriciaHashed(1, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ err := ms2.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ _ = updates
+
+ batchRoot, branchNodeUpdatesTwo, err := trieTwo.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ renderUpdates(branchNodeUpdatesTwo)
+ ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo)
+
+ buf, err := trieTwo.EncodeCurrentState(nil)
+ require.NoError(t, err)
+ require.NotEmpty(t, buf)
+
+ err = trieOne.SetState(buf)
+ require.NoError(t, err)
+ require.EqualValues(t, batchRoot[:], trieOne.root.h[:])
+ require.EqualValues(t, trieTwo.root.hl, trieOne.root.hl)
+ require.EqualValues(t, trieTwo.root.apl, trieOne.root.apl)
+ if trieTwo.root.apl > 0 {
+ require.EqualValues(t, trieTwo.root.apk, trieOne.root.apk)
+ }
+ require.EqualValues(t, trieTwo.root.spl, trieOne.root.spl)
+ if trieTwo.root.apl > 0 {
+ require.EqualValues(t, trieTwo.root.spk, trieOne.root.spk)
+ }
+ if trieTwo.root.downHashedLen > 0 {
+ require.EqualValues(t, trieTwo.root.downHashedKey, trieOne.root.downHashedKey)
+ }
+ require.EqualValues(t, trieTwo.root.Nonce, trieOne.root.Nonce)
+ //require.EqualValues(t, trieTwo.root.CodeHash, trieOne.root.CodeHash)
+ require.EqualValues(t, trieTwo.root.StorageLen, trieOne.root.StorageLen)
+ require.EqualValues(t, trieTwo.root.extension, trieOne.root.extension)
+
+ require.EqualValues(t, trieTwo.currentKey, trieOne.currentKey)
+ require.EqualValues(t, trieTwo.afterMap, trieOne.afterMap)
+ require.EqualValues(t, trieTwo.touchMap[:], trieOne.touchMap[:])
+ require.EqualValues(t, trieTwo.branchBefore[:], trieOne.branchBefore[:])
+ require.EqualValues(t, trieTwo.rootTouched, trieOne.rootTouched)
+ require.EqualValues(t, trieTwo.rootPresent, trieOne.rootPresent)
+ require.EqualValues(t, trieTwo.rootChecked, trieOne.rootChecked)
+ require.EqualValues(t, trieTwo.currentKeyLen, trieOne.currentKeyLen)
+}
+
+func Test_HexPatriciaHashed_ProcessUpdates_UniqueRepresentation_AfterStateRestore(t *testing.T) {
+ ms := NewMockState(t)
+ ms2 := NewMockState(t)
+
+ plainKeys, hashedKeys, updates := NewUpdateBuilder().
+ Balance("f5", 4).
+ Balance("ff", 900234).
+ Balance("04", 1233).
+ Storage("04", "01", "0401").
+ Balance("ba", 065606).
+ Balance("00", 4).
+ Balance("01", 5).
+ Balance("02", 6).
+ Balance("03", 7).
+ Storage("03", "56", "050505").
+ Balance("05", 9).
+ Storage("03", "87", "060606").
+ Balance("b9", 6).
+ Nonce("ff", 169356).
+ Storage("05", "02", "8989").
+ Storage("f5", "04", "9898").
+ Build()
+
+ sequential := NewHexPatriciaHashed(1, ms.branchFn, ms.accountFn, ms.storageFn)
+ batch := NewHexPatriciaHashed(1, ms2.branchFn, ms2.accountFn, ms2.storageFn)
+
+ batch.Reset()
+ sequential.Reset()
+ sequential.SetTrace(true)
+ batch.SetTrace(true)
+
+ // single sequential update
+ roots := make([][]byte, 0)
+ prevState := make([]byte, 0)
+ fmt.Printf("1. Trie sequential update generated following branch updates\n")
+ for i := 0; i < len(updates); i++ {
+ if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil {
+ t.Fatal(err)
+ }
+ if i == (len(updates) / 2) {
+ sequential.Reset()
+ sequential.ResetFns(ms.branchFn, ms.accountFn, ms.storageFn)
+ err := sequential.SetState(prevState)
+ require.NoError(t, err)
+ }
+
+ sequentialRoot, branchNodeUpdates, err := sequential.ReviewKeys(plainKeys[i:i+1], hashedKeys[i:i+1])
+ require.NoError(t, err)
+ roots = append(roots, sequentialRoot)
+
+ renderUpdates(branchNodeUpdates)
+ ms.applyBranchNodeUpdates(branchNodeUpdates)
+
+ if i == (len(updates)/2 - 1) {
+ prevState, err = sequential.EncodeCurrentState(nil)
+ require.NoError(t, err)
+ }
+ }
+
+ err := ms2.applyPlainUpdates(plainKeys, updates)
+ require.NoError(t, err)
+
+ fmt.Printf("\n2. Trie batch update generated following branch updates\n")
+ // batch update
+ batchRoot, branchNodeUpdatesTwo, err := batch.ReviewKeys(plainKeys, hashedKeys)
+ require.NoError(t, err)
+ renderUpdates(branchNodeUpdatesTwo)
+ ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo)
+
+ require.EqualValues(t, batchRoot, roots[len(roots)-1],
+ "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot))
+ require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes")
+}
diff --git a/erigon-lib/commitment/patricia_state_mock_test.go b/erigon-lib/commitment/patricia_state_mock_test.go
new file mode 100644
index 00000000000..82dc932a2cb
--- /dev/null
+++ b/erigon-lib/commitment/patricia_state_mock_test.go
@@ -0,0 +1,422 @@
+package commitment
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "testing"
+
+ "github.com/holiman/uint256"
+ "golang.org/x/crypto/sha3"
+ "golang.org/x/exp/slices"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+// In memory commitment and state to use with the tests
+type MockState struct {
+ t *testing.T
+ sm map[string][]byte // backbone of the state
+ cm map[string]BranchData // backbone of the commitments
+ numBuf [binary.MaxVarintLen64]byte
+}
+
+func NewMockState(t *testing.T) *MockState {
+ t.Helper()
+ return &MockState{
+ t: t,
+ sm: make(map[string][]byte),
+ cm: make(map[string]BranchData),
+ }
+}
+
+func (ms MockState) branchFn(prefix []byte) ([]byte, error) {
+ if exBytes, ok := ms.cm[string(prefix)]; ok {
+ return exBytes[2:], nil // Skip touchMap, but keep afterMap
+ }
+ return nil, nil
+}
+
+func (ms MockState) accountFn(plainKey []byte, cell *Cell) error {
+ exBytes, ok := ms.sm[string(plainKey[:])]
+ if !ok {
+ ms.t.Logf("accountFn not found key [%x]", plainKey)
+ cell.Delete = true
+ return nil
+ }
+ var ex Update
+ pos, err := ex.Decode(exBytes, 0)
+ if err != nil {
+ ms.t.Fatalf("accountFn decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err)
+ return nil
+ }
+ if pos != len(exBytes) {
+ ms.t.Fatalf("accountFn key [%x] leftover bytes in [%x], comsumed %x", plainKey, exBytes, pos)
+ return nil
+ }
+ if ex.Flags&StorageUpdate != 0 {
+ ms.t.Logf("accountFn reading storage item for key [%x]", plainKey)
+ return fmt.Errorf("storage read by accountFn")
+ }
+ if ex.Flags&DeleteUpdate != 0 {
+ ms.t.Fatalf("accountFn reading deleted account for key [%x]", plainKey)
+ return nil
+ }
+ if ex.Flags&BalanceUpdate != 0 {
+ cell.Balance.Set(&ex.Balance)
+ } else {
+ cell.Balance.Clear()
+ }
+ if ex.Flags&NonceUpdate != 0 {
+ cell.Nonce = ex.Nonce
+ } else {
+ cell.Nonce = 0
+ }
+ if ex.Flags&CodeUpdate != 0 {
+ copy(cell.CodeHash[:], ex.CodeHashOrStorage[:])
+ } else {
+ copy(cell.CodeHash[:], EmptyCodeHash)
+ }
+ return nil
+}
+
+func (ms MockState) storageFn(plainKey []byte, cell *Cell) error {
+ exBytes, ok := ms.sm[string(plainKey[:])]
+ if !ok {
+ ms.t.Logf("storageFn not found key [%x]", plainKey)
+ cell.Delete = true
+ return nil
+ }
+ var ex Update
+ pos, err := ex.Decode(exBytes, 0)
+ if err != nil {
+ ms.t.Fatalf("storageFn decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err)
+ return nil
+ }
+ if pos != len(exBytes) {
+ ms.t.Fatalf("storageFn key [%x] leftover bytes in [%x], comsumed %x", plainKey, exBytes, pos)
+ return nil
+ }
+ if ex.Flags&BalanceUpdate != 0 {
+ ms.t.Logf("storageFn reading balance for key [%x]", plainKey)
+ return nil
+ }
+ if ex.Flags&NonceUpdate != 0 {
+ ms.t.Fatalf("storageFn reading nonce for key [%x]", plainKey)
+ return nil
+ }
+ if ex.Flags&CodeUpdate != 0 {
+ ms.t.Fatalf("storageFn reading codeHash for key [%x]", plainKey)
+ return nil
+ }
+ if ex.Flags&DeleteUpdate != 0 {
+ ms.t.Fatalf("storageFn reading deleted item for key [%x]", plainKey)
+ return nil
+ }
+ if ex.Flags&StorageUpdate != 0 {
+ copy(cell.Storage[:], ex.CodeHashOrStorage[:])
+ cell.StorageLen = len(ex.CodeHashOrStorage)
+ } else {
+ cell.StorageLen = 0
+ cell.Storage = [length.Hash]byte{}
+ }
+ return nil
+}
+
+func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) error {
+ for i, key := range plainKeys {
+ update := updates[i]
+ if update.Flags&DeleteUpdate != 0 {
+ delete(ms.sm, string(key))
+ } else {
+ if exBytes, ok := ms.sm[string(key)]; ok {
+ var ex Update
+ pos, err := ex.Decode(exBytes, 0)
+ if err != nil {
+ return fmt.Errorf("applyPlainUpdates decode existing [%x], bytes: [%x]: %w", key, exBytes, err)
+ }
+ if pos != len(exBytes) {
+ return fmt.Errorf("applyPlainUpdates key [%x] leftover bytes in [%x], comsumed %x", key, exBytes, pos)
+ }
+ if update.Flags&BalanceUpdate != 0 {
+ ex.Flags |= BalanceUpdate
+ ex.Balance.Set(&update.Balance)
+ }
+ if update.Flags&NonceUpdate != 0 {
+ ex.Flags |= NonceUpdate
+ ex.Nonce = update.Nonce
+ }
+ if update.Flags&CodeUpdate != 0 {
+ ex.Flags |= CodeUpdate
+ copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:])
+ }
+ if update.Flags&StorageUpdate != 0 {
+ ex.Flags |= StorageUpdate
+ copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:])
+ }
+ ms.sm[string(key)] = ex.Encode(nil, ms.numBuf[:])
+ } else {
+ ms.sm[string(key)] = update.Encode(nil, ms.numBuf[:])
+ }
+ }
+ }
+ return nil
+}
+
+func (ms *MockState) applyBranchNodeUpdates(updates map[string]BranchData) {
+ for key, update := range updates {
+ if pre, ok := ms.cm[key]; ok {
+ // Merge
+ merged, err := pre.MergeHexBranches(update, nil)
+ if err != nil {
+ panic(err)
+ }
+ ms.cm[key] = merged
+ } else {
+ ms.cm[key] = update
+ }
+ }
+}
+
+func decodeHex(in string) []byte {
+ payload, err := hex.DecodeString(in)
+ if err != nil {
+ panic(err)
+ }
+ return payload
+}
+
+// UpdateBuilder collects updates to the state
+// and provides them in properly sorted form
+type UpdateBuilder struct {
+ balances map[string]*uint256.Int
+ nonces map[string]uint64
+ codeHashes map[string][length.Hash]byte
+ storages map[string]map[string][]byte
+ deletes map[string]struct{}
+ deletes2 map[string]map[string]struct{}
+ keyset map[string]struct{}
+ keyset2 map[string]map[string]struct{}
+}
+
+func NewUpdateBuilder() *UpdateBuilder {
+ return &UpdateBuilder{
+ balances: make(map[string]*uint256.Int),
+ nonces: make(map[string]uint64),
+ codeHashes: make(map[string][length.Hash]byte),
+ storages: make(map[string]map[string][]byte),
+ deletes: make(map[string]struct{}),
+ deletes2: make(map[string]map[string]struct{}),
+ keyset: make(map[string]struct{}),
+ keyset2: make(map[string]map[string]struct{}),
+ }
+}
+
+func (ub *UpdateBuilder) Balance(addr string, balance uint64) *UpdateBuilder {
+ sk := string(decodeHex(addr))
+ delete(ub.deletes, sk)
+ ub.balances[sk] = uint256.NewInt(balance)
+ ub.keyset[sk] = struct{}{}
+ return ub
+}
+
+func (ub *UpdateBuilder) Nonce(addr string, nonce uint64) *UpdateBuilder {
+ sk := string(decodeHex(addr))
+ delete(ub.deletes, sk)
+ ub.nonces[sk] = nonce
+ ub.keyset[sk] = struct{}{}
+ return ub
+}
+
+func (ub *UpdateBuilder) CodeHash(addr string, hash string) *UpdateBuilder {
+ sk := string(decodeHex(addr))
+ delete(ub.deletes, sk)
+ hcode, err := hex.DecodeString(hash)
+ if err != nil {
+ panic(fmt.Errorf("invalid code hash provided: %w", err))
+ }
+ if len(hcode) != length.Hash {
+ panic(fmt.Errorf("code hash should be %d bytes long, got %d", length.Hash, len(hcode)))
+ }
+
+ dst := [length.Hash]byte{}
+ copy(dst[:32], hcode)
+
+ ub.codeHashes[sk] = dst
+ ub.keyset[sk] = struct{}{}
+ return ub
+}
+
+func (ub *UpdateBuilder) Storage(addr string, loc string, value string) *UpdateBuilder {
+ sk1 := string(decodeHex(addr))
+ sk2 := string(decodeHex(loc))
+ v := decodeHex(value)
+ if d, ok := ub.deletes2[sk1]; ok {
+ delete(d, sk2)
+ if len(d) == 0 {
+ delete(ub.deletes2, sk1)
+ }
+ }
+ if k, ok := ub.keyset2[sk1]; ok {
+ k[sk2] = struct{}{}
+ } else {
+ ub.keyset2[sk1] = make(map[string]struct{})
+ ub.keyset2[sk1][sk2] = struct{}{}
+ }
+ if s, ok := ub.storages[sk1]; ok {
+ s[sk2] = v
+ } else {
+ ub.storages[sk1] = make(map[string][]byte)
+ ub.storages[sk1][sk2] = v
+ }
+ return ub
+}
+
+func (ub *UpdateBuilder) IncrementBalance(addr string, balance []byte) *UpdateBuilder {
+ sk := string(decodeHex(addr))
+ delete(ub.deletes, sk)
+ increment := uint256.NewInt(0)
+ increment.SetBytes(balance)
+ if old, ok := ub.balances[sk]; ok {
+ balance := uint256.NewInt(0)
+ balance.Add(old, increment)
+ ub.balances[sk] = balance
+ } else {
+ ub.balances[sk] = increment
+ }
+ ub.keyset[sk] = struct{}{}
+ return ub
+}
+
+func (ub *UpdateBuilder) Delete(addr string) *UpdateBuilder {
+ sk := string(decodeHex(addr))
+ delete(ub.balances, sk)
+ delete(ub.nonces, sk)
+ delete(ub.codeHashes, sk)
+ delete(ub.storages, sk)
+ ub.deletes[sk] = struct{}{}
+ ub.keyset[sk] = struct{}{}
+ return ub
+}
+
+func (ub *UpdateBuilder) DeleteStorage(addr string, loc string) *UpdateBuilder {
+ sk1 := string(decodeHex(addr))
+ sk2 := string(decodeHex(loc))
+ if s, ok := ub.storages[sk1]; ok {
+ delete(s, sk2)
+ if len(s) == 0 {
+ delete(ub.storages, sk1)
+ }
+ }
+ if k, ok := ub.keyset2[sk1]; ok {
+ k[sk2] = struct{}{}
+ } else {
+ ub.keyset2[sk1] = make(map[string]struct{})
+ ub.keyset2[sk1][sk2] = struct{}{}
+ }
+ if d, ok := ub.deletes2[sk1]; ok {
+ d[sk2] = struct{}{}
+ } else {
+ ub.deletes2[sk1] = make(map[string]struct{})
+ ub.deletes2[sk1][sk2] = struct{}{}
+ }
+ return ub
+}
+
+// Build returns three slices (in the order sorted by the hashed keys)
+// 1. Plain keys
+// 2. Corresponding hashed keys
+// 3. Corresponding updates
+func (ub *UpdateBuilder) Build() (plainKeys, hashedKeys [][]byte, updates []Update) {
+ hashed := make([]string, 0, len(ub.keyset)+len(ub.keyset2))
+ preimages := make(map[string][]byte)
+ preimages2 := make(map[string][]byte)
+ keccak := sha3.NewLegacyKeccak256()
+ for key := range ub.keyset {
+ keccak.Reset()
+ keccak.Write([]byte(key))
+ h := keccak.Sum(nil)
+ hashedKey := make([]byte, len(h)*2)
+ for i, c := range h {
+ hashedKey[i*2] = (c >> 4) & 0xf
+ hashedKey[i*2+1] = c & 0xf
+ }
+ hashed = append(hashed, string(hashedKey))
+ preimages[string(hashedKey)] = []byte(key)
+ }
+ hashedKey := make([]byte, 128)
+ for sk1, k := range ub.keyset2 {
+ keccak.Reset()
+ keccak.Write([]byte(sk1))
+ h := keccak.Sum(nil)
+ for i, c := range h {
+ hashedKey[i*2] = (c >> 4) & 0xf
+ hashedKey[i*2+1] = c & 0xf
+ }
+ for sk2 := range k {
+ keccak.Reset()
+ keccak.Write([]byte(sk2))
+ h2 := keccak.Sum(nil)
+ for i, c := range h2 {
+ hashedKey[64+i*2] = (c >> 4) & 0xf
+ hashedKey[64+i*2+1] = c & 0xf
+ }
+ hs := string(common.Copy(hashedKey))
+ hashed = append(hashed, hs)
+ preimages[hs] = []byte(sk1)
+ preimages2[hs] = []byte(sk2)
+ }
+
+ }
+ slices.Sort(hashed)
+ plainKeys = make([][]byte, len(hashed))
+ hashedKeys = make([][]byte, len(hashed))
+ updates = make([]Update, len(hashed))
+ for i, hashedKey := range hashed {
+ hashedKeys[i] = []byte(hashedKey)
+ key := preimages[hashedKey]
+ key2 := preimages2[hashedKey]
+ plainKey := make([]byte, len(key)+len(key2))
+ copy(plainKey[:], key)
+ if key2 != nil {
+ copy(plainKey[len(key):], key2)
+ }
+ plainKeys[i] = plainKey
+ u := &updates[i]
+ if key2 == nil {
+ if balance, ok := ub.balances[string(key)]; ok {
+ u.Flags |= BalanceUpdate
+ u.Balance.Set(balance)
+ }
+ if nonce, ok := ub.nonces[string(key)]; ok {
+ u.Flags |= NonceUpdate
+ u.Nonce = nonce
+ }
+ if codeHash, ok := ub.codeHashes[string(key)]; ok {
+ u.Flags |= CodeUpdate
+ copy(u.CodeHashOrStorage[:], codeHash[:])
+ }
+ if _, del := ub.deletes[string(key)]; del {
+ u.Flags = DeleteUpdate
+ continue
+ }
+ } else {
+ if dm, ok1 := ub.deletes2[string(key)]; ok1 {
+ if _, ok2 := dm[string(key2)]; ok2 {
+ u.Flags = DeleteUpdate
+ continue
+ }
+ }
+ if sm, ok1 := ub.storages[string(key)]; ok1 {
+ if storage, ok2 := sm[string(key2)]; ok2 {
+ u.Flags |= StorageUpdate
+ u.CodeHashOrStorage = [length.Hash]byte{}
+ u.ValLength = len(storage)
+ copy(u.CodeHashOrStorage[:], storage)
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/erigon-lib/common/address.go b/erigon-lib/common/address.go
new file mode 100644
index 00000000000..4ad22034520
--- /dev/null
+++ b/erigon-lib/common/address.go
@@ -0,0 +1,183 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/crypto/cryptopool"
+)
+
+var (
+ addressT = reflect.TypeOf(Address{})
+)
+
+// Address represents the 20 byte address of an Ethereum account.
+type Address [length.Addr]byte
+
+// BytesToAddress returns Address with value b.
+// If b is larger than len(h), b will be cropped from the left.
+func BytesToAddress(b []byte) Address {
+ var a Address
+ a.SetBytes(b)
+ return a
+}
+
+// BigToAddress returns Address with byte values of b.
+// If b is larger than len(h), b will be cropped from the left.
+func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
+
+// HexToAddress returns Address with byte values of s.
+// If s is larger than len(h), s will be cropped from the left.
+func HexToAddress(s string) Address { return BytesToAddress(hexutility.FromHex(s)) }
+
+// IsHexAddress verifies whether a string can represent a valid hex-encoded
+// Ethereum address or not.
+func IsHexAddress(s string) bool {
+ if hexutility.Has0xPrefix(s) {
+ s = s[2:]
+ }
+ return len(s) == 2*length.Addr && hexutility.IsHex(s)
+}
+
+// Bytes gets the string representation of the underlying address.
+func (a Address) Bytes() []byte { return a[:] }
+
+// Hash converts an address to a hash by left-padding it with zeros.
+func (a Address) Hash() Hash { return BytesToHash(a[:]) }
+
+// Hex returns an EIP55-compliant hex string representation of the address.
+func (a Address) Hex() string {
+ return string(a.checksumHex())
+}
+
+// String implements fmt.Stringer.
+func (a Address) String() string {
+ return a.Hex()
+}
+
+func (a *Address) checksumHex() []byte {
+ buf := a.hex()
+
+ // compute checksum
+ sha := cryptopool.GetLegacyKeccak256()
+ //nolint:errcheck
+ sha.Write(buf[2:])
+ hash := sha.Sum(nil)
+ cryptopool.ReturnLegacyKeccak256(sha)
+
+ for i := 2; i < len(buf); i++ {
+ hashByte := hash[(i-2)/2]
+ if i%2 == 0 {
+ hashByte = hashByte >> 4
+ } else {
+ hashByte &= 0xf
+ }
+ if buf[i] > '9' && hashByte > 7 {
+ buf[i] -= 32
+ }
+ }
+ return buf
+}
+
+func (a Address) hex() []byte {
+ var buf [len(a)*2 + 2]byte
+ copy(buf[:2], "0x")
+ hex.Encode(buf[2:], a[:])
+ return buf[:]
+}
+
+// Format implements fmt.Formatter.
+// Address supports the %v, %s, %v, %x, %X and %d format verbs.
+func (a Address) Format(s fmt.State, c rune) {
+ switch c {
+ case 'v', 's':
+ s.Write(a.checksumHex())
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(a.checksumHex())
+ s.Write(q)
+ case 'x', 'X':
+ // %x disables the checksum.
+ hex := a.hex()
+ if !s.Flag('#') {
+ hex = hex[2:]
+ }
+ if c == 'X' {
+ hex = bytes.ToUpper(hex)
+ }
+ s.Write(hex)
+ case 'd':
+ fmt.Fprint(s, ([len(a)]byte)(a))
+ default:
+ fmt.Fprintf(s, "%%!%c(address=%x)", c, a)
+ }
+}
+
+// SetBytes sets the address to the value of b.
+// If b is larger than len(a), b will be cropped from the left.
+func (a *Address) SetBytes(b []byte) {
+ if len(b) > len(a) {
+ b = b[len(b)-length.Addr:]
+ }
+ copy(a[length.Addr-len(b):], b)
+}
+
+// MarshalText returns the hex representation of a.
+func (a Address) MarshalText() ([]byte, error) {
+ b := a[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], b)
+ return result, nil
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (a *Address) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("Address", input, a[:])
+}
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (a *Address) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(addressT, input, a[:])
+}
+
+// Scan implements Scanner for database/sql.
+func (a *Address) Scan(src interface{}) error {
+ srcB, ok := src.([]byte)
+ if !ok {
+ return fmt.Errorf("can't scan %T into Address", src)
+ }
+ if len(srcB) != length.Addr {
+ return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), length.Addr)
+ }
+ copy(a[:], srcB)
+ return nil
+}
+
+// Value implements valuer for database/sql.
+func (a Address) Value() (driver.Value, error) {
+ return a[:], nil
+}
diff --git a/erigon-lib/common/assert/assert_disable.go b/erigon-lib/common/assert/assert_disable.go
new file mode 100644
index 00000000000..baf2dd9c2f1
--- /dev/null
+++ b/erigon-lib/common/assert/assert_disable.go
@@ -0,0 +1,5 @@
+//go:build !assert
+
+package assert
+
+const Enable = false
diff --git a/erigon-lib/common/assert/assert_enable.go b/erigon-lib/common/assert/assert_enable.go
new file mode 100644
index 00000000000..8539e4a4bbf
--- /dev/null
+++ b/erigon-lib/common/assert/assert_enable.go
@@ -0,0 +1,5 @@
+//go:build assert
+
+package assert
+
+const Enable = true
diff --git a/erigon-lib/common/background/progress.go b/erigon-lib/common/background/progress.go
new file mode 100644
index 00000000000..5a4f702bfa5
--- /dev/null
+++ b/erigon-lib/common/background/progress.go
@@ -0,0 +1,98 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package background
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ btree2 "github.com/tidwall/btree"
+)
+
+// Progress - tracks background job progress
+type Progress struct {
+ Name atomic.Pointer[string]
+ Processed, Total atomic.Uint64
+ i int
+}
+
+func (p *Progress) percent() int {
+ return int(
+ (float64(p.Processed.Load()) / float64(p.Total.Load())) * 100,
+ )
+}
+
+// ProgressSet - tracks multiple background job progress
+type ProgressSet struct {
+ list *btree2.Map[int, *Progress]
+ i int
+ lock sync.RWMutex
+}
+
+func NewProgressSet() *ProgressSet {
+ return &ProgressSet{list: btree2.NewMap[int, *Progress](128)}
+}
+func (s *ProgressSet) AddNew(fName string, total uint64) *Progress {
+ p := &Progress{}
+ p.Name.Store(&fName)
+ p.Total.Store(total)
+ s.Add(p)
+ return p
+}
+func (s *ProgressSet) Add(p *Progress) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ s.i++
+ p.i = s.i
+ s.list.Set(p.i, p)
+}
+
+func (s *ProgressSet) Delete(p *Progress) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ s.list.Delete(p.i)
+}
+func (s *ProgressSet) Has() bool {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.list.Len() > 0
+}
+
+func (s *ProgressSet) String() string {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ var sb strings.Builder
+ var i int
+ s.list.Scan(func(_ int, p *Progress) bool {
+ if p == nil {
+ return true
+ }
+ namePtr := p.Name.Load()
+ if namePtr == nil {
+ return true
+ }
+ sb.WriteString(fmt.Sprintf("%s=%d%%", *namePtr, p.percent()))
+ i++
+ if i != s.list.Len() {
+ sb.WriteString(", ")
+ }
+ return true
+ })
+ return sb.String()
+}
diff --git a/erigon-lib/common/big.go b/erigon-lib/common/big.go
new file mode 100644
index 00000000000..8be8ac3cbd2
--- /dev/null
+++ b/erigon-lib/common/big.go
@@ -0,0 +1,29 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import "math/big"
+
+var (
+ Big0 = big.NewInt(0)
+ Big1 = big.NewInt(1)
+ Big2 = big.NewInt(2)
+ Big3 = big.NewInt(3)
+ Big32 = big.NewInt(32)
+ Big256 = big.NewInt(256)
+ Big257 = big.NewInt(257)
+)
diff --git a/erigon-lib/common/bitutil/select.go b/erigon-lib/common/bitutil/select.go
new file mode 100644
index 00000000000..f3266c2695f
--- /dev/null
+++ b/erigon-lib/common/bitutil/select.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package bitutil
+
+import (
+ "math/bits"
+)
+
+// Required by select64
+var kSelectInByte = [2048]byte{
+ 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
+ 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
+ 8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
+ 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
+ 8, 8, 8, 8, 8, 8, 8, 2, 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
+ 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
+ 8, 8, 8, 7, 8, 7, 7, 2, 8, 7, 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5, 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
+ 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
+ 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6, 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3,
+ 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3,
+ 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 4,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4,
+ 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7}
+
+const (
+ kOnesStep4 uint64 = 0x1111111111111111
+ kOnesStep8 uint64 = 0x0101010101010101
+ kLAMBDAsStep8 uint64 = 0x80 * kOnesStep8
+
+ kOnesStep4x3 = 0x3 * kOnesStep4
+ kOnesStep4xA = 0xA * kOnesStep4
+ kOnesStep8xF = 0xF * kOnesStep8
+)
+
+/** Returns the index of the k-th 1-bit in the 64-bit word x.
+ * @param x 64-bit word.
+ * @param k 0-based rank (`k = 0` returns the position of the first 1-bit).
+ *
+ * Uses the broadword selection algorithm by Vigna [1], improved by Gog and Petri [2] and Vigna [3].
+ * Facebook's Folly implementation [4].
+ *
+ * [1] Sebastiano Vigna. Broadword Implementation of Rank/Select Queries. WEA, 2008
+ *
+ * [2] Simon Gog, Matthias Petri. Optimized succinct data structures for massive data. Softw. Pract.
+ * Exper., 2014
+ *
+ * [3] Sebastiano Vigna. MG4J 5.2.1. http://mg4j.di.unimi.it/
+ *
+ * [4] Facebook Folly library: https://github.com/facebook/folly
+ *
+ */
+
+func Select64(x uint64, k int) (place int) {
+ /* Original implementation - a bit obfuscated to satisfy Golang's inlining costs
+ s := x
+ s = s - ((s & (0xA * kOnesStep4)) >> 1)
+ s = (s & (0x3 * kOnesStep4)) + ((s >> 2) & (0x3 * kOnesStep4))
+ s = (s + (s >> 4)) & (0xF * kOnesStep8)
+ byteSums := s * kOnesStep8
+ */
+ s := x - ((x & kOnesStep4xA) >> 1)
+ s = (s & kOnesStep4x3) + ((s >> 2) & kOnesStep4x3)
+ byteSums := ((s + (s >> 4)) & kOnesStep8xF) * kOnesStep8
+ /* Original implementaiton:
+ kStep8 := uint64(k) * kOnesStep8
+ geqKStep8 := ((kStep8 | kLAMBDAsStep8) - byteSums) & kLAMBDAsStep8
+ place = bits.OnesCount64(geqKStep8) * 8
+ byteRank := uint64(k) - (((byteSums << 8) >> place) & uint64(0xFF))
+ */
+ place = bits.OnesCount64((((uint64(k)*kOnesStep8)|kLAMBDAsStep8)-byteSums)&kLAMBDAsStep8) * 8
+ byteRank := uint64(k) - (((byteSums << 8) >> place) & uint64(0xFF))
+ return place + int(kSelectInByte[((x>>place)&0xFF)|(byteRank<<8)])
+}
diff --git a/erigon-lib/common/bytes.go b/erigon-lib/common/bytes.go
new file mode 100644
index 00000000000..59929736cf8
--- /dev/null
+++ b/erigon-lib/common/bytes.go
@@ -0,0 +1,67 @@
+/*
+ Copyright 2021 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+)
+
+func ByteCount(b uint64) string {
+ const unit = 1024
+ if b < unit {
+ return fmt.Sprintf("%dB", b)
+ }
+ bGb, exp := MBToGB(b)
+ return fmt.Sprintf("%.1f%cB", bGb, "KMGTPE"[exp])
+}
+
+func MBToGB(b uint64) (float64, int) {
+ const unit = 1024
+ if b < unit {
+ return float64(b), 0
+ }
+
+ div, exp := uint64(unit), 0
+ for n := b / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return float64(b) / float64(div), exp
+}
+
+func Copy(b []byte) []byte {
+ if b == nil {
+ return nil
+ }
+ c := make([]byte, len(b))
+ copy(c, b)
+ return c
+}
+
+func EnsureEnoughSize(in []byte, size int) []byte {
+ if cap(in) < size {
+ newBuf := make([]byte, size)
+ copy(newBuf, in)
+ return newBuf
+ }
+ return in[:size] // Reuse the space if it has enough capacity
+}
+
+func BitLenToByteLen(bitLen int) (byteLen int) {
+ return (bitLen + 7) / 8
+}
diff --git a/erigon-lib/common/bytes4.go b/erigon-lib/common/bytes4.go
new file mode 100644
index 00000000000..2687074ef2a
--- /dev/null
+++ b/erigon-lib/common/bytes4.go
@@ -0,0 +1,107 @@
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+var (
+ bytes4T = reflect.TypeOf(Bytes4{})
+)
+
+type Bytes4 [length.Bytes4]byte
+
+// Hex converts a hash to a hex string.
+func (b Bytes4) Hex() string { return hexutility.Encode(b[:]) }
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (b *Bytes4) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(bytes4T, input, b[:])
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (b *Bytes4) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("Bytes4", input, b[:])
+}
+
+// MarshalText returns the hex representation of a.
+func (b Bytes4) MarshalText() ([]byte, error) {
+ bl := b[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], bl)
+ return result, nil
+}
+
+// Format implements fmt.Formatter.
+// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+func (b Bytes4) Format(s fmt.State, c rune) {
+ hexb := make([]byte, 2+len(b)*2)
+ copy(hexb, "0x")
+ hex.Encode(hexb[2:], b[:])
+
+ switch c {
+ case 'x', 'X':
+ if !s.Flag('#') {
+ hexb = hexb[2:]
+ }
+ if c == 'X' {
+ hexb = bytes.ToUpper(hexb)
+ }
+ fallthrough
+ case 'v', 's':
+ s.Write(hexb)
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(hexb)
+ s.Write(q)
+ case 'd':
+ fmt.Fprint(s, ([len(b)]byte)(b))
+ default:
+ fmt.Fprintf(s, "%%!%c(hash=%x)", c, b)
+ }
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (b Bytes4) String() string {
+ return b.Hex()
+}
+
+// SetBytes sets the hash to the value of i.
+// If b is larger than len(h), b will be cropped from the left.
+func (b *Bytes4) SetBytes(i []byte) {
+ if len(i) > len(b) {
+ i = i[len(i)-length.Hash:]
+ }
+
+ copy(b[length.Hash-len(i):], i)
+}
+
+// Generate implements testing/quick.Generator.
+func (b Bytes4) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := rand.Intn(len(b))
+ for i := len(b) - 1; i > m; i-- {
+ b[i] = byte(rand.Uint32())
+ }
+ return reflect.ValueOf(b)
+}
+
+// Value implements valuer for database/sql.
+func (b Bytes4) Value() (driver.Value, error) {
+ return b[:], nil
+}
+
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (b Bytes4) TerminalString() string {
+ return fmt.Sprintf("%x", b)
+}
diff --git a/erigon-lib/common/bytes48.go b/erigon-lib/common/bytes48.go
new file mode 100644
index 00000000000..092e451f64e
--- /dev/null
+++ b/erigon-lib/common/bytes48.go
@@ -0,0 +1,107 @@
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+var (
+ bytes48T = reflect.TypeOf(Bytes48{})
+)
+
+type Bytes48 [length.Bytes48]byte
+
+// Hex converts a hash to a hex string.
+func (b Bytes48) Hex() string { return hexutility.Encode(b[:]) }
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (b *Bytes48) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(bytes48T, input, b[:])
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (b *Bytes48) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("Bytes48", input, b[:])
+}
+
+// MarshalText returns the hex representation of a.
+func (b Bytes48) MarshalText() ([]byte, error) {
+ bl := b[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], bl)
+ return result, nil
+}
+
+// Format implements fmt.Formatter.
+// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+func (b Bytes48) Format(s fmt.State, c rune) {
+ hexb := make([]byte, 2+len(b)*2)
+ copy(hexb, "0x")
+ hex.Encode(hexb[2:], b[:])
+
+ switch c {
+ case 'x', 'X':
+ if !s.Flag('#') {
+ hexb = hexb[2:]
+ }
+ if c == 'X' {
+ hexb = bytes.ToUpper(hexb)
+ }
+ fallthrough
+ case 'v', 's':
+ s.Write(hexb)
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(hexb)
+ s.Write(q)
+ case 'd':
+ fmt.Fprint(s, ([len(b)]byte)(b))
+ default:
+ fmt.Fprintf(s, "%%!%c(hash=%x)", c, b)
+ }
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (b Bytes48) String() string {
+ return b.Hex()
+}
+
+// SetBytes sets the hash to the value of i.
+// If b is larger than len(h), b will be cropped from the left.
+func (b *Bytes48) SetBytes(i []byte) {
+ if len(i) > len(b) {
+ i = i[len(i)-length.Hash:]
+ }
+
+ copy(b[length.Hash-len(i):], i)
+}
+
+// Generate implements testing/quick.Generator.
+func (b Bytes48) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := rand.Intn(len(b))
+ for i := len(b) - 1; i > m; i-- {
+ b[i] = byte(rand.Uint32())
+ }
+ return reflect.ValueOf(b)
+}
+
+// Value implements valuer for database/sql.
+func (b Bytes48) Value() (driver.Value, error) {
+ return b[:], nil
+}
+
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (b Bytes48) TerminalString() string {
+ return fmt.Sprintf("%x…%x", b[:3], b[len(b)-3:])
+}
diff --git a/erigon-lib/common/bytes64.go b/erigon-lib/common/bytes64.go
new file mode 100644
index 00000000000..bd407e8aaf3
--- /dev/null
+++ b/erigon-lib/common/bytes64.go
@@ -0,0 +1,97 @@
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+var (
+ bytes64T = reflect.TypeOf(Bytes64{})
+)
+
+type Bytes64 [length.Bytes64]byte
+
+// Hex converts a hash to a hex string.
+func (b Bytes64) Hex() string { return hexutility.Encode(b[:]) }
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (b *Bytes64) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(bytes64T, input, b[:])
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (b *Bytes64) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("Bytes64", input, b[:])
+}
+
+// MarshalText returns the hex representation of a.
+func (b Bytes64) MarshalText() ([]byte, error) {
+ bl := b[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], bl)
+ return result, nil
+}
+
+// Format implements fmt.Formatter.
+// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+func (b Bytes64) Format(s fmt.State, c rune) {
+ hexb := make([]byte, 2+len(b)*2)
+ copy(hexb, "0x")
+ hex.Encode(hexb[2:], b[:])
+
+ switch c {
+ case 'x', 'X':
+ if !s.Flag('#') {
+ hexb = hexb[2:]
+ }
+ if c == 'X' {
+ hexb = bytes.ToUpper(hexb)
+ }
+ fallthrough
+ case 'v', 's':
+ s.Write(hexb)
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(hexb)
+ s.Write(q)
+ case 'd':
+ fmt.Fprint(s, ([len(b)]byte)(b))
+ default:
+ fmt.Fprintf(s, "%%!%c(hash=%x)", c, b)
+ }
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (b Bytes64) String() string {
+ return b.Hex()
+}
+
+// SetBytes sets the hash to the value of i.
+// If b is larger than len(h), b will be cropped from the left.
+func (b *Bytes64) SetBytes(i []byte) {
+ if len(i) > len(b) {
+ i = i[len(i)-length.Hash:]
+ }
+
+ copy(b[length.Hash-len(i):], i)
+}
+
+// Value implements valuer for database/sql.
+func (b Bytes64) Value() (driver.Value, error) {
+ return b[:], nil
+}
+
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (b Bytes64) TerminalString() string {
+ return fmt.Sprintf("%x…%x", b[:3], b[len(b)-3:])
+}
diff --git a/erigon-lib/common/bytes96.go b/erigon-lib/common/bytes96.go
new file mode 100644
index 00000000000..e15850d2e33
--- /dev/null
+++ b/erigon-lib/common/bytes96.go
@@ -0,0 +1,107 @@
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+var (
+ blsSignatureT = reflect.TypeOf(Bytes96{})
+)
+
+type Bytes96 [length.Bytes96]byte
+
+// Hex converts a hash to a hex string.
+func (b Bytes96) Hex() string { return hexutility.Encode(b[:]) }
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (b *Bytes96) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(blsSignatureT, input, b[:])
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (b *Bytes96) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("BLSSignature", input, b[:])
+}
+
+// MarshalText returns the hex representation of a.
+func (b Bytes96) MarshalText() ([]byte, error) {
+ bl := b[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], bl)
+ return result, nil
+}
+
+// Format implements fmt.Formatter.
+// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+func (b Bytes96) Format(s fmt.State, c rune) {
+ hexb := make([]byte, 2+len(b)*2)
+ copy(hexb, "0x")
+ hex.Encode(hexb[2:], b[:])
+
+ switch c {
+ case 'x', 'X':
+ if !s.Flag('#') {
+ hexb = hexb[2:]
+ }
+ if c == 'X' {
+ hexb = bytes.ToUpper(hexb)
+ }
+ fallthrough
+ case 'v', 's':
+ s.Write(hexb)
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(hexb)
+ s.Write(q)
+ case 'd':
+ fmt.Fprint(s, ([len(b)]byte)(b))
+ default:
+ fmt.Fprintf(s, "%%!%c(hash=%x)", c, b)
+ }
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (b Bytes96) String() string {
+ return b.Hex()
+}
+
+// SetBytes sets the hash to the value of i.
+// If b is larger than len(h), b will be cropped from the left.
+func (b *Bytes96) SetBytes(i []byte) {
+ if len(i) > len(b) {
+ i = i[len(i)-length.Hash:]
+ }
+
+ copy(b[length.Hash-len(i):], i)
+}
+
+// Generate implements testing/quick.Generator.
+func (b Bytes96) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := rand.Intn(len(b))
+ for i := len(b) - 1; i > m; i-- {
+ b[i] = byte(rand.Uint32())
+ }
+ return reflect.ValueOf(b)
+}
+
+// Value implements valuer for database/sql.
+func (b Bytes96) Value() (driver.Value, error) {
+ return b[:], nil
+}
+
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (b Bytes96) TerminalString() string {
+ return fmt.Sprintf("%x…%x", b[:3], b[len(b)-3:])
+}
diff --git a/erigon-lib/common/chan.go b/erigon-lib/common/chan.go
new file mode 100644
index 00000000000..ac9fdbf6fc7
--- /dev/null
+++ b/erigon-lib/common/chan.go
@@ -0,0 +1,61 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import "errors"
+
+var ErrStopped = errors.New("stopped")
+var ErrUnwind = errors.New("unwound")
+
+func Stopped(ch <-chan struct{}) error {
+ if ch == nil {
+ return nil
+ }
+ select {
+ case <-ch:
+ return ErrStopped
+ default:
+ }
+ return nil
+}
+
+func SafeClose(ch chan struct{}) {
+ if ch == nil {
+ return
+ }
+ select {
+ case <-ch:
+ // Channel was already closed
+ default:
+ close(ch)
+ }
+}
+
+// PrioritizedSend message to channel, but if channel is full (slow consumer) - drop half of old messages (not new)
+func PrioritizedSend[t any](ch chan t, msg t) {
+ select {
+ case ch <- msg:
+ default: //if channel is full (slow consumer), drop old messages (not new)
+ for i := 0; i < cap(ch)/2; i++ {
+ select {
+ case <-ch:
+ default:
+ }
+ }
+ ch <- msg
+ }
+}
diff --git a/erigon-lib/common/cli.go b/erigon-lib/common/cli.go
new file mode 100644
index 00000000000..c195b9151e5
--- /dev/null
+++ b/erigon-lib/common/cli.go
@@ -0,0 +1,58 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+func RootContext() (context.Context, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ defer cancel()
+
+ ch := make(chan os.Signal, 1)
+ defer close(ch)
+
+ signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
+ defer signal.Stop(ch)
+
+ select {
+ case sig := <-ch:
+ log.Info("Got interrupt, shutting down...", "sig", sig)
+ case <-ctx.Done():
+ }
+ }()
+ return ctx, cancel
+}
+
+func CliString2Array(input string) []string {
+ l := strings.Split(input, ",")
+ res := make([]string, 0, len(l))
+ for _, r := range l {
+ if r = strings.TrimSpace(r); r != "" {
+ res = append(res, r)
+ }
+ }
+ return res
+}
diff --git a/erigon-lib/common/cmp/cmp.go b/erigon-lib/common/cmp/cmp.go
new file mode 100644
index 00000000000..7e7334010a6
--- /dev/null
+++ b/erigon-lib/common/cmp/cmp.go
@@ -0,0 +1,57 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cmp
+
+import (
+ "golang.org/x/exp/constraints"
+)
+
+// InRange - ensure val is in [min,max] range
+func InRange[T constraints.Ordered](min, max, val T) T {
+ if min >= val {
+ return min
+ }
+ if max <= val {
+ return max
+ }
+ return val
+}
+
+func Min[T constraints.Ordered](a, b T) T {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+func Max[T constraints.Ordered](a, b T) T {
+ if a >= b {
+ return a
+ }
+ return b
+}
+
+func Compare[T constraints.Ordered](a, b T) int {
+ switch {
+ case a < b:
+ return -1
+ case a == b:
+ return 0
+ default:
+ return 1
+ }
+}
diff --git a/erigon-lib/common/copybytes.go b/erigon-lib/common/copybytes.go
new file mode 100644
index 00000000000..02457e0a954
--- /dev/null
+++ b/erigon-lib/common/copybytes.go
@@ -0,0 +1,12 @@
+package common
+
+// CopyBytes returns an exact copy of the provided bytes.
+func CopyBytes(b []byte) (copiedBytes []byte) {
+ if b == nil {
+ return nil
+ }
+ copiedBytes = make([]byte, len(b))
+ copy(copiedBytes, b)
+
+ return
+}
diff --git a/erigon-lib/common/datadir/dirs.go b/erigon-lib/common/datadir/dirs.go
new file mode 100644
index 00000000000..161d381c17f
--- /dev/null
+++ b/erigon-lib/common/datadir/dirs.go
@@ -0,0 +1,192 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package datadir
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/gofrs/flock"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
+)
+
+// Dirs is the file system folder the node should use for any data storage
+// requirements. The configured data directory will not be directly shared with
+// registered services, instead those can use utility methods to create/access
+// databases or flat files
+type Dirs struct {
+ DataDir string
+ RelativeDataDir string // like dataDir, but without filepath.Abs() resolution
+ Chaindata string
+ Tmp string
+ Snap string
+ SnapIdx string
+ SnapHistory string
+ SnapDomain string
+ SnapAccessors string
+ Downloader string
+ TxPool string
+ Nodes string
+ CaplinHistory string
+ CaplinIndexing string
+}
+
+func New(datadir string) Dirs {
+ relativeDataDir := datadir
+ if datadir != "" {
+ var err error
+ absdatadir, err := filepath.Abs(datadir)
+ if err != nil {
+ panic(err)
+ }
+ datadir = absdatadir
+ }
+
+ dirs := Dirs{
+ RelativeDataDir: relativeDataDir,
+ DataDir: datadir,
+ Chaindata: filepath.Join(datadir, "chaindata"),
+ Tmp: filepath.Join(datadir, "temp"),
+ Snap: filepath.Join(datadir, "snapshots"),
+ SnapIdx: filepath.Join(datadir, "snapshots", "idx"),
+ SnapHistory: filepath.Join(datadir, "snapshots", "history"),
+ SnapDomain: filepath.Join(datadir, "snapshots", "domain"),
+ SnapAccessors: filepath.Join(datadir, "snapshots", "accessor"),
+ Downloader: filepath.Join(datadir, "downloader"),
+ TxPool: filepath.Join(datadir, "txpool"),
+ Nodes: filepath.Join(datadir, "nodes"),
+ CaplinHistory: filepath.Join(datadir, "caplin/history"),
+ CaplinIndexing: filepath.Join(datadir, "caplin/indexing"),
+ }
+
+ dir.MustExist(dirs.Chaindata, dirs.Tmp,
+ dirs.SnapIdx, dirs.SnapHistory, dirs.SnapDomain, dirs.SnapAccessors,
+ dirs.Downloader, dirs.TxPool, dirs.Nodes, dirs.CaplinHistory, dirs.CaplinIndexing)
+ return dirs
+}
+
+var (
+ ErrDataDirLocked = errors.New("datadir already used by another process")
+
+ datadirInUseErrNos = map[uint]bool{11: true, 32: true, 35: true}
+)
+
+func convertFileLockError(err error) error {
+ //nolint
+ if errno, ok := err.(syscall.Errno); ok && datadirInUseErrNos[uint(errno)] {
+ return ErrDataDirLocked
+ }
+ return err
+}
+
+func TryFlock(dirs Dirs) (*flock.Flock, bool, error) {
+ // Lock the instance directory to prevent concurrent use by another instance as well as
+ // accidental use of the instance directory as a database.
+ l := flock.New(filepath.Join(dirs.DataDir, "LOCK"))
+ locked, err := l.TryLock()
+ if err != nil {
+ return nil, false, convertFileLockError(err)
+ }
+ return l, locked, nil
+}
+
+// ApplyMigrations - if can get flock.
+func ApplyMigrations(dirs Dirs) error {
+ need := downloaderV2MigrationNeeded(dirs)
+ if !need {
+ return nil
+ }
+
+ lock, locked, err := TryFlock(dirs)
+ if err != nil {
+ return err
+ }
+ if !locked {
+ return nil
+ }
+ defer lock.Unlock()
+
+ // add your migration here
+
+ if err := downloaderV2Migration(dirs); err != nil {
+ return err
+ }
+ return nil
+}
+
+func downloaderV2MigrationNeeded(dirs Dirs) bool {
+ return dir.FileExist(filepath.Join(dirs.Snap, "db", "mdbx.dat"))
+}
+func downloaderV2Migration(dirs Dirs) error {
+ // move db from `datadir/snapshot/db` to `datadir/downloader`
+ if !downloaderV2MigrationNeeded(dirs) {
+ return nil
+ }
+ from, to := filepath.Join(dirs.Snap, "db", "mdbx.dat"), filepath.Join(dirs.Downloader, "mdbx.dat")
+ if err := os.Rename(from, to); err != nil {
+ //fall back to copy-file if folders are on different disks
+ if err := copyFile(from, to); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// nolint
+func moveFiles(from, to string, ext string) error {
+ files, err := os.ReadDir(from)
+ if err != nil {
+ return fmt.Errorf("ReadDir: %w, %s", err, from)
+ }
+ for _, f := range files {
+ if f.Type().IsDir() || !f.Type().IsRegular() {
+ continue
+ }
+ if filepath.Ext(f.Name()) != ext {
+ continue
+ }
+ _ = os.Rename(filepath.Join(from, f.Name()), filepath.Join(to, f.Name()))
+ }
+ return nil
+}
+
+func copyFile(from, to string) error {
+ r, err := os.Open(from)
+ if err != nil {
+ return fmt.Errorf("please manually move file: from %s to %s. error: %w", from, to, err)
+ }
+ defer r.Close()
+ w, err := os.Create(to)
+ if err != nil {
+ return fmt.Errorf("please manually move file: from %s to %s. error: %w", from, to, err)
+ }
+ defer w.Close()
+ if _, err = w.ReadFrom(r); err != nil {
+ w.Close()
+ os.Remove(to)
+ return fmt.Errorf("please manually move file: from %s to %s. error: %w", from, to, err)
+ }
+ if err = w.Sync(); err != nil {
+ w.Close()
+ os.Remove(to)
+ return fmt.Errorf("please manually move file: from %s to %s. error: %w", from, to, err)
+ }
+ return nil
+}
diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go
new file mode 100644
index 00000000000..ff4f966d63f
--- /dev/null
+++ b/erigon-lib/common/dbg/experiments.go
@@ -0,0 +1,283 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dbg
+
+import (
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+var doMemstat = true
+
+func init() {
+ _, ok := os.LookupEnv("NO_MEMSTAT")
+ if ok {
+ doMemstat = false
+ }
+}
+
+func DoMemStat() bool { return doMemstat }
+func ReadMemStats(m *runtime.MemStats) {
+ if doMemstat {
+ runtime.ReadMemStats(m)
+ }
+}
+
+var (
+ writeMap bool
+ writeMapOnce sync.Once
+)
+
+func WriteMap() bool {
+ writeMapOnce.Do(func() {
+ v, _ := os.LookupEnv("WRITE_MAP")
+ if v == "true" {
+ writeMap = true
+ log.Info("[Experiment]", "WRITE_MAP", writeMap)
+ }
+ })
+ return writeMap
+}
+
+var (
+ dirtySace uint64
+ dirtySaceOnce sync.Once
+)
+
+func DirtySpace() uint64 {
+ dirtySaceOnce.Do(func() {
+ v, _ := os.LookupEnv("MDBX_DIRTY_SPACE_MB")
+ if v != "" {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ panic(err)
+ }
+ dirtySace = uint64(i * 1024 * 1024)
+ log.Info("[Experiment]", "MDBX_DIRTY_SPACE_MB", dirtySace)
+ }
+ })
+ return dirtySace
+}
+
+var (
+ noSync bool
+ noSyncOnce sync.Once
+)
+
+func NoSync() bool {
+ noSyncOnce.Do(func() {
+ v, _ := os.LookupEnv("NO_SYNC")
+ if v == "true" {
+ noSync = true
+ log.Info("[Experiment]", "NO_SYNC", noSync)
+ }
+ })
+ return noSync
+}
+
+var (
+ mergeTr int
+ mergeTrOnce sync.Once
+)
+
+func MergeTr() int {
+ mergeTrOnce.Do(func() {
+ v, _ := os.LookupEnv("MERGE_THRESHOLD")
+ if v != "" {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ panic(err)
+ }
+ if i < 0 || i > 4 {
+ panic(i)
+ }
+ mergeTr = i
+ log.Info("[Experiment]", "MERGE_THRESHOLD", mergeTr)
+ }
+ })
+ return mergeTr
+}
+
+var (
+ mdbxReadahead bool
+ mdbxReadaheadOnce sync.Once
+)
+
+func MdbxReadAhead() bool {
+ mdbxReadaheadOnce.Do(func() {
+ v, _ := os.LookupEnv("MDBX_READAHEAD")
+ if v == "true" {
+ mdbxReadahead = true
+ log.Info("[Experiment]", "MDBX_READAHEAD", mdbxReadahead)
+ }
+ })
+ return mdbxReadahead
+}
+
+var (
+ discardHistory bool
+ discardHistoryOnce sync.Once
+)
+
+func DiscardHistory() bool {
+ discardHistoryOnce.Do(func() {
+ v, _ := os.LookupEnv("DISCARD_HISTORY")
+ if v == "true" {
+ discardHistory = true
+ log.Info("[Experiment]", "DISCARD_HISTORY", discardHistory)
+ }
+ })
+ return discardHistory
+}
+
+var (
+ bigRoTx uint
+ getBigRoTx sync.Once
+)
+
+// DEBUG_BIG_RO_TX_KB - print logs with info about large read-only transactions
+// DEBUG_BIG_RW_TX_KB - print logs with info about large read-write transactions
+// DEBUG_SLOW_COMMIT_MS - print logs with commit timing details if commit is slower than this threshold
+func BigRoTxKb() uint {
+ getBigRoTx.Do(func() {
+ v, _ := os.LookupEnv("DEBUG_BIG_RO_TX_KB")
+ if v != "" {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ panic(err)
+ }
+ bigRoTx = uint(i)
+ log.Info("[Experiment]", "DEBUG_BIG_RO_TX_KB", bigRoTx)
+ }
+ })
+ return bigRoTx
+}
+
+var (
+ bigRwTx uint
+ getBigRwTx sync.Once
+)
+
+func BigRwTxKb() uint {
+ getBigRwTx.Do(func() {
+ v, _ := os.LookupEnv("DEBUG_BIG_RW_TX_KB")
+ if v != "" {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ panic(err)
+ }
+ bigRwTx = uint(i)
+ log.Info("[Experiment]", "DEBUG_BIG_RW_TX_KB", bigRwTx)
+ }
+ })
+ return bigRwTx
+}
+
+var (
+ slowCommit time.Duration
+ slowCommitOnce sync.Once
+)
+
+func SlowCommit() time.Duration {
+ slowCommitOnce.Do(func() {
+ v, _ := os.LookupEnv("SLOW_COMMIT")
+ if v != "" {
+ var err error
+ slowCommit, err = time.ParseDuration(v)
+ if err != nil {
+ panic(err)
+ }
+ log.Info("[Experiment]", "SLOW_COMMIT", slowCommit.String())
+ }
+ })
+ return slowCommit
+}
+
+var (
+ slowTx time.Duration
+ slowTxOnce sync.Once
+)
+
+func SlowTx() time.Duration {
+ slowTxOnce.Do(func() {
+ v, _ := os.LookupEnv("SLOW_TX")
+ if v != "" {
+ var err error
+ slowTx, err = time.ParseDuration(v)
+ if err != nil {
+ panic(err)
+ }
+ log.Info("[Experiment]", "SLOW_TX", slowTx.String())
+ }
+ })
+ return slowTx
+}
+
+var (
+ stopBeforeStage string
+ stopBeforeStageFlag sync.Once
+ stopAfterStage string
+ stopAfterStageFlag sync.Once
+)
+
+func StopBeforeStage() string {
+ f := func() {
+ v, _ := os.LookupEnv("STOP_BEFORE_STAGE") // see names in eth/stagedsync/stages/stages.go
+ if v != "" {
+ stopBeforeStage = v
+ log.Info("[Experiment]", "STOP_BEFORE_STAGE", stopBeforeStage)
+ }
+ }
+ stopBeforeStageFlag.Do(f)
+ return stopBeforeStage
+}
+
+// TODO(allada) We should possibly consider removing `STOP_BEFORE_STAGE`, as `STOP_AFTER_STAGE` can
+// perform all same the functionality, but due to reverse compatibility reasons we are going to
+// leave it.
+func StopAfterStage() string {
+ f := func() {
+ v, _ := os.LookupEnv("STOP_AFTER_STAGE") // see names in eth/stagedsync/stages/stages.go
+ if v != "" {
+ stopAfterStage = v
+ log.Info("[Experiment]", "STOP_AFTER_STAGE", stopAfterStage)
+ }
+ }
+ stopAfterStageFlag.Do(f)
+ return stopAfterStage
+}
+
+var (
+ stopAfterReconst bool
+ stopAfterReconstOnce sync.Once
+)
+
+func StopAfterReconst() bool {
+ stopAfterReconstOnce.Do(func() {
+ v, _ := os.LookupEnv("STOP_AFTER_RECONSTITUTE")
+ if v == "true" {
+ stopAfterReconst = true
+ log.Info("[Experiment]", "STOP_AFTER_RECONSTITUTE", writeMap)
+ }
+ })
+ return stopAfterReconst
+}
diff --git a/erigon-lib/common/dbg/leak_detector.go b/erigon-lib/common/dbg/leak_detector.go
new file mode 100644
index 00000000000..d4369f2be9e
--- /dev/null
+++ b/erigon-lib/common/dbg/leak_detector.go
@@ -0,0 +1,107 @@
+package dbg
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/log/v3"
+)
+
+const FileCloseLogLevel = log.LvlTrace
+
+// LeakDetector - use it to find which resource was created but not closed (leaked)
+// periodically does print in logs resources which living longer than 1min with their creation stack trace
+// For example db transactions can call Add/Del from Begin/Commit/Rollback methods
+type LeakDetector struct {
+ enabled atomic.Bool
+ slowThreshold atomic.Pointer[time.Duration]
+ autoIncrement atomic.Uint64
+
+ list map[uint64]LeakDetectorItem
+ listLock sync.Mutex
+}
+
+type LeakDetectorItem struct {
+ stack string
+ started time.Time
+}
+
+func NewLeakDetector(name string, slowThreshold time.Duration) *LeakDetector {
+ enabled := slowThreshold > 0
+ if !enabled {
+ return nil
+ }
+ d := &LeakDetector{list: map[uint64]LeakDetectorItem{}}
+ d.SetSlowThreshold(slowThreshold)
+
+ if enabled {
+ go func() {
+ logEvery := time.NewTicker(60 * time.Second)
+ defer logEvery.Stop()
+
+ for {
+ select {
+ case <-logEvery.C:
+ if list := d.slowList(); len(list) > 0 {
+ log.Info(fmt.Sprintf("[dbg.%s] long living resources", name), "list", strings.Join(d.slowList(), ", "))
+ }
+ }
+ }
+ }()
+ }
+ return d
+}
+
+func (d *LeakDetector) slowList() (res []string) {
+ if d == nil || !d.Enabled() {
+ return res
+ }
+ slowThreshold := *d.slowThreshold.Load()
+
+ d.listLock.Lock()
+ defer d.listLock.Unlock()
+ i := 0
+ for key, value := range d.list {
+ living := time.Since(value.started)
+ if living > slowThreshold {
+ res = append(res, fmt.Sprintf("%d(%s): %s", key, living, value.stack))
+ }
+ i++
+ if i > 10 { // protect logs from too many output
+ break
+ }
+ }
+ return res
+}
+
+func (d *LeakDetector) Del(id uint64) {
+ if d == nil || !d.Enabled() {
+ return
+ }
+ d.listLock.Lock()
+ defer d.listLock.Unlock()
+ delete(d.list, id)
+}
+func (d *LeakDetector) Add() uint64 {
+ if d == nil || !d.Enabled() {
+ return 0
+ }
+ ac := LeakDetectorItem{
+ stack: StackSkip(2),
+ started: time.Now(),
+ }
+ id := d.autoIncrement.Add(1)
+ d.listLock.Lock()
+ defer d.listLock.Unlock()
+ d.list[id] = ac
+ return id
+}
+
+func (d *LeakDetector) Enabled() bool { return d.enabled.Load() }
+func (d *LeakDetector) SetSlowThreshold(t time.Duration) {
+ d.slowThreshold.Store(&t)
+ d.enabled.Store(t > 0)
+}
diff --git a/erigon-lib/common/dbg/log_panic.go b/erigon-lib/common/dbg/log_panic.go
new file mode 100644
index 00000000000..a05314e8ab9
--- /dev/null
+++ b/erigon-lib/common/dbg/log_panic.go
@@ -0,0 +1,29 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dbg
+
+import (
+ stack2 "github.com/go-stack/stack"
+)
+
+// Stack returns stack-trace in logger-friendly compact formatting
+func Stack() string {
+ return stack2.Trace().TrimBelow(stack2.Caller(1)).String()
+}
+func StackSkip(skip int) string {
+ return stack2.Trace().TrimBelow(stack2.Caller(skip)).String()
+}
diff --git a/erigon-lib/common/dir/rw_dir.go b/erigon-lib/common/dir/rw_dir.go
new file mode 100644
index 00000000000..0bbf76d8f5f
--- /dev/null
+++ b/erigon-lib/common/dir/rw_dir.go
@@ -0,0 +1,134 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dir
+
+import (
+ "os"
+ "path/filepath"
+)
+
+func MustExist(path ...string) {
+ const perm = 0764 // user rwx, group rw, other r
+ for _, p := range path {
+ if err := os.MkdirAll(p, perm); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func Exist(path string) bool {
+ _, err := os.Stat(path)
+ if err != nil && os.IsNotExist(err) {
+ return false
+ }
+ return true
+}
+
+func FileExist(path string) bool {
+ fi, err := os.Stat(path)
+ if err != nil && os.IsNotExist(err) {
+ return false
+ }
+ if !fi.Mode().IsRegular() {
+ return false
+ }
+ return true
+}
+
+// nolint
+func WriteFileWithFsync(name string, data []byte, perm os.FileMode) error {
+ f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, err = f.Write(data)
+ if err != nil {
+ return err
+ }
+ err = f.Sync()
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func Recreate(dir string) {
+ if Exist(dir) {
+ _ = os.RemoveAll(dir)
+ }
+ MustExist(dir)
+}
+
+func HasFileOfType(dir, ext string) bool {
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ return false
+ }
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ if filepath.Ext(f.Name()) == ext {
+ return true
+ }
+ }
+ return false
+}
+
+// nolint
+func DeleteFiles(dirs ...string) error {
+ for _, dir := range dirs {
+ files, err := ListFiles(dir)
+ if err != nil {
+ return err
+ }
+ for _, fPath := range files {
+ if err := os.Remove(fPath); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func ListFiles(dir string, extensions ...string) ([]string, error) {
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ res := make([]string, 0, len(files))
+ for _, f := range files {
+ if f.IsDir() && !f.Type().IsRegular() {
+ continue
+ }
+ match := false
+ if len(extensions) == 0 {
+ match = true
+ }
+ for _, ext := range extensions {
+ if filepath.Ext(f.Name()) == ext { // filter out only compressed files
+ match = true
+ }
+ }
+ if !match {
+ continue
+ }
+ res = append(res, filepath.Join(dir, f.Name()))
+ }
+ return res, nil
+}
diff --git a/erigon-lib/common/eth.go b/erigon-lib/common/eth.go
new file mode 100644
index 00000000000..4781f78083d
--- /dev/null
+++ b/erigon-lib/common/eth.go
@@ -0,0 +1,23 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+const (
+ Wei = 1
+ GWei = 1e9
+ Ether = 1e18
+)
diff --git a/common/eth2shuffle/shuffle.go b/erigon-lib/common/eth2shuffle/shuffle.go
similarity index 100%
rename from common/eth2shuffle/shuffle.go
rename to erigon-lib/common/eth2shuffle/shuffle.go
diff --git a/common/eth2shuffle/shuffle_bench_test.go b/erigon-lib/common/eth2shuffle/shuffle_bench_test.go
similarity index 97%
rename from common/eth2shuffle/shuffle_bench_test.go
rename to erigon-lib/common/eth2shuffle/shuffle_bench_test.go
index a44ef6e428b..45fe2027642 100644
--- a/common/eth2shuffle/shuffle_bench_test.go
+++ b/erigon-lib/common/eth2shuffle/shuffle_bench_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/ledgerwatch/erigon/common/eth2shuffle"
+ "github.com/ledgerwatch/erigon-lib/common/eth2shuffle"
)
func BenchmarkPermuteIndex(b *testing.B) {
diff --git a/common/eth2shuffle/shuffle_test.go b/erigon-lib/common/eth2shuffle/shuffle_test.go
similarity index 98%
rename from common/eth2shuffle/shuffle_test.go
rename to erigon-lib/common/eth2shuffle/shuffle_test.go
index b19c45305bb..1f5cfda530c 100644
--- a/common/eth2shuffle/shuffle_test.go
+++ b/erigon-lib/common/eth2shuffle/shuffle_test.go
@@ -10,7 +10,7 @@ import (
"strings"
"testing"
- "github.com/ledgerwatch/erigon/common/eth2shuffle"
+ "github.com/ledgerwatch/erigon-lib/common/eth2shuffle"
"github.com/stretchr/testify/assert"
)
diff --git a/common/eth2shuffle/spec/tests.csv b/erigon-lib/common/eth2shuffle/spec/tests.csv
similarity index 100%
rename from common/eth2shuffle/spec/tests.csv
rename to erigon-lib/common/eth2shuffle/spec/tests.csv
diff --git a/erigon-lib/common/fixedgas/protocol.go b/erigon-lib/common/fixedgas/protocol.go
new file mode 100644
index 00000000000..038e8bd0586
--- /dev/null
+++ b/erigon-lib/common/fixedgas/protocol.go
@@ -0,0 +1,39 @@
+/*
+ Copyright 2021 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package fixedgas
+
+const (
+ TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions.
+ TxGasContractCreation uint64 = 53000 // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions.
+ TxDataZeroGas uint64 = 4 // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions.
+ TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions.
+ TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul)
+ TxAccessListAddressGas uint64 = 2400 // Per address specified in EIP 2930 access list
+ TxAccessListStorageKeyGas uint64 = 1900 // Per storage key specified in EIP 2930 access list
+
+ MaxCodeSize = 24576 // Maximum bytecode to permit for a contract
+
+ // EIP-3860 to limit size of initcode
+ MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions
+ InitCodeWordGas = 2
+
+ // EIP-4844: Shard Blob Transactions
+ FieldElementsPerBlob = 4096 // each field element is 32 bytes
+ BlobSize = FieldElementsPerBlob * 32
+ BlobGasPerBlob uint64 = 0x20000
+ DefaultMaxBlobsPerBlock uint64 = 6 // lower for Gnosis
+)
diff --git a/erigon-lib/common/hash.go b/erigon-lib/common/hash.go
new file mode 100644
index 00000000000..a2353a4e6b9
--- /dev/null
+++ b/erigon-lib/common/hash.go
@@ -0,0 +1,179 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package common
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+)
+
+var (
+ hashT = reflect.TypeOf(Hash{})
+)
+
+const (
+ hexPrefix = `0x`
+)
+
+// Hash represents the 32 byte Keccak256 hash of arbitrary data.
+type Hash [length.Hash]byte
+
+// BytesToHash sets b to hash.
+// If b is larger than len(h), b will be cropped from the left.
+func BytesToHash(b []byte) Hash {
+ var h Hash
+ h.SetBytes(b)
+ return h
+}
+
+// CastToHash - sets b to hash
+// If b is larger than len(h), b will be cropped from the left.
+// panics if input is shorter than 32 bytes, see https://go.dev/doc/go1.17#language
+// faster than BytesToHash
+func CastToHash(b []byte) Hash { return *(*Hash)(b) }
+
+// BigToHash sets byte representation of b to hash.
+// If b is larger than len(h), b will be cropped from the left.
+func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
+
+// HexToHash sets byte representation of s to hash.
+// If b is larger than len(h), b will be cropped from the left.
+func HexToHash(s string) Hash { return BytesToHash(hexutility.FromHex(s)) }
+
+// Bytes gets the byte representation of the underlying hash.
+func (h Hash) Bytes() []byte { return h[:] }
+
+// Big converts a hash to a big integer.
+func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
+
+// Hex converts a hash to a hex string.
+func (h Hash) Hex() string { return hexutility.Encode(h[:]) }
+
+// TerminalString implements log.TerminalStringer, formatting a string for console
+// output during logging.
+func (h Hash) TerminalString() string {
+ return fmt.Sprintf("%x…%x", h[:3], h[29:])
+}
+
+// String implements the stringer interface and is used also by the logger when
+// doing full logging into a file.
+func (h Hash) String() string {
+ return h.Hex()
+}
+
+// Format implements fmt.Formatter.
+// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+func (h Hash) Format(s fmt.State, c rune) {
+ hexb := make([]byte, 2+len(h)*2)
+ copy(hexb, "0x")
+ hex.Encode(hexb[2:], h[:])
+
+ switch c {
+ case 'x', 'X':
+ if !s.Flag('#') {
+ hexb = hexb[2:]
+ }
+ if c == 'X' {
+ hexb = bytes.ToUpper(hexb)
+ }
+ fallthrough
+ case 'v', 's':
+ s.Write(hexb)
+ case 'q':
+ q := []byte{'"'}
+ s.Write(q)
+ s.Write(hexb)
+ s.Write(q)
+ case 'd':
+ fmt.Fprint(s, ([len(h)]byte)(h))
+ default:
+ fmt.Fprintf(s, "%%!%c(hash=%x)", c, h)
+ }
+}
+
+// UnmarshalText parses a hash in hex syntax.
+func (h *Hash) UnmarshalText(input []byte) error {
+ return hexutility.UnmarshalFixedText("Hash", input, h[:])
+}
+
+// UnmarshalJSON parses a hash in hex syntax.
+func (h *Hash) UnmarshalJSON(input []byte) error {
+ return hexutility.UnmarshalFixedJSON(hashT, input, h[:])
+}
+
+// MarshalText returns the hex representation of h.
+func (h Hash) MarshalText() ([]byte, error) {
+ b := h[:]
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], b)
+ return result, nil
+}
+
+// SetBytes sets the hash to the value of b.
+// If b is larger than len(h), b will be cropped from the left.
+func (h *Hash) SetBytes(b []byte) {
+ if len(b) > len(h) {
+ b = b[len(b)-length.Hash:]
+ }
+
+ copy(h[length.Hash-len(b):], b)
+}
+
+// Generate implements testing/quick.Generator.
+func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := rand.Intn(len(h))
+ for i := len(h) - 1; i > m; i-- {
+ h[i] = byte(rand.Uint32())
+ }
+ return reflect.ValueOf(h)
+}
+
+// Scan implements Scanner for database/sql.
+func (h *Hash) Scan(src interface{}) error {
+ srcB, ok := src.([]byte)
+ if !ok {
+ return fmt.Errorf("can't scan %T into Hash", src)
+ }
+ if len(srcB) != length.Hash {
+ return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), length.Hash)
+ }
+ copy(h[:], srcB)
+ return nil
+}
+
+// Value implements valuer for database/sql.
+func (h Hash) Value() (driver.Value, error) {
+ return h[:], nil
+}
+
+func FromHex(in string) []byte {
+ return hexutility.MustDecodeHex(in)
+}
+
+type CodeRecord struct {
+ BlockNumber uint64
+ TxNumber uint64
+ CodeHash Hash
+}
diff --git a/common/hasher.go b/erigon-lib/common/hasher.go
similarity index 81%
rename from common/hasher.go
rename to erigon-lib/common/hasher.go
index e92367f9bd0..f9b25295035 100644
--- a/common/hasher.go
+++ b/erigon-lib/common/hasher.go
@@ -4,7 +4,6 @@ import (
"hash"
"sync"
- libcommon "github.com/ledgerwatch/erigon-lib/common"
"golang.org/x/crypto/sha3"
)
@@ -33,19 +32,19 @@ func NewHasher() *Hasher {
}
func ReturnHasherToPool(h *Hasher) { hashersPool.Put(h) }
-func HashData(data []byte) (libcommon.Hash, error) {
+func HashData(data []byte) (Hash, error) {
h := NewHasher()
defer ReturnHasherToPool(h)
_, err := h.Sha.Write(data)
if err != nil {
- return libcommon.Hash{}, err
+ return Hash{}, err
}
- var buf libcommon.Hash
+ var buf Hash
_, err = h.Sha.Read(buf[:])
if err != nil {
- return libcommon.Hash{}, err
+ return Hash{}, err
}
return buf, nil
}
diff --git a/erigon-lib/common/hextobytes.go b/erigon-lib/common/hextobytes.go
new file mode 100644
index 00000000000..79473730a1e
--- /dev/null
+++ b/erigon-lib/common/hextobytes.go
@@ -0,0 +1,9 @@
+package common
+
+import "encoding/hex"
+
+// Hex2Bytes returns the bytes represented by the hexadecimal string str.
+func Hex2Bytes(str string) []byte {
+ h, _ := hex.DecodeString(str)
+ return h
+}
diff --git a/common/hexutil/hexutil.go b/erigon-lib/common/hexutil/hexutil.go
similarity index 75%
rename from common/hexutil/hexutil.go
rename to erigon-lib/common/hexutil/hexutil.go
index 3df5272075c..3efc5d4c1a5 100644
--- a/common/hexutil/hexutil.go
+++ b/erigon-lib/common/hexutil/hexutil.go
@@ -1,33 +1,3 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-/*
-Package hexutil implements hex encoding with 0x prefix.
-This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
-
-# Encoding Rules
-
-All hex data must have prefix "0x".
-
-For byte slices, the hex data must be of even length. An empty byte slice
-encodes as "0x".
-
-Integers are encoded using the least amount of digits (no leading zero digits). Their
-encoding may be of uneven length. The number zero encodes as "0x0".
-*/
package hexutil
import (
@@ -39,7 +9,7 @@ import (
const uintBits = 32 << (uint64(^uint(0)) >> 63)
-// Errors
+// These errors are from go-ethereum in order to keep compatibility with geth error codes.
var (
ErrEmptyString = &decError{"empty hex string"}
ErrSyntax = &decError{"invalid hex string"}
@@ -66,9 +36,9 @@ func Decode(input string) ([]byte, error) {
}
b, err := hex.DecodeString(input[2:])
if err != nil {
- err = mapError(err)
+ return nil, mapError(err)
}
- return b, err
+ return b, nil
}
// MustDecode decodes a hex string with 0x prefix. It panics for invalid input.
@@ -88,19 +58,9 @@ func DecodeUint64(input string) (uint64, error) {
}
dec, err := strconv.ParseUint(raw, 16, 64)
if err != nil {
- err = mapError(err)
+ return 0, mapError(err)
}
- return dec, err
-}
-
-// MustDecodeUint64 decodes a hex string with 0x prefix as a quantity.
-// It panics for invalid input.
-func MustDecodeUint64(input string) uint64 {
- dec, err := DecodeUint64(input)
- if err != nil {
- panic(err)
- }
- return dec
+ return dec, nil
}
// EncodeUint64 encodes i as a hex string with 0x prefix.
@@ -213,6 +173,8 @@ func decodeNibble(in byte) uint64 {
}
}
+// ignore these errors to keep compatiblity with go ethereum
+// nolint:errorlint
func mapError(err error) error {
if err, ok := err.(*strconv.NumError); ok {
switch err.Err {
@@ -232,7 +194,6 @@ func mapError(err error) error {
}
// CompressNibbles - supports only even number of nibbles
-// This method supports only arrays of even nibbles
//
// HI_NIBBLE(b) = (b >> 4) & 0x0F
// LO_NIBBLE(b) = b & 0x0F
diff --git a/common/hexutil/hexutil_test.go b/erigon-lib/common/hexutil/hexutil_test.go
similarity index 56%
rename from common/hexutil/hexutil_test.go
rename to erigon-lib/common/hexutil/hexutil_test.go
index 17ebd6bb769..4c9f508ad5e 100644
--- a/common/hexutil/hexutil_test.go
+++ b/erigon-lib/common/hexutil/hexutil_test.go
@@ -1,23 +1,8 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
package hexutil
import (
- "bytes"
+ "fmt"
+ "github.com/stretchr/testify/require"
"math/big"
"testing"
)
@@ -40,14 +25,13 @@ var (
{[]byte{0}, "0x00"},
{[]byte{0, 0, 1, 2}, "0x00000102"},
}
-
encodeBigTests = []marshalTest{
- {referenceBig("0"), "0x0"},
- {referenceBig("1"), "0x1"},
- {referenceBig("ff"), "0xff"},
- {referenceBig("112233445566778899aabbccddeeff"), "0x112233445566778899aabbccddeeff"},
- {referenceBig("80a7f2c1bcc396c00"), "0x80a7f2c1bcc396c00"},
- {referenceBig("-80a7f2c1bcc396c00"), "-0x80a7f2c1bcc396c00"},
+ {bigFromString("0"), "0x0"},
+ {bigFromString("1"), "0x1"},
+ {bigFromString("ff"), "0xff"},
+ {bigFromString("112233445566778899aabbccddeeff"), "0x112233445566778899aabbccddeeff"},
+ {bigFromString("80a7f2c1bcc396c00"), "0x80a7f2c1bcc396c00"},
+ {bigFromString("-80a7f2c1bcc396c00"), "-0x80a7f2c1bcc396c00"},
}
encodeUint64Tests = []marshalTest{
@@ -105,15 +89,15 @@ var (
{input: `0xfffffffff`, want: big.NewInt(0xfffffffff)},
{
input: `0x112233445566778899aabbccddeeff`,
- want: referenceBig("112233445566778899aabbccddeeff"),
+ want: bigFromString("112233445566778899aabbccddeeff"),
},
{
input: `0xffffffffffffffffffffffffffffffffffff`,
- want: referenceBig("ffffffffffffffffffffffffffffffffffff"),
+ want: bigFromString("ffffffffffffffffffffffffffffffffffff"),
},
{
input: `0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff`,
- want: referenceBig("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ want: bigFromString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
}
@@ -137,58 +121,55 @@ var (
)
func TestDecode(t *testing.T) {
- for _, test := range decodeBytesTests {
- dec, err := Decode(test.input)
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if !bytes.Equal(test.want.([]byte), dec) {
- t.Errorf("input %s: value mismatch: got %x, want %x", test.input, dec, test.want)
- continue
- }
+ for idx, test := range decodeBytesTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ dec, err := Decode(test.input)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want, dec)
+ }
+ })
}
}
func TestEncodeBig(t *testing.T) {
- for _, test := range encodeBigTests {
- enc := EncodeBig(test.input.(*big.Int))
- if enc != test.want {
- t.Errorf("input %x: wrong encoding %s", test.input, enc)
- }
+ for idx, test := range encodeBigTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ enc := EncodeBig(test.input.(*big.Int))
+ require.EqualValues(t, test.want, enc)
+ })
}
}
func TestDecodeBig(t *testing.T) {
- for _, test := range decodeBigTests {
- dec, err := DecodeBig(test.input)
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if dec.Cmp(test.want.(*big.Int)) != 0 {
- t.Errorf("input %s: value mismatch: got %x, want %x", test.input, dec, test.want)
- continue
- }
+ for idx, test := range decodeBigTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ dec, err := DecodeBig(test.input)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want.(*big.Int).String(), dec.String())
+ }
+ })
}
}
func TestEncodeUint64(t *testing.T) {
- for _, test := range encodeUint64Tests {
- enc := EncodeUint64(test.input.(uint64))
- if enc != test.want {
- t.Errorf("input %x: wrong encoding %s", test.input, enc)
- }
+ for idx, test := range encodeUint64Tests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ enc := EncodeUint64(test.input.(uint64))
+ require.EqualValues(t, test.want, enc)
+ })
}
}
func TestDecodeUint64(t *testing.T) {
- for _, test := range decodeUint64Tests {
- dec, err := DecodeUint64(test.input)
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if dec != test.want.(uint64) {
- t.Errorf("input %s: value mismatch: got %x, want %x", test.input, dec, test.want)
- continue
- }
+ for idx, test := range decodeUint64Tests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ dec, err := DecodeUint64(test.input)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want, dec)
+ }
+ })
}
}
diff --git a/common/hexutil/json.go b/erigon-lib/common/hexutil/json.go
similarity index 95%
rename from common/hexutil/json.go
rename to erigon-lib/common/hexutil/json.go
index ce1ce9a05d7..ab9f820a58b 100644
--- a/common/hexutil/json.go
+++ b/erigon-lib/common/hexutil/json.go
@@ -19,6 +19,7 @@ package hexutil
import (
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"math/big"
"reflect"
@@ -115,6 +116,10 @@ func (b *Big) String() string {
return EncodeBig(b.ToInt())
}
+func (b *Big) Uint64() uint64 {
+ return ((*big.Int)(b)).Uint64()
+}
+
// Uint64 marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint64 uint64
@@ -162,6 +167,10 @@ func (b Uint64) String() string {
return EncodeUint64(uint64(b))
}
+func (b Uint64) Uint64() uint64 {
+ return (uint64)(b)
+}
+
// Uint marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint uint
@@ -183,7 +192,7 @@ func (b *Uint) UnmarshalJSON(input []byte) error {
func (b *Uint) UnmarshalText(input []byte) error {
var u64 Uint64
err := u64.UnmarshalText(input)
- if u64 > Uint64(^uint(0)) || err == ErrUint64Range {
+ if u64 > Uint64(^uint(0)) || errors.Is(err, ErrUint64Range) {
return ErrUintRange
} else if err != nil {
return err
@@ -238,6 +247,8 @@ func checkNumberText(input []byte) (raw []byte, err error) {
}
func wrapTypeError(err error, typ reflect.Type) error {
+ // keeping compatiblity with go ethereum tests
+ // nolint:errorlint
if _, ok := err.(*decError); ok {
return &json.UnmarshalTypeError{Value: err.Error(), Type: typ}
}
diff --git a/common/hexutil/json_test.go b/erigon-lib/common/hexutil/json_test.go
similarity index 52%
rename from common/hexutil/json_test.go
rename to erigon-lib/common/hexutil/json_test.go
index 21f4a35ce58..42c6fc172e2 100644
--- a/common/hexutil/json_test.go
+++ b/erigon-lib/common/hexutil/json_test.go
@@ -1,46 +1,29 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
package hexutil
import (
- "bytes"
"encoding/json"
"errors"
+ "fmt"
"math/big"
"testing"
+
+ "github.com/stretchr/testify/require"
)
-func checkError(t *testing.T, input string, got, want error) bool {
- if got == nil {
- if want != nil {
- t.Errorf("input %s: got no error, want %q", input, want)
- return false
- }
- return true
- }
+func checkError(t *testing.T, input string, got, want error) {
+ t.Helper()
if want == nil {
- t.Errorf("input %s: unexpected error %q", input, got)
- } else if got.Error() != want.Error() {
- t.Errorf("input %s: got error %q, want %q", input, got, want)
+ require.NoErrorf(t, got, "input %s", input)
+ return
+ }
+ if got == nil {
+ require.NoError(t, want, "input %s", input)
+ return
}
- return false
+ require.EqualValues(t, want.Error(), got.Error(), "input %s", input)
}
-func referenceBig(s string) *big.Int {
+func bigFromString(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 16)
if !ok {
panic("invalid")
@@ -64,7 +47,6 @@ var unmarshalBigTests = []unmarshalTest{
input: `"0x10000000000000000000000000000000000000000000000000000000000000000"`,
wantErr: wrapTypeError(ErrBig256Range, bigT),
},
-
// valid encoding
{input: `""`, want: big.NewInt(0)},
{input: `"0x0"`, want: big.NewInt(0)},
@@ -76,29 +58,28 @@ var unmarshalBigTests = []unmarshalTest{
{input: `"0xfffffffff"`, want: big.NewInt(0xfffffffff)},
{
input: `"0x112233445566778899aabbccddeeff"`,
- want: referenceBig("112233445566778899aabbccddeeff"),
+ want: bigFromString("112233445566778899aabbccddeeff"),
},
{
input: `"0xffffffffffffffffffffffffffffffffffff"`,
- want: referenceBig("ffffffffffffffffffffffffffffffffffff"),
+ want: bigFromString("ffffffffffffffffffffffffffffffffffff"),
},
{
input: `"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"`,
- want: referenceBig("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ want: bigFromString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
}
func TestUnmarshalBig(t *testing.T) {
- for _, test := range unmarshalBigTests {
- var v Big
- err := json.Unmarshal([]byte(test.input), &v)
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if test.want != nil && test.want.(*big.Int).Cmp((*big.Int)(&v)) != 0 {
- t.Errorf("input %s: value mismatch: got %x, want %x", test.input, (*big.Int)(&v), test.want)
- continue
- }
+ for idx, test := range unmarshalBigTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ var v Big
+ err := json.Unmarshal([]byte(test.input), &v)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want.(*big.Int).Bytes(), v.ToInt().Bytes())
+ }
+ })
}
}
@@ -113,21 +94,15 @@ func BenchmarkUnmarshalBig(b *testing.B) {
}
func TestMarshalBig(t *testing.T) {
- for _, test := range encodeBigTests {
- in := test.input.(*big.Int)
- out, err := json.Marshal((*Big)(in))
- if err != nil {
- t.Errorf("%d: %v", in, err)
- continue
- }
- if want := `"` + test.want + `"`; string(out) != want {
- t.Errorf("%d: MarshalJSON output mismatch: got %q, want %q", in, out, want)
- continue
- }
- if out := (*Big)(in).String(); out != test.want {
- t.Errorf("%x: String mismatch: got %q, want %q", in, out, test.want)
- continue
- }
+ for idx, test := range encodeBigTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ in := test.input.(*big.Int)
+ out, err := json.Marshal((*Big)(in))
+ require.NoError(t, err)
+ want := `"` + test.want + `"`
+ require.EqualValues(t, want, string(out))
+ require.EqualValues(t, test.want, (*Big)(in).String())
+ })
}
}
@@ -155,16 +130,15 @@ var unmarshalUint64Tests = []unmarshalTest{
}
func TestUnmarshalUint64(t *testing.T) {
- for _, test := range unmarshalUint64Tests {
- var v Uint64
- err := json.Unmarshal([]byte(test.input), &v)
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if uint64(v) != test.want.(uint64) {
- t.Errorf("input %s: value mismatch: got %d, want %d", test.input, v, test.want)
- continue
- }
+ for idx, test := range unmarshalUint64Tests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ var v Uint64
+ err := json.Unmarshal([]byte(test.input), &v)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want, v)
+ }
+ })
}
}
@@ -172,51 +146,37 @@ func BenchmarkUnmarshalUint64(b *testing.B) {
input := []byte(`"0x123456789abcdf"`)
for i := 0; i < b.N; i++ {
var v Uint64
- v.UnmarshalJSON(input)
+ _ = v.UnmarshalJSON(input)
}
}
func TestMarshalUint64(t *testing.T) {
- for _, test := range encodeUint64Tests {
- in := test.input.(uint64)
- out, err := json.Marshal(Uint64(in))
- if err != nil {
- t.Errorf("%d: %v", in, err)
- continue
- }
- if want := `"` + test.want + `"`; string(out) != want {
- t.Errorf("%d: MarshalJSON output mismatch: got %q, want %q", in, out, want)
- continue
- }
- if out := (Uint64)(in).String(); out != test.want {
- t.Errorf("%x: String mismatch: got %q, want %q", in, out, test.want)
- continue
- }
+ for idx, test := range encodeUint64Tests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ in := test.input.(uint64)
+ out, err := json.Marshal(Uint64(in))
+ require.NoError(t, err)
+ want := `"` + test.want + `"`
+ require.EqualValues(t, want, string(out))
+ require.EqualValues(t, test.want, (Uint64)(in).String())
+ })
}
}
func TestMarshalUint(t *testing.T) {
- for _, test := range encodeUintTests {
- in := test.input.(uint)
- out, err := json.Marshal(Uint(in))
- if err != nil {
- t.Errorf("%d: %v", in, err)
- continue
- }
- if want := `"` + test.want + `"`; string(out) != want {
- t.Errorf("%d: MarshalJSON output mismatch: got %q, want %q", in, out, want)
- continue
- }
- if out := (Uint)(in).String(); out != test.want {
- t.Errorf("%x: String mismatch: got %q, want %q", in, out, test.want)
- continue
- }
+ for idx, test := range encodeUintTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ in := test.input.(uint)
+ out, err := json.Marshal(Uint(in))
+ require.NoError(t, err)
+ want := `"` + test.want + `"`
+ require.EqualValues(t, want, string(out))
+ require.EqualValues(t, test.want, (Uint)(in).String())
+ })
}
}
var (
- // These are variables (not constants) to avoid constant overflow
- // checks in the compiler on 32bit platforms.
maxUint33bits = uint64(^uint32(0)) + 1
maxUint64bits = ^uint64(0)
)
@@ -247,20 +207,20 @@ var unmarshalUintTests = []unmarshalTest{
}
func TestUnmarshalUint(t *testing.T) {
- for _, test := range unmarshalUintTests {
- var v Uint
- err := json.Unmarshal([]byte(test.input), &v)
- if uintBits == 32 && test.wantErr32bit != nil {
- checkError(t, test.input, err, test.wantErr32bit)
- continue
- }
- if !checkError(t, test.input, err, test.wantErr) {
- continue
- }
- if uint(v) != test.want.(uint) {
- t.Errorf("input %s: value mismatch: got %d, want %d", test.input, v, test.want)
- continue
- }
+ for idx, test := range unmarshalUintTests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ var v Uint
+ err := json.Unmarshal([]byte(test.input), &v)
+ if uintBits == 32 && test.wantErr32bit != nil {
+ checkError(t, test.input, err, test.wantErr32bit)
+ return
+ }
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, test.want, v)
+ }
+
+ })
}
}
@@ -282,19 +242,14 @@ func TestUnmarshalFixedUnprefixedText(t *testing.T) {
{input: "0x44444444", want: []byte{0x44, 0x44, 0x44, 0x44}},
}
- for _, test := range tests {
- out := make([]byte, 4)
- err := UnmarshalFixedUnprefixedText("x", []byte(test.input), out)
- switch {
- case err == nil && test.wantErr != nil:
- t.Errorf("%q: got no error, expected %q", test.input, test.wantErr)
- case err != nil && test.wantErr == nil:
- t.Errorf("%q: unexpected error %q", test.input, err)
- case err != nil && err.Error() != test.wantErr.Error():
- t.Errorf("%q: error mismatch: got %q, want %q", test.input, err, test.wantErr)
- }
- if test.want != nil && !bytes.Equal(out, test.want) {
- t.Errorf("%q: output mismatch: got %x, want %x", test.input, out, test.want)
- }
+ for idx, test := range tests {
+ t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
+ out := make([]byte, 4)
+ err := UnmarshalFixedUnprefixedText("x", []byte(test.input), out)
+ checkError(t, test.input, err, test.wantErr)
+ if test.want != nil {
+ require.EqualValues(t, out, test.want)
+ }
+ })
}
}
diff --git a/erigon-lib/common/hexutility/bytes.go b/erigon-lib/common/hexutility/bytes.go
new file mode 100644
index 00000000000..fe40256c2d1
--- /dev/null
+++ b/erigon-lib/common/hexutility/bytes.go
@@ -0,0 +1,66 @@
+/*
+ Copyright 2023 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "reflect"
+)
+
+var bytesT = reflect.TypeOf(Bytes(nil))
+
+// Bytes marshals/unmarshals as a JSON string with 0x prefix.
+// The empty slice marshals as "0x".
+type Bytes []byte
+
+const hexPrefix = `0x`
+
+// MarshalText implements encoding.TextMarshaler
+func (b Bytes) MarshalText() ([]byte, error) {
+ result := make([]byte, len(b)*2+2)
+ copy(result, hexPrefix)
+ hex.Encode(result[2:], b)
+ return result, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (b *Bytes) UnmarshalJSON(input []byte) error {
+ if !isString(input) {
+ return &json.UnmarshalTypeError{Value: "non-string", Type: bytesT}
+ }
+ return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (b *Bytes) UnmarshalText(input []byte) error {
+ raw, err := checkText(input, true)
+ if err != nil {
+ return err
+ }
+ dec := make([]byte, len(raw)/2)
+ _, err = hex.Decode(dec, raw)
+ if err == nil {
+ *b = dec
+ }
+ return err
+}
+
+// String returns the hex encoding of b.
+func (b Bytes) String() string {
+ return Encode(b)
+}
diff --git a/erigon-lib/common/hexutility/errors.go b/erigon-lib/common/hexutility/errors.go
new file mode 100644
index 00000000000..929ded993d4
--- /dev/null
+++ b/erigon-lib/common/hexutility/errors.go
@@ -0,0 +1,27 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+var (
+ ErrMissingPrefix = &decError{"hex string without 0x prefix"}
+ ErrOddLength = &decError{"hex string of odd length"}
+ ErrSyntax = &decError{"invalid hex string"}
+)
+
+type decError struct{ msg string }
+
+func (err decError) Error() string { return err.msg }
diff --git a/erigon-lib/common/hexutility/hex.go b/erigon-lib/common/hexutility/hex.go
new file mode 100644
index 00000000000..8a2c6ba62eb
--- /dev/null
+++ b/erigon-lib/common/hexutility/hex.go
@@ -0,0 +1,103 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+)
+
+func MustDecodeHex(in string) []byte {
+ in = strip0x(in)
+ if len(in)%2 == 1 {
+ in = "0" + in
+ }
+ payload, err := hex.DecodeString(in)
+ if err != nil {
+ panic(err)
+ }
+ return payload
+}
+
+func strip0x(str string) string {
+ if len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
+ return str[2:]
+ }
+ return str
+}
+
+// EncodeTs encodes a TimeStamp (BlockNumber or TxNumber or other uin64) as big endian
+func EncodeTs(number uint64) []byte {
+ enc := make([]byte, 8)
+ binary.BigEndian.PutUint64(enc, number)
+ return enc
+}
+
+// Encode encodes b as a hex string with 0x prefix.
+func Encode(b []byte) string {
+ enc := make([]byte, len(b)*2+2)
+ copy(enc, "0x")
+ hex.Encode(enc[2:], b)
+ return string(enc)
+}
+
+func FromHex(s string) []byte {
+ if Has0xPrefix(s) {
+ s = s[2:]
+ }
+ if len(s)%2 == 1 {
+ s = "0" + s
+ }
+ return Hex2Bytes(s)
+}
+
+// Has0xPrefix validates str begins with '0x' or '0X'.
+func Has0xPrefix(str string) bool {
+ return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
+}
+
+// Hex2Bytes returns the bytes represented by the hexadecimal string str.
+func Hex2Bytes(str string) []byte {
+ h, _ := hex.DecodeString(str)
+ return h
+}
+
+// IsHex validates whether each byte is valid hexadecimal string.
+func IsHex(str string) bool {
+ if len(str)%2 != 0 {
+ return false
+ }
+ for _, c := range []byte(str) {
+ if !isHexCharacter(c) {
+ return false
+ }
+ }
+ return true
+}
+
+// isHexCharacter returns bool of c being a valid hexadecimal.
+func isHexCharacter(c byte) bool {
+ return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
+}
+
+func MustDecodeString(s string) []byte {
+ r, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
diff --git a/erigon-lib/common/hexutility/hex_test.go b/erigon-lib/common/hexutility/hex_test.go
new file mode 100644
index 00000000000..11486ece356
--- /dev/null
+++ b/erigon-lib/common/hexutility/hex_test.go
@@ -0,0 +1,43 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+import (
+ "testing"
+)
+
+type marshalTest struct {
+ input interface{}
+ want string
+}
+
+var (
+ encodeBytesTests = []marshalTest{
+ {[]byte{}, "0x"},
+ {[]byte{0}, "0x00"},
+ {[]byte{0, 0, 1, 2}, "0x00000102"},
+ }
+)
+
+func TestEncode(t *testing.T) {
+ for _, test := range encodeBytesTests {
+ enc := Encode(test.input.([]byte))
+ if enc != test.want {
+ t.Errorf("input %x: wrong encoding %s", test.input, enc)
+ }
+ }
+}
diff --git a/erigon-lib/common/hexutility/json.go b/erigon-lib/common/hexutility/json.go
new file mode 100644
index 00000000000..1f6c240e3e3
--- /dev/null
+++ b/erigon-lib/common/hexutility/json.go
@@ -0,0 +1,45 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+import (
+ "encoding/json"
+ "errors"
+ "reflect"
+)
+
+// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
+// determines the required input length. This function is commonly used to implement the
+// UnmarshalJSON method for fixed-size types.
+func UnmarshalFixedJSON(typ reflect.Type, input, out []byte) error {
+ if !isString(input) {
+ return &json.UnmarshalTypeError{Value: "non-string", Type: typ}
+ }
+ return wrapTypeError(UnmarshalFixedText(typ.String(), input[1:len(input)-1], out), typ)
+}
+
+func isString(input []byte) bool {
+ return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
+}
+
+func wrapTypeError(err error, typ reflect.Type) error {
+ var dec *decError
+ if errors.As(err, &dec) {
+ return &json.UnmarshalTypeError{Value: err.Error(), Type: typ}
+ }
+ return err
+}
diff --git a/erigon-lib/common/hexutility/text.go b/erigon-lib/common/hexutility/text.go
new file mode 100644
index 00000000000..0c51aeec17d
--- /dev/null
+++ b/erigon-lib/common/hexutility/text.go
@@ -0,0 +1,79 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package hexutility
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+const (
+ badNibble = ^uint64(0)
+)
+
+// UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out
+// determines the required input length. This function is commonly used to implement the
+// UnmarshalText method for fixed-size types.
+func UnmarshalFixedText(typeName string, input, out []byte) error {
+ raw, err := checkText(input, true)
+ if err != nil {
+ return err
+ }
+ if len(raw)/2 != len(out) {
+ return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typeName)
+ }
+ // Pre-verify syntax before modifying out.
+ for _, b := range raw {
+ if decodeNibble(b) == badNibble {
+ return ErrSyntax
+ }
+ }
+ _, err = hex.Decode(out, raw)
+ return err
+}
+
+func checkText(input []byte, wantPrefix bool) ([]byte, error) {
+ if len(input) == 0 {
+ return nil, nil // empty strings are allowed
+ }
+ if bytesHave0xPrefix(input) {
+ input = input[2:]
+ } else if wantPrefix {
+ return nil, ErrMissingPrefix
+ }
+ if len(input)%2 != 0 {
+ return nil, ErrOddLength
+ }
+ return input, nil
+}
+
+func bytesHave0xPrefix(input []byte) bool {
+ return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
+}
+
+func decodeNibble(in byte) uint64 {
+ switch {
+ case in >= '0' && in <= '9':
+ return uint64(in - '0')
+ case in >= 'A' && in <= 'F':
+ return uint64(in - 'A' + 10)
+ case in >= 'a' && in <= 'f':
+ return uint64(in - 'a' + 10)
+ default:
+ return badNibble
+ }
+}
diff --git a/erigon-lib/common/length/length.go b/erigon-lib/common/length/length.go
new file mode 100644
index 00000000000..09b126c769e
--- /dev/null
+++ b/erigon-lib/common/length/length.go
@@ -0,0 +1,40 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package length
+
+// Lengths of hashes and addresses in bytes.
+const (
+ PeerID = 64
+ // Hash is the expected length of the hash (in bytes)
+ Hash = 32
+ // expected length of Bytes96 (signature)
+ Bytes96 = 96
+ // expected length of Bytes48 (bls public key and such)
+ Bytes48 = 48
+ // expected length of Bytes64 (sync committee bits)
+ Bytes64 = 64
+ // expected length of Bytes48 (beacon domain and such)
+ Bytes4 = 4
+ // Addr is the expected length of the address (in bytes)
+ Addr = 20
+ // BlockNumberLen length of uint64 big endian
+ BlockNum = 8
+ // Ts TimeStamp (BlockNum, TxNum or any other uint64 equivalent of Time)
+ Ts = 8
+ // Incarnation length of uint64 for contract incarnations
+ Incarnation = 8
+)
diff --git a/erigon-lib/common/math/integer.go b/erigon-lib/common/math/integer.go
new file mode 100644
index 00000000000..d82dae206c6
--- /dev/null
+++ b/erigon-lib/common/math/integer.go
@@ -0,0 +1,33 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package math
+
+import (
+ "math/bits"
+)
+
+// SafeMul returns x*y and checks for overflow.
+func SafeMul(x, y uint64) (uint64, bool) {
+ hi, lo := bits.Mul64(x, y)
+ return lo, hi != 0
+}
+
+// SafeAdd returns x+y and checks for overflow.
+func SafeAdd(x, y uint64) (uint64, bool) {
+ sum, carryOut := bits.Add64(x, y, 0)
+ return sum, carryOut != 0
+}
diff --git a/erigon-lib/common/metrics/metrics_enabled.go b/erigon-lib/common/metrics/metrics_enabled.go
new file mode 100644
index 00000000000..dff5154390b
--- /dev/null
+++ b/erigon-lib/common/metrics/metrics_enabled.go
@@ -0,0 +1,33 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package metrics
+
+// Config contains the configuration for the metric collection.
+type Config struct { //nolint:maligned
+ Enabled bool `toml:",omitempty"`
+ EnabledExpensive bool `toml:",omitempty"`
+ HTTP string `toml:",omitempty"`
+ Port int `toml:",omitempty"`
+}
+
+// DefaultConfig is the default config for metrics used in go-ethereum.
+var DefaultConfig = Config{
+ Enabled: false,
+ EnabledExpensive: false,
+ HTTP: "127.0.0.1",
+ Port: 6060,
+}
diff --git a/erigon-lib/common/ring/ring.go b/erigon-lib/common/ring/ring.go
new file mode 100644
index 00000000000..7b15887be1e
--- /dev/null
+++ b/erigon-lib/common/ring/ring.go
@@ -0,0 +1,156 @@
+package ring
+
+type Buffer[T any] struct {
+ buf []T
+ // real head is head-1, like this so nil ring is valid
+ head int
+ tail int
+ length int
+}
+
+func MakeBuffer[T any](length, capacity int) Buffer[T] {
+ if length > capacity {
+ panic("length must be less than capacity")
+ }
+ return Buffer[T]{
+ buf: make([]T, capacity),
+ tail: length,
+ length: length,
+ }
+}
+
+func NewBuffer[T any](length, capacity int) *Buffer[T] {
+ r := MakeBuffer[T](length, capacity)
+ return &r
+}
+
+func (r *Buffer[T]) grow() {
+ size := len(r.buf) * 2
+ if size == 0 {
+ size = 2
+ }
+
+ buf := make([]T, size)
+ copy(buf, r.buf[r.head:])
+ copy(buf[len(r.buf[r.head:]):], r.buf[:r.head])
+ r.head = 0
+ r.tail = r.length
+ r.buf = buf
+}
+
+func (r *Buffer[T]) incHead() {
+ // resize
+ if r.length == 0 {
+ panic("smashing detected")
+ }
+ r.length--
+
+ r.head++
+ if r.head == len(r.buf) {
+ r.head = 0
+ }
+}
+
+func (r *Buffer[T]) decHead() {
+ // resize
+ if r.length == len(r.buf) {
+ r.grow()
+ }
+ r.length++
+
+ r.head--
+ if r.head == -1 {
+ r.head = len(r.buf) - 1
+ }
+}
+
+func (r *Buffer[T]) incTail() {
+ // resize
+ if r.length == len(r.buf) {
+ r.grow()
+ }
+ r.length++
+
+ r.tail++
+ if r.tail == len(r.buf) {
+ r.tail = 0
+ }
+}
+
+func (r *Buffer[T]) decTail() {
+ // resize
+ if r.length == 0 {
+ panic("smashing detected")
+ }
+ r.length--
+
+ r.tail--
+ if r.tail == -1 {
+ r.tail = len(r.buf) - 1
+ }
+}
+
+func (r *Buffer[T]) tailSub1() int {
+ tail := r.tail - 1
+ if tail == -1 {
+ tail = len(r.buf) - 1
+ }
+ return tail
+}
+
+func (r *Buffer[T]) PopFront() (T, bool) {
+ if r.length == 0 {
+ return *new(T), false
+ }
+
+ front := r.buf[r.head]
+ r.buf[r.head] = *new(T)
+ r.incHead()
+ return front, true
+}
+
+func (r *Buffer[T]) PopBack() (T, bool) {
+ if r.length == 0 {
+ return *new(T), false
+ }
+
+ r.decTail()
+ back := r.buf[r.tail]
+ r.buf[r.tail] = *new(T)
+ return back, true
+}
+
+func (r *Buffer[T]) Clear() {
+ r.head = 0
+ r.tail = 0
+ r.length = 0
+}
+
+func (r *Buffer[T]) PushFront(value T) {
+ r.decHead()
+ r.buf[r.head] = value
+}
+
+func (r *Buffer[T]) PushBack(value T) {
+ r.incTail()
+ r.buf[r.tailSub1()] = value
+}
+
+func (r *Buffer[T]) Length() int {
+ return r.length
+}
+
+func (r *Buffer[T]) Capacity() int {
+ return len(r.buf)
+}
+
+func (r *Buffer[T]) Get(n int) T {
+ if n >= r.length {
+ panic("index out of range")
+ }
+ ptr := r.head + n
+ if ptr >= len(r.buf) {
+ ptr -= len(r.buf)
+ }
+ return r.buf[ptr]
+}
diff --git a/erigon-lib/common/sorted.go b/erigon-lib/common/sorted.go
new file mode 100644
index 00000000000..2c077fffaeb
--- /dev/null
+++ b/erigon-lib/common/sorted.go
@@ -0,0 +1,43 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package common
+
+import (
+ "golang.org/x/exp/constraints"
+ "golang.org/x/exp/slices"
+)
+
+func SortedKeys[K constraints.Ordered, V any](m map[K]V) []K {
+ keys := make([]K, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ slices.Sort(keys)
+ return keys
+}
+
+func RemoveDuplicatesFromSorted[T constraints.Ordered](slice []T) []T {
+ for i := 1; i < len(slice); i++ {
+ if slice[i] == slice[i-1] {
+ slice = append(slice[:i], slice[i+1:]...)
+ i--
+ }
+ }
+ return slice
+}
diff --git a/common/sorted_test.go b/erigon-lib/common/sorted_test.go
similarity index 51%
rename from common/sorted_test.go
rename to erigon-lib/common/sorted_test.go
index 6465b6ab87c..1ca29dbfb4b 100644
--- a/common/sorted_test.go
+++ b/erigon-lib/common/sorted_test.go
@@ -1,3 +1,19 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
package common
import (
diff --git a/erigon-lib/common/u256/big.go b/erigon-lib/common/u256/big.go
new file mode 100644
index 00000000000..bb8ecfb8d4a
--- /dev/null
+++ b/erigon-lib/common/u256/big.go
@@ -0,0 +1,35 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package u256
+
+import (
+ "github.com/holiman/uint256"
+)
+
+// Common big integers often used
+var (
+ N0 = uint256.NewInt(0)
+ N1 = uint256.NewInt(1)
+ N2 = uint256.NewInt(2)
+ N4 = uint256.NewInt(4)
+ N8 = uint256.NewInt(8)
+ N27 = uint256.NewInt(27)
+ N28 = uint256.NewInt(28)
+ N32 = uint256.NewInt(32)
+ N35 = uint256.NewInt(35)
+ N100 = uint256.NewInt(100)
+)
diff --git a/erigon-lib/compress/compress.go b/erigon-lib/compress/compress.go
new file mode 100644
index 00000000000..c9ef174d621
--- /dev/null
+++ b/erigon-lib/compress/compress.go
@@ -0,0 +1,865 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "bufio"
+ "bytes"
+ "container/heap"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/bits"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ dir2 "github.com/ledgerwatch/erigon-lib/common/dir"
+ "github.com/ledgerwatch/erigon-lib/etl"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/exp/slices"
+)
+
+// Compressor is the main operating type for performing per-word compression
+// After creating a compression, one needs to add superstrings to it, using `AddWord` function
+// In order to add word without compression, function `AddUncompressedWord` needs to be used
+// Compressor only tracks which words are compressed and which are not until the compressed
+// file is created. After that, the user of the file needs to know when to call
+// `Next` or `NextUncompressed` function on the decompressor.
+// After that, `Compress` function needs to be called to perform the compression
+// and eventually create output file
+type Compressor struct {
+ ctx context.Context
+ wg *sync.WaitGroup
+ superstrings chan []byte
+ uncompressedFile *DecompressedFile
+ tmpDir string // temporary directory to use for ETL when building dictionary
+ logPrefix string
+ outputFile string // File where to output the dictionary and compressed data
+ tmpOutFilePath string // File where to output the dictionary and compressed data
+ suffixCollectors []*etl.Collector
+ // Buffer for "superstring" - transformation of superstrings where each byte of a word, say b,
+ // is turned into 2 bytes, 0x01 and b, and two zero bytes 0x00 0x00 are inserted after each word
+ // this is needed for using ordinary (one string) suffix sorting algorithm instead of a generalised (many superstrings) suffix
+ // sorting algorithm
+ superstring []byte
+ wordsCount uint64
+ superstringCount uint64
+ superstringLen int
+ workers int
+ Ratio CompressionRatio
+ lvl log.Lvl
+ trace bool
+ logger log.Logger
+ noFsync bool // fsync is enabled by default, but tests can manually disable
+}
+
+func NewCompressor(ctx context.Context, logPrefix, outputFile, tmpDir string, minPatternScore uint64, workers int, lvl log.Lvl, logger log.Logger) (*Compressor, error) {
+ dir2.MustExist(tmpDir)
+ dir, fileName := filepath.Split(outputFile)
+ tmpOutFilePath := filepath.Join(dir, fileName) + ".tmp"
+ // UncompressedFile - it's intermediate .idt file, outputFile it's final .seg (or .dat) file.
+ // tmpOutFilePath - it's ".seg.tmp" (".idt.tmp") file which will be renamed to .seg file if everything succeed.
+ // It allow atomically create .seg file (downloader will not see partially ready/ non-ready .seg files).
+ // I didn't create ".seg.tmp" file in tmpDir, because I think tmpDir and snapsthoDir may be mounted to different drives
+ uncompressedPath := filepath.Join(tmpDir, fileName) + ".idt"
+
+ uncompressedFile, err := NewUncompressedFile(uncompressedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ // Collector for dictionary superstrings (sorted by their score)
+ superstrings := make(chan []byte, workers*2)
+ wg := &sync.WaitGroup{}
+ wg.Add(workers)
+ suffixCollectors := make([]*etl.Collector, workers)
+ for i := 0; i < workers; i++ {
+ collector := etl.NewCollector(logPrefix+"_dict", tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize/2), logger)
+ collector.LogLvl(lvl)
+
+ suffixCollectors[i] = collector
+ go processSuperstring(ctx, superstrings, collector, minPatternScore, wg, logger)
+ }
+
+ return &Compressor{
+ uncompressedFile: uncompressedFile,
+ tmpOutFilePath: tmpOutFilePath,
+ outputFile: outputFile,
+ tmpDir: tmpDir,
+ logPrefix: logPrefix,
+ workers: workers,
+ ctx: ctx,
+ superstrings: superstrings,
+ suffixCollectors: suffixCollectors,
+ lvl: lvl,
+ wg: wg,
+ logger: logger,
+ }, nil
+}
+
+func (c *Compressor) Close() {
+ c.uncompressedFile.Close()
+ for _, collector := range c.suffixCollectors {
+ collector.Close()
+ }
+ c.suffixCollectors = nil
+}
+
+func (c *Compressor) SetTrace(trace bool) { c.trace = trace }
+
+func (c *Compressor) Count() int { return int(c.wordsCount) }
+
+func (c *Compressor) AddWord(word []byte) error {
+ select {
+ case <-c.ctx.Done():
+ return c.ctx.Err()
+ default:
+ }
+
+ c.wordsCount++
+ l := 2*len(word) + 2
+ if c.superstringLen+l > superstringLimit {
+ if c.superstringCount%samplingFactor == 0 {
+ c.superstrings <- c.superstring
+ }
+ c.superstringCount++
+ c.superstring = make([]byte, 0, 1024*1024)
+ c.superstringLen = 0
+ }
+ c.superstringLen += l
+
+ if c.superstringCount%samplingFactor == 0 {
+ for _, a := range word {
+ c.superstring = append(c.superstring, 1, a)
+ }
+ c.superstring = append(c.superstring, 0, 0)
+ }
+
+ return c.uncompressedFile.Append(word)
+}
+
+func (c *Compressor) AddUncompressedWord(word []byte) error {
+ select {
+ case <-c.ctx.Done():
+ return c.ctx.Err()
+ default:
+ }
+
+ c.wordsCount++
+ return c.uncompressedFile.AppendUncompressed(word)
+}
+
+func (c *Compressor) Compress() error {
+ c.uncompressedFile.w.Flush()
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+ if len(c.superstring) > 0 {
+ c.superstrings <- c.superstring
+ }
+ close(c.superstrings)
+ c.wg.Wait()
+
+ if c.lvl < log.LvlTrace {
+ c.logger.Log(c.lvl, fmt.Sprintf("[%s] BuildDict start", c.logPrefix), "workers", c.workers)
+ }
+ t := time.Now()
+ db, err := DictionaryBuilderFromCollectors(c.ctx, compressLogPrefix, c.tmpDir, c.suffixCollectors, c.lvl, c.logger)
+ if err != nil {
+
+ return err
+ }
+ if c.trace {
+ _, fileName := filepath.Split(c.outputFile)
+ if err := PersistDictrionary(filepath.Join(c.tmpDir, fileName)+".dictionary.txt", db); err != nil {
+ return err
+ }
+ }
+ defer os.Remove(c.tmpOutFilePath)
+ if c.lvl < log.LvlTrace {
+ c.logger.Log(c.lvl, fmt.Sprintf("[%s] BuildDict", c.logPrefix), "took", time.Since(t))
+ }
+
+ cf, err := os.Create(c.tmpOutFilePath)
+ if err != nil {
+ return err
+ }
+ defer cf.Close()
+ t = time.Now()
+ if err := reducedict(c.ctx, c.trace, c.logPrefix, c.tmpOutFilePath, cf, c.uncompressedFile, c.workers, db, c.lvl, c.logger); err != nil {
+ return err
+ }
+ if err = c.fsync(cf); err != nil {
+ return err
+ }
+ if err = cf.Close(); err != nil {
+ return err
+ }
+ if err := os.Rename(c.tmpOutFilePath, c.outputFile); err != nil {
+ return fmt.Errorf("renaming: %w", err)
+ }
+
+ c.Ratio, err = Ratio(c.uncompressedFile.filePath, c.outputFile)
+ if err != nil {
+ return fmt.Errorf("ratio: %w", err)
+ }
+
+ _, fName := filepath.Split(c.outputFile)
+ if c.lvl < log.LvlTrace {
+ c.logger.Log(c.lvl, fmt.Sprintf("[%s] Compress", c.logPrefix), "took", time.Since(t), "ratio", c.Ratio, "file", fName)
+ }
+ return nil
+}
+
+func (c *Compressor) DisableFsync() { c.noFsync = true }
+
+// fsync - other processes/goroutines must see only "fully-complete" (valid) files. No partial-writes.
+// To achieve it: write to .tmp file then `rename` when file is ready.
+// Machine may power-off right after `rename` - it means `fsync` must be before `rename`
+func (c *Compressor) fsync(f *os.File) error {
+ if c.noFsync {
+ return nil
+ }
+ if err := f.Sync(); err != nil {
+ c.logger.Warn("couldn't fsync", "err", err, "file", c.tmpOutFilePath)
+ return err
+ }
+ return nil
+}
+
+// superstringLimit limits how large can one "superstring" get before it is processed
+// CompressorSequential allocates 7 bytes for each uint of superstringLimit. For example,
+// superstingLimit 16m will result in 112Mb being allocated for various arrays
+const superstringLimit = 16 * 1024 * 1024
+
+// minPatternLen is minimum length of pattern we consider to be included into the dictionary
+const minPatternLen = 5
+const maxPatternLen = 128
+
+// maxDictPatterns is the maximum number of patterns allowed in the initial (not reduced dictionary)
+// Large values increase memory consumption of dictionary reduction phase
+/*
+Experiments on 74Gb uncompressed file (bsc 012500-013000-transactions.seg)
+Ram - needed just to open compressed file (Huff tables, etc...)
+dec_speed - loop with `word, _ = g.Next(word[:0])`
+skip_speed - loop with `g.Skip()`
+| DictSize | Ram | file_size | dec_speed | skip_speed |
+| -------- | ---- | --------- | --------- | ---------- |
+| 1M | 70Mb | 35871Mb | 4m06s | 1m58s |
+| 512K | 42Mb | 36496Mb | 3m49s | 1m51s |
+| 256K | 21Mb | 37100Mb | 3m44s | 1m48s |
+| 128K | 11Mb | 37782Mb | 3m25s | 1m44s |
+| 64K | 7Mb | 38597Mb | 3m16s | 1m34s |
+| 32K | 5Mb | 39626Mb | 3m0s | 1m29s |
+
+*/
+const maxDictPatterns = 64 * 1024
+
+// samplingFactor - skip superstrings if `superstringNumber % samplingFactor != 0`
+const samplingFactor = 4
+
+// nolint
+const compressLogPrefix = "compress"
+
+type DictionaryBuilder struct {
+ lastWord []byte
+ items []*Pattern
+ limit int
+ lastWordScore uint64
+}
+
+func (db *DictionaryBuilder) Reset(limit int) {
+ db.limit = limit
+ db.items = db.items[:0]
+}
+
+func (db *DictionaryBuilder) Len() int { return len(db.items) }
+func (db *DictionaryBuilder) Less(i, j int) bool {
+ if db.items[i].score == db.items[j].score {
+ return bytes.Compare(db.items[i].word, db.items[j].word) < 0
+ }
+ return db.items[i].score < db.items[j].score
+}
+
+func dictionaryBuilderCmp(i, j *Pattern) int {
+ if i.score == j.score {
+ return bytes.Compare(i.word, j.word)
+ }
+ return cmp.Compare(i.score, j.score)
+}
+
+func (db *DictionaryBuilder) Swap(i, j int) {
+ db.items[i], db.items[j] = db.items[j], db.items[i]
+}
+func (db *DictionaryBuilder) Sort() { slices.SortFunc(db.items, dictionaryBuilderCmp) }
+
+func (db *DictionaryBuilder) Push(x interface{}) {
+ db.items = append(db.items, x.(*Pattern))
+}
+
+func (db *DictionaryBuilder) Pop() interface{} {
+ old := db.items
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ db.items = old[0 : n-1]
+ return x
+}
+
+func (db *DictionaryBuilder) processWord(chars []byte, score uint64) {
+ heap.Push(db, &Pattern{word: common.Copy(chars), score: score})
+ if db.Len() > db.limit {
+ // Remove the element with smallest score
+ heap.Pop(db)
+ }
+}
+
+func (db *DictionaryBuilder) loadFunc(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
+ score := binary.BigEndian.Uint64(v)
+ if bytes.Equal(k, db.lastWord) {
+ db.lastWordScore += score
+ } else {
+ if db.lastWord != nil {
+ db.processWord(db.lastWord, db.lastWordScore)
+ }
+ db.lastWord = append(db.lastWord[:0], k...)
+ db.lastWordScore = score
+ }
+ return nil
+}
+
+func (db *DictionaryBuilder) finish() {
+ if db.lastWord != nil {
+ db.processWord(db.lastWord, db.lastWordScore)
+ }
+}
+
+func (db *DictionaryBuilder) ForEach(f func(score uint64, word []byte)) {
+ for i := db.Len(); i > 0; i-- {
+ f(db.items[i-1].score, db.items[i-1].word)
+ }
+}
+
+func (db *DictionaryBuilder) Close() {
+ db.items = nil
+ db.lastWord = nil
+}
+
+// Pattern is representation of a pattern that is searched in the superstrings to compress them
+// patterns are stored in a patricia tree and contain pattern score (calculated during
+// the initial dictionary building), frequency of usage, and code
+type Pattern struct {
+ word []byte // Pattern characters
+ score uint64 // Score assigned to the pattern during dictionary building
+ uses uint64 // How many times this pattern has been used during search and optimisation
+ code uint64 // Allocated numerical code
+ codeBits int // Number of bits in the code
+ depth int // Depth of the pattern in the huffman tree (for encoding in the file)
+}
+
+// PatternList is a sorted list of pattern for the purpose of
+// building Huffman tree to determine efficient coding.
+// Patterns with least usage come first, we use numerical code
+// as a tie breaker to make sure the resulting Huffman code is canonical
+type PatternList []*Pattern
+
+func (pl PatternList) Len() int { return len(pl) }
+func patternListCmp(i, j *Pattern) int {
+ if i.uses == j.uses {
+ return cmp.Compare(bits.Reverse64(i.code), bits.Reverse64(j.code))
+ }
+ return cmp.Compare(i.uses, j.uses)
+}
+
+// PatternHuff is an intermediate node in a huffman tree of patterns
+// It has two children, each of which may either be another intermediate node (h0 or h1)
+// or leaf node, which is Pattern (p0 or p1).
+type PatternHuff struct {
+ p0 *Pattern
+ p1 *Pattern
+ h0 *PatternHuff
+ h1 *PatternHuff
+ uses uint64
+ tieBreaker uint64
+}
+
+func (h *PatternHuff) AddZero() {
+ if h.p0 != nil {
+ h.p0.code <<= 1
+ h.p0.codeBits++
+ } else {
+ h.h0.AddZero()
+ }
+ if h.p1 != nil {
+ h.p1.code <<= 1
+ h.p1.codeBits++
+ } else {
+ h.h1.AddZero()
+ }
+}
+
+func (h *PatternHuff) AddOne() {
+ if h.p0 != nil {
+ h.p0.code <<= 1
+ h.p0.code++
+ h.p0.codeBits++
+ } else {
+ h.h0.AddOne()
+ }
+ if h.p1 != nil {
+ h.p1.code <<= 1
+ h.p1.code++
+ h.p1.codeBits++
+ } else {
+ h.h1.AddOne()
+ }
+}
+
+func (h *PatternHuff) SetDepth(depth int) {
+ if h.p0 != nil {
+ h.p0.depth = depth + 1
+ h.p0.uses = 0
+ }
+ if h.p1 != nil {
+ h.p1.depth = depth + 1
+ h.p1.uses = 0
+ }
+ if h.h0 != nil {
+ h.h0.SetDepth(depth + 1)
+ }
+ if h.h1 != nil {
+ h.h1.SetDepth(depth + 1)
+ }
+}
+
+// PatternHeap is priority queue of pattern for the purpose of building
+// Huffman tree to determine efficient coding. Patterns with least usage
+// have highest priority. We use a tie-breaker to make sure
+// the resulting Huffman code is canonical
+type PatternHeap []*PatternHuff
+
+func (ph PatternHeap) Len() int {
+ return len(ph)
+}
+
+func (ph PatternHeap) Less(i, j int) bool {
+ if ph[i].uses == ph[j].uses {
+ return ph[i].tieBreaker < ph[j].tieBreaker
+ }
+ return ph[i].uses < ph[j].uses
+}
+
+func (ph *PatternHeap) Swap(i, j int) {
+ (*ph)[i], (*ph)[j] = (*ph)[j], (*ph)[i]
+}
+
+func (ph *PatternHeap) Push(x interface{}) {
+ *ph = append(*ph, x.(*PatternHuff))
+}
+
+func (ph *PatternHeap) Pop() interface{} {
+ old := *ph
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ *ph = old[0 : n-1]
+ return x
+}
+
+type Position struct {
+ uses uint64
+ pos uint64
+ code uint64
+ codeBits int
+ depth int // Depth of the position in the huffman tree (for encoding in the file)
+}
+
+type PositionHuff struct {
+ p0 *Position
+ p1 *Position
+ h0 *PositionHuff
+ h1 *PositionHuff
+ uses uint64
+ tieBreaker uint64
+}
+
+func (h *PositionHuff) AddZero() {
+ if h.p0 != nil {
+ h.p0.code <<= 1
+ h.p0.codeBits++
+ } else {
+ h.h0.AddZero()
+ }
+ if h.p1 != nil {
+ h.p1.code <<= 1
+ h.p1.codeBits++
+ } else {
+ h.h1.AddZero()
+ }
+}
+
+func (h *PositionHuff) AddOne() {
+ if h.p0 != nil {
+ h.p0.code <<= 1
+ h.p0.code++
+ h.p0.codeBits++
+ } else {
+ h.h0.AddOne()
+ }
+ if h.p1 != nil {
+ h.p1.code <<= 1
+ h.p1.code++
+ h.p1.codeBits++
+ } else {
+ h.h1.AddOne()
+ }
+}
+
+func (h *PositionHuff) SetDepth(depth int) {
+ if h.p0 != nil {
+ h.p0.depth = depth + 1
+ h.p0.uses = 0
+ }
+ if h.p1 != nil {
+ h.p1.depth = depth + 1
+ h.p1.uses = 0
+ }
+ if h.h0 != nil {
+ h.h0.SetDepth(depth + 1)
+ }
+ if h.h1 != nil {
+ h.h1.SetDepth(depth + 1)
+ }
+}
+
+type PositionList []*Position
+
+func (pl PositionList) Len() int { return len(pl) }
+
+func positionListCmp(i, j *Position) int {
+ if i.uses == j.uses {
+ return cmp.Compare(bits.Reverse64(i.code), bits.Reverse64(j.code))
+ }
+ return cmp.Compare(i.uses, j.uses)
+}
+
+type PositionHeap []*PositionHuff
+
+func (ph PositionHeap) Len() int {
+ return len(ph)
+}
+
+func (ph PositionHeap) Less(i, j int) bool {
+ return ph.Compare(i, j) < 0
+}
+
+func (ph PositionHeap) Compare(i, j int) int {
+ if ph[i].uses == ph[j].uses {
+ return cmp.Compare(ph[i].tieBreaker, ph[j].tieBreaker)
+ }
+ return cmp.Compare(ph[i].uses, ph[j].uses)
+}
+
+func (ph *PositionHeap) Swap(i, j int) {
+ (*ph)[i], (*ph)[j] = (*ph)[j], (*ph)[i]
+}
+
+func (ph *PositionHeap) Push(x interface{}) {
+ *ph = append(*ph, x.(*PositionHuff))
+}
+
+func (ph *PositionHeap) Pop() interface{} {
+ old := *ph
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ *ph = old[0 : n-1]
+ return x
+}
+
+type HuffmanCoder struct {
+ w *bufio.Writer
+ outputBits int
+ outputByte byte
+}
+
+func (hf *HuffmanCoder) encode(code uint64, codeBits int) error {
+ for codeBits > 0 {
+ var bitsUsed int
+ if hf.outputBits+codeBits > 8 {
+ bitsUsed = 8 - hf.outputBits
+ } else {
+ bitsUsed = codeBits
+ }
+ mask := (uint64(1) << bitsUsed) - 1
+ hf.outputByte |= byte((code & mask) << hf.outputBits)
+ code >>= bitsUsed
+ codeBits -= bitsUsed
+ hf.outputBits += bitsUsed
+ if hf.outputBits == 8 {
+ if e := hf.w.WriteByte(hf.outputByte); e != nil {
+ return e
+ }
+ hf.outputBits = 0
+ hf.outputByte = 0
+ }
+ }
+ return nil
+}
+
+func (hf *HuffmanCoder) flush() error {
+ if hf.outputBits > 0 {
+ if e := hf.w.WriteByte(hf.outputByte); e != nil {
+ return e
+ }
+ hf.outputBits = 0
+ hf.outputByte = 0
+ }
+ return nil
+}
+
+// DynamicCell represents result of dynamic programming for certain starting position
+type DynamicCell struct {
+ optimStart int
+ coverStart int
+ compression int
+ score uint64
+ patternIdx int // offset of the last element in the pattern slice
+}
+
+type Ring struct {
+ cells []DynamicCell
+ head, tail, count int
+}
+
+func NewRing() *Ring {
+ return &Ring{
+ cells: make([]DynamicCell, 16),
+ head: 0,
+ tail: 0,
+ count: 0,
+ }
+}
+
+func (r *Ring) Reset() {
+ r.count = 0
+ r.head = 0
+ r.tail = 0
+}
+
+func (r *Ring) ensureSize() {
+ if r.count < len(r.cells) {
+ return
+ }
+ newcells := make([]DynamicCell, r.count*2)
+ if r.tail > r.head {
+ copy(newcells, r.cells[r.head:r.tail])
+ } else {
+ n := copy(newcells, r.cells[r.head:])
+ copy(newcells[n:], r.cells[:r.tail])
+ }
+ r.head = 0
+ r.tail = r.count
+ r.cells = newcells
+}
+
+func (r *Ring) PushFront() *DynamicCell {
+ r.ensureSize()
+ if r.head == 0 {
+ r.head = len(r.cells)
+ }
+ r.head--
+ r.count++
+ return &r.cells[r.head]
+}
+
+func (r *Ring) PushBack() *DynamicCell {
+ r.ensureSize()
+ if r.tail == len(r.cells) {
+ r.tail = 0
+ }
+ result := &r.cells[r.tail]
+ r.tail++
+ r.count++
+ return result
+}
+
+func (r Ring) Len() int {
+ return r.count
+}
+
+func (r *Ring) Get(i int) *DynamicCell {
+ if i < 0 || i >= r.count {
+ return nil
+ }
+ return &r.cells[(r.head+i)&(len(r.cells)-1)]
+}
+
+// Truncate removes all items starting from i
+func (r *Ring) Truncate(i int) {
+ r.count = i
+ r.tail = (r.head + i) & (len(r.cells) - 1)
+}
+
+type DictAggregator struct {
+ collector *etl.Collector
+ dist map[int]int
+ lastWord []byte
+ lastWordScore uint64
+}
+
+func (da *DictAggregator) processWord(word []byte, score uint64) error {
+ var scoreBuf [8]byte
+ binary.BigEndian.PutUint64(scoreBuf[:], score)
+ return da.collector.Collect(word, scoreBuf[:])
+}
+
+func (da *DictAggregator) Load(loadFunc etl.LoadFunc, args etl.TransformArgs) error {
+ defer da.collector.Close()
+ return da.collector.Load(nil, "", loadFunc, args)
+}
+
+func (da *DictAggregator) aggLoadFunc(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
+ if _, ok := da.dist[len(k)]; !ok {
+ da.dist[len(k)] = 0
+ }
+ da.dist[len(k)]++
+
+ score := binary.BigEndian.Uint64(v)
+ if bytes.Equal(k, da.lastWord) {
+ da.lastWordScore += score
+ } else {
+ if da.lastWord != nil {
+ if err := da.processWord(da.lastWord, da.lastWordScore); err != nil {
+ return err
+ }
+ }
+ da.lastWord = append(da.lastWord[:0], k...)
+ da.lastWordScore = score
+ }
+ return nil
+}
+
+func (da *DictAggregator) finish() error {
+ if da.lastWord != nil {
+ return da.processWord(da.lastWord, da.lastWordScore)
+ }
+ return nil
+}
+
+type CompressionRatio float64
+
+func (r CompressionRatio) String() string { return fmt.Sprintf("%.2f", r) }
+
+func Ratio(f1, f2 string) (CompressionRatio, error) {
+ s1, err := os.Stat(f1)
+ if err != nil {
+ return 0, err
+ }
+ s2, err := os.Stat(f2)
+ if err != nil {
+ return 0, err
+ }
+ return CompressionRatio(float64(s1.Size()) / float64(s2.Size())), nil
+}
+
+// DecompressedFile - .dat file format - simple format for temporary data store
+type DecompressedFile struct {
+ f *os.File
+ w *bufio.Writer
+ filePath string
+ buf []byte
+ count uint64
+}
+
+func NewUncompressedFile(filePath string) (*DecompressedFile, error) {
+ f, err := os.Create(filePath)
+ if err != nil {
+ return nil, err
+ }
+ w := bufio.NewWriterSize(f, 2*etl.BufIOSize)
+ return &DecompressedFile{filePath: filePath, f: f, w: w, buf: make([]byte, 128)}, nil
+}
+func (f *DecompressedFile) Close() {
+ f.w.Flush()
+ f.f.Close()
+ os.Remove(f.filePath)
+}
+func (f *DecompressedFile) Append(v []byte) error {
+ f.count++
+ // For compressed words, the length prefix is shifted to make lowest bit zero
+ n := binary.PutUvarint(f.buf, 2*uint64(len(v)))
+ if _, e := f.w.Write(f.buf[:n]); e != nil {
+ return e
+ }
+ if len(v) > 0 {
+ if _, e := f.w.Write(v); e != nil {
+ return e
+ }
+ }
+ return nil
+}
+func (f *DecompressedFile) AppendUncompressed(v []byte) error {
+ f.count++
+ // For uncompressed words, the length prefix is shifted to make lowest bit one
+ n := binary.PutUvarint(f.buf, 2*uint64(len(v))+1)
+ if _, e := f.w.Write(f.buf[:n]); e != nil {
+ return e
+ }
+ if len(v) > 0 {
+ if _, e := f.w.Write(v); e != nil {
+ return e
+ }
+ }
+ return nil
+}
+
+// ForEach - Read keys from the file and generate superstring (with extra byte 0x1 prepended to each character, and with 0x0 0x0 pair inserted between keys and values)
+// We only consider values with length > 2, because smaller values are not compressible without going into bits
+func (f *DecompressedFile) ForEach(walker func(v []byte, compressed bool) error) error {
+ _, err := f.f.Seek(0, 0)
+ if err != nil {
+ return err
+ }
+ r := bufio.NewReaderSize(f.f, int(8*datasize.MB))
+ buf := make([]byte, 16*1024)
+ l, e := binary.ReadUvarint(r)
+ for ; e == nil; l, e = binary.ReadUvarint(r) {
+ // extract lowest bit of length prefix as "uncompressed" flag and shift to obtain correct length
+ compressed := (l & 1) == 0
+ l >>= 1
+ if len(buf) < int(l) {
+ buf = make([]byte, l)
+ }
+ if _, e = io.ReadFull(r, buf[:l]); e != nil {
+ return e
+ }
+ if err := walker(buf[:l], compressed); err != nil {
+ return err
+ }
+ }
+ if e != nil && !errors.Is(e, io.EOF) {
+ return e
+ }
+ return nil
+}
diff --git a/erigon-lib/compress/compress_fuzz_test.go b/erigon-lib/compress/compress_fuzz_test.go
new file mode 100644
index 00000000000..f9403ef8457
--- /dev/null
+++ b/erigon-lib/compress/compress_fuzz_test.go
@@ -0,0 +1,80 @@
+//go:build !nofuzz
+
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package compress
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "path/filepath"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func FuzzCompress(f *testing.F) {
+ logger := log.New()
+ f.Fuzz(func(t *testing.T, x []byte, pos []byte, workers int8) {
+ t.Helper()
+ t.Parallel()
+ if len(pos) < 1 || workers < 1 {
+ t.Skip()
+ return
+ }
+ var a [][]byte
+ j := 0
+ for i := 0; i < len(pos) && j < len(x); i++ {
+ if pos[i] == 0 {
+ continue
+ }
+ next := cmp.Min(j+int(pos[i]*10), len(x)-1)
+ bbb := x[j:next]
+ a = append(a, bbb)
+ j = next
+ }
+
+ ctx := context.Background()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, fmt.Sprintf("compressed-%d", rand.Int31()))
+ c, err := NewCompressor(ctx, t.Name(), file, tmpDir, 2, int(workers), log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ for _, b := range a {
+ if err = c.AddWord(b); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ c.Close()
+ d, err := NewDecompressor(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer d.Close()
+ g := d.MakeGetter()
+ buf := make([]byte, 0, 100)
+ for g.HasNext() {
+ buf, _ = g.Next(buf[:0])
+ }
+ })
+}
diff --git a/erigon-lib/compress/compress_test.go b/erigon-lib/compress/compress_test.go
new file mode 100644
index 00000000000..d8044b03344
--- /dev/null
+++ b/erigon-lib/compress/compress_test.go
@@ -0,0 +1,271 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "context"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCompressEmptyDict(t *testing.T) {
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "compressed")
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 100, 1, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+
+ if err = c.AddWord([]byte("word")); err != nil {
+ t.Fatal(err)
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ defer d.Close()
+ g := d.MakeGetter()
+ if !g.HasNext() {
+ t.Fatalf("expected a word")
+ }
+ word, _ := g.Next(nil)
+ if string(word) != "word" {
+ t.Fatalf("expeced word, got (hex) %x", word)
+ }
+ if g.HasNext() {
+ t.Fatalf("not expecting anything else")
+ }
+}
+
+// nolint
+func checksum(file string) uint32 {
+ hasher := crc32.NewIEEE()
+ f, err := os.Open(file)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ if _, err := io.Copy(hasher, f); err != nil {
+ panic(err)
+ }
+ return hasher.Sum32()
+}
+
+func prepareDict(t *testing.T) *Decompressor {
+ t.Helper()
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "compressed")
+ t.Name()
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ for i := 0; i < 100; i++ {
+ if err = c.AddWord(nil); err != nil {
+ panic(err)
+ }
+ if err = c.AddWord([]byte("long")); err != nil {
+ t.Fatal(err)
+ }
+ if err = c.AddWord([]byte("word")); err != nil {
+ t.Fatal(err)
+ }
+ if err = c.AddWord([]byte(fmt.Sprintf("%d longlongword %d", i, i))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ return d
+}
+
+func TestCompressDict1(t *testing.T) {
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ g.Reset(0)
+ for g.HasNext() {
+ // next word is `nil`
+ require.False(t, g.MatchPrefix([]byte("long")))
+ require.True(t, g.MatchPrefix([]byte("")))
+ require.True(t, g.MatchPrefix([]byte{}))
+
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("long")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte("")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte{}))
+ word, _ := g.Next(nil)
+ require.NotNil(t, word)
+ require.Zero(t, len(word))
+
+ // next word is `long`
+ require.True(t, g.MatchPrefix([]byte("long")))
+ require.False(t, g.MatchPrefix([]byte("longlong")))
+ require.False(t, g.MatchPrefix([]byte("wordnotmatch")))
+ require.False(t, g.MatchPrefix([]byte("longnotmatch")))
+ require.True(t, g.MatchPrefix([]byte{}))
+
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte("long")))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("longlong")))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch")))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("longnotmatch")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte{}))
+ _, _ = g.Next(nil)
+
+ // next word is `word`
+ require.False(t, g.MatchPrefix([]byte("long")))
+ require.False(t, g.MatchPrefix([]byte("longlong")))
+ require.True(t, g.MatchPrefix([]byte("word")))
+ require.True(t, g.MatchPrefix([]byte("")))
+ require.True(t, g.MatchPrefix(nil))
+ require.False(t, g.MatchPrefix([]byte("wordnotmatch")))
+ require.False(t, g.MatchPrefix([]byte("longnotmatch")))
+
+ require.Equal(t, -1, g.MatchPrefixCmp([]byte("long")))
+ require.Equal(t, -1, g.MatchPrefixCmp([]byte("longlong")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte("word")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte("")))
+ require.Equal(t, 0, g.MatchPrefixCmp(nil))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch")))
+ require.Equal(t, -1, g.MatchPrefixCmp([]byte("longnotmatch")))
+ _, _ = g.Next(nil)
+
+ // next word is `longlongword %d`
+ expectPrefix := fmt.Sprintf("%d long", i)
+
+ require.True(t, g.MatchPrefix([]byte(fmt.Sprintf("%d", i))))
+ require.True(t, g.MatchPrefix([]byte(expectPrefix)))
+ require.True(t, g.MatchPrefix([]byte(expectPrefix+"long")))
+ require.True(t, g.MatchPrefix([]byte(expectPrefix+"longword ")))
+ require.False(t, g.MatchPrefix([]byte("wordnotmatch")))
+ require.False(t, g.MatchPrefix([]byte("longnotmatch")))
+ require.True(t, g.MatchPrefix([]byte{}))
+
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte(fmt.Sprintf("%d", i))))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix)))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix+"long")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix+"longword ")))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch")))
+ require.Equal(t, 1, g.MatchPrefixCmp([]byte("longnotmatch")))
+ require.Equal(t, 0, g.MatchPrefixCmp([]byte{}))
+ savePos := g.dataP
+ word, nextPos := g.Next(nil)
+ expected := fmt.Sprintf("%d longlongword %d", i, i)
+ g.Reset(savePos)
+ require.Equal(t, 0, g.MatchCmp([]byte(expected)))
+ g.Reset(nextPos)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) [%s]", expected, word)
+ }
+ i++
+ }
+
+ if cs := checksum(d.filePath); cs != 3153486123 {
+ // it's ok if hash changed, but need re-generate all existing snapshot hashes
+ // in https://github.com/ledgerwatch/erigon-snapshot
+ t.Errorf("result file hash changed, %d", cs)
+ }
+}
+
+func TestCompressDictCmp(t *testing.T) {
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ g.Reset(0)
+ for g.HasNext() {
+ // next word is `nil`
+ savePos := g.dataP
+ require.Equal(t, 1, g.MatchCmp([]byte("long")))
+ require.Equal(t, 0, g.MatchCmp([]byte(""))) // moves offset
+ g.Reset(savePos)
+ require.Equal(t, 0, g.MatchCmp([]byte{})) // moves offset
+ g.Reset(savePos)
+
+ word, _ := g.Next(nil)
+ require.NotNil(t, word)
+ require.Zero(t, len(word))
+
+ // next word is `long`
+ savePos = g.dataP
+ require.Equal(t, 0, g.MatchCmp([]byte("long"))) // moves offset
+ g.Reset(savePos)
+ require.Equal(t, 1, g.MatchCmp([]byte("longlong")))
+ require.Equal(t, 1, g.MatchCmp([]byte("wordnotmatch")))
+ require.Equal(t, 1, g.MatchCmp([]byte("longnotmatch")))
+ require.Equal(t, -1, g.MatchCmp([]byte{}))
+ _, _ = g.Next(nil)
+
+ // next word is `word`
+ savePos = g.dataP
+ require.Equal(t, -1, g.MatchCmp([]byte("long")))
+ require.Equal(t, -1, g.MatchCmp([]byte("longlong")))
+ require.Equal(t, 0, g.MatchCmp([]byte("word"))) // moves offset
+ g.Reset(savePos)
+ require.Equal(t, -1, g.MatchCmp([]byte("")))
+ require.Equal(t, -1, g.MatchCmp(nil))
+ require.Equal(t, 1, g.MatchCmp([]byte("wordnotmatch")))
+ require.Equal(t, -1, g.MatchCmp([]byte("longnotmatch")))
+ _, _ = g.Next(nil)
+
+ // next word is `longlongword %d`
+ expectPrefix := fmt.Sprintf("%d long", i)
+
+ require.Equal(t, -1, g.MatchCmp([]byte(fmt.Sprintf("%d", i))))
+ require.Equal(t, -1, g.MatchCmp([]byte(expectPrefix)))
+ require.Equal(t, -1, g.MatchCmp([]byte(expectPrefix+"long")))
+ require.Equal(t, -1, g.MatchCmp([]byte(expectPrefix+"longword ")))
+ require.Equal(t, 1, g.MatchCmp([]byte("wordnotmatch")))
+ require.Equal(t, 1, g.MatchCmp([]byte("longnotmatch")))
+ require.Equal(t, -1, g.MatchCmp([]byte{}))
+ savePos = g.dataP
+ word, nextPos := g.Next(nil)
+ expected := fmt.Sprintf("%d longlongword %d", i, i)
+ g.Reset(savePos)
+ require.Equal(t, 0, g.MatchCmp([]byte(expected)))
+ g.Reset(nextPos)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) [%s]", expected, word)
+ }
+ i++
+ }
+
+ if cs := checksum(d.filePath); cs != 3153486123 {
+ // it's ok if hash changed, but need re-generate all existing snapshot hashes
+ // in https://github.com/ledgerwatch/erigon-snapshot
+ t.Errorf("result file hash changed, %d", cs)
+ }
+}
diff --git a/erigon-lib/compress/decompress.go b/erigon-lib/compress/decompress.go
new file mode 100644
index 00000000000..52e6bad505c
--- /dev/null
+++ b/erigon-lib/compress/decompress.go
@@ -0,0 +1,1037 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+ "unsafe"
+
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/erigon-lib/mmap"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type word []byte // plain text word associated with code from dictionary
+
+type codeword struct {
+ pattern *word // Pattern corresponding to entries
+ ptr *patternTable // pointer to deeper level tables
+ code uint16 // code associated with that word
+ len byte // Number of bits in the codes
+}
+
+type patternTable struct {
+ patterns []*codeword
+ bitLen int // Number of bits to lookup in the table
+}
+
+func newPatternTable(bitLen int) *patternTable {
+ pt := &patternTable{
+ bitLen: bitLen,
+ }
+ if bitLen <= condensePatternTableBitThreshold {
+ pt.patterns = make([]*codeword, 1< 0 {
+ codeTo = codeFrom | (uint16(1) << pt.bitLen)
+ }
+
+ for c := codeFrom; c < codeTo; c += codeStep {
+ pt.patterns[c] = cw
+ }
+ return
+ }
+
+ pt.patterns = append(pt.patterns, cw)
+}
+
+func (pt *patternTable) condensedTableSearch(code uint16) *codeword {
+ if pt.bitLen <= condensePatternTableBitThreshold {
+ return pt.patterns[code]
+ }
+ for _, cur := range pt.patterns {
+ if cur.code == code {
+ return cur
+ }
+ d := code - cur.code
+ if d&1 != 0 {
+ continue
+ }
+ if checkDistance(int(cur.len), int(d)) {
+ return cur
+ }
+ }
+ return nil
+}
+
+type posTable struct {
+ pos []uint64
+ lens []byte
+ ptrs []*posTable
+ bitLen int
+}
+
+// Decompressor provides access to the superstrings in a file produced by a compressor
+type Decompressor struct {
+ f *os.File
+ mmapHandle2 *[mmap.MaxMapSize]byte // mmap handle for windows (this is used to close mmap)
+ dict *patternTable
+ posDict *posTable
+ mmapHandle1 []byte // mmap handle for unix (this is used to close mmap)
+ data []byte // slice of correct size for the decompressor to work with
+ wordsStart uint64 // Offset of whether the superstrings actually start
+ size int64
+ modTime time.Time
+ wordsCount uint64
+ emptyWordsCount uint64
+
+ filePath, fileName string
+}
+
+// Tables with bitlen greater than threshold will be condensed.
+// Condensing reduces size of decompression table but leads to slower reads.
+// To disable condesning at all set to 9 (we dont use tables larger than 2^9)
+// To enable condensing for tables of size larger 64 = 6
+// for all tables = 0
+// There is no sense to condense tables of size [1 - 64] in terms of performance
+//
+// Should be set before calling NewDecompression.
+var condensePatternTableBitThreshold = 9
+
+func init() {
+ v, _ := os.LookupEnv("DECOMPRESS_CONDENSITY")
+ if v != "" {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ panic(err)
+ }
+ if i < 3 || i > 9 {
+ panic("DECOMPRESS_CONDENSITY: only numbers in range 3-9 are acceptable ")
+ }
+ condensePatternTableBitThreshold = i
+ fmt.Printf("set DECOMPRESS_CONDENSITY to %d\n", i)
+ }
+}
+
+func SetDecompressionTableCondensity(fromBitSize int) {
+ condensePatternTableBitThreshold = fromBitSize
+}
+
+func NewDecompressor(compressedFilePath string) (d *Decompressor, err error) {
+ _, fName := filepath.Split(compressedFilePath)
+ d = &Decompressor{
+ filePath: compressedFilePath,
+ fileName: fName,
+ }
+ defer func() {
+
+ if rec := recover(); rec != nil {
+ err = fmt.Errorf("decompressing file: %s, %+v, trace: %s", compressedFilePath, rec, dbg.Stack())
+ }
+ }()
+
+ d.f, err = os.Open(compressedFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ var stat os.FileInfo
+ if stat, err = d.f.Stat(); err != nil {
+ return nil, err
+ }
+ d.size = stat.Size()
+ if d.size < 32 {
+ return nil, fmt.Errorf("compressed file is too short: %d", d.size)
+ }
+ d.modTime = stat.ModTime()
+ if d.mmapHandle1, d.mmapHandle2, err = mmap.Mmap(d.f, int(d.size)); err != nil {
+ return nil, err
+ }
+ // read patterns from file
+ d.data = d.mmapHandle1[:d.size]
+ defer d.EnableReadAhead().DisableReadAhead() //speedup opening on slow drives
+
+ d.wordsCount = binary.BigEndian.Uint64(d.data[:8])
+ d.emptyWordsCount = binary.BigEndian.Uint64(d.data[8:16])
+ dictSize := binary.BigEndian.Uint64(d.data[16:24])
+ data := d.data[24 : 24+dictSize]
+
+ var depths []uint64
+ var patterns [][]byte
+ var i uint64
+ var patternMaxDepth uint64
+
+ for i < dictSize {
+ d, ns := binary.Uvarint(data[i:])
+ if d > 64 { // mainnet has maxDepth 31
+ return nil, fmt.Errorf("dictionary is invalid: patternMaxDepth=%d", d)
+ }
+ depths = append(depths, d)
+ if d > patternMaxDepth {
+ patternMaxDepth = d
+ }
+ i += uint64(ns)
+ l, n := binary.Uvarint(data[i:])
+ i += uint64(n)
+ patterns = append(patterns, data[i:i+l])
+ //fmt.Printf("depth = %d, pattern = [%x]\n", d, data[i:i+l])
+ i += l
+ }
+
+ if dictSize > 0 {
+ var bitLen int
+ if patternMaxDepth > 9 {
+ bitLen = 9
+ } else {
+ bitLen = int(patternMaxDepth)
+ }
+ // fmt.Printf("pattern maxDepth=%d\n", tree.maxDepth)
+ d.dict = newPatternTable(bitLen)
+ buildCondensedPatternTable(d.dict, depths, patterns, 0, 0, 0, patternMaxDepth)
+ }
+
+ // read positions
+ pos := 24 + dictSize
+ dictSize = binary.BigEndian.Uint64(d.data[pos : pos+8])
+ data = d.data[pos+8 : pos+8+dictSize]
+
+ var posDepths []uint64
+ var poss []uint64
+ var posMaxDepth uint64
+
+ i = 0
+ for i < dictSize {
+ d, ns := binary.Uvarint(data[i:])
+ if d > 2048 {
+ return nil, fmt.Errorf("dictionary is invalid: posMaxDepth=%d", d)
+ }
+ posDepths = append(posDepths, d)
+ if d > posMaxDepth {
+ posMaxDepth = d
+ }
+ i += uint64(ns)
+ pos, n := binary.Uvarint(data[i:])
+ i += uint64(n)
+ poss = append(poss, pos)
+ }
+
+ if dictSize > 0 {
+ var bitLen int
+ if posMaxDepth > 9 {
+ bitLen = 9
+ } else {
+ bitLen = int(posMaxDepth)
+ }
+ //fmt.Printf("pos maxDepth=%d\n", tree.maxDepth)
+ tableSize := 1 << bitLen
+ d.posDict = &posTable{
+ bitLen: bitLen,
+ pos: make([]uint64, tableSize),
+ lens: make([]byte, tableSize),
+ ptrs: make([]*posTable, tableSize),
+ }
+ buildPosTable(posDepths, poss, d.posDict, 0, 0, 0, posMaxDepth)
+ }
+ d.wordsStart = pos + 8 + dictSize
+ return d, nil
+}
+
+func buildCondensedPatternTable(table *patternTable, depths []uint64, patterns [][]byte, code uint16, bits int, depth uint64, maxDepth uint64) int {
+ if len(depths) == 0 {
+ return 0
+ }
+ if depth == depths[0] {
+ pattern := word(patterns[0])
+ //fmt.Printf("depth=%d, maxDepth=%d, code=[%b], codeLen=%d, pattern=[%x]\n", depth, maxDepth, code, bits, pattern)
+ cw := &codeword{code: code, pattern: &pattern, len: byte(bits), ptr: nil}
+ table.insertWord(cw)
+ return 1
+ }
+ if bits == 9 {
+ var bitLen int
+ if maxDepth > 9 {
+ bitLen = 9
+ } else {
+ bitLen = int(maxDepth)
+ }
+ cw := &codeword{code: code, pattern: nil, len: byte(0), ptr: newPatternTable(bitLen)}
+ table.insertWord(cw)
+ return buildCondensedPatternTable(cw.ptr, depths, patterns, 0, 0, depth, maxDepth)
+ }
+ b0 := buildCondensedPatternTable(table, depths, patterns, code, bits+1, depth+1, maxDepth-1)
+ return b0 + buildCondensedPatternTable(table, depths[b0:], patterns[b0:], (uint16(1)< 9 {
+ bitLen = 9
+ } else {
+ bitLen = int(maxDepth)
+ }
+ tableSize := 1 << bitLen
+ newTable := &posTable{
+ bitLen: bitLen,
+ pos: make([]uint64, tableSize),
+ lens: make([]byte, tableSize),
+ ptrs: make([]*posTable, tableSize),
+ }
+ table.pos[code] = 0
+ table.lens[code] = byte(0)
+ table.ptrs[code] = newTable
+ return buildPosTable(depths, poss, newTable, 0, 0, depth, maxDepth)
+ }
+ b0 := buildPosTable(depths, poss, table, code, bits+1, depth+1, maxDepth-1)
+ return b0 + buildPosTable(depths[b0:], poss[b0:], table, (uint16(1)< 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ table := g.posDict
+ if table.bitLen == 0 {
+ return table.pos[0]
+ }
+ for l := byte(0); l == 0; {
+ code := uint16(g.data[g.dataP]) >> g.dataBit
+ if 8-g.dataBit < table.bitLen && int(g.dataP)+1 < len(g.data) {
+ code |= uint16(g.data[g.dataP+1]) << (8 - g.dataBit)
+ }
+ code &= (uint16(1) << table.bitLen) - 1
+ l = table.lens[code]
+ if l == 0 {
+ table = table.ptrs[code]
+ g.dataBit += 9
+ } else {
+ g.dataBit += int(l)
+ pos = table.pos[code]
+ }
+ g.dataP += uint64(g.dataBit / 8)
+ g.dataBit %= 8
+ }
+ return pos
+}
+
+func (g *Getter) nextPattern() []byte {
+ table := g.patternDict
+
+ if table.bitLen == 0 {
+ return *table.patterns[0].pattern
+ }
+
+ var l byte
+ var pattern []byte
+ for l == 0 {
+ code := uint16(g.data[g.dataP]) >> g.dataBit
+ if 8-g.dataBit < table.bitLen && int(g.dataP)+1 < len(g.data) {
+ code |= uint16(g.data[g.dataP+1]) << (8 - g.dataBit)
+ }
+ code &= (uint16(1) << table.bitLen) - 1
+
+ cw := table.condensedTableSearch(code)
+ l = cw.len
+ if l == 0 {
+ table = cw.ptr
+ g.dataBit += 9
+ } else {
+ g.dataBit += int(l)
+ pattern = *cw.pattern
+ }
+ g.dataP += uint64(g.dataBit / 8)
+ g.dataBit %= 8
+ }
+ return pattern
+}
+
+var condensedWordDistances = buildCondensedWordDistances()
+
+func checkDistance(power int, d int) bool {
+ for _, dist := range condensedWordDistances[power] {
+ if dist == d {
+ return true
+ }
+ }
+ return false
+}
+
+func buildCondensedWordDistances() [][]int {
+ dist2 := make([][]int, 10)
+ for i := 1; i <= 9; i++ {
+ dl := make([]int, 0)
+ for j := 1 << i; j < 512; j += 1 << i {
+ dl = append(dl, j)
+ }
+ dist2[i] = dl
+ }
+ return dist2
+}
+
+func (g *Getter) Size() int {
+ return len(g.data)
+}
+
+func (d *Decompressor) Count() int { return int(d.wordsCount) }
+func (d *Decompressor) EmptyWordsCount() int { return int(d.emptyWordsCount) }
+
+// MakeGetter creates an object that can be used to access superstrings in the decompressor's file
+// Getter is not thread-safe, but there can be multiple getters used simultaneously and concurrently
+// for the same decompressor
+func (d *Decompressor) MakeGetter() *Getter {
+ return &Getter{
+ posDict: d.posDict,
+ data: d.data[d.wordsStart:],
+ patternDict: d.dict,
+ fName: d.fileName,
+ }
+}
+
+func (g *Getter) Reset(offset uint64) {
+ g.dataP = offset
+ g.dataBit = 0
+}
+
+func (g *Getter) HasNext() bool {
+ return g.dataP < uint64(len(g.data))
+}
+
+// Next extracts a compressed word from current offset in the file
+// and appends it to the given buf, returning the result of appending
+// After extracting next word, it moves to the beginning of the next one
+func (g *Getter) Next(buf []byte) ([]byte, uint64) {
+ savePos := g.dataP
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ if wordLen == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ if buf == nil { // wordLen == 0, means we have valid record of 0 size. nil - is the marker of "something not found"
+ buf = []byte{}
+ }
+ return buf, g.dataP
+ }
+ bufPos := len(buf) // Tracking position in buf where to insert part of the word
+ lastUncovered := len(buf)
+ if len(buf)+int(wordLen) > cap(buf) {
+ newBuf := make([]byte, len(buf)+int(wordLen))
+ copy(newBuf, buf)
+ buf = newBuf
+ } else {
+ // Expand buffer
+ buf = buf[:len(buf)+int(wordLen)]
+ }
+ // Loop below fills in the patterns
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1 // Positions where to insert patterns are encoded relative to one another
+ pt := g.nextPattern()
+ copy(buf[bufPos:], pt)
+ }
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP = savePos
+ g.dataBit = 0
+ g.nextPos(true /* clean */) // Reset the state of huffman reader
+ bufPos = lastUncovered // Restore to the beginning of buf
+ // Loop below fills the data which is not in the patterns
+ for pos := g.nextPos(false); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1 // Positions where to insert patterns are encoded relative to one another
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ copy(buf[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ copy(buf[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ g.dataP = postLoopPos
+ g.dataBit = 0
+ return buf, postLoopPos
+}
+
+func (g *Getter) NextUncompressed() ([]byte, uint64) {
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ if wordLen == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ return g.data[g.dataP:g.dataP], g.dataP
+ }
+ g.nextPos(false)
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ pos := g.dataP
+ g.dataP += wordLen
+ return g.data[pos:g.dataP], g.dataP
+}
+
+// Skip moves offset to the next word and returns the new offset and the length of the word.
+func (g *Getter) Skip() (uint64, int) {
+ l := g.nextPos(true)
+ l-- // because when create huffman tree we do ++ , because 0 is terminator
+ if l == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ return g.dataP, 0
+ }
+ wordLen := int(l)
+
+ var add uint64
+ var bufPos int
+ var lastUncovered int
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ if wordLen < bufPos {
+ panic(fmt.Sprintf("likely .idx is invalid: %s", g.fName))
+ }
+ if bufPos > lastUncovered {
+ add += uint64(bufPos - lastUncovered)
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ if int(l) > lastUncovered {
+ add += l - uint64(lastUncovered)
+ }
+ // Uncovered characters
+ g.dataP += add
+ return g.dataP, wordLen
+}
+
+func (g *Getter) SkipUncompressed() (uint64, int) {
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ if wordLen == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ return g.dataP, 0
+ }
+ g.nextPos(false)
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ g.dataP += wordLen
+ return g.dataP, int(wordLen)
+}
+
+// Match returns true and next offset if the word at current offset fully matches the buf
+// returns false and current offset otherwise.
+func (g *Getter) Match(buf []byte) (bool, uint64) {
+ savePos := g.dataP
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ lenBuf := len(buf)
+ if wordLen == 0 || int(wordLen) != lenBuf {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ if lenBuf != 0 {
+ g.dataP, g.dataBit = savePos, 0
+ }
+ return lenBuf == int(wordLen), g.dataP
+ }
+
+ var bufPos int
+ // In the first pass, we only check patterns
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ pattern := g.nextPattern()
+ if lenBuf < bufPos+len(pattern) || !bytes.Equal(buf[bufPos:bufPos+len(pattern)], pattern) {
+ g.dataP, g.dataBit = savePos, 0
+ return false, savePos
+ }
+ }
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP, g.dataBit = savePos, 0
+ g.nextPos(true /* clean */) // Reset the state of huffman decoder
+ // Second pass - we check spaces not covered by the patterns
+ var lastUncovered int
+ bufPos = 0
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ if lenBuf < bufPos || !bytes.Equal(buf[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif]) {
+ g.dataP, g.dataBit = savePos, 0
+ return false, savePos
+ }
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ if lenBuf < int(wordLen) || !bytes.Equal(buf[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif]) {
+ g.dataP, g.dataBit = savePos, 0
+ return false, savePos
+ }
+ postLoopPos += dif
+ }
+ if lenBuf != int(wordLen) {
+ g.dataP, g.dataBit = savePos, 0
+ return false, savePos
+ }
+ g.dataP, g.dataBit = postLoopPos, 0
+ return true, postLoopPos
+}
+
+// MatchPrefix only checks if the word at the current offset has a buf prefix. Does not move offset to the next word.
+func (g *Getter) MatchPrefix(prefix []byte) bool {
+ savePos := g.dataP
+ defer func() {
+ g.dataP, g.dataBit = savePos, 0
+ }()
+
+ wordLen := g.nextPos(true /* clean */)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ prefixLen := len(prefix)
+ if wordLen == 0 || int(wordLen) < prefixLen {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ if prefixLen != 0 {
+ g.dataP, g.dataBit = savePos, 0
+ }
+ return prefixLen == int(wordLen)
+ }
+
+ var bufPos int
+ // In the first pass, we only check patterns
+ // Only run this loop as far as the prefix goes, there is no need to check further
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ pattern := g.nextPattern()
+ var comparisonLen int
+ if prefixLen < bufPos+len(pattern) {
+ comparisonLen = prefixLen - bufPos
+ } else {
+ comparisonLen = len(pattern)
+ }
+ if bufPos < prefixLen {
+ if !bytes.Equal(prefix[bufPos:bufPos+comparisonLen], pattern[:comparisonLen]) {
+ return false
+ }
+ }
+ }
+
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP, g.dataBit = savePos, 0
+ g.nextPos(true /* clean */) // Reset the state of huffman decoder
+ // Second pass - we check spaces not covered by the patterns
+ var lastUncovered int
+ bufPos = 0
+ for pos := g.nextPos(false /* clean */); pos != 0 && lastUncovered < prefixLen; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ var comparisonLen int
+ if prefixLen < lastUncovered+int(dif) {
+ comparisonLen = prefixLen - lastUncovered
+ } else {
+ comparisonLen = int(dif)
+ }
+ if !bytes.Equal(prefix[lastUncovered:lastUncovered+comparisonLen], g.data[postLoopPos:postLoopPos+uint64(comparisonLen)]) {
+ return false
+ }
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if prefixLen > lastUncovered && int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ var comparisonLen int
+ if prefixLen < int(wordLen) {
+ comparisonLen = prefixLen - lastUncovered
+ } else {
+ comparisonLen = int(dif)
+ }
+ if !bytes.Equal(prefix[lastUncovered:lastUncovered+comparisonLen], g.data[postLoopPos:postLoopPos+uint64(comparisonLen)]) {
+ return false
+ }
+ }
+ return true
+}
+
+// MatchCmp lexicographically compares given buf with the word at the current offset in the file.
+// returns 0 if buf == word, -1 if buf < word, 1 if buf > word
+func (g *Getter) MatchCmp(buf []byte) int {
+ savePos := g.dataP
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ lenBuf := len(buf)
+ if wordLen == 0 && lenBuf != 0 {
+ g.dataP, g.dataBit = savePos, 0
+ return 1
+ }
+ if wordLen == 0 && lenBuf == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ return 0
+ }
+
+ decoded := make([]byte, wordLen)
+ var bufPos int
+ // In the first pass, we only check patterns
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ pattern := g.nextPattern()
+ copy(decoded[bufPos:], pattern)
+ }
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP, g.dataBit = savePos, 0
+ g.nextPos(true /* clean */) // Reset the state of huffman decoder
+ // Second pass - we check spaces not covered by the patterns
+ var lastUncovered int
+ bufPos = 0
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ // fmt.Printf("BUF POS: %d, POS: %d, lastUncovered: %d\n", bufPos, pos, lastUncovered)
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ copy(decoded[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+
+ if int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ copy(decoded[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ cmp := bytes.Compare(buf, decoded)
+ if cmp == 0 {
+ g.dataP, g.dataBit = postLoopPos, 0
+ } else {
+ g.dataP, g.dataBit = savePos, 0
+ }
+ return cmp
+}
+
+// MatchPrefixCmp lexicographically compares given prefix with the word at the current offset in the file.
+// returns 0 if buf == word, -1 if buf < word, 1 if buf > word
+func (g *Getter) MatchPrefixCmp(prefix []byte) int {
+ savePos := g.dataP
+ defer func() {
+ g.dataP, g.dataBit = savePos, 0
+ }()
+
+ wordLen := g.nextPos(true /* clean */)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ prefixLen := len(prefix)
+ if wordLen == 0 && prefixLen != 0 {
+ return 1
+ }
+ if prefixLen == 0 {
+ return 0
+ }
+
+ decoded := make([]byte, wordLen)
+ var bufPos int
+ // In the first pass, we only check patterns
+ // Only run this loop as far as the prefix goes, there is no need to check further
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ if bufPos > prefixLen {
+ break
+ }
+ pattern := g.nextPattern()
+ copy(decoded[bufPos:], pattern)
+ }
+
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP, g.dataBit = savePos, 0
+ g.nextPos(true /* clean */) // Reset the state of huffman decoder
+ // Second pass - we check spaces not covered by the patterns
+ var lastUncovered int
+ bufPos = 0
+ for pos := g.nextPos(false /* clean */); pos != 0 && lastUncovered < prefixLen; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ copy(decoded[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if prefixLen > lastUncovered && int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ copy(decoded[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif])
+ // postLoopPos += dif
+ }
+ var cmp int
+ if prefixLen > int(wordLen) {
+ // TODO(racytech): handle this case
+ // e.g: prefix = 'aaacb'
+ // word = 'aaa'
+ cmp = bytes.Compare(prefix, decoded)
+ } else {
+ cmp = bytes.Compare(prefix, decoded[:prefixLen])
+ }
+
+ return cmp
+}
+
+func (g *Getter) MatchPrefixUncompressed(prefix []byte) int {
+ savePos := g.dataP
+ defer func() {
+ g.dataP, g.dataBit = savePos, 0
+ }()
+
+ wordLen := g.nextPos(true /* clean */)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ prefixLen := len(prefix)
+ if wordLen == 0 && prefixLen != 0 {
+ return 1
+ }
+ if prefixLen == 0 {
+ return 0
+ }
+
+ g.nextPos(true)
+
+ // if prefixLen > int(wordLen) {
+ // // TODO(racytech): handle this case
+ // // e.g: prefix = 'aaacb'
+ // // word = 'aaa'
+ // }
+
+ return bytes.Compare(prefix, g.data[g.dataP:g.dataP+wordLen])
+}
+
+// FastNext extracts a compressed word from current offset in the file
+// into the given buf, returning a new byte slice which contains extracted word.
+// It is important to allocate enough buf size. Could throw an error if word in file is larger then the buf size.
+// After extracting next word, it moves to the beginning of the next one
+func (g *Getter) FastNext(buf []byte) ([]byte, uint64) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ panic(fmt.Sprintf("file: %s, %s, %s", g.fName, rec, dbg.Stack()))
+ }
+ }()
+
+ savePos := g.dataP
+ wordLen := g.nextPos(true)
+ wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ // decoded := make([]byte, wordLen)
+ if wordLen == 0 {
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ return buf[:wordLen], g.dataP
+ }
+ bufPos := 0 // Tracking position in buf where to insert part of the word
+ lastUncovered := 0
+
+ // if int(wordLen) > cap(buf) {
+ // newBuf := make([]byte, int(wordLen))
+ // buf = newBuf
+ // }
+ // Loop below fills in the patterns
+ for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1 // Positions where to insert patterns are encoded relative to one another
+ pt := g.nextPattern()
+ copy(buf[bufPos:], pt)
+ }
+ if g.dataBit > 0 {
+ g.dataP++
+ g.dataBit = 0
+ }
+ postLoopPos := g.dataP
+ g.dataP = savePos
+ g.dataBit = 0
+ g.nextPos(true /* clean */) // Reset the state of huffman reader
+ bufPos = lastUncovered // Restore to the beginning of buf
+ // Loop below fills the data which is not in the patterns
+ for pos := g.nextPos(false); pos != 0; pos = g.nextPos(false) {
+ bufPos += int(pos) - 1 // Positions where to insert patterns are encoded relative to one another
+ if bufPos > lastUncovered {
+ dif := uint64(bufPos - lastUncovered)
+ copy(buf[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ lastUncovered = bufPos + len(g.nextPattern())
+ }
+ if int(wordLen) > lastUncovered {
+ dif := wordLen - uint64(lastUncovered)
+ copy(buf[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif])
+ postLoopPos += dif
+ }
+ g.dataP = postLoopPos
+ g.dataBit = 0
+ return buf[:wordLen], postLoopPos
+}
diff --git a/erigon-lib/compress/decompress_bench_test.go b/erigon-lib/compress/decompress_bench_test.go
new file mode 100644
index 00000000000..9f6cd4b5d9b
--- /dev/null
+++ b/erigon-lib/compress/decompress_bench_test.go
@@ -0,0 +1,147 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkDecompressNext(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ for i := 0; i < b.N; i++ {
+ _, _ = g.Next(nil)
+ if !g.HasNext() {
+ g.Reset(0)
+ }
+ }
+}
+
+func BenchmarkDecompressFastNext(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ buf := make([]byte, 100)
+ for i := 0; i < b.N; i++ {
+ _, _ = g.FastNext(buf)
+ if !g.HasNext() {
+ g.Reset(0)
+ }
+ }
+}
+
+func BenchmarkDecompressSkip(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+
+ for i := 0; i < b.N; i++ {
+ _, _ = g.Skip()
+ if !g.HasNext() {
+ g.Reset(0)
+ }
+ }
+}
+
+func BenchmarkDecompressMatch(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ for i := 0; i < b.N; i++ {
+ _, _ = g.Match([]byte("longlongword"))
+ }
+}
+
+func BenchmarkDecompressMatchCmp(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ for i := 0; i < b.N; i++ {
+ _ = g.MatchCmp([]byte("longlongword"))
+ if !g.HasNext() {
+ g.Reset(0)
+ }
+ }
+}
+
+func BenchmarkDecompressMatchPrefix(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+
+ for i := 0; i < b.N; i++ {
+ _ = g.MatchPrefix([]byte("longlongword"))
+ }
+}
+
+func BenchmarkDecompressMatchPrefixCmp(b *testing.B) {
+ t := new(testing.T)
+ d := prepareDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+
+ for i := 0; i < b.N; i++ {
+ _ = g.MatchPrefixCmp([]byte("longlongword"))
+ }
+}
+
+func BenchmarkDecompressTorrent(t *testing.B) {
+ t.Skip()
+
+ //fpath := "/Volumes/wotah/mainnet/snapshots/v1-013500-014000-bodies.seg"
+ fpath := "/Volumes/wotah/mainnet/snapshots/v1-013500-014000-transactions.seg"
+ //fpath := "./v1-006000-006500-transactions.seg"
+ st, err := os.Stat(fpath)
+ require.NoError(t, err)
+ fmt.Printf("file: %v, size: %d\n", st.Name(), st.Size())
+
+ condensePatternTableBitThreshold = 5
+ fmt.Printf("bit threshold: %d\n", condensePatternTableBitThreshold)
+
+ t.Run("init", func(t *testing.B) {
+ for i := 0; i < t.N; i++ {
+ d, err := NewDecompressor(fpath)
+ require.NoError(t, err)
+ d.Close()
+ }
+ })
+ t.Run("run", func(t *testing.B) {
+ d, err := NewDecompressor(fpath)
+ require.NoError(t, err)
+ defer d.Close()
+
+ getter := d.MakeGetter()
+
+ for i := 0; i < t.N && getter.HasNext(); i++ {
+ _, sz := getter.Next(nil)
+ if sz == 0 {
+ t.Fatal("sz == 0")
+ }
+ }
+ })
+}
diff --git a/erigon-lib/compress/decompress_fuzz_test.go b/erigon-lib/compress/decompress_fuzz_test.go
new file mode 100644
index 00000000000..e127a6240e0
--- /dev/null
+++ b/erigon-lib/compress/decompress_fuzz_test.go
@@ -0,0 +1,96 @@
+package compress
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math/rand"
+ "path/filepath"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func FuzzDecompressMatch(f *testing.F) {
+ logger := log.New()
+ f.Fuzz(func(t *testing.T, x []byte, pos []byte, workers int8) {
+ t.Helper()
+ t.Parallel()
+ if len(pos) < 1 || workers < 1 {
+ t.Skip()
+ return
+ }
+ var a [][]byte
+ j := 0
+ for i := 0; i < len(pos) && j < len(x); i++ {
+ if pos[i] == 0 {
+ continue
+ }
+ next := cmp.Min(j+int(pos[i]*10), len(x)-1)
+ bbb := x[j:next]
+ a = append(a, bbb)
+ j = next
+ }
+
+ ctx := context.Background()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, fmt.Sprintf("compressed-%d", rand.Int31()))
+ c, err := NewCompressor(ctx, t.Name(), file, tmpDir, 2, int(workers), log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.DisableFsync()
+ defer c.Close()
+ for _, b := range a {
+ if err = c.AddWord(b); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ c.Close()
+ d, err := NewDecompressor(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer d.Close()
+ g := d.MakeGetter()
+ buf := make([]byte, (1 << 16))
+ word_idx := 0
+ for g.HasNext() {
+ expected := a[word_idx]
+ savePos := g.dataP
+ cmp := g.MatchCmp(expected)
+ pos1 := g.dataP
+ if cmp != 0 {
+ t.Fatalf("MatchCmp: expected match: %v\n", expected)
+ }
+ g.Reset(savePos)
+ ok, _ := g.Match(expected)
+ pos2 := g.dataP
+ if !ok {
+ t.Fatalf("MatchBool: expected match: %v\n", expected)
+ }
+ g.Reset(savePos)
+ word, nexPos := g.Next(nil)
+ if bytes.Compare(word, expected) != 0 {
+ t.Fatalf("bytes.Compare: expected match: %v with word %v\n", expected, word)
+ }
+ if pos1 != pos2 && pos2 != nexPos {
+ t.Fatalf("pos1 %v != pos2 %v != nexPos %v\n", pos1, pos2, nexPos)
+ }
+ g.Reset(savePos)
+ word2, nexPos2 := g.FastNext(buf)
+ if bytes.Compare(word2, expected) != 0 {
+ t.Fatalf("bytes.Compare: expected match: %v with word %v\n", expected, word)
+ }
+ if pos1 != pos2 && pos2 != nexPos && nexPos != nexPos2 {
+ t.Fatalf("pos1 %v != pos2 %v != nexPos %v\n", pos1, pos2, nexPos)
+ }
+ word_idx++
+ }
+ })
+
+}
diff --git a/erigon-lib/compress/decompress_test.go b/erigon-lib/compress/decompress_test.go
new file mode 100644
index 00000000000..0becd5bb58a
--- /dev/null
+++ b/erigon-lib/compress/decompress_test.go
@@ -0,0 +1,767 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func prepareLoremDict(t *testing.T) *Decompressor {
+ t.Helper()
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "compressed")
+ t.Name()
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ for k, w := range loremStrings {
+ if err = c.AddWord([]byte(fmt.Sprintf("%s %d", w, k))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ return d
+}
+
+func TestDecompressSkip(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ if i%2 == 0 {
+ g.Skip()
+ } else {
+ word, _ := g.Next(nil)
+ expected := fmt.Sprintf("%s %d", w, i)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) %s", expected, word)
+ }
+ }
+ i++
+ }
+}
+
+func TestDecompressMatchOK(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ if i%2 != 0 {
+ expected := fmt.Sprintf("%s %d", w, i)
+ ok, _ := g.Match([]byte(expected))
+ if !ok {
+ t.Errorf("expexted match with %s", expected)
+ }
+ } else {
+ word, _ := g.Next(nil)
+ expected := fmt.Sprintf("%s %d", w, i)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) %s", expected, word)
+ }
+ }
+ i++
+ }
+}
+
+func TestDecompressMatchCmpOK(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ if i%2 != 0 {
+ expected := fmt.Sprintf("%s %d", w, i)
+ result := g.MatchCmp([]byte(expected))
+ if result != 0 {
+ t.Errorf("expexted match with %s", expected)
+ }
+ } else {
+ word, _ := g.Next(nil)
+ expected := fmt.Sprintf("%s %d", w, i)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) %s", expected, word)
+ }
+ }
+ i++
+ }
+}
+
+func prepareStupidDict(t *testing.T, size int) *Decompressor {
+ t.Helper()
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "compressed2")
+ t.Name()
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ for i := 0; i < size; i++ {
+ if err = c.AddWord([]byte(fmt.Sprintf("word-%d", i))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ return d
+}
+
+func TestDecompressMatchOKCondensed(t *testing.T) {
+ condensePatternTableBitThreshold = 4
+ d := prepareStupidDict(t, 10000)
+ defer func() { condensePatternTableBitThreshold = 9 }()
+ defer d.Close()
+
+ g := d.MakeGetter()
+ i := 0
+ for g.HasNext() {
+ if i%2 != 0 {
+ expected := fmt.Sprintf("word-%d", i)
+ ok, _ := g.Match([]byte(expected))
+ if !ok {
+ t.Errorf("expexted match with %s", expected)
+ }
+ } else {
+ word, _ := g.Next(nil)
+ expected := fmt.Sprintf("word-%d", i)
+ if string(word) != expected {
+ t.Errorf("expected %s, got (hex) %s", expected, word)
+ }
+ }
+ i++
+ }
+}
+
+func TestDecompressMatchNotOK(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ skipCount := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := fmt.Sprintf("%s %d", w, i+1)
+ ok, _ := g.Match([]byte(expected))
+ if ok {
+ t.Errorf("not expexted match with %s", expected)
+ } else {
+ g.Skip()
+ skipCount++
+ }
+ i++
+ }
+ if skipCount != i {
+ t.Errorf("something wrong with match logic")
+ }
+}
+
+func TestDecompressMatchPrefix(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ skipCount := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := []byte(fmt.Sprintf("%s %d", w, i+1))
+ expected = expected[:len(expected)/2]
+ if !g.MatchPrefix(expected) {
+ t.Errorf("expexted match with %s", expected)
+ }
+ g.Skip()
+ skipCount++
+ i++
+ }
+ if skipCount != i {
+ t.Errorf("something wrong with match logic")
+ }
+ g.Reset(0)
+ skipCount = 0
+ i = 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := []byte(fmt.Sprintf("%s %d", w, i+1))
+ expected = expected[:len(expected)/2]
+ if len(expected) > 0 {
+ expected[len(expected)-1]++
+ if g.MatchPrefix(expected) {
+ t.Errorf("not expexted match with %s", expected)
+ }
+ }
+ g.Skip()
+ skipCount++
+ i++
+ }
+}
+
+func TestDecompressMatchPrefixCmp(t *testing.T) {
+ d := prepareLoremDict(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ skipCount := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := []byte(fmt.Sprintf("%s %d", w, i+1))
+ expected = expected[:len(expected)/2]
+ cmp := g.MatchPrefixCmp(expected)
+ if cmp != 0 {
+ t.Errorf("expexted match with %s", expected)
+ }
+ g.Skip()
+ skipCount++
+ i++
+ }
+ if skipCount != i {
+ t.Errorf("something wrong with match logic")
+ }
+ g.Reset(0)
+ skipCount = 0
+ i = 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := []byte(fmt.Sprintf("%s %d", w, i+1))
+ expected = expected[:len(expected)/2]
+ if len(expected) > 0 {
+ expected[len(expected)-1]++
+ cmp := g.MatchPrefixCmp(expected)
+ if cmp == 0 {
+ t.Errorf("not expexted match with %s", expected)
+ }
+ }
+ g.Skip()
+ skipCount++
+ i++
+ }
+}
+
+func prepareLoremDictUncompressed(t *testing.T) *Decompressor {
+ t.Helper()
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "compressed")
+ t.Name()
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ for k, w := range loremStrings {
+ if err = c.AddUncompressedWord([]byte(fmt.Sprintf("%s %d", w, k))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ return d
+}
+
+func TestUncompressed(t *testing.T) {
+ d := prepareLoremDictUncompressed(t)
+ defer d.Close()
+ g := d.MakeGetter()
+ i := 0
+ for g.HasNext() {
+ w := loremStrings[i]
+ expected := []byte(fmt.Sprintf("%s %d", w, i+1))
+ expected = expected[:len(expected)/2]
+ actual, _ := g.NextUncompressed()
+ if bytes.Equal(expected, actual) {
+ t.Errorf("expected %s, actual %s", expected, actual)
+ }
+ i++
+ }
+}
+
+const lorem = `Lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et
+dolore magna aliqua Ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
+consequat Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur
+Excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum`
+
+var loremStrings = strings.Split(lorem, " ")
+
+func TestDecompressTorrent(t *testing.T) {
+ t.Skip()
+
+ fpath := "/mnt/data/chains/mainnet/snapshots/v1-014000-014500-transactions.seg"
+ st, err := os.Stat(fpath)
+ require.NoError(t, err)
+ fmt.Printf("file: %v, size: %d\n", st.Name(), st.Size())
+
+ condensePatternTableBitThreshold = 9
+ fmt.Printf("bit threshold: %d\n", condensePatternTableBitThreshold)
+ d, err := NewDecompressor(fpath)
+
+ require.NoError(t, err)
+ defer d.Close()
+
+ getter := d.MakeGetter()
+ _ = getter
+
+ for getter.HasNext() {
+ _, sz := getter.Next(nil)
+ // fmt.Printf("%x\n", buf)
+ require.NotZero(t, sz)
+ }
+}
+
+const N = 100
+
+var WORDS = [N][]byte{}
+var WORD_FLAGS = [N]bool{} // false - uncompressed word, true - compressed word
+var INPUT_FLAGS = []int{} // []byte or nil input
+
+func randWord() []byte {
+ size := rand.Intn(256) // size of the word
+ word := make([]byte, size)
+ for i := 0; i < size; i++ {
+ word[i] = byte(rand.Intn(256))
+ }
+ return word
+}
+
+func generateRandWords() {
+ for i := 0; i < N-2; i++ {
+ WORDS[i] = randWord()
+ }
+ // make sure we have at least 2 emtpy []byte
+ WORDS[N-2] = []byte{}
+ WORDS[N-1] = []byte{}
+}
+
+func randIntInRange(min, max int) int {
+ return (rand.Intn(max-min) + min)
+}
+
+func clearPrevDict() {
+ WORDS = [N][]byte{}
+ WORD_FLAGS = [N]bool{}
+ INPUT_FLAGS = []int{}
+}
+
+func prepareRandomDict(t *testing.T) *Decompressor {
+ t.Helper()
+ logger := log.New()
+ tmpDir := t.TempDir()
+ file := filepath.Join(tmpDir, "complex")
+ t.Name()
+ c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, 1, 2, log.LvlDebug, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // c.DisableFsync()
+ defer c.Close()
+ clearPrevDict()
+ rand.Seed(time.Now().UnixNano())
+ generateRandWords()
+
+ idx := 0
+ for idx < N {
+ n := rand.Intn(2)
+ switch n {
+ case 0: // input case
+ word := WORDS[idx]
+ m := rand.Intn(2)
+ if m == 1 {
+ if err = c.AddWord(word); err != nil {
+ t.Fatal(err)
+ }
+ WORD_FLAGS[idx] = true
+ } else {
+ if err = c.AddUncompressedWord(word); err != nil {
+ t.Fatal(err)
+ }
+ }
+ idx++
+ INPUT_FLAGS = append(INPUT_FLAGS, n)
+ case 1: // nil word
+ if err = c.AddWord(nil); err != nil {
+ t.Fatal(err)
+ }
+ INPUT_FLAGS = append(INPUT_FLAGS, n)
+ default:
+ t.Fatal(fmt.Errorf("case %d\n", n))
+ }
+ }
+
+ if err = c.Compress(); err != nil {
+ t.Fatal(err)
+ }
+ var d *Decompressor
+ if d, err = NewDecompressor(file); err != nil {
+ t.Fatal(err)
+ }
+ return d
+}
+
+func TestDecompressRandomMatchCmp(t *testing.T) {
+ d := prepareRandomDict(t)
+ defer d.Close()
+
+ if d.wordsCount != uint64(len(INPUT_FLAGS)) {
+ t.Fatalf("TestDecompressRandomDict: d.wordsCount != len(INPUT_FLAGS)")
+ }
+
+ g := d.MakeGetter()
+
+ word_idx := 0
+ input_idx := 0
+ total := 0
+ // check for existing and non existing keys
+ for g.HasNext() {
+ pos := g.dataP
+ if INPUT_FLAGS[input_idx] == 0 { // []byte input
+ notExpected := string(WORDS[word_idx]) + "z"
+ cmp := g.MatchCmp([]byte(notExpected))
+ if cmp == 0 {
+ t.Fatalf("not expected match: %v\n got: %v\n", []byte(notExpected), WORDS[word_idx])
+ }
+
+ expected := WORDS[word_idx]
+ cmp = g.MatchCmp(expected) // move offset to the next pos
+ if cmp != 0 {
+ savePos := g.dataP
+ g.Reset(pos)
+ word, nextPos := g.Next(nil)
+ if nextPos != savePos {
+ t.Fatalf("nextPos %d != savePos %d\n", nextPos, savePos)
+ }
+ if bytes.Compare(expected, word) != cmp {
+ fmt.Printf("1 expected: %v, acutal %v, cmp %d\n", expected, word, cmp)
+ }
+ t.Fatalf("expected match: %v\n got: %v\n", expected, word)
+ }
+ word_idx++
+ } else { // nil input
+ notExpected := []byte{0}
+ cmp := g.MatchCmp(notExpected)
+ if cmp == 0 {
+ t.Fatal("not expected match []byte{0} with nil\n")
+ }
+
+ expected := []byte{}
+ cmp = g.MatchCmp(nil)
+ if cmp != 0 {
+ savePos := g.dataP
+ g.Reset(pos)
+ word, nextPos := g.Next(nil)
+ if nextPos != savePos {
+ t.Fatalf("nextPos %d != savePos %d\n", nextPos, savePos)
+ }
+ if bytes.Compare(expected, word) != cmp {
+ fmt.Printf("2 expected: %v, acutal %v, cmp %d\n", expected, word, cmp)
+ }
+ t.Fatalf("expected match: %v\n got: %v\n", expected, word)
+ }
+ }
+ input_idx++
+ total++
+ }
+ if total != int(d.wordsCount) {
+ t.Fatalf("expected word count: %d, got %d\n", int(d.wordsCount), total)
+ }
+}
+
+func TestDecompressRandomMatchBool(t *testing.T) {
+ d := prepareRandomDict(t)
+ defer d.Close()
+
+ if d.wordsCount != uint64(len(INPUT_FLAGS)) {
+ t.Fatalf("TestDecompressRandomDict: d.wordsCount != len(INPUT_FLAGS)")
+ }
+
+ g := d.MakeGetter()
+
+ word_idx := 0
+ input_idx := 0
+ total := 0
+ // check for existing and non existing keys
+ for g.HasNext() {
+ pos := g.dataP
+ if INPUT_FLAGS[input_idx] == 0 { // []byte input
+ notExpected := string(WORDS[word_idx]) + "z"
+ ok, _ := g.Match([]byte(notExpected))
+ if ok {
+ t.Fatalf("not expected match: %v\n got: %v\n", []byte(notExpected), WORDS[word_idx])
+ }
+
+ expected := WORDS[word_idx]
+ ok, _ = g.Match(expected)
+ if !ok {
+ g.Reset(pos)
+ word, _ := g.Next(nil)
+ if bytes.Compare(expected, word) != 0 {
+ fmt.Printf("1 expected: %v, acutal %v, ok %v\n", expected, word, ok)
+ }
+ t.Fatalf("expected match: %v\n got: %v\n", expected, word)
+ }
+ word_idx++
+ } else { // nil input
+ notExpected := []byte{0}
+ ok, _ := g.Match(notExpected)
+ if ok {
+ t.Fatal("not expected match []byte{0} with nil\n")
+ }
+
+ expected := []byte{}
+ ok, _ = g.Match(nil)
+ if !ok {
+ g.Reset(pos)
+ word, _ := g.Next(nil)
+ if bytes.Compare(expected, word) != 0 {
+ fmt.Printf("2 expected: %v, acutal %v, ok %v\n", expected, word, ok)
+ }
+ t.Fatalf("expected match: %v\n got: %v\n", expected, word)
+ }
+ }
+ input_idx++
+ total++
+ }
+ if total != int(d.wordsCount) {
+ t.Fatalf("expected word count: %d, got %d\n", int(d.wordsCount), total)
+ }
+}
+
+func TestDecompressRandomFastNext(t *testing.T) {
+ d := prepareRandomDict(t)
+ defer d.Close()
+
+ if d.wordsCount != uint64(len(INPUT_FLAGS)) {
+ t.Fatalf("TestDecompressRandomDict: d.wordsCount != len(INPUT_FLAGS)")
+ }
+
+ g := d.MakeGetter()
+
+ word_idx := 0
+ input_idx := 0
+ total := 0
+ buf := make([]byte, (1 << 23))
+ // check for existing and non existing keys
+ for g.HasNext() {
+ if INPUT_FLAGS[input_idx] == 0 { // []byte input
+ expected := WORDS[word_idx]
+ word, _ := g.FastNext(buf)
+ if bytes.Compare(expected, word) != 0 {
+ t.Fatalf("1 expected: %v, got %v\n", expected, word)
+ }
+ word_idx++
+ } else { // nil input
+ expected := []byte{}
+ word, _ := g.FastNext(buf)
+ if bytes.Compare(expected, word) != 0 {
+ t.Fatalf("2 expected: %v, got %v\n", expected, word)
+ }
+ }
+ input_idx++
+ total++
+ }
+ if total != int(d.wordsCount) {
+ t.Fatalf("expected word count: %d, got %d\n", int(d.wordsCount), total)
+ }
+}
+
+// func TestDecompressRandomDict(t *testing.T) {
+// d := prepareRandomDict(t)
+// defer d.Close()
+
+// if d.wordsCount != uint64(len(INPUT_FLAGS)) {
+// t.Fatalf("TestDecompressRandomDict: d.wordsCount != len(INPUT_FLAGS)")
+// }
+
+// g := d.MakeGetter()
+
+// word_idx := 0
+// input_idx := 0
+// total := 0
+// // check for existing and non existing keys
+// for g.HasNext() {
+// pos := g.dataP
+// if INPUT_FLAGS[input_idx] == 0 { // []byte input
+// notExpected := string(WORDS[word_idx]) + "z"
+// ok, _ := g.Match([]byte(notExpected))
+// if ok {
+// t.Fatalf("not expected match: %s\n got: %s\n", notExpected, WORDS[word_idx])
+// }
+
+// expected := WORDS[word_idx]
+// ok, _ = g.Match(expected)
+// if !ok {
+// g.Reset(pos)
+// word, _ := g.Next(nil)
+// t.Fatalf("expected match: %s\n got: %s\n", expected, word)
+// }
+// word_idx++
+// } else { // nil input
+// notExpected := []byte{0}
+// ok, _ := g.Match(notExpected)
+// if ok {
+// t.Fatal("not expected match []byte{0} with nil\n")
+// }
+
+// expected := []byte{}
+// ok, _ = g.Match(nil)
+// if !ok {
+// g.Reset(pos)
+// word, _ := g.Next(nil)
+// t.Fatalf("expected match: %s\n got: %s\n", expected, word)
+// }
+// }
+// input_idx++
+// total++
+// }
+// if total != int(d.wordsCount) {
+// t.Fatalf("expected word count: %d, got %d\n", int(d.wordsCount), total)
+// }
+
+// // TODO: check for non existing keys, suffixes, prefixes
+// g.Reset(0)
+
+// word_idx = 0
+// input_idx = 0
+// // check for existing and non existing prefixes
+// var notExpected = []byte{2, 3, 4}
+// for g.HasNext() {
+
+// if INPUT_FLAGS[input_idx] == 0 { // []byte input
+// expected := WORDS[word_idx]
+// prefix_size := len(expected) / 2
+// if len(expected)/2 > 3 {
+// prefix_size = randIntInRange(3, len(expected)/2)
+// }
+// expected = expected[:prefix_size]
+// if len(expected) > 0 {
+// if !g.MatchPrefix(expected) {
+// t.Errorf("expected match with %s", expected)
+// }
+// expected[len(expected)-1]++
+// if g.MatchPrefix(expected) {
+// t.Errorf("not expected match with %s", expected)
+// }
+// } else {
+// if !g.MatchPrefix([]byte{}) {
+// t.Error("expected match with empty []byte")
+// }
+// if g.MatchPrefix(notExpected) {
+// t.Error("not expected empty []byte to match with []byte{2, 3, 4}")
+// }
+// }
+// word_idx++
+// } else { // nil input
+// if !g.MatchPrefix(nil) {
+// t.Error("expected match with nil")
+// }
+// if g.MatchPrefix(notExpected) {
+// t.Error("not expected nil to match with []byte{2, 3, 4}")
+// }
+// }
+
+// g.Skip()
+// input_idx++
+// }
+
+// g.Reset(0)
+
+// word_idx = 0
+// input_idx = 0
+// // check for existing and non existing suffixes
+// notExpected = []byte{2, 3, 4}
+// for g.HasNext() {
+
+// if INPUT_FLAGS[input_idx] == 0 { // []byte input
+// suffix := WORDS[word_idx]
+// if len(suffix) > 1 {
+// prefix := suffix[:len(suffix)/2]
+// suffix = suffix[len(suffix)/2:]
+// equal := reflect.DeepEqual(prefix, suffix)
+// // check existing suffixes
+// if g.MatchPrefix(suffix) { // suffix has to be equal to prefix
+// if !equal {
+// t.Fatalf("MatchPrefix(suffix) expected match: prefix is unequal to suffix %v != %v, full slice %v\n", prefix, suffix, WORDS[word_idx])
+// }
+// } else { // suffix has not to be the same as prefix
+// if equal {
+// t.Fatalf("MatchPrefix(suffix) expected unmatch: prefix is equal to suffix %v != %v, full slice %v\n", prefix, suffix, WORDS[word_idx])
+// }
+// }
+
+// if len(suffix) > 0 {
+// suffix[0]++
+// if g.MatchPrefix(suffix) && reflect.DeepEqual(prefix, suffix) {
+// t.Fatalf("MatchPrefix(suffix) not expected match: prefix is unequal to suffix %v != %v, full slice %v\n", prefix, suffix, WORDS[word_idx])
+// }
+// }
+
+// g.Skip()
+// } else {
+// ok, _ := g.Match(suffix)
+// if !ok {
+// t.Fatal("Match(suffix): expected match suffix")
+// }
+// }
+// word_idx++
+// } else { // nil input
+// if !g.MatchPrefix(nil) {
+// t.Error("MatchPrefix(suffix): expected match with nil")
+// }
+// if g.MatchPrefix(notExpected) {
+// t.Error("MatchPrefix(suffix): not expected nil to match with []byte{2, 3, 4}")
+// }
+// ok, _ := g.Match(nil)
+// if !ok {
+// t.Errorf("Match(suffix): expected to match with nil")
+// }
+// }
+
+// input_idx++
+// }
+// }
diff --git a/erigon-lib/compress/parallel_compress.go b/erigon-lib/compress/parallel_compress.go
new file mode 100644
index 00000000000..552bfb37c1e
--- /dev/null
+++ b/erigon-lib/compress/parallel_compress.go
@@ -0,0 +1,987 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compress
+
+import (
+ "bufio"
+ "container/heap"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/assert"
+ "github.com/ledgerwatch/erigon-lib/etl"
+ "github.com/ledgerwatch/erigon-lib/patricia"
+ "github.com/ledgerwatch/erigon-lib/sais"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/exp/slices"
+)
+
+// MinPatternScore is minimum score (per superstring) required to consider including pattern into the dictionary
+const MinPatternScore = 1024
+
+func optimiseCluster(trace bool, input []byte, mf2 *patricia.MatchFinder2, output []byte, uncovered []int, patterns []int, cellRing *Ring, posMap map[uint64]uint64) ([]byte, []int, []int) {
+ matches := mf2.FindLongestMatches(input)
+
+ if len(matches) == 0 {
+ output = append(output, 0) // Encoding of 0 in VarUint is 1 zero byte
+ output = append(output, input...)
+ return output, patterns, uncovered
+ }
+ if trace {
+ fmt.Printf("Cluster | input = %x\n", input)
+ for _, match := range matches {
+ fmt.Printf(" [%x %d-%d]", input[match.Start:match.End], match.Start, match.End)
+ }
+ }
+ cellRing.Reset()
+ patterns = append(patterns[:0], 0, 0) // Sentinel entry - no meaning
+ lastF := matches[len(matches)-1]
+ for j := lastF.Start; j < lastF.End; j++ {
+ d := cellRing.PushBack()
+ d.optimStart = j + 1
+ d.coverStart = len(input)
+ d.compression = 0
+ d.patternIdx = 0
+ d.score = 0
+ }
+ // Starting from the last match
+ for i := len(matches); i > 0; i-- {
+ f := matches[i-1]
+ p := f.Val.(*Pattern)
+ firstCell := cellRing.Get(0)
+ maxCompression := firstCell.compression
+ maxScore := firstCell.score
+ maxCell := firstCell
+ var maxInclude bool
+ for e := 0; e < cellRing.Len(); e++ {
+ cell := cellRing.Get(e)
+ comp := cell.compression - 4
+ if cell.coverStart >= f.End {
+ comp += f.End - f.Start
+ } else {
+ comp += cell.coverStart - f.Start
+ }
+ score := cell.score + p.score
+ if comp > maxCompression || (comp == maxCompression && score > maxScore) {
+ maxCompression = comp
+ maxScore = score
+ maxInclude = true
+ maxCell = cell
+ } else if cell.optimStart > f.End {
+ cellRing.Truncate(e)
+ break
+ }
+ }
+ d := cellRing.PushFront()
+ d.optimStart = f.Start
+ d.score = maxScore
+ d.compression = maxCompression
+ if maxInclude {
+ if trace {
+ fmt.Printf("[include] cell for %d: with patterns", f.Start)
+ fmt.Printf(" [%x %d-%d]", input[f.Start:f.End], f.Start, f.End)
+ patternIdx := maxCell.patternIdx
+ for patternIdx != 0 {
+ pattern := patterns[patternIdx]
+ fmt.Printf(" [%x %d-%d]", input[matches[pattern].Start:matches[pattern].End], matches[pattern].Start, matches[pattern].End)
+ patternIdx = patterns[patternIdx+1]
+ }
+ fmt.Printf("\n\n")
+ }
+ d.coverStart = f.Start
+ d.patternIdx = len(patterns)
+ patterns = append(patterns, i-1, maxCell.patternIdx)
+ } else {
+ if trace {
+ fmt.Printf("cell for %d: with patterns", f.Start)
+ patternIdx := maxCell.patternIdx
+ for patternIdx != 0 {
+ pattern := patterns[patternIdx]
+ fmt.Printf(" [%x %d-%d]", input[matches[pattern].Start:matches[pattern].End], matches[pattern].Start, matches[pattern].End)
+ patternIdx = patterns[patternIdx+1]
+ }
+ fmt.Printf("\n\n")
+ }
+ d.coverStart = maxCell.coverStart
+ d.patternIdx = maxCell.patternIdx
+ }
+ }
+ optimCell := cellRing.Get(0)
+ if trace {
+ fmt.Printf("optimal =")
+ }
+ // Count number of patterns
+ var patternCount uint64
+ patternIdx := optimCell.patternIdx
+ for patternIdx != 0 {
+ patternCount++
+ patternIdx = patterns[patternIdx+1]
+ }
+ var numBuf [binary.MaxVarintLen64]byte
+ p := binary.PutUvarint(numBuf[:], patternCount)
+ output = append(output, numBuf[:p]...)
+ patternIdx = optimCell.patternIdx
+ lastStart := 0
+ var lastUncovered int
+ uncovered = uncovered[:0]
+ for patternIdx != 0 {
+ pattern := patterns[patternIdx]
+ p := matches[pattern].Val.(*Pattern)
+ if trace {
+ fmt.Printf(" [%x %d-%d]", input[matches[pattern].Start:matches[pattern].End], matches[pattern].Start, matches[pattern].End)
+ }
+ if matches[pattern].Start > lastUncovered {
+ uncovered = append(uncovered, lastUncovered, matches[pattern].Start)
+ }
+ lastUncovered = matches[pattern].End
+ // Starting position
+ posMap[uint64(matches[pattern].Start-lastStart+1)]++
+ lastStart = matches[pattern].Start
+ n := binary.PutUvarint(numBuf[:], uint64(matches[pattern].Start))
+ output = append(output, numBuf[:n]...)
+ // Code
+ n = binary.PutUvarint(numBuf[:], p.code)
+ output = append(output, numBuf[:n]...)
+ atomic.AddUint64(&p.uses, 1)
+ patternIdx = patterns[patternIdx+1]
+ }
+ if len(input) > lastUncovered {
+ uncovered = append(uncovered, lastUncovered, len(input))
+ }
+ if trace {
+ fmt.Printf("\n\n")
+ }
+ // Add uncoded input
+ for i := 0; i < len(uncovered); i += 2 {
+ output = append(output, input[uncovered[i]:uncovered[i+1]]...)
+ }
+ return output, patterns, uncovered
+}
+
+func reduceDictWorker(trace bool, inputCh chan *CompressionWord, outCh chan *CompressionWord, completion *sync.WaitGroup, trie *patricia.PatriciaTree, inputSize, outputSize *atomic.Uint64, posMap map[uint64]uint64) {
+ defer completion.Done()
+ var output = make([]byte, 0, 256)
+ var uncovered = make([]int, 256)
+ var patterns = make([]int, 0, 256)
+ cellRing := NewRing()
+ mf2 := patricia.NewMatchFinder2(trie)
+ var numBuf [binary.MaxVarintLen64]byte
+ for compW := range inputCh {
+ wordLen := uint64(len(compW.word))
+ n := binary.PutUvarint(numBuf[:], wordLen)
+ output = append(output[:0], numBuf[:n]...) // Prepend with the encoding of length
+ output, patterns, uncovered = optimiseCluster(trace, compW.word, mf2, output, uncovered, patterns, cellRing, posMap)
+ compW.word = append(compW.word[:0], output...)
+ outCh <- compW
+ inputSize.Add(1 + wordLen)
+ outputSize.Add(uint64(len(output)))
+ posMap[wordLen+1]++
+ posMap[0]++
+ }
+}
+
+// CompressionWord hold a word to be compressed (if flag is set), and the result of compression
+// To allow multiple words to be processed concurrently, order field is used to collect all
+// the words after processing without disrupting their order
+type CompressionWord struct {
+ word []byte
+ order uint64
+}
+
+type CompressionQueue []*CompressionWord
+
+func (cq CompressionQueue) Len() int {
+ return len(cq)
+}
+
+func (cq CompressionQueue) Less(i, j int) bool {
+ return cq[i].order < cq[j].order
+}
+
+func (cq *CompressionQueue) Swap(i, j int) {
+ (*cq)[i], (*cq)[j] = (*cq)[j], (*cq)[i]
+}
+
+func (cq *CompressionQueue) Push(x interface{}) {
+ *cq = append(*cq, x.(*CompressionWord))
+}
+
+func (cq *CompressionQueue) Pop() interface{} {
+ old := *cq
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ *cq = old[0 : n-1]
+ return x
+}
+
+// reduceDict reduces the dictionary by trying the substitutions and counting frequency for each word
+func reducedict(ctx context.Context, trace bool, logPrefix, segmentFilePath string, cf *os.File, datFile *DecompressedFile, workers int, dictBuilder *DictionaryBuilder, lvl log.Lvl, logger log.Logger) error {
+ logEvery := time.NewTicker(60 * time.Second)
+ defer logEvery.Stop()
+
+ // DictionaryBuilder is for sorting words by their freuency (to assign codes)
+ var pt patricia.PatriciaTree
+ code2pattern := make([]*Pattern, 0, 256)
+ dictBuilder.ForEach(func(score uint64, word []byte) {
+ p := &Pattern{
+ score: score,
+ uses: 0,
+ code: uint64(len(code2pattern)),
+ codeBits: 0,
+ word: word,
+ }
+ pt.Insert(word, p)
+ code2pattern = append(code2pattern, p)
+ })
+ dictBuilder.Close()
+ if lvl < log.LvlTrace {
+ logger.Log(lvl, fmt.Sprintf("[%s] dictionary file parsed", logPrefix), "entries", len(code2pattern))
+ }
+ ch := make(chan *CompressionWord, 10_000)
+ inputSize, outputSize := &atomic.Uint64{}, &atomic.Uint64{}
+
+ var collectors []*etl.Collector
+ defer func() {
+ for _, c := range collectors {
+ c.Close()
+ }
+ }()
+ out := make(chan *CompressionWord, 1024)
+ var compressionQueue CompressionQueue
+ heap.Init(&compressionQueue)
+ queueLimit := 128 * 1024
+
+ // For the case of workers == 1
+ var output = make([]byte, 0, 256)
+ var uncovered = make([]int, 256)
+ var patterns = make([]int, 0, 256)
+ cellRing := NewRing()
+ mf2 := patricia.NewMatchFinder2(&pt)
+
+ var posMaps []map[uint64]uint64
+ uncompPosMap := make(map[uint64]uint64) // For the uncompressed words
+ posMaps = append(posMaps, uncompPosMap)
+ var wg sync.WaitGroup
+ if workers > 1 {
+ for i := 0; i < workers; i++ {
+ posMap := make(map[uint64]uint64)
+ posMaps = append(posMaps, posMap)
+ wg.Add(1)
+ go reduceDictWorker(trace, ch, out, &wg, &pt, inputSize, outputSize, posMap)
+ }
+ }
+ t := time.Now()
+
+ var err error
+ intermediatePath := segmentFilePath + ".tmp"
+ defer os.Remove(intermediatePath)
+ var intermediateFile *os.File
+ if intermediateFile, err = os.Create(intermediatePath); err != nil {
+ return fmt.Errorf("create intermediate file: %w", err)
+ }
+ defer intermediateFile.Close()
+ intermediateW := bufio.NewWriterSize(intermediateFile, 8*etl.BufIOSize)
+
+ var inCount, outCount, emptyWordsCount uint64 // Counters words sent to compression and returned for compression
+ var numBuf [binary.MaxVarintLen64]byte
+ totalWords := datFile.count
+
+ if err = datFile.ForEach(func(v []byte, compression bool) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if workers > 1 {
+ // take processed words in non-blocking way and push them to the queue
+ outer:
+ for {
+ select {
+ case compW := <-out:
+ heap.Push(&compressionQueue, compW)
+ default:
+ break outer
+ }
+ }
+ // take processed words in blocking way until either:
+ // 1. compressionQueue is below the limit so that new words can be allocated
+ // 2. there is word in order on top of the queue which can be written down and reused
+ for compressionQueue.Len() >= queueLimit && compressionQueue[0].order < outCount {
+ // Blocking wait to receive some outputs until the top of queue can be processed
+ compW := <-out
+ heap.Push(&compressionQueue, compW)
+ }
+ var compW *CompressionWord
+ // Either take the word from the top, write it down and reuse for the next unprocessed word
+ // Or allocate new word
+ if compressionQueue.Len() > 0 && compressionQueue[0].order == outCount {
+ compW = heap.Pop(&compressionQueue).(*CompressionWord)
+ outCount++
+ // Write to intermediate file
+ if _, e := intermediateW.Write(compW.word); e != nil {
+ return e
+ }
+ // Reuse compW for the next word
+ } else {
+ compW = &CompressionWord{}
+ }
+ compW.order = inCount
+ if len(v) == 0 {
+ // Empty word, cannot be compressed
+ compW.word = append(compW.word[:0], 0)
+ uncompPosMap[1]++
+ uncompPosMap[0]++
+ heap.Push(&compressionQueue, compW) // Push to the queue directly, bypassing compression
+ } else if compression {
+ compW.word = append(compW.word[:0], v...)
+ ch <- compW // Send for compression
+ } else {
+ // Prepend word with encoding of length + zero byte, which indicates no patterns to be found in this word
+ wordLen := uint64(len(v))
+ n := binary.PutUvarint(numBuf[:], wordLen)
+ uncompPosMap[wordLen+1]++
+ uncompPosMap[0]++
+ compW.word = append(append(append(compW.word[:0], numBuf[:n]...), 0), v...)
+ heap.Push(&compressionQueue, compW) // Push to the queue directly, bypassing compression
+ }
+ } else {
+ outCount++
+ wordLen := uint64(len(v))
+ n := binary.PutUvarint(numBuf[:], wordLen)
+ if _, e := intermediateW.Write(numBuf[:n]); e != nil {
+ return e
+ }
+ if wordLen > 0 {
+ if compression {
+ output, patterns, uncovered = optimiseCluster(trace, v, mf2, output[:0], uncovered, patterns, cellRing, uncompPosMap)
+ if _, e := intermediateW.Write(output); e != nil {
+ return e
+ }
+ outputSize.Add(uint64(len(output)))
+ } else {
+ if e := intermediateW.WriteByte(0); e != nil {
+ return e
+ }
+ if _, e := intermediateW.Write(v); e != nil {
+ return e
+ }
+ outputSize.Add(1 + uint64(len(v)))
+ }
+ }
+ inputSize.Add(1 + wordLen)
+ uncompPosMap[wordLen+1]++
+ uncompPosMap[0]++
+ }
+ inCount++
+ if len(v) == 0 {
+ emptyWordsCount++
+ }
+
+ select {
+ case <-logEvery.C:
+ if lvl < log.LvlTrace {
+ logger.Log(lvl, fmt.Sprintf("[%s] Replacement preprocessing", logPrefix), "processed", fmt.Sprintf("%.2f%%", 100*float64(outCount)/float64(totalWords)), "ch", len(ch), "workers", workers)
+ }
+ default:
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ close(ch)
+ // Drain the out queue if necessary
+ if inCount > outCount {
+ for compressionQueue.Len() > 0 && compressionQueue[0].order == outCount {
+ compW := heap.Pop(&compressionQueue).(*CompressionWord)
+ outCount++
+ if outCount == inCount {
+ close(out)
+ }
+ // Write to intermediate file
+ if _, e := intermediateW.Write(compW.word); e != nil {
+ return e
+ }
+ }
+ for compW := range out {
+ heap.Push(&compressionQueue, compW)
+ for compressionQueue.Len() > 0 && compressionQueue[0].order == outCount {
+ compW = heap.Pop(&compressionQueue).(*CompressionWord)
+ outCount++
+ if outCount == inCount {
+ close(out)
+ }
+ // Write to intermediate file
+ if _, e := intermediateW.Write(compW.word); e != nil {
+ return e
+ }
+ }
+ }
+ }
+ if err = intermediateW.Flush(); err != nil {
+ return err
+ }
+ wg.Wait()
+ if lvl < log.LvlTrace {
+ log.Log(lvl, fmt.Sprintf("[%s] Replacement preprocessing", logPrefix), "took", time.Since(t))
+ }
+ if _, err = intermediateFile.Seek(0, 0); err != nil {
+ return fmt.Errorf("return to the start of intermediate file: %w", err)
+ }
+
+ //var m runtime.MemStats
+ //common.ReadMemStats(&m)
+ //logger.Info(fmt.Sprintf("[%s] Dictionary build done", logPrefix), "input", common.ByteCount(inputSize.Load()), "output", common.ByteCount(outputSize.Load()), "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
+ posMap := make(map[uint64]uint64)
+ for _, m := range posMaps {
+ for l, c := range m {
+ posMap[l] += c
+ }
+ }
+ //fmt.Printf("posMap = %v\n", posMap)
+ var patternList PatternList
+ distribution := make([]int, maxPatternLen+1)
+ for _, p := range code2pattern {
+ if p.uses > 0 {
+ patternList = append(patternList, p)
+ distribution[len(p.word)]++
+ }
+ }
+ slices.SortFunc(patternList, patternListCmp)
+ logCtx := make([]interface{}, 0, 8)
+ logCtx = append(logCtx, "patternList.Len", patternList.Len())
+
+ i := 0
+ // Build Huffman tree for codes
+ var codeHeap PatternHeap
+ heap.Init(&codeHeap)
+ tieBreaker := uint64(0)
+ for codeHeap.Len()+(patternList.Len()-i) > 1 {
+ // New node
+ h := &PatternHuff{
+ tieBreaker: tieBreaker,
+ }
+ if codeHeap.Len() > 0 && (i >= patternList.Len() || codeHeap[0].uses < patternList[i].uses) {
+ // Take h0 from the heap
+ h.h0 = heap.Pop(&codeHeap).(*PatternHuff)
+ h.h0.AddZero()
+ h.uses += h.h0.uses
+ } else {
+ // Take p0 from the list
+ h.p0 = patternList[i]
+ h.p0.code = 0
+ h.p0.codeBits = 1
+ h.uses += h.p0.uses
+ i++
+ }
+ if codeHeap.Len() > 0 && (i >= patternList.Len() || codeHeap[0].uses < patternList[i].uses) {
+ // Take h1 from the heap
+ h.h1 = heap.Pop(&codeHeap).(*PatternHuff)
+ h.h1.AddOne()
+ h.uses += h.h1.uses
+ } else {
+ // Take p1 from the list
+ h.p1 = patternList[i]
+ h.p1.code = 1
+ h.p1.codeBits = 1
+ h.uses += h.p1.uses
+ i++
+ }
+ tieBreaker++
+ heap.Push(&codeHeap, h)
+ }
+ if codeHeap.Len() > 0 {
+ root := heap.Pop(&codeHeap).(*PatternHuff)
+ root.SetDepth(0)
+ }
+ // Calculate total size of the dictionary
+ var patternsSize uint64
+ for _, p := range patternList {
+ ns := binary.PutUvarint(numBuf[:], uint64(p.depth)) // Length of the word's depth
+ n := binary.PutUvarint(numBuf[:], uint64(len(p.word))) // Length of the word's length
+ patternsSize += uint64(ns + n + len(p.word))
+ }
+
+ logCtx = append(logCtx, "patternsSize", common.ByteCount(patternsSize))
+ for i, n := range distribution {
+ if n == 0 {
+ continue
+ }
+ logCtx = append(logCtx, strconv.Itoa(i), strconv.Itoa(n))
+ }
+ if lvl < log.LvlTrace {
+ logger.Log(lvl, fmt.Sprintf("[%s] Effective dictionary", logPrefix), logCtx...)
+ }
+ cw := bufio.NewWriterSize(cf, 2*etl.BufIOSize)
+ // 1-st, output amount of words - just a useful metadata
+ binary.BigEndian.PutUint64(numBuf[:], inCount) // Dictionary size
+ if _, err = cw.Write(numBuf[:8]); err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint64(numBuf[:], emptyWordsCount)
+ if _, err = cw.Write(numBuf[:8]); err != nil {
+ return err
+ }
+ // 2-nd, output dictionary size
+ binary.BigEndian.PutUint64(numBuf[:], patternsSize) // Dictionary size
+ if _, err = cw.Write(numBuf[:8]); err != nil {
+ return err
+ }
+ //fmt.Printf("patternsSize = %d\n", patternsSize)
+ // Write all the pattens
+ slices.SortFunc(patternList, patternListCmp)
+ for _, p := range patternList {
+ ns := binary.PutUvarint(numBuf[:], uint64(p.depth))
+ if _, err = cw.Write(numBuf[:ns]); err != nil {
+ return err
+ }
+ n := binary.PutUvarint(numBuf[:], uint64(len(p.word)))
+ if _, err = cw.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err = cw.Write(p.word); err != nil {
+ return err
+ }
+ //fmt.Printf("[comp] depth=%d, code=[%b], codeLen=%d pattern=[%x]\n", p.depth, p.code, p.codeBits, p.word)
+ }
+
+ var positionList PositionList
+ pos2code := make(map[uint64]*Position)
+ for pos, uses := range posMap {
+ p := &Position{pos: pos, uses: uses, code: pos, codeBits: 0}
+ positionList = append(positionList, p)
+ pos2code[pos] = p
+ }
+ slices.SortFunc(positionList, positionListCmp)
+ i = 0
+ // Build Huffman tree for codes
+ var posHeap PositionHeap
+ heap.Init(&posHeap)
+ tieBreaker = uint64(0)
+ for posHeap.Len()+(positionList.Len()-i) > 1 {
+ // New node
+ h := &PositionHuff{
+ tieBreaker: tieBreaker,
+ }
+ if posHeap.Len() > 0 && (i >= positionList.Len() || posHeap[0].uses < positionList[i].uses) {
+ // Take h0 from the heap
+ h.h0 = heap.Pop(&posHeap).(*PositionHuff)
+ h.h0.AddZero()
+ h.uses += h.h0.uses
+ } else {
+ // Take p0 from the list
+ h.p0 = positionList[i]
+ h.p0.code = 0
+ h.p0.codeBits = 1
+ h.uses += h.p0.uses
+ i++
+ }
+ if posHeap.Len() > 0 && (i >= positionList.Len() || posHeap[0].uses < positionList[i].uses) {
+ // Take h1 from the heap
+ h.h1 = heap.Pop(&posHeap).(*PositionHuff)
+ h.h1.AddOne()
+ h.uses += h.h1.uses
+ } else {
+ // Take p1 from the list
+ h.p1 = positionList[i]
+ h.p1.code = 1
+ h.p1.codeBits = 1
+ h.uses += h.p1.uses
+ i++
+ }
+ tieBreaker++
+ heap.Push(&posHeap, h)
+ }
+ if posHeap.Len() > 0 {
+ posRoot := heap.Pop(&posHeap).(*PositionHuff)
+ posRoot.SetDepth(0)
+ }
+ // Calculate the size of pos dictionary
+ var posSize uint64
+ for _, p := range positionList {
+ ns := binary.PutUvarint(numBuf[:], uint64(p.depth)) // Length of the position's depth
+ n := binary.PutUvarint(numBuf[:], p.pos)
+ posSize += uint64(ns + n)
+ }
+ // First, output dictionary size
+ binary.BigEndian.PutUint64(numBuf[:], posSize) // Dictionary size
+ if _, err = cw.Write(numBuf[:8]); err != nil {
+ return err
+ }
+ //fmt.Printf("posSize = %d\n", posSize)
+ // Write all the positions
+ slices.SortFunc(positionList, positionListCmp)
+ for _, p := range positionList {
+ ns := binary.PutUvarint(numBuf[:], uint64(p.depth))
+ if _, err = cw.Write(numBuf[:ns]); err != nil {
+ return err
+ }
+ n := binary.PutUvarint(numBuf[:], p.pos)
+ if _, err = cw.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ //fmt.Printf("[comp] depth=%d, code=[%b], codeLen=%d pos=%d\n", p.depth, p.code, p.codeBits, p.pos)
+ }
+ if lvl < log.LvlTrace {
+ logger.Log(lvl, fmt.Sprintf("[%s] Positional dictionary", logPrefix), "positionList.len", positionList.Len(), "posSize", common.ByteCount(posSize))
+ }
+ // Re-encode all the words with the use of optimised (via Huffman coding) dictionaries
+ wc := 0
+ var hc HuffmanCoder
+ hc.w = cw
+ r := bufio.NewReaderSize(intermediateFile, 2*etl.BufIOSize)
+ var l uint64
+ var e error
+ for l, e = binary.ReadUvarint(r); e == nil; l, e = binary.ReadUvarint(r) {
+ posCode := pos2code[l+1]
+ if posCode != nil {
+ if e = hc.encode(posCode.code, posCode.codeBits); e != nil {
+ return e
+ }
+ }
+ if l == 0 {
+ if e = hc.flush(); e != nil {
+ return e
+ }
+ } else {
+ var pNum uint64 // Number of patterns
+ if pNum, e = binary.ReadUvarint(r); e != nil {
+ return e
+ }
+ // Now reading patterns one by one
+ var lastPos uint64
+ var lastUncovered int
+ var uncoveredCount int
+ for i := 0; i < int(pNum); i++ {
+ var pos uint64 // Starting position for pattern
+ if pos, e = binary.ReadUvarint(r); e != nil {
+ return e
+ }
+ posCode = pos2code[pos-lastPos+1]
+ lastPos = pos
+ if posCode != nil {
+ if e = hc.encode(posCode.code, posCode.codeBits); e != nil {
+ return e
+ }
+ }
+ var code uint64 // Code of the pattern
+ if code, e = binary.ReadUvarint(r); e != nil {
+ return e
+ }
+ patternCode := code2pattern[code]
+ if int(pos) > lastUncovered {
+ uncoveredCount += int(pos) - lastUncovered
+ }
+ lastUncovered = int(pos) + len(patternCode.word)
+ if patternCode != nil {
+ if e = hc.encode(patternCode.code, patternCode.codeBits); e != nil {
+ return e
+ }
+ }
+ }
+ if int(l) > lastUncovered {
+ uncoveredCount += int(l) - lastUncovered
+ }
+ // Terminating position and flush
+ posCode = pos2code[0]
+ if e = hc.encode(posCode.code, posCode.codeBits); e != nil {
+ return e
+ }
+ if e = hc.flush(); e != nil {
+ return e
+ }
+ // Copy uncovered characters
+ if uncoveredCount > 0 {
+ if _, e = io.CopyN(cw, r, int64(uncoveredCount)); e != nil {
+ return e
+ }
+ }
+ }
+ wc++
+ select {
+ case <-logEvery.C:
+ if lvl < log.LvlTrace {
+ logger.Log(lvl, fmt.Sprintf("[%s] Compressed", logPrefix), "processed", fmt.Sprintf("%.2f%%", 100*float64(wc)/float64(totalWords)))
+ }
+ default:
+ }
+ }
+ if e != nil && !errors.Is(e, io.EOF) {
+ return e
+ }
+ if err = intermediateFile.Close(); err != nil {
+ return err
+ }
+ if err = cw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// processSuperstring is the worker that processes one superstring and puts results
+// into the collector, using lock to mutual exclusion. At the end (when the input channel is closed),
+// it notifies the waitgroup before exiting, so that the caller known when all work is done
+// No error channels for now
+func processSuperstring(ctx context.Context, superstringCh chan []byte, dictCollector *etl.Collector, minPatternScore uint64, completion *sync.WaitGroup, logger log.Logger) {
+ defer completion.Done()
+ dictVal := make([]byte, 8)
+ dictKey := make([]byte, maxPatternLen)
+ var lcp, sa, inv []int32
+ for superstring := range superstringCh {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ if cap(sa) < len(superstring) {
+ sa = make([]int32, len(superstring))
+ } else {
+ sa = sa[:len(superstring)]
+ }
+ //log.Info("Superstring", "len", len(superstring))
+ //start := time.Now()
+ if err := sais.Sais(superstring, sa); err != nil {
+ panic(err)
+ }
+ //log.Info("Suffix array built", "in", time.Since(start))
+ // filter out suffixes that start with odd positions
+ n := len(sa) / 2
+ filtered := sa[:n]
+ //filtered := make([]int32, n)
+ var j int
+ for i := 0; i < len(sa); i++ {
+ if sa[i]&1 == 0 {
+ filtered[j] = sa[i] >> 1
+ j++
+ }
+ }
+ // Now create an inverted array
+ if cap(inv) < n {
+ inv = make([]int32, n)
+ } else {
+ inv = inv[:n]
+ }
+ for i := 0; i < n; i++ {
+ inv[filtered[i]] = int32(i)
+ }
+ //logger.Info("Inverted array done")
+ var k int
+ // Process all suffixes one by one starting from
+ // first suffix in txt[]
+ if cap(lcp) < n {
+ lcp = make([]int32, n)
+ } else {
+ lcp = lcp[:n]
+ }
+ for i := 0; i < n; i++ {
+ /* If the current suffix is at n-1, then we don’t
+ have next substring to consider. So lcp is not
+ defined for this substring, we put zero. */
+ if inv[i] == int32(n-1) {
+ k = 0
+ continue
+ }
+
+ /* j contains index of the next substring to
+ be considered to compare with the present
+ substring, i.e., next string in suffix array */
+ j := int(filtered[inv[i]+1])
+
+ // Directly start matching from k'th index as
+ // at-least k-1 characters will match
+ for i+k < n && j+k < n && superstring[(i+k)*2] != 0 && superstring[(j+k)*2] != 0 && superstring[(i+k)*2+1] == superstring[(j+k)*2+1] {
+ k++
+ }
+ lcp[inv[i]] = int32(k) // lcp for the present suffix.
+
+ // Deleting the starting character from the string.
+ if k > 0 {
+ k--
+ }
+ }
+ //log.Info("Kasai algorithm finished")
+ // Checking LCP array
+
+ if assert.Enable {
+ for i := 0; i < n-1; i++ {
+ var prefixLen int
+ p1 := int(filtered[i])
+ p2 := int(filtered[i+1])
+ for p1+prefixLen < n &&
+ p2+prefixLen < n &&
+ superstring[(p1+prefixLen)*2] != 0 &&
+ superstring[(p2+prefixLen)*2] != 0 &&
+ superstring[(p1+prefixLen)*2+1] == superstring[(p2+prefixLen)*2+1] {
+ prefixLen++
+ }
+ if prefixLen != int(lcp[i]) {
+ logger.Error("Mismatch", "prefixLen", prefixLen, "lcp[i]", lcp[i], "i", i)
+ break
+ }
+ l := int(lcp[i]) // Length of potential dictionary word
+ if l < 2 {
+ continue
+ }
+ }
+ }
+ //logger.Info("LCP array checked")
+ // Walk over LCP array and compute the scores of the strings
+ var b = inv
+ j = 0
+ for i := 0; i < n-1; i++ {
+ // Only when there is a drop in LCP value
+ if lcp[i+1] >= lcp[i] {
+ j = i
+ continue
+ }
+ prevSkipped := false
+ for l := int(lcp[i]); l > int(lcp[i+1]) && l >= minPatternLen; l-- {
+ if l > maxPatternLen ||
+ l > 20 && (l&(l-1)) != 0 { // is power of 2
+ prevSkipped = true
+ continue
+ }
+
+ // Go back
+ var isNew bool
+ for j > 0 && int(lcp[j-1]) >= l {
+ j--
+ isNew = true
+ }
+
+ if !isNew && !prevSkipped {
+ break
+ }
+
+ window := i - j + 2
+ copy(b, filtered[j:i+2])
+ slices.Sort(b[:window])
+ repeats := 1
+ lastK := 0
+ for k := 1; k < window; k++ {
+ if b[k] >= b[lastK]+int32(l) {
+ repeats++
+ lastK = k
+ }
+ }
+
+ if (l < 8 || l > 64) && repeats < int(minPatternScore) {
+ prevSkipped = true
+ continue
+ }
+
+ score := uint64(repeats * (l))
+ if score < minPatternScore {
+ prevSkipped = true
+ continue
+ }
+
+ dictKey = dictKey[:l]
+ for s := 0; s < l; s++ {
+ dictKey[s] = superstring[(int(filtered[i])+s)*2+1]
+ }
+ binary.BigEndian.PutUint64(dictVal, score)
+ if err := dictCollector.Collect(dictKey, dictVal); err != nil {
+ logger.Error("processSuperstring", "collect", err)
+ }
+ prevSkipped = false //nolint
+ break
+ }
+ }
+ }
+}
+
+func DictionaryBuilderFromCollectors(ctx context.Context, logPrefix, tmpDir string, collectors []*etl.Collector, lvl log.Lvl, logger log.Logger) (*DictionaryBuilder, error) {
+ dictCollector := etl.NewCollector(logPrefix+"_collectDict", tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
+ defer dictCollector.Close()
+ dictCollector.LogLvl(lvl)
+
+ dictAggregator := &DictAggregator{collector: dictCollector, dist: map[int]int{}}
+ for _, collector := range collectors {
+ if err := collector.Load(nil, "", dictAggregator.aggLoadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return nil, err
+ }
+ collector.Close()
+ }
+ if err := dictAggregator.finish(); err != nil {
+ return nil, err
+ }
+ db := &DictionaryBuilder{limit: maxDictPatterns} // Only collect 1m words with highest scores
+ if err := dictCollector.Load(nil, "", db.loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
+ return nil, err
+ }
+ db.finish()
+
+ db.Sort()
+ return db, nil
+}
+
+func PersistDictrionary(fileName string, db *DictionaryBuilder) error {
+ df, err := os.Create(fileName)
+ if err != nil {
+ return err
+ }
+ w := bufio.NewWriterSize(df, 2*etl.BufIOSize)
+ db.ForEach(func(score uint64, word []byte) { fmt.Fprintf(w, "%d %x\n", score, word) })
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ if err := df.Sync(); err != nil {
+ return err
+ }
+ return df.Close()
+}
+
+func ReadSimpleFile(fileName string, walker func(v []byte) error) error {
+ // Read keys from the file and generate superstring (with extra byte 0x1 prepended to each character, and with 0x0 0x0 pair inserted between keys and values)
+ // We only consider values with length > 2, because smaller values are not compressible without going into bits
+ f, err := os.Open(fileName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ r := bufio.NewReaderSize(f, etl.BufIOSize)
+ buf := make([]byte, 4096)
+ for l, e := binary.ReadUvarint(r); ; l, e = binary.ReadUvarint(r) {
+ if e != nil {
+ if errors.Is(e, io.EOF) {
+ break
+ }
+ return e
+ }
+ if len(buf) < int(l) {
+ buf = make([]byte, l)
+ }
+ if _, e = io.ReadFull(r, buf[:l]); e != nil {
+ return e
+ }
+ if err := walker(buf[:l]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/crypto/blake2b/blake2b.go b/erigon-lib/crypto/blake2b/blake2b.go
similarity index 100%
rename from crypto/blake2b/blake2b.go
rename to erigon-lib/crypto/blake2b/blake2b.go
diff --git a/crypto/blake2b/blake2bAVX2_amd64.go b/erigon-lib/crypto/blake2b/blake2bAVX2_amd64.go
similarity index 100%
rename from crypto/blake2b/blake2bAVX2_amd64.go
rename to erigon-lib/crypto/blake2b/blake2bAVX2_amd64.go
diff --git a/crypto/blake2b/blake2bAVX2_amd64.s b/erigon-lib/crypto/blake2b/blake2bAVX2_amd64.s
similarity index 100%
rename from crypto/blake2b/blake2bAVX2_amd64.s
rename to erigon-lib/crypto/blake2b/blake2bAVX2_amd64.s
diff --git a/crypto/blake2b/blake2b_amd64.go b/erigon-lib/crypto/blake2b/blake2b_amd64.go
similarity index 100%
rename from crypto/blake2b/blake2b_amd64.go
rename to erigon-lib/crypto/blake2b/blake2b_amd64.go
diff --git a/crypto/blake2b/blake2b_amd64.s b/erigon-lib/crypto/blake2b/blake2b_amd64.s
similarity index 100%
rename from crypto/blake2b/blake2b_amd64.s
rename to erigon-lib/crypto/blake2b/blake2b_amd64.s
diff --git a/crypto/blake2b/blake2b_f_fuzz.go b/erigon-lib/crypto/blake2b/blake2b_f_fuzz.go
similarity index 100%
rename from crypto/blake2b/blake2b_f_fuzz.go
rename to erigon-lib/crypto/blake2b/blake2b_f_fuzz.go
diff --git a/crypto/blake2b/blake2b_f_test.go b/erigon-lib/crypto/blake2b/blake2b_f_test.go
similarity index 100%
rename from crypto/blake2b/blake2b_f_test.go
rename to erigon-lib/crypto/blake2b/blake2b_f_test.go
diff --git a/crypto/blake2b/blake2b_generic.go b/erigon-lib/crypto/blake2b/blake2b_generic.go
similarity index 100%
rename from crypto/blake2b/blake2b_generic.go
rename to erigon-lib/crypto/blake2b/blake2b_generic.go
diff --git a/crypto/blake2b/blake2b_ref.go b/erigon-lib/crypto/blake2b/blake2b_ref.go
similarity index 100%
rename from crypto/blake2b/blake2b_ref.go
rename to erigon-lib/crypto/blake2b/blake2b_ref.go
diff --git a/crypto/blake2b/blake2b_test.go b/erigon-lib/crypto/blake2b/blake2b_test.go
similarity index 99%
rename from crypto/blake2b/blake2b_test.go
rename to erigon-lib/crypto/blake2b/blake2b_test.go
index 14f2e3bb73d..dae10de1e55 100644
--- a/crypto/blake2b/blake2b_test.go
+++ b/erigon-lib/crypto/blake2b/blake2b_test.go
@@ -8,14 +8,17 @@ import (
"bytes"
"encoding"
"encoding/hex"
+ "errors"
"fmt"
- "github.com/ledgerwatch/log/v3"
"hash"
"io"
"testing"
+
+ "github.com/ledgerwatch/log/v3"
)
func TestHashes(t *testing.T) {
+ t.Helper()
defer func(sse4, avx, avx2 bool) {
useSSE4, useAVX, useAVX2 = sse4, avx, avx2
}(useSSE4, useAVX, useAVX2)
@@ -40,6 +43,7 @@ func TestHashes(t *testing.T) {
}
func TestHashes2X(t *testing.T) {
+ t.Helper()
defer func(sse4, avx, avx2 bool) {
useSSE4, useAVX, useAVX2 = sse4, avx, avx2
}(useSSE4, useAVX, useAVX2)
@@ -116,6 +120,7 @@ func TestMarshal(t *testing.T) {
}
func testHashes(t *testing.T) {
+ t.Helper()
key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f")
input := make([]byte, 255)
@@ -149,6 +154,7 @@ func testHashes(t *testing.T) {
}
func testHashes2X(t *testing.T) {
+ t.Helper()
key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f")
input := make([]byte, 256)
@@ -171,7 +177,7 @@ func testHashes2X(t *testing.T) {
if _, err := h.Read(sum); err != nil {
t.Fatalf("#%d (single write): error from Read: %v", i, err)
}
- if n, err := h.Read(sum); n != 0 || err != io.EOF {
+ if n, err := h.Read(sum); n != 0 || !errors.Is(err, io.EOF) {
t.Fatalf("#%d (single write): Read did not return (0, io.EOF) after exhaustion, got (%v, %v)", i, n, err)
}
if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex {
@@ -300,6 +306,7 @@ func TestSelfTest(t *testing.T) {
// Benchmarks
func benchmarkSum(b *testing.B, size int, sse4, avx, avx2 bool) {
+ b.Helper()
// Enable the correct set of instructions
defer func(sse4, avx, avx2 bool) {
useSSE4, useAVX, useAVX2 = sse4, avx, avx2
@@ -315,6 +322,7 @@ func benchmarkSum(b *testing.B, size int, sse4, avx, avx2 bool) {
}
func benchmarkWrite(b *testing.B, size int, sse4, avx, avx2 bool) {
+ b.Helper()
// Enable the correct set of instructions
defer func(sse4, avx, avx2 bool) {
useSSE4, useAVX, useAVX2 = sse4, avx, avx2
diff --git a/crypto/blake2b/blake2x.go b/erigon-lib/crypto/blake2b/blake2x.go
similarity index 100%
rename from crypto/blake2b/blake2x.go
rename to erigon-lib/crypto/blake2b/blake2x.go
diff --git a/crypto/blake2b/register.go b/erigon-lib/crypto/blake2b/register.go
similarity index 100%
rename from crypto/blake2b/register.go
rename to erigon-lib/crypto/blake2b/register.go
diff --git a/erigon-lib/crypto/cryptopool/pool.go b/erigon-lib/crypto/cryptopool/pool.go
new file mode 100644
index 00000000000..3bd7ebc3827
--- /dev/null
+++ b/erigon-lib/crypto/cryptopool/pool.go
@@ -0,0 +1,21 @@
+package cryptopool
+
+import (
+ "hash"
+ "sync"
+
+ "golang.org/x/crypto/sha3"
+)
+
+var pool = sync.Pool{
+ New: func() interface{} {
+ return sha3.NewLegacyKeccak256()
+ },
+}
+
+func GetLegacyKeccak256() hash.Hash {
+ h := pool.Get().(hash.Hash)
+ h.Reset()
+ return h
+}
+func ReturnLegacyKeccak256(h hash.Hash) { pool.Put(h) }
diff --git a/erigon-lib/crypto/kzg/kzg.go b/erigon-lib/crypto/kzg/kzg.go
new file mode 100644
index 00000000000..0008126a286
--- /dev/null
+++ b/erigon-lib/crypto/kzg/kzg.go
@@ -0,0 +1,124 @@
+package kzg
+
+import (
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+ "sync"
+
+ gokzg4844 "github.com/crate-crypto/go-kzg-4844"
+)
+
+const (
+ BlobCommitmentVersionKZG uint8 = 0x01
+ PrecompileInputLength int = 192
+)
+
+type VersionedHash [32]byte
+
+var (
+ errInvalidInputLength = errors.New("invalid input length")
+
+ // The value that gets returned when the `verify_kzg_proof“ precompile is called
+ precompileReturnValue [64]byte
+
+ trustedSetupFile string
+
+ gokzgCtx *gokzg4844.Context
+ initCryptoCtx sync.Once
+)
+
+func init() {
+ new(big.Int).SetUint64(gokzg4844.ScalarsPerBlob).FillBytes(precompileReturnValue[:32])
+ copy(precompileReturnValue[32:], gokzg4844.BlsModulus[:])
+}
+
+func SetTrustedSetupFilePath(path string) {
+ trustedSetupFile = path
+}
+
+// InitKZGCtx initializes the global context object returned via CryptoCtx
+func InitKZGCtx() {
+ initCryptoCtx.Do(func() {
+ if trustedSetupFile != "" {
+ file, err := os.ReadFile(trustedSetupFile)
+ if err != nil {
+ panic(fmt.Sprintf("could not read file, err: %v", err))
+ }
+
+ setup := new(gokzg4844.JSONTrustedSetup)
+ if err = json.Unmarshal(file, setup); err != nil {
+ panic(fmt.Sprintf("could not unmarshal, err: %v", err))
+ }
+
+ gokzgCtx, err = gokzg4844.NewContext4096(setup)
+ if err != nil {
+ panic(fmt.Sprintf("could not create KZG context, err: %v", err))
+ }
+ } else {
+ var err error
+ // Initialize context to match the configurations that the
+ // specs are using.
+ gokzgCtx, err = gokzg4844.NewContext4096Secure()
+ if err != nil {
+ panic(fmt.Sprintf("could not create context, err : %v", err))
+ }
+ }
+ })
+}
+
+// Ctx returns a context object that stores all of the necessary configurations to allow one to
+// create and verify blob proofs. This function is expensive to run if the crypto context isn't
+// initialized, so production services should pre-initialize by calling InitKZGCtx.
+func Ctx() *gokzg4844.Context {
+ InitKZGCtx()
+ return gokzgCtx
+}
+
+// KZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844
+func KZGToVersionedHash(kzg gokzg4844.KZGCommitment) VersionedHash {
+ h := sha256.Sum256(kzg[:])
+ h[0] = BlobCommitmentVersionKZG
+
+ return VersionedHash(h)
+}
+
+// PointEvaluationPrecompile implements point_evaluation_precompile from EIP-4844
+func PointEvaluationPrecompile(input []byte) ([]byte, error) {
+ if len(input) != PrecompileInputLength {
+ return nil, errInvalidInputLength
+ }
+ // versioned hash: first 32 bytes
+ var versionedHash [32]byte
+ copy(versionedHash[:], input[:32])
+
+ var x, y [32]byte
+ // Evaluation point: next 32 bytes
+ copy(x[:], input[32:64])
+ // Expected output: next 32 bytes
+ copy(y[:], input[64:96])
+
+ // input kzg point: next 48 bytes
+ var dataKZG [48]byte
+ copy(dataKZG[:], input[96:144])
+ if KZGToVersionedHash(dataKZG) != versionedHash {
+ return nil, errors.New("mismatched versioned hash")
+ }
+
+ // Quotient kzg: next 48 bytes
+ var quotientKZG [48]byte
+ copy(quotientKZG[:], input[144:PrecompileInputLength])
+
+ cryptoCtx := Ctx()
+ err := cryptoCtx.VerifyKZGProof(dataKZG, x, y, quotientKZG)
+ if err != nil {
+ return nil, fmt.Errorf("verify_kzg_proof error: %w", err)
+ }
+
+ result := precompileReturnValue // copy the value
+
+ return result[:], nil
+}
diff --git a/erigon-lib/crypto/secp256k1.go b/erigon-lib/crypto/secp256k1.go
new file mode 100644
index 00000000000..3e8f6dc2ff0
--- /dev/null
+++ b/erigon-lib/crypto/secp256k1.go
@@ -0,0 +1,42 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package crypto
+
+import (
+ "github.com/holiman/uint256"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+)
+
+var (
+ secp256k1N = new(uint256.Int).SetBytes(hexutility.MustDecodeHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"))
+ secp256k1halfN = new(uint256.Int).Rsh(secp256k1N, 1)
+)
+
+// See Appendix F "Signing Transactions" of the Yellow Paper
+func TransactionSignatureIsValid(v byte, r, s *uint256.Int, allowPreEip2s bool) bool {
+ if r.IsZero() || s.IsZero() {
+ return false
+ }
+
+ // See EIP-2: Homestead Hard-fork Changes
+ if !allowPreEip2s && s.Gt(secp256k1halfN) {
+ return false
+ }
+
+ return r.Lt(secp256k1N) && s.Lt(secp256k1N) && (v == 0 || v == 1)
+}
diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go
new file mode 100644
index 00000000000..b8bc8c8328c
--- /dev/null
+++ b/erigon-lib/diagnostics/entities.go
@@ -0,0 +1,64 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package diagnostics
+
+type PeerStatisticsGetter interface {
+ GetPeersStatistics() map[string]*PeerStatistics
+}
+
+type PeerStatistics struct {
+ BytesIn uint64
+ BytesOut uint64
+ CapBytesIn map[string]uint64
+ CapBytesOut map[string]uint64
+ TypeBytesIn map[string]uint64
+ TypeBytesOut map[string]uint64
+}
+
+type SnapshotDownloadStatistics struct {
+ Downloaded uint64 `json:"downloaded"`
+ Total uint64 `json:"total"`
+ TotalTime float64 `json:"totalTime"`
+ DownloadRate uint64 `json:"downloadRate"`
+ UploadRate uint64 `json:"uploadRate"`
+ Peers int32 `json:"peers"`
+ Files int32 `json:"files"`
+ Connections uint64 `json:"connections"`
+ Alloc uint64 `json:"alloc"`
+ Sys uint64 `json:"sys"`
+ DownloadFinished bool `json:"downloadFinished"`
+ Segments map[string]SegmentDownloadStatistics `json:"segments"`
+ TorrentMetadataReady int32 `json:"torrentMetadataReady"`
+}
+
+type SegmentDownloadStatistics struct {
+ Name string `json:"name"`
+ TotalBytes uint64 `json:"totalBytes"`
+ DownloadedBytes uint64 `json:"downloadedBytes"`
+ WebseedsCount int `json:"webseedsCount"`
+ PeersCount int `json:"peersCount"`
+ WebseedsRate uint64 `json:"webseedsRate"`
+ PeersRate uint64 `json:"peersRate"`
+}
+
+func (ti SnapshotDownloadStatistics) Type() Type {
+ return TypeOf(ti)
+}
+
+func (ti SegmentDownloadStatistics) Type() Type {
+ return TypeOf(ti)
+}
diff --git a/erigon-lib/diagnostics/network.go b/erigon-lib/diagnostics/network.go
new file mode 100644
index 00000000000..7436a4b9166
--- /dev/null
+++ b/erigon-lib/diagnostics/network.go
@@ -0,0 +1,21 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package diagnostics
+
+func (p PeerStatistics) Type() Type {
+ return TypeOf(p)
+}
diff --git a/erigon-lib/diagnostics/provider.go b/erigon-lib/diagnostics/provider.go
new file mode 100644
index 00000000000..1ff14ab4b03
--- /dev/null
+++ b/erigon-lib/diagnostics/provider.go
@@ -0,0 +1,191 @@
+package diagnostics
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type ctxKey int
+
+const (
+ ckChan ctxKey = iota
+)
+
+type Type interface {
+ reflect.Type
+ Context() context.Context
+ Err() error
+ Enabled() bool
+}
+
+type diagType struct {
+ reflect.Type
+}
+
+var cancelled = func() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ return ctx
+}()
+
+func (t diagType) Context() context.Context {
+ providerMutex.Lock()
+ defer providerMutex.Unlock()
+ if reg := providers[t]; reg != nil {
+ return reg.context
+ }
+
+ return cancelled
+}
+
+func (t diagType) Err() error {
+ return t.Context().Err()
+}
+
+func (t diagType) Enabled() bool {
+ return t.Err() == nil
+}
+
+type Info interface {
+ Type() Type
+}
+
+func TypeOf(i Info) Type {
+ t := reflect.TypeOf(i)
+ return diagType{t}
+}
+
+type Provider interface {
+ StartDiagnostics(ctx context.Context) error
+}
+
+type ProviderFunc func(ctx context.Context) error
+
+func (f ProviderFunc) StartDiagnostics(ctx context.Context) error {
+ return f(ctx)
+}
+
+type registry struct {
+ context context.Context
+ providers []Provider
+}
+
+var providers = map[Type]*registry{}
+var providerMutex sync.RWMutex
+
+func RegisterProvider(provider Provider, infoType Type, logger log.Logger) {
+ providerMutex.Lock()
+ defer providerMutex.Unlock()
+
+ reg := providers[infoType]
+
+ if reg != nil {
+ for _, p := range reg.providers {
+ if p == provider {
+ return
+ }
+ }
+ } else {
+ reg = ®istry{}
+ providers[infoType] = reg
+ }
+
+ reg.providers = append(reg.providers, provider)
+
+ if reg.context != nil {
+ go startProvider(reg.context, infoType, provider, logger)
+ }
+}
+
+func StartProviders(ctx context.Context, infoType Type, logger log.Logger) {
+ providerMutex.Lock()
+
+ reg := providers[infoType]
+ if reg == nil {
+ reg = ®istry{}
+ providers[infoType] = reg
+ }
+
+ toStart := make([]Provider, len(reg.providers))
+ copy(toStart, reg.providers)
+
+ reg.context = ctx
+
+ providerMutex.Unlock()
+
+ for _, provider := range toStart {
+ go startProvider(ctx, infoType, provider, logger)
+ }
+}
+
+func startProvider(ctx context.Context, infoType Type, provider Provider, logger log.Logger) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ err := fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())
+ logger.Warn("Diagnostic provider failed", "type", infoType, "err", err)
+ }
+ }()
+
+ if err := provider.StartDiagnostics(ctx); err != nil {
+ if !errors.Is(err, context.Canceled) {
+ logger.Warn("Diagnostic provider failed", "type", infoType, "err", err)
+ }
+ }
+}
+
+func Send[I Info](info I) error {
+ ctx := info.Type().Context()
+
+ if ctx.Err() != nil {
+ if !errors.Is(ctx.Err(), context.Canceled) {
+ // drop the diagnostic message if there is
+ // no active diagnostic context for the type
+ return nil
+ }
+
+ return ctx.Err()
+ }
+
+ cval := ctx.Value(ckChan)
+
+ if cp, ok := cval.(*atomic.Pointer[chan I]); ok {
+ if c := (*cp).Load(); c != nil {
+ select {
+ case *c <- info:
+ default:
+ // drop the diagnostic message if the receiver is busy
+ // so the sender is not blocked on non critcal actions
+ }
+ }
+ } else {
+ return fmt.Errorf("unexpected channel type: %T", cval)
+ }
+
+ return nil
+}
+
+func Context[I Info](ctx context.Context, buffer int) (context.Context, <-chan I, context.CancelFunc) {
+ c := make(chan I, buffer)
+ cp := atomic.Pointer[chan I]{}
+ cp.Store(&c)
+
+ ctx = context.WithValue(ctx, ckChan, &cp)
+ ctx, cancel := context.WithCancel(ctx)
+
+ return ctx, *cp.Load(), func() {
+ cancel()
+
+ if cp.CompareAndSwap(&c, nil) {
+ ch := c
+ c = nil
+ close(ch)
+ }
+ }
+}
diff --git a/erigon-lib/diagnostics/provider_test.go b/erigon-lib/diagnostics/provider_test.go
new file mode 100644
index 00000000000..b5f2fefc7f4
--- /dev/null
+++ b/erigon-lib/diagnostics/provider_test.go
@@ -0,0 +1,106 @@
+package diagnostics_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type testInfo struct {
+ count int
+}
+
+func (ti testInfo) Type() diagnostics.Type {
+ return diagnostics.TypeOf(ti)
+}
+
+type testProvider struct {
+}
+
+func (t *testProvider) StartDiagnostics(ctx context.Context) error {
+ timer := time.NewTicker(1 * time.Second)
+ defer timer.Stop()
+
+ var count int
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-timer.C:
+ diagnostics.Send(testInfo{count})
+ count++
+ }
+ }
+}
+
+func TestProviderRegistration(t *testing.T) {
+
+ // diagnostics provider
+ provider := &testProvider{}
+ diagnostics.RegisterProvider(provider, diagnostics.TypeOf(testInfo{}), log.Root())
+
+ // diagnostics receiver
+ ctx, ch, cancel := diagnostics.Context[testInfo](context.Background(), 1)
+ diagnostics.StartProviders(ctx, diagnostics.TypeOf(testInfo{}), log.Root())
+
+ for info := range ch {
+ if info.count == 3 {
+ cancel()
+ }
+ }
+}
+
+func TestDelayedProviderRegistration(t *testing.T) {
+
+ time.AfterFunc(1*time.Second, func() {
+ // diagnostics provider
+ provider := &testProvider{}
+ diagnostics.RegisterProvider(provider, diagnostics.TypeOf(testInfo{}), log.Root())
+ })
+
+ // diagnostics receiver
+ ctx, ch, cancel := diagnostics.Context[testInfo](context.Background(), 1)
+ diagnostics.StartProviders(ctx, diagnostics.TypeOf(testInfo{}), log.Root())
+
+ for info := range ch {
+ if info.count == 3 {
+ cancel()
+ }
+ }
+}
+
+func TestProviderFuncRegistration(t *testing.T) {
+
+ // diagnostics provider
+ diagnostics.RegisterProvider(diagnostics.ProviderFunc(func(ctx context.Context) error {
+ timer := time.NewTicker(1 * time.Second)
+ defer timer.Stop()
+
+ var count int
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-timer.C:
+ diagnostics.Send(testInfo{count})
+ count++
+ }
+ }
+ }), diagnostics.TypeOf(testInfo{}), log.Root())
+
+ // diagnostics receiver
+ ctx, ch, cancel := diagnostics.Context[testInfo](context.Background(), 1)
+
+ diagnostics.StartProviders(ctx, diagnostics.TypeOf(testInfo{}), log.Root())
+
+ for info := range ch {
+ if info.count == 3 {
+ cancel()
+ }
+ }
+}
diff --git a/erigon-lib/direct/downloader_client.go b/erigon-lib/direct/downloader_client.go
new file mode 100644
index 00000000000..319e3bcd1d2
--- /dev/null
+++ b/erigon-lib/direct/downloader_client.go
@@ -0,0 +1,50 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+
+ proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type DownloaderClient struct {
+ server proto_downloader.DownloaderServer
+}
+
+func NewDownloaderClient(server proto_downloader.DownloaderServer) *DownloaderClient {
+ return &DownloaderClient{server: server}
+}
+
+func (c *DownloaderClient) Add(ctx context.Context, in *proto_downloader.AddRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.Add(ctx, in)
+}
+
+func (c *DownloaderClient) ProhibitNewDownloads(ctx context.Context, in *proto_downloader.ProhibitNewDownloadsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.ProhibitNewDownloads(ctx, in)
+}
+func (c *DownloaderClient) Delete(ctx context.Context, in *proto_downloader.DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.Delete(ctx, in)
+}
+func (c *DownloaderClient) Verify(ctx context.Context, in *proto_downloader.VerifyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.Verify(ctx, in)
+}
+func (c *DownloaderClient) Stats(ctx context.Context, in *proto_downloader.StatsRequest, opts ...grpc.CallOption) (*proto_downloader.StatsReply, error) {
+ return c.server.Stats(ctx, in)
+}
diff --git a/erigon-lib/direct/eth_backend_client.go b/erigon-lib/direct/eth_backend_client.go
new file mode 100644
index 00000000000..7d100a5ee03
--- /dev/null
+++ b/erigon-lib/direct/eth_backend_client.go
@@ -0,0 +1,220 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "io"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type EthBackendClientDirect struct {
+ server remote.ETHBACKENDServer
+}
+
+func NewEthBackendClientDirect(server remote.ETHBACKENDServer) *EthBackendClientDirect {
+ return &EthBackendClientDirect{server: server}
+}
+
+func (s *EthBackendClientDirect) Etherbase(ctx context.Context, in *remote.EtherbaseRequest, opts ...grpc.CallOption) (*remote.EtherbaseReply, error) {
+ return s.server.Etherbase(ctx, in)
+}
+
+func (s *EthBackendClientDirect) NetVersion(ctx context.Context, in *remote.NetVersionRequest, opts ...grpc.CallOption) (*remote.NetVersionReply, error) {
+ return s.server.NetVersion(ctx, in)
+}
+
+func (s *EthBackendClientDirect) NetPeerCount(ctx context.Context, in *remote.NetPeerCountRequest, opts ...grpc.CallOption) (*remote.NetPeerCountReply, error) {
+ return s.server.NetPeerCount(ctx, in)
+}
+
+func (s *EthBackendClientDirect) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ return s.server.Version(ctx, in)
+}
+
+func (s *EthBackendClientDirect) ProtocolVersion(ctx context.Context, in *remote.ProtocolVersionRequest, opts ...grpc.CallOption) (*remote.ProtocolVersionReply, error) {
+ return s.server.ProtocolVersion(ctx, in)
+}
+
+func (s *EthBackendClientDirect) ClientVersion(ctx context.Context, in *remote.ClientVersionRequest, opts ...grpc.CallOption) (*remote.ClientVersionReply, error) {
+ return s.server.ClientVersion(ctx, in)
+}
+
+// -- start Subscribe
+
+func (s *EthBackendClientDirect) Subscribe(ctx context.Context, in *remote.SubscribeRequest, opts ...grpc.CallOption) (remote.ETHBACKEND_SubscribeClient, error) {
+ ch := make(chan *subscribeReply, 16384)
+ streamServer := &SubscribeStreamS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.Subscribe(in, streamServer))
+ }()
+ return &SubscribeStreamC{ch: ch, ctx: ctx}, nil
+}
+
+type subscribeReply struct {
+ r *remote.SubscribeReply
+ err error
+}
+type SubscribeStreamS struct {
+ ch chan *subscribeReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *SubscribeStreamS) Send(m *remote.SubscribeReply) error {
+ s.ch <- &subscribeReply{r: m}
+ return nil
+}
+func (s *SubscribeStreamS) Context() context.Context { return s.ctx }
+func (s *SubscribeStreamS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &subscribeReply{err: err}
+}
+
+type SubscribeStreamC struct {
+ ch chan *subscribeReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *SubscribeStreamC) Recv() (*remote.SubscribeReply, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *SubscribeStreamC) Context() context.Context { return c.ctx }
+
+// -- end Subscribe
+
+// -- SubscribeLogs
+
+func (s *EthBackendClientDirect) SubscribeLogs(ctx context.Context, opts ...grpc.CallOption) (remote.ETHBACKEND_SubscribeLogsClient, error) {
+ subscribeLogsRequestChan := make(chan *subscribeLogsRequest, 16384)
+ subscribeLogsReplyChan := make(chan *subscribeLogsReply, 16384)
+ srv := &SubscribeLogsStreamS{
+ chSend: subscribeLogsReplyChan,
+ chRecv: subscribeLogsRequestChan,
+ ctx: ctx,
+ }
+ go func() {
+ defer close(subscribeLogsRequestChan)
+ defer close(subscribeLogsReplyChan)
+ srv.Err(s.server.SubscribeLogs(srv))
+ }()
+ cli := &SubscribeLogsStreamC{
+ chSend: subscribeLogsRequestChan,
+ chRecv: subscribeLogsReplyChan,
+ ctx: ctx,
+ }
+ return cli, nil
+}
+
+type SubscribeLogsStreamS struct {
+ chSend chan *subscribeLogsReply
+ chRecv chan *subscribeLogsRequest
+ ctx context.Context
+ grpc.ServerStream
+}
+
+type subscribeLogsReply struct {
+ r *remote.SubscribeLogsReply
+ err error
+}
+
+type subscribeLogsRequest struct {
+ r *remote.LogsFilterRequest
+ err error
+}
+
+func (s *SubscribeLogsStreamS) Send(m *remote.SubscribeLogsReply) error {
+ s.chSend <- &subscribeLogsReply{r: m}
+ return nil
+}
+
+func (s *SubscribeLogsStreamS) Recv() (*remote.LogsFilterRequest, error) {
+ m, ok := <-s.chRecv
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+
+func (s *SubscribeLogsStreamS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.chSend <- &subscribeLogsReply{err: err}
+}
+
+type SubscribeLogsStreamC struct {
+ chSend chan *subscribeLogsRequest
+ chRecv chan *subscribeLogsReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *SubscribeLogsStreamC) Send(m *remote.LogsFilterRequest) error {
+ c.chSend <- &subscribeLogsRequest{r: m}
+ return nil
+}
+
+func (c *SubscribeLogsStreamC) Recv() (*remote.SubscribeLogsReply, error) {
+ m, ok := <-c.chRecv
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+
+// -- end SubscribeLogs
+
+func (s *EthBackendClientDirect) Block(ctx context.Context, in *remote.BlockRequest, opts ...grpc.CallOption) (*remote.BlockReply, error) {
+ return s.server.Block(ctx, in)
+}
+
+func (s *EthBackendClientDirect) TxnLookup(ctx context.Context, in *remote.TxnLookupRequest, opts ...grpc.CallOption) (*remote.TxnLookupReply, error) {
+ return s.server.TxnLookup(ctx, in)
+}
+
+func (s *EthBackendClientDirect) NodeInfo(ctx context.Context, in *remote.NodesInfoRequest, opts ...grpc.CallOption) (*remote.NodesInfoReply, error) {
+ return s.server.NodeInfo(ctx, in)
+}
+
+func (s *EthBackendClientDirect) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*remote.PeersReply, error) {
+ return s.server.Peers(ctx, in)
+}
+
+func (s *EthBackendClientDirect) AddPeer(ctx context.Context, in *remote.AddPeerRequest, opts ...grpc.CallOption) (*remote.AddPeerReply, error) {
+ return s.server.AddPeer(ctx, in)
+}
+
+func (s *EthBackendClientDirect) PendingBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*remote.PendingBlockReply, error) {
+ return s.server.PendingBlock(ctx, in)
+}
+
+func (s *EthBackendClientDirect) BorEvent(ctx context.Context, in *remote.BorEventRequest, opts ...grpc.CallOption) (*remote.BorEventReply, error) {
+ return s.server.BorEvent(ctx, in)
+}
diff --git a/erigon-lib/direct/execution_client.go b/erigon-lib/direct/execution_client.go
new file mode 100644
index 00000000000..e2e8d8da1c9
--- /dev/null
+++ b/erigon-lib/direct/execution_client.go
@@ -0,0 +1,102 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type ExecutionClientDirect struct {
+ server execution.ExecutionServer
+}
+
+func NewExecutionClientDirect(server execution.ExecutionServer) execution.ExecutionClient {
+ return &ExecutionClientDirect{server: server}
+}
+
+func (s *ExecutionClientDirect) AssembleBlock(ctx context.Context, in *execution.AssembleBlockRequest, opts ...grpc.CallOption) (*execution.AssembleBlockResponse, error) {
+ return s.server.AssembleBlock(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetBodiesByHashes(ctx context.Context, in *execution.GetBodiesByHashesRequest, opts ...grpc.CallOption) (*execution.GetBodiesBatchResponse, error) {
+ return s.server.GetBodiesByHashes(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetBodiesByRange(ctx context.Context, in *execution.GetBodiesByRangeRequest, opts ...grpc.CallOption) (*execution.GetBodiesBatchResponse, error) {
+ return s.server.GetBodiesByRange(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetAssembledBlock(ctx context.Context, in *execution.GetAssembledBlockRequest, opts ...grpc.CallOption) (*execution.GetAssembledBlockResponse, error) {
+ return s.server.GetAssembledBlock(ctx, in)
+}
+
+// Chain Putters.
+func (s *ExecutionClientDirect) InsertBlocks(ctx context.Context, in *execution.InsertBlocksRequest, opts ...grpc.CallOption) (*execution.InsertionResult, error) {
+ return s.server.InsertBlocks(ctx, in)
+}
+
+// Chain Validation and ForkChoice.
+func (s *ExecutionClientDirect) ValidateChain(ctx context.Context, in *execution.ValidationRequest, opts ...grpc.CallOption) (*execution.ValidationReceipt, error) {
+ return s.server.ValidateChain(ctx, in)
+
+}
+
+func (s *ExecutionClientDirect) UpdateForkChoice(ctx context.Context, in *execution.ForkChoice, opts ...grpc.CallOption) (*execution.ForkChoiceReceipt, error) {
+ return s.server.UpdateForkChoice(ctx, in)
+}
+
+// Chain Getters.
+func (s *ExecutionClientDirect) GetHeader(ctx context.Context, in *execution.GetSegmentRequest, opts ...grpc.CallOption) (*execution.GetHeaderResponse, error) {
+ return s.server.GetHeader(ctx, in)
+}
+
+func (s *ExecutionClientDirect) CurrentHeader(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*execution.GetHeaderResponse, error) {
+ return s.server.CurrentHeader(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetTD(ctx context.Context, in *execution.GetSegmentRequest, opts ...grpc.CallOption) (*execution.GetTDResponse, error) {
+ return s.server.GetTD(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetBody(ctx context.Context, in *execution.GetSegmentRequest, opts ...grpc.CallOption) (*execution.GetBodyResponse, error) {
+ return s.server.GetBody(ctx, in)
+}
+
+func (s *ExecutionClientDirect) IsCanonicalHash(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*execution.IsCanonicalResponse, error) {
+ return s.server.IsCanonicalHash(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetHeaderHashNumber(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*execution.GetHeaderHashNumberResponse, error) {
+ return s.server.GetHeaderHashNumber(ctx, in)
+}
+
+func (s *ExecutionClientDirect) GetForkChoice(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*execution.ForkChoice, error) {
+ return s.server.GetForkChoice(ctx, in)
+}
+
+func (s *ExecutionClientDirect) Ready(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*execution.ReadyResponse, error) {
+ return s.server.Ready(ctx, in)
+}
+
+func (s *ExecutionClientDirect) FrozenBlocks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*execution.FrozenBlocksResponse, error) {
+ return s.server.FrozenBlocks(ctx, in)
+}
diff --git a/erigon-lib/direct/mining_client.go b/erigon-lib/direct/mining_client.go
new file mode 100644
index 00000000000..c6db989e0cc
--- /dev/null
+++ b/erigon-lib/direct/mining_client.go
@@ -0,0 +1,214 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "io"
+
+ txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+var _ txpool_proto.MiningClient = (*MiningClient)(nil)
+
+type MiningClient struct {
+ server txpool_proto.MiningServer
+}
+
+func NewMiningClient(server txpool_proto.MiningServer) *MiningClient {
+ return &MiningClient{server: server}
+}
+
+func (s *MiningClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ return s.server.Version(ctx, in)
+}
+
+// -- start OnPendingBlock
+
+func (s *MiningClient) OnPendingBlock(ctx context.Context, in *txpool_proto.OnPendingBlockRequest, opts ...grpc.CallOption) (txpool_proto.Mining_OnPendingBlockClient, error) {
+ ch := make(chan *onPendigBlockReply, 16384)
+ streamServer := &MiningOnPendingBlockS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.OnPendingBlock(in, streamServer))
+ }()
+ return &MiningOnPendingBlockC{ch: ch, ctx: ctx}, nil
+}
+
+type onPendigBlockReply struct {
+ r *txpool_proto.OnPendingBlockReply
+ err error
+}
+
+type MiningOnPendingBlockS struct {
+ ch chan *onPendigBlockReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *MiningOnPendingBlockS) Send(m *txpool_proto.OnPendingBlockReply) error {
+ s.ch <- &onPendigBlockReply{r: m}
+ return nil
+}
+func (s *MiningOnPendingBlockS) Context() context.Context { return s.ctx }
+func (s *MiningOnPendingBlockS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &onPendigBlockReply{err: err}
+}
+
+type MiningOnPendingBlockC struct {
+ ch chan *onPendigBlockReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *MiningOnPendingBlockC) Recv() (*txpool_proto.OnPendingBlockReply, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *MiningOnPendingBlockC) Context() context.Context { return c.ctx }
+
+// -- end OnPendingBlock
+// -- start OnMinedBlock
+
+func (s *MiningClient) OnMinedBlock(ctx context.Context, in *txpool_proto.OnMinedBlockRequest, opts ...grpc.CallOption) (txpool_proto.Mining_OnMinedBlockClient, error) {
+ ch := make(chan *onMinedBlockReply, 16384)
+ streamServer := &MiningOnMinedBlockS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.OnMinedBlock(in, streamServer))
+ }()
+ return &MiningOnMinedBlockC{ch: ch, ctx: ctx}, nil
+}
+
+type onMinedBlockReply struct {
+ r *txpool_proto.OnMinedBlockReply
+ err error
+}
+
+type MiningOnMinedBlockS struct {
+ ch chan *onMinedBlockReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *MiningOnMinedBlockS) Send(m *txpool_proto.OnMinedBlockReply) error {
+ s.ch <- &onMinedBlockReply{r: m}
+ return nil
+}
+func (s *MiningOnMinedBlockS) Context() context.Context { return s.ctx }
+func (s *MiningOnMinedBlockS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &onMinedBlockReply{err: err}
+}
+
+type MiningOnMinedBlockC struct {
+ ch chan *onMinedBlockReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *MiningOnMinedBlockC) Recv() (*txpool_proto.OnMinedBlockReply, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *MiningOnMinedBlockC) Context() context.Context { return c.ctx }
+
+// -- end OnMinedBlock
+// -- end OnPendingLogs
+
+func (s *MiningClient) OnPendingLogs(ctx context.Context, in *txpool_proto.OnPendingLogsRequest, opts ...grpc.CallOption) (txpool_proto.Mining_OnPendingLogsClient, error) {
+ ch := make(chan *onPendingLogsReply, 16384)
+ streamServer := &MiningOnPendingLogsS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.OnPendingLogs(in, streamServer))
+ }()
+ return &MiningOnPendingLogsC{ch: ch, ctx: ctx}, nil
+}
+
+type onPendingLogsReply struct {
+ r *txpool_proto.OnPendingLogsReply
+ err error
+}
+type MiningOnPendingLogsS struct {
+ ch chan *onPendingLogsReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *MiningOnPendingLogsS) Send(m *txpool_proto.OnPendingLogsReply) error {
+ s.ch <- &onPendingLogsReply{r: m}
+ return nil
+}
+func (s *MiningOnPendingLogsS) Context() context.Context { return s.ctx }
+func (s *MiningOnPendingLogsS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &onPendingLogsReply{err: err}
+}
+
+type MiningOnPendingLogsC struct {
+ ch chan *onPendingLogsReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *MiningOnPendingLogsC) Recv() (*txpool_proto.OnPendingLogsReply, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *MiningOnPendingLogsC) Context() context.Context { return c.ctx }
+
+// -- end OnPendingLogs
+
+func (s *MiningClient) GetWork(ctx context.Context, in *txpool_proto.GetWorkRequest, opts ...grpc.CallOption) (*txpool_proto.GetWorkReply, error) {
+ return s.server.GetWork(ctx, in)
+}
+
+func (s *MiningClient) SubmitWork(ctx context.Context, in *txpool_proto.SubmitWorkRequest, opts ...grpc.CallOption) (*txpool_proto.SubmitWorkReply, error) {
+ return s.server.SubmitWork(ctx, in)
+}
+
+func (s *MiningClient) SubmitHashRate(ctx context.Context, in *txpool_proto.SubmitHashRateRequest, opts ...grpc.CallOption) (*txpool_proto.SubmitHashRateReply, error) {
+ return s.server.SubmitHashRate(ctx, in)
+}
+
+func (s *MiningClient) HashRate(ctx context.Context, in *txpool_proto.HashRateRequest, opts ...grpc.CallOption) (*txpool_proto.HashRateReply, error) {
+ return s.server.HashRate(ctx, in)
+}
+
+func (s *MiningClient) Mining(ctx context.Context, in *txpool_proto.MiningRequest, opts ...grpc.CallOption) (*txpool_proto.MiningReply, error) {
+ return s.server.Mining(ctx, in)
+}
diff --git a/erigon-lib/direct/sentinel_client.go b/erigon-lib/direct/sentinel_client.go
new file mode 100644
index 00000000000..f421f4332bf
--- /dev/null
+++ b/erigon-lib/direct/sentinel_client.go
@@ -0,0 +1,122 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "io"
+
+ "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinel"
+ "google.golang.org/grpc"
+)
+
+type SentinelClientDirect struct {
+ server sentinel.SentinelServer
+}
+
+func NewSentinelClientDirect(sentinel sentinel.SentinelServer) sentinel.SentinelClient {
+ return &SentinelClientDirect{server: sentinel}
+}
+
+func (s *SentinelClientDirect) SendRequest(ctx context.Context, in *sentinel.RequestData, opts ...grpc.CallOption) (*sentinel.ResponseData, error) {
+ return s.server.SendRequest(ctx, in)
+}
+
+func (s *SentinelClientDirect) SetStatus(ctx context.Context, in *sentinel.Status, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.SetStatus(ctx, in)
+}
+
+func (s *SentinelClientDirect) GetPeers(ctx context.Context, in *sentinel.EmptyMessage, opts ...grpc.CallOption) (*sentinel.PeerCount, error) {
+ return s.server.GetPeers(ctx, in)
+}
+
+func (s *SentinelClientDirect) BanPeer(ctx context.Context, p *sentinel.Peer, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.BanPeer(ctx, p)
+}
+func (s *SentinelClientDirect) UnbanPeer(ctx context.Context, p *sentinel.Peer, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.UnbanPeer(ctx, p)
+}
+func (s *SentinelClientDirect) RewardPeer(ctx context.Context, p *sentinel.Peer, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.RewardPeer(ctx, p)
+}
+func (s *SentinelClientDirect) PenalizePeer(ctx context.Context, p *sentinel.Peer, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.PenalizePeer(ctx, p)
+}
+
+func (s *SentinelClientDirect) PublishGossip(ctx context.Context, in *sentinel.GossipData, opts ...grpc.CallOption) (*sentinel.EmptyMessage, error) {
+ return s.server.PublishGossip(ctx, in)
+}
+
+// Subscribe gossip part. the only complex section of this bullshit
+
+func (s *SentinelClientDirect) SubscribeGossip(ctx context.Context, in *sentinel.EmptyMessage, opts ...grpc.CallOption) (sentinel.Sentinel_SubscribeGossipClient, error) {
+ ch := make(chan *gossipReply, 16384)
+ streamServer := &SentinelSubscribeGossipS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.SubscribeGossip(in, streamServer))
+ }()
+ return &SentinelSubscribeGossipC{ch: ch, ctx: ctx}, nil
+}
+
+type SentinelSubscribeGossipC struct {
+ ch chan *gossipReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *SentinelSubscribeGossipC) Recv() (*sentinel.GossipData, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *SentinelSubscribeGossipC) Context() context.Context { return c.ctx }
+
+type SentinelSubscribeGossipS struct {
+ ch chan *gossipReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+type gossipReply struct {
+ r *sentinel.GossipData
+ err error
+}
+
+func (s *SentinelSubscribeGossipS) Send(m *sentinel.GossipData) error {
+ s.ch <- &gossipReply{r: m}
+ return nil
+}
+func (s *SentinelSubscribeGossipS) Context() context.Context { return s.ctx }
+func (s *SentinelSubscribeGossipS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &gossipReply{err: err}
+}
+
+func (s *SentinelClientDirect) GetPeersStatistics() map[string]*diagnostics.PeerStatistics {
+
+ if diag, ok := s.server.(diagnostics.PeerStatisticsGetter); ok {
+ return diag.GetPeersStatistics()
+ }
+
+ return map[string]*diagnostics.PeerStatistics{}
+}
diff --git a/erigon-lib/direct/sentry_client.go b/erigon-lib/direct/sentry_client.go
new file mode 100644
index 00000000000..cf01003e71d
--- /dev/null
+++ b/erigon-lib/direct/sentry_client.go
@@ -0,0 +1,392 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/emptypb"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+)
+
+const (
+ ETH65 = 65
+ ETH66 = 66
+ ETH67 = 67
+ ETH68 = 68
+)
+
+var ProtoIds = map[uint]map[sentry.MessageId]struct{}{
+ ETH65: {
+ sentry.MessageId_GET_BLOCK_HEADERS_65: struct{}{},
+ sentry.MessageId_BLOCK_HEADERS_65: struct{}{},
+ sentry.MessageId_GET_BLOCK_BODIES_65: struct{}{},
+ sentry.MessageId_BLOCK_BODIES_65: struct{}{},
+ sentry.MessageId_GET_NODE_DATA_65: struct{}{},
+ sentry.MessageId_NODE_DATA_65: struct{}{},
+ sentry.MessageId_GET_RECEIPTS_65: struct{}{},
+ sentry.MessageId_RECEIPTS_65: struct{}{},
+ sentry.MessageId_NEW_BLOCK_HASHES_65: struct{}{},
+ sentry.MessageId_NEW_BLOCK_65: struct{}{},
+ sentry.MessageId_TRANSACTIONS_65: struct{}{},
+ sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_65: struct{}{},
+ sentry.MessageId_GET_POOLED_TRANSACTIONS_65: struct{}{},
+ sentry.MessageId_POOLED_TRANSACTIONS_65: struct{}{},
+ },
+ ETH66: {
+ sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_GET_NODE_DATA_66: struct{}{},
+ sentry.MessageId_NODE_DATA_66: struct{}{},
+ sentry.MessageId_GET_RECEIPTS_66: struct{}{},
+ sentry.MessageId_RECEIPTS_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_66: struct{}{},
+ sentry.MessageId_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{},
+ sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{},
+ },
+ ETH67: {
+ sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_GET_RECEIPTS_66: struct{}{},
+ sentry.MessageId_RECEIPTS_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_66: struct{}{},
+ sentry.MessageId_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{},
+ sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{},
+ },
+ ETH68: {
+ sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_BLOCK_HEADERS_66: struct{}{},
+ sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_BLOCK_BODIES_66: struct{}{},
+ sentry.MessageId_GET_RECEIPTS_66: struct{}{},
+ sentry.MessageId_RECEIPTS_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{},
+ sentry.MessageId_NEW_BLOCK_66: struct{}{},
+ sentry.MessageId_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68: struct{}{},
+ sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{},
+ sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{},
+ },
+}
+
+type SentryClient interface {
+ sentry.SentryClient
+ Protocol() uint
+ Ready() bool
+ MarkDisconnected()
+}
+
+type SentryClientRemote struct {
+ sentry.SentryClient
+ sync.RWMutex
+ protocol uint
+ ready bool
+}
+
+var _ SentryClient = (*SentryClientRemote)(nil) // compile-time interface check
+var _ SentryClient = (*SentryClientDirect)(nil) // compile-time interface check
+
+// NewSentryClientRemote - app code must use this class
+// to avoid concurrency - it accepts protocol (which received async by SetStatus) in constructor,
+// means app can't use client which protocol unknown yet
+func NewSentryClientRemote(client sentry.SentryClient) *SentryClientRemote {
+ return &SentryClientRemote{SentryClient: client}
+}
+
+func (c *SentryClientRemote) Protocol() uint {
+ c.RLock()
+ defer c.RUnlock()
+ return c.protocol
+}
+
+func (c *SentryClientRemote) Ready() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.ready
+}
+
+func (c *SentryClientRemote) MarkDisconnected() {
+ c.Lock()
+ defer c.Unlock()
+ c.ready = false
+}
+
+func (c *SentryClientRemote) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.HandShakeReply, error) {
+ reply, err := c.SentryClient.HandShake(ctx, in, opts...)
+ if err != nil {
+ return nil, err
+ }
+ c.Lock()
+ defer c.Unlock()
+ switch reply.Protocol {
+ case sentry.Protocol_ETH65:
+ c.protocol = ETH65
+ case sentry.Protocol_ETH66:
+ c.protocol = ETH66
+ case sentry.Protocol_ETH67:
+ c.protocol = ETH67
+ case sentry.Protocol_ETH68:
+ c.protocol = ETH68
+ default:
+ return nil, fmt.Errorf("unexpected protocol: %d", reply.Protocol)
+ }
+ c.ready = true
+ return reply, nil
+}
+func (c *SentryClientRemote) SetStatus(ctx context.Context, in *sentry.StatusData, opts ...grpc.CallOption) (*sentry.SetStatusReply, error) {
+ return c.SentryClient.SetStatus(ctx, in, opts...)
+}
+
+func (c *SentryClientRemote) Messages(ctx context.Context, in *sentry.MessagesRequest, opts ...grpc.CallOption) (sentry.Sentry_MessagesClient, error) {
+ in.Ids = filterIds(in.Ids, c.Protocol())
+ return c.SentryClient.Messages(ctx, in, opts...)
+}
+
+func (c *SentryClientRemote) PeerCount(ctx context.Context, in *sentry.PeerCountRequest, opts ...grpc.CallOption) (*sentry.PeerCountReply, error) {
+ return c.SentryClient.PeerCount(ctx, in)
+}
+
+// Contains implementations of SentryServer, SentryClient, ControlClient, and ControlServer, that may be linked to each other
+// SentryClient is linked directly to the SentryServer, for example, so any function call on the instance of the SentryClient
+// cause invocations directly on the corresponding instance of the SentryServer. However, the link between SentryClient and
+// SentryServer is established outside of the constructor. This means that the reference from the SentyClient to the corresponding
+// SentryServer can be injected at any point in time.
+
+// SentryClientDirect implements SentryClient interface by connecting the instance of the client directly with the corresponding
+// instance of SentryServer
+type SentryClientDirect struct {
+ server sentry.SentryServer
+ protocol uint
+}
+
+func NewSentryClientDirect(protocol uint, sentryServer sentry.SentryServer) *SentryClientDirect {
+ return &SentryClientDirect{protocol: protocol, server: sentryServer}
+}
+
+func (c *SentryClientDirect) Protocol() uint { return c.protocol }
+func (c *SentryClientDirect) Ready() bool { return true }
+func (c *SentryClientDirect) MarkDisconnected() {}
+
+func (c *SentryClientDirect) PenalizePeer(ctx context.Context, in *sentry.PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.PenalizePeer(ctx, in)
+}
+
+func (c *SentryClientDirect) PeerMinBlock(ctx context.Context, in *sentry.PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ return c.server.PeerMinBlock(ctx, in)
+}
+
+func (c *SentryClientDirect) SendMessageByMinBlock(ctx context.Context, in *sentry.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) {
+ return c.server.SendMessageByMinBlock(ctx, in)
+}
+
+func (c *SentryClientDirect) SendMessageById(ctx context.Context, in *sentry.SendMessageByIdRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) {
+ return c.server.SendMessageById(ctx, in)
+}
+
+func (c *SentryClientDirect) SendMessageToRandomPeers(ctx context.Context, in *sentry.SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) {
+ return c.server.SendMessageToRandomPeers(ctx, in)
+}
+
+func (c *SentryClientDirect) SendMessageToAll(ctx context.Context, in *sentry.OutboundMessageData, opts ...grpc.CallOption) (*sentry.SentPeers, error) {
+ return c.server.SendMessageToAll(ctx, in)
+}
+
+func (c *SentryClientDirect) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.HandShakeReply, error) {
+ return c.server.HandShake(ctx, in)
+}
+
+func (c *SentryClientDirect) SetStatus(ctx context.Context, in *sentry.StatusData, opts ...grpc.CallOption) (*sentry.SetStatusReply, error) {
+ return c.server.SetStatus(ctx, in)
+}
+
+func (c *SentryClientDirect) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.PeersReply, error) {
+ return c.server.Peers(ctx, in)
+}
+
+func (c *SentryClientDirect) PeerCount(ctx context.Context, in *sentry.PeerCountRequest, opts ...grpc.CallOption) (*sentry.PeerCountReply, error) {
+ return c.server.PeerCount(ctx, in)
+}
+
+func (c *SentryClientDirect) PeerById(ctx context.Context, in *sentry.PeerByIdRequest, opts ...grpc.CallOption) (*sentry.PeerByIdReply, error) {
+ return c.server.PeerById(ctx, in)
+}
+
+// -- start Messages
+
+func (c *SentryClientDirect) Messages(ctx context.Context, in *sentry.MessagesRequest, opts ...grpc.CallOption) (sentry.Sentry_MessagesClient, error) {
+ in.Ids = filterIds(in.Ids, c.Protocol())
+ ch := make(chan *inboundMessageReply, 16384)
+ streamServer := &SentryMessagesStreamS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(c.server.Messages(in, streamServer))
+ }()
+ return &SentryMessagesStreamC{ch: ch, ctx: ctx}, nil
+}
+
+type inboundMessageReply struct {
+ r *sentry.InboundMessage
+ err error
+}
+
+// SentryMessagesStreamS implements proto_sentry.Sentry_ReceiveMessagesServer
+type SentryMessagesStreamS struct {
+ ch chan *inboundMessageReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *SentryMessagesStreamS) Send(m *sentry.InboundMessage) error {
+ s.ch <- &inboundMessageReply{r: m}
+ return nil
+}
+
+func (s *SentryMessagesStreamS) Context() context.Context { return s.ctx }
+
+func (s *SentryMessagesStreamS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &inboundMessageReply{err: err}
+}
+
+type SentryMessagesStreamC struct {
+ ch chan *inboundMessageReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *SentryMessagesStreamC) Recv() (*sentry.InboundMessage, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+
+func (c *SentryMessagesStreamC) Context() context.Context { return c.ctx }
+
+func (c *SentryMessagesStreamC) RecvMsg(anyMessage interface{}) error {
+ m, err := c.Recv()
+ if err != nil {
+ return err
+ }
+ outMessage := anyMessage.(*sentry.InboundMessage)
+ proto.Merge(outMessage, m)
+ return nil
+}
+
+// -- end Messages
+// -- start Peers
+
+func (c *SentryClientDirect) PeerEvents(ctx context.Context, in *sentry.PeerEventsRequest, opts ...grpc.CallOption) (sentry.Sentry_PeerEventsClient, error) {
+ ch := make(chan *peersReply, 16384)
+ streamServer := &SentryPeersStreamS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(c.server.PeerEvents(in, streamServer))
+ }()
+ return &SentryPeersStreamC{ch: ch, ctx: ctx}, nil
+}
+
+func (c *SentryClientDirect) AddPeer(ctx context.Context, in *sentry.AddPeerRequest, opts ...grpc.CallOption) (*sentry.AddPeerReply, error) {
+ return c.server.AddPeer(ctx, in)
+}
+
+type peersReply struct {
+ r *sentry.PeerEvent
+ err error
+}
+
+// SentryPeersStreamS - implements proto_sentry.Sentry_ReceivePeersServer
+type SentryPeersStreamS struct {
+ ch chan *peersReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *SentryPeersStreamS) Send(m *sentry.PeerEvent) error {
+ s.ch <- &peersReply{r: m}
+ return nil
+}
+
+func (s *SentryPeersStreamS) Context() context.Context { return s.ctx }
+
+func (s *SentryPeersStreamS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &peersReply{err: err}
+}
+
+type SentryPeersStreamC struct {
+ ch chan *peersReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *SentryPeersStreamC) Recv() (*sentry.PeerEvent, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+
+func (c *SentryPeersStreamC) Context() context.Context { return c.ctx }
+
+func (c *SentryPeersStreamC) RecvMsg(anyMessage interface{}) error {
+ m, err := c.Recv()
+ if err != nil {
+ return err
+ }
+ outMessage := anyMessage.(*sentry.PeerEvent)
+ proto.Merge(outMessage, m)
+ return nil
+}
+
+// -- end Peers
+
+func (c *SentryClientDirect) NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error) {
+ return c.server.NodeInfo(ctx, in)
+}
+
+func filterIds(in []sentry.MessageId, protocol uint) (filtered []sentry.MessageId) {
+ for _, id := range in {
+ if _, ok := ProtoIds[protocol][id]; ok {
+ filtered = append(filtered, id)
+ }
+ }
+ return filtered
+}
diff --git a/erigon-lib/direct/state_diff_client.go b/erigon-lib/direct/state_diff_client.go
new file mode 100644
index 00000000000..8c798c10546
--- /dev/null
+++ b/erigon-lib/direct/state_diff_client.go
@@ -0,0 +1,99 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "io"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "google.golang.org/grpc"
+)
+
+type StateDiffClient interface {
+ StateChanges(ctx context.Context, in *remote.StateChangeRequest, opts ...grpc.CallOption) (remote.KV_StateChangesClient, error)
+ Snapshots(ctx context.Context, in *remote.SnapshotsRequest, opts ...grpc.CallOption) (*remote.SnapshotsReply, error)
+}
+
+var _ StateDiffClient = (*StateDiffClientDirect)(nil) // compile-time interface check
+
+// SentryClientDirect implements SentryClient interface by connecting the instance of the client directly with the corresponding
+// instance of SentryServer
+type StateDiffClientDirect struct {
+ server remote.KVServer
+}
+
+func NewStateDiffClientDirect(server remote.KVServer) *StateDiffClientDirect {
+ return &StateDiffClientDirect{server: server}
+}
+
+func (c *StateDiffClientDirect) Snapshots(ctx context.Context, in *remote.SnapshotsRequest, opts ...grpc.CallOption) (*remote.SnapshotsReply, error) {
+ return c.server.Snapshots(ctx, in)
+}
+
+// -- start StateChanges
+
+func (c *StateDiffClientDirect) StateChanges(ctx context.Context, in *remote.StateChangeRequest, opts ...grpc.CallOption) (remote.KV_StateChangesClient, error) {
+ ch := make(chan *stateDiffReply, 16384)
+ streamServer := &StateDiffStreamS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(c.server.StateChanges(in, streamServer))
+ }()
+ return &StateDiffStreamC{ch: ch, ctx: ctx}, nil
+}
+
+type stateDiffReply struct {
+ r *remote.StateChangeBatch
+ err error
+}
+
+type StateDiffStreamC struct {
+ ch chan *stateDiffReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *StateDiffStreamC) Recv() (*remote.StateChangeBatch, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *StateDiffStreamC) Context() context.Context { return c.ctx }
+
+// StateDiffStreamS implements proto_sentry.Sentry_ReceiveMessagesServer
+type StateDiffStreamS struct {
+ ch chan *stateDiffReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *StateDiffStreamS) Send(m *remote.StateChangeBatch) error {
+ s.ch <- &stateDiffReply{r: m}
+ return nil
+}
+func (s *StateDiffStreamS) Context() context.Context { return s.ctx }
+func (s *StateDiffStreamS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &stateDiffReply{err: err}
+}
+
+// -- end StateChanges
diff --git a/erigon-lib/direct/txpool_client.go b/erigon-lib/direct/txpool_client.go
new file mode 100644
index 00000000000..5e54409b640
--- /dev/null
+++ b/erigon-lib/direct/txpool_client.go
@@ -0,0 +1,121 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package direct
+
+import (
+ "context"
+ "io"
+
+ txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+var _ txpool_proto.TxpoolClient = (*TxPoolClient)(nil)
+
+type TxPoolClient struct {
+ server txpool_proto.TxpoolServer
+}
+
+func NewTxPoolClient(server txpool_proto.TxpoolServer) *TxPoolClient {
+ return &TxPoolClient{server}
+}
+
+func (s *TxPoolClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ return s.server.Version(ctx, in)
+}
+
+func (s *TxPoolClient) FindUnknown(ctx context.Context, in *txpool_proto.TxHashes, opts ...grpc.CallOption) (*txpool_proto.TxHashes, error) {
+ return s.server.FindUnknown(ctx, in)
+}
+
+func (s *TxPoolClient) Add(ctx context.Context, in *txpool_proto.AddRequest, opts ...grpc.CallOption) (*txpool_proto.AddReply, error) {
+ return s.server.Add(ctx, in)
+}
+
+func (s *TxPoolClient) Transactions(ctx context.Context, in *txpool_proto.TransactionsRequest, opts ...grpc.CallOption) (*txpool_proto.TransactionsReply, error) {
+ return s.server.Transactions(ctx, in)
+}
+
+func (s *TxPoolClient) All(ctx context.Context, in *txpool_proto.AllRequest, opts ...grpc.CallOption) (*txpool_proto.AllReply, error) {
+ return s.server.All(ctx, in)
+}
+
+func (s *TxPoolClient) Pending(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*txpool_proto.PendingReply, error) {
+ return s.server.Pending(ctx, in)
+}
+
+// -- start OnAdd
+
+func (s *TxPoolClient) OnAdd(ctx context.Context, in *txpool_proto.OnAddRequest, opts ...grpc.CallOption) (txpool_proto.Txpool_OnAddClient, error) {
+ ch := make(chan *onAddReply, 16384)
+ streamServer := &TxPoolOnAddS{ch: ch, ctx: ctx}
+ go func() {
+ defer close(ch)
+ streamServer.Err(s.server.OnAdd(in, streamServer))
+ }()
+ return &TxPoolOnAddC{ch: ch, ctx: ctx}, nil
+}
+
+type onAddReply struct {
+ r *txpool_proto.OnAddReply
+ err error
+}
+
+type TxPoolOnAddS struct {
+ ch chan *onAddReply
+ ctx context.Context
+ grpc.ServerStream
+}
+
+func (s *TxPoolOnAddS) Send(m *txpool_proto.OnAddReply) error {
+ s.ch <- &onAddReply{r: m}
+ return nil
+}
+func (s *TxPoolOnAddS) Context() context.Context { return s.ctx }
+func (s *TxPoolOnAddS) Err(err error) {
+ if err == nil {
+ return
+ }
+ s.ch <- &onAddReply{err: err}
+}
+
+type TxPoolOnAddC struct {
+ ch chan *onAddReply
+ ctx context.Context
+ grpc.ClientStream
+}
+
+func (c *TxPoolOnAddC) Recv() (*txpool_proto.OnAddReply, error) {
+ m, ok := <-c.ch
+ if !ok || m == nil {
+ return nil, io.EOF
+ }
+ return m.r, m.err
+}
+func (c *TxPoolOnAddC) Context() context.Context { return c.ctx }
+
+// -- end OnAdd
+
+func (s *TxPoolClient) Status(ctx context.Context, in *txpool_proto.StatusRequest, opts ...grpc.CallOption) (*txpool_proto.StatusReply, error) {
+ return s.server.Status(ctx, in)
+}
+
+func (s *TxPoolClient) Nonce(ctx context.Context, in *txpool_proto.NonceRequest, opts ...grpc.CallOption) (*txpool_proto.NonceReply, error) {
+ return s.server.Nonce(ctx, in)
+}
diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go
new file mode 100644
index 00000000000..3305455928a
--- /dev/null
+++ b/erigon-lib/downloader/downloader.go
@@ -0,0 +1,754 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloader
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/anacrolix/torrent"
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/anacrolix/torrent/storage"
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
+ "github.com/ledgerwatch/erigon-lib/diagnostics"
+ "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/exp/slices"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/sync/semaphore"
+)
+
+// Downloader - component which downloading historical files. Can use BitTorrent, or other protocols
+type Downloader struct {
+ db kv.RwDB
+ pieceCompletionDB storage.PieceCompletion
+ torrentClient *torrent.Client
+
+ cfg *downloadercfg.Cfg
+
+ statsLock *sync.RWMutex
+ stats AggStats
+
+ folder storage.ClientImplCloser
+
+ ctx context.Context
+ stopMainLoop context.CancelFunc
+ wg sync.WaitGroup
+
+ webseeds *WebSeeds
+ logger log.Logger
+ verbosity log.Lvl
+}
+
+type AggStats struct {
+ MetadataReady, FilesTotal int32
+ PeersUnique int32
+ ConnectionsTotal uint64
+
+ Completed bool
+ Progress float32
+
+ BytesCompleted, BytesTotal uint64
+ DroppedCompleted, DroppedTotal uint64
+
+ BytesDownload, BytesUpload uint64
+ UploadRate, DownloadRate uint64
+}
+
+func New(ctx context.Context, cfg *downloadercfg.Cfg, dirs datadir.Dirs, logger log.Logger, verbosity log.Lvl, discover bool) (*Downloader, error) {
+ db, c, m, torrentClient, err := openClient(ctx, cfg.Dirs.Downloader, cfg.Dirs.Snap, cfg.ClientConfig)
+ if err != nil {
+ return nil, fmt.Errorf("openClient: %w", err)
+ }
+
+ peerID, err := readPeerID(db)
+ if err != nil {
+ return nil, fmt.Errorf("get peer id: %w", err)
+ }
+ cfg.ClientConfig.PeerID = string(peerID)
+ if len(peerID) == 0 {
+ if err = savePeerID(db, torrentClient.PeerID()); err != nil {
+ return nil, fmt.Errorf("save peer id: %w", err)
+ }
+ }
+
+ d := &Downloader{
+ cfg: cfg,
+ db: db,
+ pieceCompletionDB: c,
+ folder: m,
+ torrentClient: torrentClient,
+ statsLock: &sync.RWMutex{},
+ webseeds: &WebSeeds{logger: logger, verbosity: verbosity, downloadTorrentFile: cfg.DownloadTorrentFilesFromWebseed, torrentsWhitelist: cfg.ExpectedTorrentFilesHashes},
+ logger: logger,
+ verbosity: verbosity,
+ }
+ d.ctx, d.stopMainLoop = context.WithCancel(ctx)
+
+ if err := d.BuildTorrentFilesIfNeed(d.ctx); err != nil {
+ return nil, err
+ }
+ if err := d.addTorrentFilesFromDisk(false); err != nil {
+ return nil, err
+ }
+
+ // CornerCase: no peers -> no anoncments to trackers -> no magnetlink resolution (but magnetlink has filename)
+ // means we can start adding weebseeds without waiting for `<-t.GotInfo()`
+ d.wg.Add(1)
+
+ go func() {
+ defer d.wg.Done()
+ if !discover {
+ return
+ }
+ d.webseeds.Discover(d.ctx, d.cfg.WebSeedS3Tokens, d.cfg.WebSeedUrls, d.cfg.WebSeedFiles, d.cfg.Dirs.Snap)
+ // webseeds.Discover may create new .torrent files on disk
+ if err := d.addTorrentFilesFromDisk(true); err != nil && !errors.Is(err, context.Canceled) {
+ d.logger.Warn("[snapshots] addTorrentFilesFromDisk", "err", err)
+ }
+ }()
+ return d, nil
+}
+
+const prohibitNewDownloadsFileName = "prohibit_new_downloads.lock"
+
+// Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast)
+// After "download once" - Erigon will produce and seed new files
+// Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts)
+func (d *Downloader) prohibitNewDownloads() error {
+ fPath := filepath.Join(d.SnapDir(), prohibitNewDownloadsFileName)
+ f, err := os.Create(fPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
+func (d *Downloader) newDownloadsAreProhibited() bool {
+ return dir.FileExist(filepath.Join(d.SnapDir(), prohibitNewDownloadsFileName))
+}
+
+func (d *Downloader) MainLoopInBackground(silent bool) {
+ d.wg.Add(1)
+ go func() {
+ defer d.wg.Done()
+ if err := d.mainLoop(silent); err != nil {
+ if !errors.Is(err, context.Canceled) {
+ d.logger.Warn("[snapshots]", "err", err)
+ }
+ }
+ }()
+}
+
+func (d *Downloader) mainLoop(silent bool) error {
+ var sem = semaphore.NewWeighted(int64(d.cfg.DownloadSlots))
+
+ d.wg.Add(1)
+ go func() {
+ defer d.wg.Done()
+
+ // Torrents that are already taken care of
+ //// First loop drops torrents that were downloaded or are already complete
+ //// This improves efficiency of download by reducing number of active torrent (empirical observation)
+ //for torrents := d.torrentClient.Torrents(); len(torrents) > 0; torrents = d.torrentClient.Torrents() {
+ // select {
+ // case <-d.ctx.Done():
+ // return
+ // default:
+ // }
+ // for _, t := range torrents {
+ // if _, already := torrentMap[t.InfoHash()]; already {
+ // continue
+ // }
+ // select {
+ // case <-d.ctx.Done():
+ // return
+ // case <-t.GotInfo():
+ // }
+ // if t.Complete.Bool() {
+ // atomic.AddUint64(&d.stats.DroppedCompleted, uint64(t.BytesCompleted()))
+ // atomic.AddUint64(&d.stats.DroppedTotal, uint64(t.Length()))
+ // t.Drop()
+ // torrentMap[t.InfoHash()] = struct{}{}
+ // continue
+ // }
+ // if err := sem.Acquire(d.ctx, 1); err != nil {
+ // return
+ // }
+ // t.AllowDataDownload()
+ // t.DownloadAll()
+ // torrentMap[t.InfoHash()] = struct{}{}
+ // d.wg.Add(1)
+ // go func(t *torrent.Torrent) {
+ // defer d.wg.Done()
+ // defer sem.Release(1)
+ // select {
+ // case <-d.ctx.Done():
+ // return
+ // case <-t.Complete.On():
+ // }
+ // atomic.AddUint64(&d.stats.DroppedCompleted, uint64(t.BytesCompleted()))
+ // atomic.AddUint64(&d.stats.DroppedTotal, uint64(t.Length()))
+ // t.Drop()
+ // }(t)
+ // }
+ //}
+ //atomic.StoreUint64(&d.stats.DroppedCompleted, 0)
+ //atomic.StoreUint64(&d.stats.DroppedTotal, 0)
+ //d.addTorrentFilesFromDisk(false)
+ for {
+ torrents := d.torrentClient.Torrents()
+ select {
+ case <-d.ctx.Done():
+ return
+ default:
+ }
+ for _, t := range torrents {
+ if t.Complete.Bool() {
+ continue
+ }
+ if err := sem.Acquire(d.ctx, 1); err != nil {
+ return
+ }
+ t.AllowDataDownload()
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-t.GotInfo():
+ }
+ t.DownloadAll()
+ d.wg.Add(1)
+ go func(t *torrent.Torrent) {
+ defer d.wg.Done()
+ defer sem.Release(1)
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-t.Complete.On():
+ }
+ }(t)
+ }
+
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-time.After(10 * time.Second):
+ }
+ }
+ }()
+
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+
+ statInterval := 20 * time.Second
+ statEvery := time.NewTicker(statInterval)
+ defer statEvery.Stop()
+
+ var m runtime.MemStats
+ justCompleted := true
+ for {
+ select {
+ case <-d.ctx.Done():
+ return d.ctx.Err()
+ case <-statEvery.C:
+ d.ReCalcStats(statInterval)
+
+ case <-logEvery.C:
+ if silent {
+ continue
+ }
+
+ stats := d.Stats()
+
+ dbg.ReadMemStats(&m)
+ if stats.Completed {
+ if justCompleted {
+ justCompleted = false
+ // force fsync of db. to not loose results of downloading on power-off
+ _ = d.db.Update(d.ctx, func(tx kv.RwTx) error { return nil })
+ }
+
+ d.logger.Info("[snapshots] Seeding",
+ "up", common.ByteCount(stats.UploadRate)+"/s",
+ "peers", stats.PeersUnique,
+ "conns", stats.ConnectionsTotal,
+ "files", stats.FilesTotal,
+ "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys),
+ )
+ continue
+ }
+
+ d.logger.Info("[snapshots] Downloading",
+ "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, common.ByteCount(stats.BytesCompleted), common.ByteCount(stats.BytesTotal)),
+ "download", common.ByteCount(stats.DownloadRate)+"/s",
+ "upload", common.ByteCount(stats.UploadRate)+"/s",
+ "peers", stats.PeersUnique,
+ "conns", stats.ConnectionsTotal,
+ "files", stats.FilesTotal,
+ "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys),
+ )
+
+ if stats.PeersUnique == 0 {
+ ips := d.TorrentClient().BadPeerIPs()
+ if len(ips) > 0 {
+ d.logger.Info("[snapshots] Stats", "banned", ips)
+ }
+ }
+ }
+ }
+}
+
+func (d *Downloader) SnapDir() string { return d.cfg.Dirs.Snap }
+
+func (d *Downloader) ReCalcStats(interval time.Duration) {
+ //Call this methods outside of `statsLock` critical section, because they have own locks with contention
+ torrents := d.torrentClient.Torrents()
+ connStats := d.torrentClient.ConnStats()
+ peers := make(map[torrent.PeerID]struct{}, 16)
+
+ d.statsLock.Lock()
+ defer d.statsLock.Unlock()
+ prevStats, stats := d.stats, d.stats
+
+ stats.Completed = true
+ stats.BytesDownload = uint64(connStats.BytesReadUsefulIntendedData.Int64())
+ stats.BytesUpload = uint64(connStats.BytesWrittenData.Int64())
+
+ stats.BytesTotal, stats.BytesCompleted, stats.ConnectionsTotal, stats.MetadataReady = atomic.LoadUint64(&stats.DroppedTotal), atomic.LoadUint64(&stats.DroppedCompleted), 0, 0
+
+ var zeroProgress []string
+ var noMetadata []string
+
+ for _, t := range torrents {
+ select {
+ case <-t.GotInfo():
+ stats.MetadataReady++
+ peersOfThisFile := t.PeerConns()
+ weebseedPeersOfThisFile := t.WebseedPeerConns()
+ for _, peer := range peersOfThisFile {
+ stats.ConnectionsTotal++
+ peers[peer.PeerID] = struct{}{}
+ }
+ stats.BytesCompleted += uint64(t.BytesCompleted())
+ stats.BytesTotal += uint64(t.Length())
+
+ progress := float32(float64(100) * (float64(t.BytesCompleted()) / float64(t.Length())))
+ if progress == 0 {
+ zeroProgress = append(zeroProgress, t.Name())
+ }
+
+ d.logger.Log(d.verbosity, "[snapshots] progress", "file", t.Name(), "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile))
+ isDiagEnabled := diagnostics.TypeOf(diagnostics.SegmentDownloadStatistics{}).Enabled()
+ if d.verbosity >= log.LvlInfo || isDiagEnabled {
+ webseedRates, websRates := getWebseedsRatesForlogs(weebseedPeersOfThisFile)
+ rates, peersRates := getPeersRatesForlogs(peersOfThisFile)
+ // more detailed statistic: download rate of each peer (for each file)
+ if !t.Complete.Bool() && progress != 0 {
+ d.logger.Info(fmt.Sprintf("[snapshots] webseed peers file=%s", t.Name()), webseedRates...)
+ d.logger.Info(fmt.Sprintf("[snapshots] bittorrent peers file=%s", t.Name()), rates...)
+ }
+
+ if isDiagEnabled {
+ diagnostics.Send(diagnostics.SegmentDownloadStatistics{
+ Name: t.Name(),
+ TotalBytes: uint64(t.Length()),
+ DownloadedBytes: uint64(t.BytesCompleted()),
+ WebseedsCount: len(weebseedPeersOfThisFile),
+ PeersCount: len(peersOfThisFile),
+ WebseedsRate: websRates,
+ PeersRate: peersRates,
+ })
+ }
+ }
+
+ default:
+ noMetadata = append(noMetadata, t.Name())
+ }
+
+ stats.Completed = stats.Completed && t.Complete.Bool()
+ }
+
+ if len(noMetadata) > 0 {
+ amount := len(noMetadata)
+ if len(noMetadata) > 5 {
+ noMetadata = append(noMetadata[:5], "...")
+ }
+ d.logger.Log(d.verbosity, "[snapshots] no metadata yet", "files", amount, "list", strings.Join(noMetadata, ","))
+ }
+ if len(zeroProgress) > 0 {
+ amount := len(zeroProgress)
+ if len(zeroProgress) > 5 {
+ zeroProgress = append(zeroProgress[:5], "...")
+ }
+ d.logger.Log(d.verbosity, "[snapshots] no progress yet", "files", amount, "list", strings.Join(zeroProgress, ","))
+ }
+
+ stats.DownloadRate = (stats.BytesDownload - prevStats.BytesDownload) / uint64(interval.Seconds())
+ stats.UploadRate = (stats.BytesUpload - prevStats.BytesUpload) / uint64(interval.Seconds())
+
+ if stats.BytesTotal == 0 {
+ stats.Progress = 0
+ } else {
+ stats.Progress = float32(float64(100) * (float64(stats.BytesCompleted) / float64(stats.BytesTotal)))
+ if int(stats.Progress) == 100 && !stats.Completed {
+ stats.Progress = 99.99
+ }
+ }
+ stats.PeersUnique = int32(len(peers))
+ stats.FilesTotal = int32(len(torrents))
+
+ d.stats = stats
+}
+
+func getWebseedsRatesForlogs(weebseedPeersOfThisFile []*torrent.Peer) ([]interface{}, uint64) {
+ totalRate := uint64(0)
+ averageRate := uint64(0)
+ webseedRates := make([]interface{}, 0, len(weebseedPeersOfThisFile)*2)
+ for _, peer := range weebseedPeersOfThisFile {
+ urlS := strings.Trim(strings.TrimPrefix(peer.String(), "webseed peer for "), "\"")
+ if urlObj, err := url.Parse(urlS); err == nil {
+ if shortUrl, err := url.JoinPath(urlObj.Host, urlObj.Path); err == nil {
+ rate := uint64(peer.DownloadRate())
+ totalRate += rate
+ webseedRates = append(webseedRates, shortUrl, fmt.Sprintf("%s/s", common.ByteCount(rate)))
+ }
+ }
+ }
+
+ lenght := uint64(len(weebseedPeersOfThisFile))
+ if lenght > 0 {
+ averageRate = totalRate / lenght
+ }
+
+ return webseedRates, averageRate
+}
+
+func getPeersRatesForlogs(peersOfThisFile []*torrent.PeerConn) ([]interface{}, uint64) {
+ totalRate := uint64(0)
+ averageRate := uint64(0)
+ rates := make([]interface{}, 0, len(peersOfThisFile)*2)
+
+ for _, peer := range peersOfThisFile {
+ dr := uint64(peer.DownloadRate())
+ rates = append(rates, peer.PeerClientName.Load(), fmt.Sprintf("%s/s", common.ByteCount(dr)))
+ totalRate += dr
+ }
+
+ lenght := uint64(len(peersOfThisFile))
+ if lenght > 0 {
+ averageRate = totalRate / uint64(len(peersOfThisFile))
+ }
+
+ return rates, averageRate
+}
+
+func VerifyFile(ctx context.Context, t *torrent.Torrent, completePieces *atomic.Uint64) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-t.GotInfo():
+ }
+
+ g := &errgroup.Group{}
+ for i := 0; i < t.NumPieces(); i++ {
+ i := i
+ g.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ t.Piece(i).VerifyData()
+ completePieces.Add(1)
+ return nil
+ })
+ //<-t.Complete.On()
+ }
+ return g.Wait()
+}
+
+func (d *Downloader) VerifyData(ctx context.Context, onlyFiles []string) error {
+ total := 0
+ _torrents := d.torrentClient.Torrents()
+ torrents := make([]*torrent.Torrent, 0, len(_torrents))
+ for _, t := range torrents {
+ select {
+ case <-t.GotInfo():
+ if len(onlyFiles) > 0 && !slices.Contains(onlyFiles, t.Name()) {
+ continue
+ }
+ torrents = append(torrents, t)
+ total += t.NumPieces()
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ completedPieces := &atomic.Uint64{}
+
+ {
+ d.logger.Info("[snapshots] Verify start")
+ defer d.logger.Info("[snapshots] Verify done")
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+ d.wg.Add(1)
+ go func() {
+ defer d.wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-logEvery.C:
+ d.logger.Info("[snapshots] Verify", "progress", fmt.Sprintf("%.2f%%", 100*float64(completedPieces.Load())/float64(total)))
+ }
+ }
+ }()
+ }
+
+ g, ctx := errgroup.WithContext(ctx)
+ // torrent lib internally limiting amount of hashers per file
+ // set limit here just to make load predictable, not to control Disk/CPU consumption
+ g.SetLimit(runtime.GOMAXPROCS(-1) * 4)
+
+ for _, t := range torrents {
+ t := t
+ g.Go(func() error {
+ return VerifyFile(ctx, t, completedPieces)
+ })
+ }
+
+ if err := g.Wait(); err != nil {
+ return err
+ }
+ // force fsync of db. to not loose results of validation on power-off
+ return d.db.Update(context.Background(), func(tx kv.RwTx) error { return nil })
+}
+
+// AddNewSeedableFile decides what we do depending on wether we have the .seg file or the .torrent file
+// have .torrent no .seg => get .seg file from .torrent
+// have .seg no .torrent => get .torrent from .seg
+func (d *Downloader) AddNewSeedableFile(ctx context.Context, name string) error {
+ ff, ok := snaptype.ParseFileName("", name)
+ if ok {
+ if !ff.Seedable() {
+ return nil
+ }
+ } else {
+ if !e3seedable(name) {
+ return nil
+ }
+ }
+
+ // if we don't have the torrent file we build it if we have the .seg file
+ torrentFilePath, err := BuildTorrentIfNeed(ctx, name, d.SnapDir())
+ if err != nil {
+ return fmt.Errorf("AddNewSeedableFile: %w", err)
+ }
+ ts, err := loadTorrent(torrentFilePath)
+ if err != nil {
+ return fmt.Errorf("AddNewSeedableFile: %w", err)
+ }
+ err = addTorrentFile(ctx, ts, d.torrentClient, d.webseeds)
+ if err != nil {
+ return fmt.Errorf("addTorrentFile: %w", err)
+ }
+ return nil
+}
+
+func (d *Downloader) alreadyHaveThisName(name string) bool {
+ for _, t := range d.torrentClient.Torrents() {
+ select {
+ case <-t.GotInfo():
+ if t.Name() == name {
+ return true
+ }
+ default:
+ }
+ }
+ return false
+}
+
+func (d *Downloader) AddMagnetLink(ctx context.Context, infoHash metainfo.Hash, name string) error {
+ // Paranoic Mode on: if same file changed infoHash - skip it
+ // Example:
+ // - Erigon generated file X with hash H1. User upgraded Erigon. New version has preverified file X with hash H2. Must ignore H2 (don't send to Downloader)
+ if d.alreadyHaveThisName(name) {
+ return nil
+ }
+ if d.newDownloadsAreProhibited() {
+ return nil
+ }
+
+ mi := &metainfo.MetaInfo{AnnounceList: Trackers}
+ magnet := mi.Magnet(&infoHash, &metainfo.Info{Name: name})
+ spec, err := torrent.TorrentSpecFromMagnetUri(magnet.String())
+ if err != nil {
+ return err
+ }
+ spec.DisallowDataDownload = true
+ t, _, err := d.torrentClient.AddTorrentSpec(spec)
+ if err != nil {
+ return err
+ }
+ d.wg.Add(1)
+ go func(t *torrent.Torrent) {
+ defer d.wg.Done()
+ select {
+ case <-ctx.Done():
+ return
+ case <-t.GotInfo():
+ }
+
+ mi := t.Metainfo()
+ if err := CreateTorrentFileIfNotExists(d.SnapDir(), t.Info(), &mi); err != nil {
+ d.logger.Warn("[snapshots] create torrent file", "err", err)
+ return
+ }
+ urls, ok := d.webseeds.ByFileName(t.Name())
+ if ok {
+ t.AddWebSeeds(urls)
+ }
+ }(t)
+ //log.Debug("[downloader] downloaded both seg and torrent files", "hash", infoHash)
+ return nil
+}
+
+func seedableFiles(dirs datadir.Dirs) ([]string, error) {
+ files, err := seedableSegmentFiles(dirs.Snap)
+ if err != nil {
+ return nil, fmt.Errorf("seedableSegmentFiles: %w", err)
+ }
+ l, err := seedableSnapshotsBySubDir(dirs.Snap, "history")
+ if err != nil {
+ return nil, err
+ }
+ l2, err := seedableSnapshotsBySubDir(dirs.Snap, "warm")
+ if err != nil {
+ return nil, err
+ }
+ files = append(append(files, l...), l2...)
+ return files, nil
+}
+func (d *Downloader) addTorrentFilesFromDisk(quiet bool) error {
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+
+ files, err := AllTorrentSpecs(d.cfg.Dirs)
+ if err != nil {
+ return err
+ }
+ for i, ts := range files {
+ err := addTorrentFile(d.ctx, ts, d.torrentClient, d.webseeds)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-logEvery.C:
+ if !quiet {
+ log.Info("[snapshots] Adding .torrent files", "progress", fmt.Sprintf("%d/%d", i, len(files)))
+ }
+ default:
+ }
+ }
+ return nil
+}
+func (d *Downloader) BuildTorrentFilesIfNeed(ctx context.Context) error {
+ return BuildTorrentFilesIfNeed(ctx, d.cfg.Dirs)
+}
+func (d *Downloader) Stats() AggStats {
+ d.statsLock.RLock()
+ defer d.statsLock.RUnlock()
+ return d.stats
+}
+
+func (d *Downloader) Close() {
+ d.stopMainLoop()
+ d.wg.Wait()
+ d.torrentClient.Close()
+ if err := d.folder.Close(); err != nil {
+ d.logger.Warn("[snapshots] folder.close", "err", err)
+ }
+ if err := d.pieceCompletionDB.Close(); err != nil {
+ d.logger.Warn("[snapshots] pieceCompletionDB.close", "err", err)
+ }
+ d.db.Close()
+}
+
+func (d *Downloader) PeerID() []byte {
+ peerID := d.torrentClient.PeerID()
+ return peerID[:]
+}
+
+func (d *Downloader) StopSeeding(hash metainfo.Hash) error {
+ t, ok := d.torrentClient.Torrent(hash)
+ if !ok {
+ return nil
+ }
+ ch := t.Closed()
+ t.Drop()
+ <-ch
+ return nil
+}
+
+func (d *Downloader) TorrentClient() *torrent.Client { return d.torrentClient }
+
+func openClient(ctx context.Context, dbDir, snapDir string, cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletion, m storage.ClientImplCloser, torrentClient *torrent.Client, err error) {
+ db, err = mdbx.NewMDBX(log.New()).
+ Label(kv.DownloaderDB).
+ WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }).
+ GrowthStep(16 * datasize.MB).
+ MapSize(16 * datasize.GB).
+ PageSize(uint64(8 * datasize.KB)).
+ Path(dbDir).
+ Open(ctx)
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("torrentcfg.openClient: %w", err)
+ }
+ c, err = NewMdbxPieceCompletion(db)
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("torrentcfg.NewMdbxPieceCompletion: %w", err)
+ }
+ m = storage.NewMMapWithCompletion(snapDir, c)
+ cfg.DefaultStorage = m
+
+ torrentClient, err = torrent.NewClient(cfg)
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("torrent.NewClient: %w", err)
+ }
+
+ return db, c, m, torrentClient, nil
+}
diff --git a/erigon-lib/downloader/downloader_grpc_server.go b/erigon-lib/downloader/downloader_grpc_server.go
new file mode 100644
index 00000000000..2787fbc280f
--- /dev/null
+++ b/erigon-lib/downloader/downloader_grpc_server.go
@@ -0,0 +1,145 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloader
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
+ prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "github.com/ledgerwatch/log/v3"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+var (
+ _ proto_downloader.DownloaderServer = &GrpcServer{}
+)
+
+func NewGrpcServer(d *Downloader) (*GrpcServer, error) {
+ return &GrpcServer{d: d}, nil
+}
+
+type GrpcServer struct {
+ proto_downloader.UnimplementedDownloaderServer
+ d *Downloader
+}
+
+func (s *GrpcServer) ProhibitNewDownloads(context.Context, *proto_downloader.ProhibitNewDownloadsRequest) (*emptypb.Empty, error) {
+ if err := s.d.prohibitNewDownloads(); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+// Erigon "download once" - means restart/upgrade/downgrade will not download files (and will be fast)
+// After "download once" - Erigon will produce and seed new files
+// Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts)
+func (s *GrpcServer) Add(ctx context.Context, request *proto_downloader.AddRequest) (*emptypb.Empty, error) {
+ defer s.d.ReCalcStats(10 * time.Second) // immediately call ReCalc to set stat.Complete flag
+
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+
+ for i, it := range request.Items {
+ if it.Path == "" {
+ return nil, fmt.Errorf("field 'path' is required")
+ }
+
+ select {
+ case <-logEvery.C:
+ log.Info("[snapshots] initializing", "files", fmt.Sprintf("%d/%d", i, len(request.Items)))
+ default:
+ }
+
+ if it.TorrentHash == nil {
+ // if we don't have the torrent hash then we seed a new snapshot
+ if err := s.d.AddNewSeedableFile(ctx, it.Path); err != nil {
+ return nil, err
+ }
+ continue
+ }
+
+ if err := s.d.AddMagnetLink(ctx, Proto2InfoHash(it.TorrentHash), it.Path); err != nil {
+ return nil, err
+ }
+ }
+ return &emptypb.Empty{}, nil
+}
+
+// Delete - stop seeding, remove file, remove .torrent
+func (s *GrpcServer) Delete(ctx context.Context, request *proto_downloader.DeleteRequest) (*emptypb.Empty, error) {
+ defer s.d.ReCalcStats(10 * time.Second) // immediately call ReCalc to set stat.Complete flag
+ torrents := s.d.torrentClient.Torrents()
+ for _, name := range request.Paths {
+ if name == "" {
+ return nil, fmt.Errorf("field 'path' is required")
+ }
+ for _, t := range torrents {
+ select {
+ case <-t.GotInfo():
+ continue
+ default:
+ }
+ if t.Name() == name {
+ t.Drop()
+ break
+ }
+ }
+
+ fPath := filepath.Join(s.d.SnapDir(), name)
+ _ = os.Remove(fPath)
+ _ = os.Remove(fPath + ".torrent")
+ }
+ return &emptypb.Empty{}, nil
+}
+
+func (s *GrpcServer) Verify(ctx context.Context, request *proto_downloader.VerifyRequest) (*emptypb.Empty, error) {
+ err := s.d.VerifyData(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &emptypb.Empty{}, nil
+}
+
+func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsRequest) (*proto_downloader.StatsReply, error) {
+ stats := s.d.Stats()
+ return &proto_downloader.StatsReply{
+ MetadataReady: stats.MetadataReady,
+ FilesTotal: stats.FilesTotal,
+
+ Completed: stats.Completed,
+ Progress: stats.Progress,
+
+ PeersUnique: stats.PeersUnique,
+ ConnectionsTotal: stats.ConnectionsTotal,
+
+ BytesCompleted: stats.BytesCompleted,
+ BytesTotal: stats.BytesTotal,
+ UploadRate: stats.UploadRate,
+ DownloadRate: stats.DownloadRate,
+ }, nil
+}
+
+func Proto2InfoHash(in *prototypes.H160) metainfo.Hash {
+ return gointerfaces.ConvertH160toAddress(in)
+}
diff --git a/erigon-lib/downloader/downloader_test.go b/erigon-lib/downloader/downloader_test.go
new file mode 100644
index 00000000000..f94e3fa0d4b
--- /dev/null
+++ b/erigon-lib/downloader/downloader_test.go
@@ -0,0 +1,66 @@
+package downloader
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ lg "github.com/anacrolix/log"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ downloadercfg2 "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestChangeInfoHashOfSameFile(t *testing.T) {
+ require := require.New(t)
+ dirs := datadir.New(t.TempDir())
+ cfg, err := downloadercfg2.New(dirs, "", lg.Info, 0, 0, 0, 0, 0, nil, nil, "testnet")
+ require.NoError(err)
+ d, err := New(context.Background(), cfg, dirs, log.New(), log.LvlInfo, true)
+ require.NoError(err)
+ defer d.Close()
+ err = d.AddMagnetLink(d.ctx, snaptype.Hex2InfoHash("aa"), "a.seg")
+ require.NoError(err)
+ tt, ok := d.torrentClient.Torrent(snaptype.Hex2InfoHash("aa"))
+ require.True(ok)
+ require.Equal("a.seg", tt.Name())
+
+ // adding same file twice is ok
+ err = d.AddMagnetLink(d.ctx, snaptype.Hex2InfoHash("aa"), "a.seg")
+ require.NoError(err)
+
+ // adding same file with another infoHash - is ok, must be skipped
+ // use-cases:
+ // - release of re-compressed version of same file,
+ // - ErigonV1.24 produced file X, then ErigonV1.25 released with new compression algorithm and produced X with anouther infoHash.
+ // ErigonV1.24 node must keep using existing file instead of downloading new one.
+ err = d.AddMagnetLink(d.ctx, snaptype.Hex2InfoHash("bb"), "a.seg")
+ require.NoError(err)
+ tt, ok = d.torrentClient.Torrent(snaptype.Hex2InfoHash("aa"))
+ require.True(ok)
+ require.Equal("a.seg", tt.Name())
+}
+
+func TestNoEscape(t *testing.T) {
+ require := require.New(t)
+ dirs := datadir.New(t.TempDir())
+ ctx := context.Background()
+
+ // allow adding files only if they are inside snapshots dir
+ _, err := BuildTorrentIfNeed(ctx, "a.seg", dirs.Snap)
+ require.NoError(err)
+ _, err = BuildTorrentIfNeed(ctx, "b/a.seg", dirs.Snap)
+ require.NoError(err)
+ _, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "a.seg"), dirs.Snap)
+ require.NoError(err)
+ _, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Snap, "b", "a.seg"), dirs.Snap)
+ require.NoError(err)
+
+ // reject escaping snapshots dir
+ _, err = BuildTorrentIfNeed(ctx, filepath.Join(dirs.Chaindata, "b", "a.seg"), dirs.Snap)
+ require.Error(err)
+ _, err = BuildTorrentIfNeed(ctx, "./../a.seg", dirs.Snap)
+ require.Error(err)
+}
diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go
new file mode 100644
index 00000000000..335429714c3
--- /dev/null
+++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go
@@ -0,0 +1,212 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloadercfg
+
+import (
+ "io/ioutil"
+ "net"
+ "net/url"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/anacrolix/dht/v2"
+ lg "github.com/anacrolix/log"
+ "github.com/anacrolix/torrent"
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/chain/snapcfg"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/time/rate"
+)
+
+// DefaultPieceSize - Erigon serves many big files, bigger pieces will reduce
+// amount of network announcements, but can't go over 2Mb
+// see https://wiki.theory.org/BitTorrentSpecification#Metainfo_File_Structure
+const DefaultPieceSize = 2 * 1024 * 1024
+
+// DefaultNetworkChunkSize - how much data request per 1 network call to peer.
+// default: 16Kb
+const DefaultNetworkChunkSize = 256 * 1024
+
+type Cfg struct {
+ ClientConfig *torrent.ClientConfig
+ DownloadSlots int
+
+ WebSeedUrls []*url.URL
+ WebSeedFiles []string
+ WebSeedS3Tokens []string
+ ExpectedTorrentFilesHashes snapcfg.Preverified
+ DownloadTorrentFilesFromWebseed bool
+ ChainName string
+
+ Dirs datadir.Dirs
+}
+
+func Default() *torrent.ClientConfig {
+ torrentConfig := torrent.NewDefaultClientConfig()
+ // better don't increase because erigon periodically producing "new seedable files" - and adding them to downloader.
+ // it must not impact chain tip sync - so, limit resources to minimum by default.
+ // but when downloader is started as a separated process - rise it to max
+ //torrentConfig.PieceHashersPerTorrent = cmp.Max(1, runtime.NumCPU()-1)
+
+ torrentConfig.MinDialTimeout = 6 * time.Second //default: 3s
+ torrentConfig.HandshakesTimeout = 8 * time.Second //default: 4s
+
+ // enable dht
+ torrentConfig.NoDHT = true
+ //torrentConfig.DisableTrackers = true
+ //torrentConfig.DisableWebtorrent = true
+
+ // Reduce defaults - to avoid peers with very bad geography
+ //torrentConfig.MinDialTimeout = 1 * time.Second // default: 3sec
+ //torrentConfig.NominalDialTimeout = 10 * time.Second // default: 20sec
+ //torrentConfig.HandshakesTimeout = 1 * time.Second // default: 4sec
+
+ // see: https://en.wikipedia.org/wiki/TCP_half-open
+ //torrentConfig.TotalHalfOpenConns = 100 // default: 100
+ //torrentConfig.HalfOpenConnsPerTorrent = 25 // default: 25
+ //torrentConfig.TorrentPeersHighWater = 500 // default: 500
+ //torrentConfig.TorrentPeersLowWater = 50 // default: 50
+
+ torrentConfig.Seed = true
+ torrentConfig.UpnpID = torrentConfig.UpnpID + "leecher"
+
+ return torrentConfig
+}
+
+func New(dirs datadir.Dirs, version string, verbosity lg.Level, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int, staticPeers, webseeds []string, chainName string) (*Cfg, error) {
+ torrentConfig := Default()
+ torrentConfig.DataDir = dirs.Snap // `DataDir` of torrent-client-lib is different from Erigon's `DataDir`. Just same naming.
+
+ torrentConfig.ExtendedHandshakeClientVersion = version
+
+ // We would-like to reduce amount of goroutines in Erigon, so reducing next params
+ torrentConfig.EstablishedConnsPerTorrent = connsPerFile // default: 50
+
+ torrentConfig.ListenPort = port
+ // check if ipv6 is enabled
+ torrentConfig.DisableIPv6 = !getIpv6Enabled()
+
+ torrentConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(uploadRate.Bytes()), DefaultNetworkChunkSize) // default: unlimited
+ if downloadRate.Bytes() < 500_000_000 {
+ torrentConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(downloadRate.Bytes()), DefaultNetworkChunkSize) // default: unlimited
+ }
+
+ // debug
+ //torrentConfig.Debug = true
+ torrentConfig.Logger = torrentConfig.Logger.WithFilterLevel(verbosity)
+ torrentConfig.Logger.SetHandlers(adapterHandler{})
+
+ if len(staticPeers) > 0 {
+ torrentConfig.NoDHT = false
+ //defaultNodes := torrentConfig.DhtStartingNodes
+ torrentConfig.DhtStartingNodes = func(network string) dht.StartingNodesGetter {
+ return func() ([]dht.Addr, error) {
+ addrs, err := dht.GlobalBootstrapAddrs(network)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, seed := range staticPeers {
+ if network == "udp" {
+ var addr *net.UDPAddr
+ addr, err := net.ResolveUDPAddr(network, seed+":80")
+ if err != nil {
+ log.Warn("[downloader] Cannot UDP resolve address", "network", network, "addr", seed)
+ continue
+ }
+ addrs = append(addrs, dht.NewAddr(addr))
+ }
+ if network == "tcp" {
+ var addr *net.TCPAddr
+ addr, err := net.ResolveTCPAddr(network, seed+":80")
+ if err != nil {
+ log.Warn("[downloader] Cannot TCP resolve address", "network", network, "addr", seed)
+ continue
+ }
+ addrs = append(addrs, dht.NewAddr(addr))
+ }
+ }
+ return addrs, nil
+ }
+ }
+ //staticPeers
+ }
+
+ webseedUrlsOrFiles := webseeds
+ webseedHttpProviders := make([]*url.URL, 0, len(webseedUrlsOrFiles))
+ webseedFileProviders := make([]string, 0, len(webseedUrlsOrFiles))
+ webseedS3Providers := make([]string, 0, len(webseedUrlsOrFiles))
+ for _, webseed := range webseedUrlsOrFiles {
+ if !strings.HasPrefix(webseed, "v") { // has marker v1/v2/...
+ uri, err := url.ParseRequestURI(webseed)
+ if err != nil {
+ if strings.HasSuffix(webseed, ".toml") && dir.FileExist(webseed) {
+ webseedFileProviders = append(webseedFileProviders, webseed)
+ }
+ continue
+ }
+ webseedHttpProviders = append(webseedHttpProviders, uri)
+ continue
+ }
+
+ if strings.HasPrefix(webseed, "v1:") {
+ withoutVerisonPrefix := webseed[3:]
+ if !strings.HasPrefix(withoutVerisonPrefix, "https:") {
+ webseedS3Providers = append(webseedS3Providers, webseed)
+ continue
+ }
+ uri, err := url.ParseRequestURI(withoutVerisonPrefix)
+ if err != nil {
+ log.Warn("[webseed] can't parse url", "err", err, "url", withoutVerisonPrefix)
+ continue
+ }
+ webseedHttpProviders = append(webseedHttpProviders, uri)
+ } else {
+ continue
+ }
+ }
+ localCfgFile := filepath.Join(dirs.DataDir, "webseed.toml") // datadir/webseed.toml allowed
+ if dir.FileExist(localCfgFile) {
+ webseedFileProviders = append(webseedFileProviders, localCfgFile)
+ }
+ //TODO: if don't pass "downloaded files list here" (which we store in db) - synced erigon will download new .torrent files. And erigon can't work with "unfinished" files.
+ snapCfg := snapcfg.KnownCfg(chainName)
+ return &Cfg{Dirs: dirs, ChainName: chainName,
+ ClientConfig: torrentConfig, DownloadSlots: downloadSlots,
+ WebSeedUrls: webseedHttpProviders, WebSeedFiles: webseedFileProviders, WebSeedS3Tokens: webseedS3Providers,
+ DownloadTorrentFilesFromWebseed: false, ExpectedTorrentFilesHashes: snapCfg.Preverified,
+ }, nil
+}
+
+func getIpv6Enabled() bool {
+ if runtime.GOOS == "linux" {
+ file, err := ioutil.ReadFile("/sys/module/ipv6/parameters/disable")
+ if err != nil {
+ log.Warn("could not read /sys/module/ipv6/parameters/disable for ipv6 detection")
+ return false
+ }
+ fileContent := strings.TrimSpace(string(file))
+ return fileContent != "0"
+ }
+
+ // TODO hotfix: for platforms other than linux disable ipv6
+ return false
+}
diff --git a/erigon-lib/downloader/downloadercfg/logger.go b/erigon-lib/downloader/downloadercfg/logger.go
new file mode 100644
index 00000000000..88eb5dcabfa
--- /dev/null
+++ b/erigon-lib/downloader/downloadercfg/logger.go
@@ -0,0 +1,138 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloadercfg
+
+import (
+ "fmt"
+ "strings"
+
+ lg "github.com/anacrolix/log"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func init() {
+ lg.Default.Handlers = []lg.Handler{adapterHandler{}}
+}
+
+func Int2LogLevel(level int) (lvl lg.Level, dbg bool, err error) {
+ switch level {
+ case 0:
+ lvl = lg.Critical
+ case 1:
+ lvl = lg.Error
+ case 2:
+ lvl = lg.Warning
+ case 3:
+ lvl = lg.Info
+ case 4:
+ lvl = lg.Debug
+ case 5:
+ lvl = lg.Debug
+ dbg = true
+ default:
+ return lvl, dbg, fmt.Errorf("invalid level set, expected a number between 0-5 but got: %d", level)
+ }
+ return lvl, dbg, nil
+}
+
+type noopHandler struct{}
+
+func (b noopHandler) Handle(r lg.Record) {
+}
+
+type adapterHandler struct{}
+
+func (b adapterHandler) Handle(r lg.Record) {
+ lvl := r.Level
+
+ switch lvl {
+ case lg.Debug:
+ str := r.String()
+ skip := strings.Contains(str, "completion change") || strings.Contains(str, "hashed piece") ||
+ strings.Contains(str, "set torrent=") ||
+ strings.Contains(str, "all initial dials failed") ||
+ strings.Contains(str, "local and remote peer ids are the same") ||
+ strings.Contains(str, "connection at") || strings.Contains(str, "don't want conns right now") ||
+ strings.Contains(str, "is mutually complete") ||
+ strings.Contains(str, "sending PEX message") || strings.Contains(str, "received pex message") ||
+ strings.Contains(str, "announce to") || strings.Contains(str, "announcing to") ||
+ strings.Contains(str, "EOF") || strings.Contains(str, "closed") || strings.Contains(str, "connection reset by peer") || strings.Contains(str, "use of closed network connection") || strings.Contains(str, "broken pipe") ||
+ strings.Contains(str, "inited with remoteAddr")
+ if skip {
+ log.Trace(str, "lvl", lvl.LogString())
+ break
+ }
+ log.Debug(str)
+ case lg.Info:
+ str := r.String()
+ skip := strings.Contains(str, "EOF")
+ //strings.Contains(str, "banning ip ") ||
+ //strings.Contains(str, "spurious timer") { // suppress useless errors
+ if skip {
+ log.Trace(str, "lvl", lvl.LogString())
+ break
+ }
+ log.Info(str)
+ case lg.Warning:
+ str := r.String()
+ skip := strings.Contains(str, "EOF") ||
+ strings.Contains(str, "requested chunk too long") ||
+ strings.Contains(str, "banned ip") ||
+ strings.Contains(str, "banning webseed") ||
+ strings.Contains(str, "TrackerClient closed") ||
+ strings.Contains(str, "being sole dirtier of piece") ||
+ strings.Contains(str, "webrtc conn for unloaded torrent") ||
+ strings.Contains(str, "could not find offer for id") ||
+ strings.Contains(str, "received invalid reject") ||
+ strings.Contains(str, "reservation cancelled")
+
+ if skip {
+ log.Trace(str)
+ break
+ }
+ log.Warn(str)
+ case lg.Error:
+ str := r.String()
+ skip := strings.Contains(str, "EOF") ||
+ strings.Contains(str, "short write") ||
+ strings.Contains(str, "disabling data download")
+ if skip {
+ log.Trace(str, "lvl", lvl.LogString())
+ break
+ }
+ log.Error(str)
+ case lg.Critical:
+ str := r.String()
+ skip := strings.Contains(str, "EOF") ||
+ strings.Contains(str, "torrent closed") ||
+ strings.Contains(str, "don't want conns")
+ if skip {
+ log.Trace(str, "lvl", lvl.LogString())
+ break
+ }
+ log.Error(str)
+ default:
+ str := r.String()
+ skip := strings.Contains(str, "EOF") || strings.Contains(str, "unhandled response status") ||
+ strings.Contains(str, "error doing webseed request")
+ if skip {
+ log.Trace(str, "lvl", lvl.LogString())
+ break
+ }
+ log.Info("[downloader] "+r.String(), "torrent_log_type", "unknown", "or", lvl.LogString())
+ }
+}
diff --git a/erigon-lib/downloader/downloadercfg/logger_libutp.go b/erigon-lib/downloader/downloadercfg/logger_libutp.go
new file mode 100644
index 00000000000..8e0f1a370f2
--- /dev/null
+++ b/erigon-lib/downloader/downloadercfg/logger_libutp.go
@@ -0,0 +1,27 @@
+//go:build !disable_libutp
+
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package downloadercfg
+
+import (
+ utp "github.com/anacrolix/go-libutp"
+ lg "github.com/anacrolix/log"
+)
+
+func init() {
+ utp.Logger.Handlers = []lg.Handler{noopHandler{}}
+}
diff --git a/erigon-lib/downloader/downloadergrpc/client.go b/erigon-lib/downloader/downloadergrpc/client.go
new file mode 100644
index 00000000000..c5a85230f78
--- /dev/null
+++ b/erigon-lib/downloader/downloadergrpc/client.go
@@ -0,0 +1,82 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloadergrpc
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
+ prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+)
+
+func NewClient(ctx context.Context, downloaderAddr string) (proto_downloader.DownloaderClient, error) {
+ // creating grpc client connection
+ var dialOpts []grpc.DialOption
+
+ backoffCfg := backoff.DefaultConfig
+ backoffCfg.BaseDelay = 500 * time.Millisecond
+ backoffCfg.MaxDelay = 10 * time.Second
+ dialOpts = []grpc.DialOption{
+ grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoffCfg, MinConnectTimeout: 10 * time.Minute}),
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(16 * datasize.MB))),
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{}),
+ }
+
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ conn, err := grpc.DialContext(ctx, downloaderAddr, dialOpts...)
+ if err != nil {
+ return nil, fmt.Errorf("creating client connection to sentry P2P: %w", err)
+ }
+ return proto_downloader.NewDownloaderClient(conn), nil
+}
+
+func InfoHashes2Proto(in []metainfo.Hash) []*prototypes.H160 {
+ infoHashes := make([]*prototypes.H160, len(in))
+ i := 0
+ for _, h := range in {
+ infoHashes[i] = gointerfaces.ConvertAddressToH160(h)
+ i++
+ }
+ return infoHashes
+}
+
+func Strings2Proto(in []string) []*prototypes.H160 {
+ infoHashes := make([]*prototypes.H160, len(in))
+ i := 0
+ for _, h := range in {
+ infoHashes[i] = String2Proto(h)
+ i++
+ }
+ return infoHashes
+}
+
+func String2Proto(in string) *prototypes.H160 {
+ var infoHash [20]byte
+ inHex, _ := hex.DecodeString(in)
+ copy(infoHash[:], inHex)
+ return gointerfaces.ConvertAddressToH160(infoHash)
+}
diff --git a/erigon-lib/downloader/mdbx_piece_completion.go b/erigon-lib/downloader/mdbx_piece_completion.go
new file mode 100644
index 00000000000..6f209cd19c9
--- /dev/null
+++ b/erigon-lib/downloader/mdbx_piece_completion.go
@@ -0,0 +1,117 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloader
+
+import (
+ "context"
+ "encoding/binary"
+
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/anacrolix/torrent/storage"
+ "github.com/anacrolix/torrent/types/infohash"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+const (
+ complete = "c"
+ incomplete = "i"
+)
+
+type mdbxPieceCompletion struct {
+ db kv.RwDB
+}
+
+var _ storage.PieceCompletion = (*mdbxPieceCompletion)(nil)
+
+func NewMdbxPieceCompletion(db kv.RwDB) (ret storage.PieceCompletion, err error) {
+ ret = &mdbxPieceCompletion{db: db}
+ return
+}
+
+func (m mdbxPieceCompletion) Get(pk metainfo.PieceKey) (cn storage.Completion, err error) {
+ err = m.db.View(context.Background(), func(tx kv.Tx) error {
+ var key [infohash.Size + 4]byte
+ copy(key[:], pk.InfoHash[:])
+ binary.BigEndian.PutUint32(key[infohash.Size:], uint32(pk.Index))
+ cn.Ok = true
+ v, err := tx.GetOne(kv.BittorrentCompletion, key[:])
+ if err != nil {
+ return err
+ }
+ switch string(v) {
+ case complete:
+ cn.Complete = true
+ case incomplete:
+ cn.Complete = false
+ default:
+ cn.Ok = false
+ }
+ return nil
+ })
+ return
+}
+
+func (m mdbxPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
+ if c, err := m.Get(pk); err == nil && c.Ok && c.Complete == b {
+ return nil
+ }
+
+ var tx kv.RwTx
+ var err error
+ // On power-off recent "no-sync" txs may be lost.
+ // It will cause 2 cases of in-consistency between files on disk and db metadata:
+ // - Good piece on disk and recent "complete" db marker lost. Self-Heal by re-download.
+ // - Bad piece on disk and recent "incomplete" db marker lost. No Self-Heal. Means: can't afford loosing recent "incomplete" markers.
+ // FYI: Fsync of torrent pieces happenng before storing db markers: https://github.com/anacrolix/torrent/blob/master/torrent.go#L2026
+ //
+ // Mainnet stats:
+ // call amount 2 minutes complete=100K vs incomple=1K
+ // 1K fsyncs/2minutes it's quite expensive, but even on cloud (high latency) drive it allow download 100mb/s
+ // and Erigon doesn't do anything when downloading snapshots
+ if b {
+ tx, err = m.db.BeginRwNosync(context.Background())
+ if err != nil {
+ return err
+ }
+ } else {
+ tx, err = m.db.BeginRw(context.Background())
+ if err != nil {
+ return err
+ }
+ }
+ defer tx.Rollback()
+
+ var key [infohash.Size + 4]byte
+ copy(key[:], pk.InfoHash[:])
+ binary.BigEndian.PutUint32(key[infohash.Size:], uint32(pk.Index))
+
+ v := []byte(incomplete)
+ if b {
+ v = []byte(complete)
+ }
+ err = tx.Put(kv.BittorrentCompletion, key[:], v)
+ if err != nil {
+ return err
+ }
+
+ return tx.Commit()
+}
+
+func (m *mdbxPieceCompletion) Close() error {
+ m.db.Close()
+ return nil
+}
diff --git a/erigon-lib/downloader/mdbx_piece_completion_test.go b/erigon-lib/downloader/mdbx_piece_completion_test.go
new file mode 100644
index 00000000000..c2035501ad4
--- /dev/null
+++ b/erigon-lib/downloader/mdbx_piece_completion_test.go
@@ -0,0 +1,53 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloader
+
+import (
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/anacrolix/torrent/storage"
+)
+
+func TestMdbxPieceCompletion(t *testing.T) {
+ db := memdb.NewTestDownloaderDB(t)
+ pc, err := NewMdbxPieceCompletion(db)
+ require.NoError(t, err)
+ defer pc.Close()
+
+ pk := metainfo.PieceKey{}
+
+ b, err := pc.Get(pk)
+ require.NoError(t, err)
+ assert.False(t, b.Ok)
+
+ require.NoError(t, pc.Set(pk, false))
+
+ b, err = pc.Get(pk)
+ require.NoError(t, err)
+ assert.Equal(t, storage.Completion{Complete: false, Ok: true}, b)
+
+ require.NoError(t, pc.Set(pk, true))
+
+ b, err = pc.Get(pk)
+ require.NoError(t, err)
+ assert.Equal(t, storage.Completion{Complete: true, Ok: true}, b)
+}
diff --git a/erigon-lib/downloader/path.go b/erigon-lib/downloader/path.go
new file mode 100644
index 00000000000..06ba51865b4
--- /dev/null
+++ b/erigon-lib/downloader/path.go
@@ -0,0 +1,272 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filepath implements utility routines for manipulating filename paths
+// in a way compatible with the target operating system-defined file paths.
+//
+// The filepath package uses either forward slashes or backslashes,
+// depending on the operating system. To process paths such as URLs
+// that always use forward slashes regardless of the operating
+// system, see the path package.
+package downloader
+
+import (
+ "io/fs"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A lazybuf is a lazily constructed path buffer.
+// It supports append, reading previously appended bytes,
+// and retrieving the final string. It does not allocate a buffer
+// to hold the output until that output diverges from s.
+type lazybuf struct {
+ path string
+ buf []byte
+ w int
+ volAndPath string
+ volLen int
+}
+
+func (b *lazybuf) index(i int) byte {
+ if b.buf != nil {
+ return b.buf[i]
+ }
+ return b.path[i]
+}
+
+func (b *lazybuf) append(c byte) {
+ if b.buf == nil {
+ if b.w < len(b.path) && b.path[b.w] == c {
+ b.w++
+ return
+ }
+ b.buf = make([]byte, len(b.path))
+ copy(b.buf, b.path[:b.w])
+ }
+ b.buf[b.w] = c
+ b.w++
+}
+
+func (b *lazybuf) string() string {
+ if b.buf == nil {
+ return b.volAndPath[:b.volLen+b.w]
+ }
+ return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
+}
+
+const (
+ Separator = os.PathSeparator
+ ListSeparator = os.PathListSeparator
+)
+
+// Clean returns the shortest path name equivalent to path
+// by purely lexical processing. It applies the following rules
+// iteratively until no further processing can be done:
+//
+// 1. Replace multiple Separator elements with a single one.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path,
+// assuming Separator is '/'.
+//
+// The returned path ends in a slash only if it represents a root directory,
+// such as "/" on Unix or `C:\` on Windows.
+//
+// Finally, any occurrences of slash are replaced by Separator.
+//
+// If the result of this process is an empty string, Clean
+// returns the string ".".
+//
+// See also Rob Pike, “Lexical File Names in Plan 9 or
+// Getting Dot-Dot Right,”
+// https://9p.io/sys/doc/lexnames.html
+func Clean(path string) string {
+ originalPath := path
+ volLen := volumeNameLen(path)
+ path = path[volLen:]
+ if path == "" {
+ if volLen > 1 && os.IsPathSeparator(originalPath[0]) && os.IsPathSeparator(originalPath[1]) {
+ // should be UNC
+ return FromSlash(originalPath)
+ }
+ return originalPath + "."
+ }
+ rooted := os.IsPathSeparator(path[0])
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+ // dotdot is index in buf where .. must stop, either because
+ // it is the leading slash or it is a leading ../../.. prefix.
+ n := len(path)
+ out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
+ r, dotdot := 0, 0
+ if rooted {
+ out.append(Separator)
+ r, dotdot = 1, 1
+ }
+
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // . element
+ r++
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+ switch {
+ case out.w > dotdot:
+ // can backtrack
+ out.w--
+ for out.w > dotdot && !os.IsPathSeparator(out.index(out.w)) {
+ out.w--
+ }
+ case !rooted:
+ // cannot backtrack, but not rooted, so append .. element.
+ if out.w > 0 {
+ out.append(Separator)
+ }
+ out.append('.')
+ out.append('.')
+ dotdot = out.w
+ }
+ default:
+ // real path element.
+ // add slash if needed
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(Separator)
+ }
+ // If a ':' appears in the path element at the start of a Windows path,
+ // insert a .\ at the beginning to avoid converting relative paths
+ // like a/../c: into c:.
+ if runtime.GOOS == "windows" && out.w == 0 && out.volLen == 0 && r != 0 {
+ for i := r; i < n && !os.IsPathSeparator(path[i]); i++ {
+ if path[i] == ':' {
+ out.append('.')
+ out.append(Separator)
+ break
+ }
+ }
+ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+ }
+ }
+ }
+
+ // Turn empty string into "."
+ if out.w == 0 {
+ out.append('.')
+ }
+
+ return FromSlash(out.string())
+}
+
+func unixIsLocal(path string) bool {
+ if IsAbs(path) || path == "" {
+ return false
+ }
+ hasDots := false
+ for p := path; p != ""; {
+ var part string
+ part, p, _ = strings.Cut(p, "/")
+ if part == "." || part == ".." {
+ hasDots = true
+ break
+ }
+ }
+ if hasDots {
+ path = Clean(path)
+ }
+ if path == ".." || strings.HasPrefix(path, "../") {
+ return false
+ }
+ return true
+}
+
+// FromSlash returns the result of replacing each slash ('/') character
+// in path with a separator character. Multiple slashes are replaced
+// by multiple separators.
+func FromSlash(path string) string {
+ if Separator == '/' {
+ return path
+ }
+ return strings.ReplaceAll(path, "/", string(Separator))
+}
+
+// Join joins any number of path elements into a single path,
+// separating them with an OS specific Separator. Empty elements
+// are ignored. The result is Cleaned. However, if the argument
+// list is empty or all its elements are empty, Join returns
+// an empty string.
+// On Windows, the result will only be a UNC path if the first
+// non-empty element is a UNC path.
+func Join(elem ...string) string {
+ return join(elem)
+}
+
+// nolint
+func unixAbs(path string) (string, error) {
+ if IsAbs(path) {
+ return Clean(path), nil
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ return Join(wd, path), nil
+}
+
+// SkipDir is used as a return value from WalkFuncs to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir error = fs.SkipDir
+
+// WalkFunc is the type of the function called by Walk to visit each
+// file or directory.
+//
+// The path argument contains the argument to Walk as a prefix.
+// That is, if Walk is called with root argument "dir" and finds a file
+// named "a" in that directory, the walk function will be called with
+// argument "dir/a".
+//
+// The directory and file are joined with Join, which may clean the
+// directory name: if Walk is called with the root argument "x/../dir"
+// and finds a file named "a" in that directory, the walk function will
+// be called with argument "dir/a", not "x/../dir/a".
+//
+// The info argument is the fs.FileInfo for the named path.
+//
+// The error result returned by the function controls how Walk continues.
+// If the function returns the special value SkipDir, Walk skips the
+// current directory (path if info.IsDir() is true, otherwise path's
+// parent directory). If the function returns the special value SkipAll,
+// Walk skips all remaining files and directories. Otherwise, if the function
+// returns a non-nil error, Walk stops entirely and returns that error.
+//
+// The err argument reports an error related to path, signaling that Walk
+// will not walk into that directory. The function can decide how to
+// handle that error; as described earlier, returning the error will
+// cause Walk to stop walking the entire tree.
+//
+// Walk calls the function with a non-nil err argument in two cases.
+//
+// First, if an os.Lstat on the root directory or any directory or file
+// in the tree fails, Walk calls the function with path set to that
+// directory or file's path, info set to nil, and err set to the error
+// from os.Lstat.
+//
+// Second, if a directory's Readdirnames method fails, Walk calls the
+// function with path set to the directory's path, info, set to an
+// fs.FileInfo describing the directory, and err set to the error from
+// Readdirnames.
+type WalkFunc func(path string, info fs.FileInfo, err error) error
diff --git a/erigon-lib/downloader/path_plan9.go b/erigon-lib/downloader/path_plan9.go
new file mode 100644
index 00000000000..01cb95fc933
--- /dev/null
+++ b/erigon-lib/downloader/path_plan9.go
@@ -0,0 +1,55 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package downloader
+
+import "strings"
+
+func isLocal(path string) bool {
+ return unixIsLocal(path)
+}
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) bool {
+ return strings.HasPrefix(path, "/") || strings.HasPrefix(path, "#")
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+func volumeNameLen(path string) int {
+ return 0
+}
+
+// HasPrefix exists for historical compatibility and should not be used.
+//
+// Deprecated: HasPrefix does not respect path boundaries and
+// does not ignore case when required.
+func HasPrefix(p, prefix string) bool {
+ return strings.HasPrefix(p, prefix)
+}
+
+func splitList(path string) []string {
+ if path == "" {
+ return []string{}
+ }
+ return strings.Split(path, string(ListSeparator))
+}
+
+func abs(path string) (string, error) {
+ return unixAbs(path)
+}
+
+func join(elem []string) string {
+ // If there's a bug here, fix the logic in ./path_unix.go too.
+ for i, e := range elem {
+ if e != "" {
+ return Clean(strings.Join(elem[i:], string(Separator)))
+ }
+ }
+ return ""
+}
+
+func sameWord(a, b string) bool {
+ return a == b
+}
diff --git a/erigon-lib/downloader/path_unix.go b/erigon-lib/downloader/path_unix.go
new file mode 100644
index 00000000000..cb3b3bcd491
--- /dev/null
+++ b/erigon-lib/downloader/path_unix.go
@@ -0,0 +1,34 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package downloader
+
+import "strings"
+
+func isLocal(path string) bool {
+ return unixIsLocal(path)
+}
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) bool {
+ return strings.HasPrefix(path, "/")
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+func volumeNameLen(path string) int {
+ return 0
+}
+
+func join(elem []string) string {
+ // If there's a bug here, fix the logic in ./path_plan9.go too.
+ for i, e := range elem {
+ if e != "" {
+ return Clean(strings.Join(elem[i:], string(Separator)))
+ }
+ }
+ return ""
+}
diff --git a/erigon-lib/downloader/path_windows.go b/erigon-lib/downloader/path_windows.go
new file mode 100644
index 00000000000..f5f4a01d905
--- /dev/null
+++ b/erigon-lib/downloader/path_windows.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package downloader
+
+import (
+ "strings"
+ "syscall"
+)
+
+func isSlash(c uint8) bool {
+ return c == '\\' || c == '/'
+}
+
+func toUpper(c byte) byte {
+ if 'a' <= c && c <= 'z' {
+ return c - ('a' - 'A')
+ }
+ return c
+}
+
+// isReservedName reports if name is a Windows reserved device name or a console handle.
+// It does not detect names with an extension, which are also reserved on some Windows versions.
+//
+// For details, search for PRN in
+// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file.
+func isReservedName(name string) bool {
+ if 3 <= len(name) && len(name) <= 4 {
+ switch string([]byte{toUpper(name[0]), toUpper(name[1]), toUpper(name[2])}) {
+ case "CON", "PRN", "AUX", "NUL":
+ return len(name) == 3
+ case "COM", "LPT":
+ return len(name) == 4 && '1' <= name[3] && name[3] <= '9'
+ }
+ }
+ // Passing CONIN$ or CONOUT$ to CreateFile opens a console handle.
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#consoles
+ //
+ // While CONIN$ and CONOUT$ aren't documented as being files,
+ // they behave the same as CON. For example, ./CONIN$ also opens the console input.
+ if len(name) == 6 && name[5] == '$' && strings.EqualFold(name, "CONIN$") {
+ return true
+ }
+ if len(name) == 7 && name[6] == '$' && strings.EqualFold(name, "CONOUT$") {
+ return true
+ }
+ return false
+}
+
+func isLocal(path string) bool {
+ if path == "" {
+ return false
+ }
+ if isSlash(path[0]) {
+ // Path rooted in the current drive.
+ return false
+ }
+ if strings.IndexByte(path, ':') >= 0 {
+ // Colons are only valid when marking a drive letter ("C:foo").
+ // Rejecting any path with a colon is conservative but safe.
+ return false
+ }
+ hasDots := false // contains . or .. path elements
+ for p := path; p != ""; {
+ var part string
+ part, p, _ = cutPath(p)
+ if part == "." || part == ".." {
+ hasDots = true
+ }
+ // Trim the extension and look for a reserved name.
+ base, _, hasExt := strings.Cut(part, ".")
+ if isReservedName(base) {
+ if !hasExt {
+ return false
+ }
+ // The path element is a reserved name with an extension. Some Windows
+ // versions consider this a reserved name, while others do not. Use
+ // FullPath to see if the name is reserved.
+ //
+ // FullPath will convert references to reserved device names to their
+ // canonical form: \\.\${DEVICE_NAME}
+ //
+ // FullPath does not perform this conversion for paths which contain
+ // a reserved device name anywhere other than in the last element,
+ // so check the part rather than the full path.
+ if p, _ := syscall.FullPath(part); len(p) >= 4 && p[:4] == `\\.\` {
+ return false
+ }
+ }
+ }
+ if hasDots {
+ path = Clean(path)
+ }
+ if path == ".." || strings.HasPrefix(path, `..\`) {
+ return false
+ }
+ return true
+}
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) (b bool) {
+ l := volumeNameLen(path)
+ if l == 0 {
+ return false
+ }
+ // If the volume name starts with a double slash, this is an absolute path.
+ if isSlash(path[0]) && isSlash(path[1]) {
+ return true
+ }
+ path = path[l:]
+ if path == "" {
+ return false
+ }
+ return isSlash(path[0])
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+//
+// See: https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
+func volumeNameLen(path string) int {
+ if len(path) < 2 {
+ return 0
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ return 2
+ }
+ // UNC and DOS device paths start with two slashes.
+ if !isSlash(path[0]) || !isSlash(path[1]) {
+ return 0
+ }
+ rest := path[2:]
+ p1, rest, _ := cutPath(rest)
+ p2, rest, ok := cutPath(rest)
+ if !ok {
+ return len(path)
+ }
+ if p1 != "." && p1 != "?" {
+ // This is a UNC path: \\${HOST}\${SHARE}\
+ return len(path) - len(rest) - 1
+ }
+ // This is a DOS device path.
+ if len(p2) == 3 && toUpper(p2[0]) == 'U' && toUpper(p2[1]) == 'N' && toUpper(p2[2]) == 'C' {
+ // This is a DOS device path that links to a UNC: \\.\UNC\${HOST}\${SHARE}\
+ _, rest, _ = cutPath(rest) // host
+ _, rest, ok = cutPath(rest) // share
+ if !ok {
+ return len(path)
+ }
+ }
+ return len(path) - len(rest) - 1
+}
+
+// cutPath slices path around the first path separator.
+func cutPath(path string) (before, after string, found bool) {
+ for i := range path {
+ if isSlash(path[i]) {
+ return path[:i], path[i+1:], true
+ }
+ }
+ return path, "", false
+}
+
+// HasPrefix exists for historical compatibility and should not be used.
+//
+// Deprecated: HasPrefix does not respect path boundaries and
+// does not ignore case when required.
+func HasPrefix(p, prefix string) bool {
+ if strings.HasPrefix(p, prefix) {
+ return true
+ }
+ return strings.HasPrefix(strings.ToLower(p), strings.ToLower(prefix))
+}
+
+func splitList(path string) []string {
+ // The same implementation is used in LookPath in os/exec;
+ // consider changing os/exec when changing this.
+
+ if path == "" {
+ return []string{}
+ }
+
+ // Split path, respecting but preserving quotes.
+ list := []string{}
+ start := 0
+ quo := false
+ for i := 0; i < len(path); i++ {
+ switch c := path[i]; {
+ case c == '"':
+ quo = !quo
+ case c == ListSeparator && !quo:
+ list = append(list, path[start:i])
+ start = i + 1
+ }
+ }
+ list = append(list, path[start:])
+
+ // Remove quotes.
+ for i, s := range list {
+ list[i] = strings.ReplaceAll(s, `"`, ``)
+ }
+
+ return list
+}
+
+func abs(path string) (string, error) {
+ if path == "" {
+ // syscall.FullPath returns an error on empty path, because it's not a valid path.
+ // To implement Abs behavior of returning working directory on empty string input,
+ // special-case empty path by changing it to "." path. See golang.org/issue/24441.
+ path = "."
+ }
+ fullPath, err := syscall.FullPath(path)
+ if err != nil {
+ return "", err
+ }
+ return Clean(fullPath), nil
+}
+
+func join(elem []string) string {
+ var b strings.Builder
+ var lastChar byte
+ for _, e := range elem {
+ switch {
+ case b.Len() == 0:
+ // Add the first non-empty path element unchanged.
+ case isSlash(lastChar):
+ // If the path ends in a slash, strip any leading slashes from the next
+ // path element to avoid creating a UNC path (any path starting with "\\")
+ // from non-UNC elements.
+ //
+ // The correct behavior for Join when the first element is an incomplete UNC
+ // path (for example, "\\") is underspecified. We currently join subsequent
+ // elements so Join("\\", "host", "share") produces "\\host\share".
+ for len(e) > 0 && isSlash(e[0]) {
+ e = e[1:]
+ }
+ case lastChar == ':':
+ // If the path ends in a colon, keep the path relative to the current directory
+ // on a drive and don't add a separator. Preserve leading slashes in the next
+ // path element, which may make the path absolute.
+ //
+ // Join(`C:`, `f`) = `C:f`
+ // Join(`C:`, `\f`) = `C:\f`
+ default:
+ // In all other cases, add a separator between elements.
+ b.WriteByte('\\')
+ lastChar = '\\'
+ }
+ if len(e) > 0 {
+ b.WriteString(e)
+ lastChar = e[len(e)-1]
+ }
+ }
+ if b.Len() == 0 {
+ return ""
+ }
+ return Clean(b.String())
+}
+
+// joinNonEmpty is like join, but it assumes that the first element is non-empty.
+func joinNonEmpty(elem []string) string {
+ if len(elem[0]) == 2 && elem[0][1] == ':' {
+ // First element is drive letter without terminating slash.
+ // Keep path relative to current directory on that drive.
+ // Skip empty elements.
+ i := 1
+ for ; i < len(elem); i++ {
+ if elem[i] != "" {
+ break
+ }
+ }
+ return Clean(elem[0] + strings.Join(elem[i:], string(Separator)))
+ }
+ // The following logic prevents Join from inadvertently creating a
+ // UNC path on Windows. Unless the first element is a UNC path, Join
+ // shouldn't create a UNC path. See golang.org/issue/9167.
+ p := Clean(strings.Join(elem, string(Separator)))
+ if !isUNC(p) {
+ return p
+ }
+ // p == UNC only allowed when the first element is a UNC path.
+ head := Clean(elem[0])
+ if isUNC(head) {
+ return p
+ }
+ // head + tail == UNC, but joining two non-UNC paths should not result
+ // in a UNC path. Undo creation of UNC path.
+ tail := Clean(strings.Join(elem[1:], string(Separator)))
+ if head[len(head)-1] == Separator {
+ return head + tail
+ }
+ return head + string(Separator) + tail
+}
+
+// isUNC reports whether path is a UNC path.
+func isUNC(path string) bool {
+ return len(path) > 1 && isSlash(path[0]) && isSlash(path[1])
+}
+
+func sameWord(a, b string) bool {
+ return strings.EqualFold(a, b)
+}
diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go
new file mode 100644
index 00000000000..069707cfe29
--- /dev/null
+++ b/erigon-lib/downloader/snaptype/files.go
@@ -0,0 +1,257 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package snaptype
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/anacrolix/torrent/metainfo"
+
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
+ "golang.org/x/exp/slices"
+)
+
+type Type int
+
+const (
+ Headers Type = iota
+ Bodies
+ Transactions
+ BorEvents
+ BorSpans
+ NumberOfTypes
+ BeaconBlocks
+)
+
+var BorSnapshotTypes = []Type{BorEvents, BorSpans}
+
+func (ft Type) String() string {
+ switch ft {
+ case Headers:
+ return "headers"
+ case Bodies:
+ return "bodies"
+ case Transactions:
+ return "transactions"
+ case BorEvents:
+ return "borevents"
+ case BorSpans:
+ return "borspans"
+ case BeaconBlocks:
+ return "beaconblocks"
+ default:
+ panic(fmt.Sprintf("unknown file type: %d", ft))
+ }
+}
+
+func ParseFileType(s string) (Type, bool) {
+ switch s {
+ case "headers":
+ return Headers, true
+ case "bodies":
+ return Bodies, true
+ case "transactions":
+ return Transactions, true
+ case "borevents":
+ return BorEvents, true
+ case "borspans":
+ return BorSpans, true
+ case "beaconblocks":
+ return BeaconBlocks, true
+ default:
+ return NumberOfTypes, false
+ }
+}
+
+type IdxType string
+
+const (
+ Transactions2Block IdxType = "transactions-to-block"
+)
+
+func (it IdxType) String() string { return string(it) }
+
+var BlockSnapshotTypes = []Type{Headers, Bodies, Transactions}
+
+var (
+ ErrInvalidFileName = fmt.Errorf("invalid compressed file name")
+)
+
+func FileName(from, to uint64, fileType string) string {
+ return fmt.Sprintf("v1-%06d-%06d-%s", from/1_000, to/1_000, fileType)
+}
+func SegmentFileName(from, to uint64, t Type) string { return FileName(from, to, t.String()) + ".seg" }
+func DatFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".dat" }
+func IdxFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".idx" }
+
+func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) {
+ for _, f := range in {
+ if f.Ext != expectExt { // filter out only compressed files
+ continue
+ }
+ out = append(out, f)
+ }
+ return out
+}
+func FilesWithExt(dir, expectExt string) ([]FileInfo, error) {
+ files, err := ParseDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ return FilterExt(files, expectExt), nil
+}
+
+func IsCorrectFileName(name string) bool {
+ parts := strings.Split(name, "-")
+ return len(parts) == 4
+}
+
+func IsCorrectHistoryFileName(name string) bool {
+ parts := strings.Split(name, ".")
+ return len(parts) == 3
+}
+
+func ParseFileName(dir, fileName string) (res FileInfo, ok bool) {
+ ext := filepath.Ext(fileName)
+ onlyName := fileName[:len(fileName)-len(ext)]
+ parts := strings.Split(onlyName, "-")
+ if len(parts) < 4 {
+ return res, ok
+ }
+ version := parts[0]
+ _ = version
+ from, err := strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return
+ }
+ to, err := strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return
+ }
+ ft, ok := ParseFileType(parts[3])
+ if !ok {
+ return res, ok
+ }
+ return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: ft, Ext: ext}, ok
+}
+
+const Erigon3SeedableSteps = 32
+
+// Use-cases:
+// - produce and seed snapshots earlier on chain tip. reduce depnedency on "good peers with history" at p2p-network.
+// Some networks have no much archive peers, also ConsensusLayer clients are not-good(not-incentivised) at serving history.
+// - avoiding having too much files:
+// more files(shards) - means "more metadata", "more lookups for non-indexed queries", "more dictionaries", "more bittorrent connections", ...
+// less files - means small files will be removed after merge (no peers for this files).
+const Erigon2RecentMergeLimit = 100_000 //nolint
+const Erigon2MergeLimit = 500_000
+const Erigon2MinSegmentSize = 1_000
+
+// FileInfo - parsed file metadata
+type FileInfo struct {
+ Version uint8
+ From, To uint64
+ Path, Ext string
+ T Type
+}
+
+func (f FileInfo) TorrentFileExists() bool { return dir.FileExist(f.Path + ".torrent") }
+func (f FileInfo) Seedable() bool {
+ return f.To-f.From == Erigon2MergeLimit || f.To-f.From == Erigon2RecentMergeLimit
+}
+func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() }
+
+func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") }
+func Segments(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".seg") }
+func TmpFiles(dir string) (res []string, err error) {
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return []string{}, nil
+ }
+ return nil, err
+ }
+ for _, f := range files {
+ if f.IsDir() || len(f.Name()) < 3 {
+ continue
+ }
+ if filepath.Ext(f.Name()) != ".tmp" {
+ continue
+ }
+ res = append(res, filepath.Join(dir, f.Name()))
+ }
+ return res, nil
+}
+
+// ParseDir - reading dir (
+func ParseDir(dir string) (res []FileInfo, err error) {
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return []FileInfo{}, nil
+ }
+ return nil, err
+ }
+ for _, f := range files {
+ fileInfo, err := f.Info()
+ if err != nil {
+ return nil, err
+ }
+ if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 {
+ continue
+ }
+
+ meta, ok := ParseFileName(dir, f.Name())
+ if !ok {
+ continue
+ }
+ res = append(res, meta)
+ }
+ slices.SortFunc(res, func(i, j FileInfo) int {
+ if i.Version != j.Version {
+ return cmp.Compare(i.Version, j.Version)
+ }
+ if i.From != j.From {
+ return cmp.Compare(i.From, j.From)
+ }
+ if i.To != j.To {
+ return cmp.Compare(i.To, j.To)
+ }
+ if i.T != j.T {
+ return cmp.Compare(i.T, j.T)
+ }
+ return cmp.Compare(i.Ext, j.Ext)
+ })
+
+ return res, nil
+}
+
+func Hex2InfoHash(in string) (infoHash metainfo.Hash) {
+ inHex, err := hex.DecodeString(in)
+ if err != nil {
+ panic(err)
+ }
+ copy(infoHash[:], inHex)
+ return infoHash
+}
diff --git a/erigon-lib/downloader/snaptype/snaptypes.go b/erigon-lib/downloader/snaptype/snaptypes.go
new file mode 100644
index 00000000000..c42e9982a2c
--- /dev/null
+++ b/erigon-lib/downloader/snaptype/snaptypes.go
@@ -0,0 +1,14 @@
+package snaptype
+
+import (
+ "net/url"
+
+ "github.com/anacrolix/torrent/metainfo"
+)
+
+// Each provider can provide only 1 WebSeed url per file
+// but overall BitTorrent protocol allowing multiple
+type WebSeedsFromProvider map[string]string // fileName -> Url, can be Http/Ftp
+
+type WebSeedUrls map[string]metainfo.UrlList // fileName -> []Url, can be Http/Ftp
+type TorrentUrls map[string][]*url.URL
diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go
new file mode 100644
index 00000000000..053e830c851
--- /dev/null
+++ b/erigon-lib/downloader/util.go
@@ -0,0 +1,375 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package downloader
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ "github.com/anacrolix/torrent"
+ "github.com/anacrolix/torrent/bencode"
+ "github.com/anacrolix/torrent/metainfo"
+ common2 "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/datadir"
+ dir2 "github.com/ledgerwatch/erigon-lib/common/dir"
+ "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/sync/errgroup"
+)
+
+// udpOrHttpTrackers - torrent library spawning several goroutines and producing many requests for each tracker. So we limit amout of trackers by 7
+var udpOrHttpTrackers = []string{
+ "udp://tracker.opentrackr.org:1337/announce",
+ "udp://9.rarbg.com:2810/announce",
+ "udp://tracker.openbittorrent.com:6969/announce",
+ "http://tracker.openbittorrent.com:80/announce",
+ "udp://opentracker.i2p.rocks:6969/announce",
+ "https://opentracker.i2p.rocks:443/announce",
+ "udp://tracker.torrent.eu.org:451/announce",
+ "udp://tracker.moeking.me:6969/announce",
+}
+
+// nolint
+var websocketTrackers = []string{
+ "wss://tracker.btorrent.xyz",
+}
+
+// Trackers - break down by priority tier
+var Trackers = [][]string{
+ udpOrHttpTrackers,
+ //websocketTrackers // TODO: Ws protocol producing too many errors and flooding logs. But it's also very fast and reactive.
+}
+
+func seedableSegmentFiles(dir string) ([]string, error) {
+ files, err := dir2.ListFiles(dir, ".seg")
+ if err != nil {
+ return nil, err
+ }
+ res := make([]string, 0, len(files))
+ for _, fPath := range files {
+ _, name := filepath.Split(fPath)
+ if !snaptype.IsCorrectFileName(name) {
+ continue
+ }
+ ff, ok := snaptype.ParseFileName(dir, name)
+ if !ok {
+ continue
+ }
+ if !ff.Seedable() {
+ continue
+ }
+ res = append(res, name)
+ }
+ return res, nil
+}
+
+var historyFileRegex = regexp.MustCompile("^([[:lower:]]+).([0-9]+)-([0-9]+).(.*)$")
+
+func seedableSnapshotsBySubDir(dir, subDir string) ([]string, error) {
+ historyDir := filepath.Join(dir, subDir)
+ dir2.MustExist(historyDir)
+ files, err := dir2.ListFiles(historyDir, ".kv", ".v", ".ef")
+ if err != nil {
+ return nil, err
+ }
+ res := make([]string, 0, len(files))
+ for _, fPath := range files {
+ _, name := filepath.Split(fPath)
+ if !e3seedable(name) {
+ continue
+ }
+ res = append(res, filepath.Join(subDir, name))
+ }
+ return res, nil
+}
+
+func e3seedable(name string) bool {
+ subs := historyFileRegex.FindStringSubmatch(name)
+ if len(subs) != 5 {
+ return false
+ }
+ // Check that it's seedable
+ from, err := strconv.ParseUint(subs[2], 10, 64)
+ if err != nil {
+ return false
+ }
+ to, err := strconv.ParseUint(subs[3], 10, 64)
+ if err != nil {
+ return false
+ }
+ if (to-from)%snaptype.Erigon3SeedableSteps != 0 {
+ return false
+ }
+ return true
+}
+func ensureCantLeaveDir(fName, root string) (string, error) {
+ if filepath.IsAbs(fName) {
+ newFName, err := filepath.Rel(root, fName)
+ if err != nil {
+ return fName, err
+ }
+ if !IsLocal(newFName) {
+ return fName, fmt.Errorf("file=%s, is outside of snapshots dir", fName)
+ }
+ fName = newFName
+ }
+ if !IsLocal(fName) {
+ return fName, fmt.Errorf("relative paths are not allowed: %s", fName)
+ }
+ return fName, nil
+}
+
+func BuildTorrentIfNeed(ctx context.Context, fName, root string) (torrentFilePath string, err error) {
+ select {
+ case <-ctx.Done():
+ return "", ctx.Err()
+ default:
+ }
+ fName, err = ensureCantLeaveDir(fName, root)
+ if err != nil {
+ return "", err
+ }
+
+ fPath := filepath.Join(root, fName)
+ if dir2.FileExist(fPath + ".torrent") {
+ return fPath, nil
+ }
+ if !dir2.FileExist(fPath) {
+ return fPath, nil
+ }
+
+ info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize, Name: fName}
+ if err := info.BuildFromFilePath(fPath); err != nil {
+ return "", fmt.Errorf("createTorrentFileFromSegment: %w", err)
+ }
+ info.Name = fName
+
+ return fPath + ".torrent", CreateTorrentFileFromInfo(root, info, nil)
+}
+
+// BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually
+func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs) error {
+ logEvery := time.NewTicker(20 * time.Second)
+ defer logEvery.Stop()
+
+ files, err := seedableFiles(dirs)
+ if err != nil {
+ return err
+ }
+
+ g, ctx := errgroup.WithContext(ctx)
+ g.SetLimit(runtime.GOMAXPROCS(-1) * 4)
+ var i atomic.Int32
+
+ for _, file := range files {
+ file := file
+ g.Go(func() error {
+ defer i.Add(1)
+ if _, err := BuildTorrentIfNeed(ctx, file, dirs.Snap); err != nil {
+ return err
+ }
+ return nil
+ })
+ }
+
+Loop:
+ for int(i.Load()) < len(files) {
+ select {
+ case <-ctx.Done():
+ break Loop // g.Wait() will return right error
+ case <-logEvery.C:
+ if int(i.Load()) == len(files) {
+ break Loop
+ }
+ log.Info("[snapshots] Creating .torrent files", "progress", fmt.Sprintf("%d/%d", i.Load(), len(files)))
+ }
+ }
+ if err := g.Wait(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func CreateTorrentFileIfNotExists(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error {
+ fPath := filepath.Join(root, info.Name)
+ if dir2.FileExist(fPath + ".torrent") {
+ return nil
+ }
+ if err := CreateTorrentFileFromInfo(root, info, mi); err != nil {
+ return err
+ }
+ return nil
+}
+
+func CreateMetaInfo(info *metainfo.Info, mi *metainfo.MetaInfo) (*metainfo.MetaInfo, error) {
+ if mi == nil {
+ infoBytes, err := bencode.Marshal(info)
+ if err != nil {
+ return nil, err
+ }
+ mi = &metainfo.MetaInfo{
+ CreationDate: time.Now().Unix(),
+ CreatedBy: "erigon",
+ InfoBytes: infoBytes,
+ AnnounceList: Trackers,
+ }
+ } else {
+ mi.AnnounceList = Trackers
+ }
+ return mi, nil
+}
+func CreateTorrentFromMetaInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error {
+ torrentFileName := filepath.Join(root, info.Name+".torrent")
+ file, err := os.Create(torrentFileName)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ if err := mi.Write(file); err != nil {
+ return err
+ }
+ file.Sync()
+ return nil
+}
+func CreateTorrentFileFromInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo) (err error) {
+ mi, err = CreateMetaInfo(info, mi)
+ if err != nil {
+ return err
+ }
+ return CreateTorrentFromMetaInfo(root, info, mi)
+}
+
+func AllTorrentPaths(dirs datadir.Dirs) ([]string, error) {
+ files, err := dir2.ListFiles(dirs.Snap, ".torrent")
+ if err != nil {
+ return nil, err
+ }
+ files2, err := dir2.ListFiles(dirs.SnapHistory, ".torrent")
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, files2...)
+ return files, nil
+}
+
+func AllTorrentSpecs(dirs datadir.Dirs) (res []*torrent.TorrentSpec, err error) {
+ files, err := AllTorrentPaths(dirs)
+ if err != nil {
+ return nil, err
+ }
+ for _, fPath := range files {
+ if len(fPath) == 0 {
+ continue
+ }
+ a, err := loadTorrent(fPath)
+ if err != nil {
+ return nil, fmt.Errorf("AllTorrentSpecs: %w", err)
+ }
+ res = append(res, a)
+ }
+ return res, nil
+}
+
+func loadTorrent(torrentFilePath string) (*torrent.TorrentSpec, error) {
+ mi, err := metainfo.LoadFromFile(torrentFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("LoadFromFile: %w, file=%s", err, torrentFilePath)
+ }
+ mi.AnnounceList = Trackers
+ return torrent.TorrentSpecFromMetaInfoErr(mi)
+}
+
+// addTorrentFile - adding .torrent file to torrentClient (and checking their hashes), if .torrent file
+// added first time - pieces verification process will start (disk IO heavy) - Progress
+// kept in `piece completion storage` (surviving reboot). Once it done - no disk IO needed again.
+// Don't need call torrent.VerifyData manually
+func addTorrentFile(ctx context.Context, ts *torrent.TorrentSpec, torrentClient *torrent.Client, webseeds *WebSeeds) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ wsUrls, ok := webseeds.ByFileName(ts.DisplayName)
+ if ok {
+ ts.Webseeds = append(ts.Webseeds, wsUrls...)
+ }
+
+ _, ok = torrentClient.Torrent(ts.InfoHash)
+ if !ok { // can set ChunkSize only for new torrents
+ ts.ChunkSize = downloadercfg.DefaultNetworkChunkSize
+ } else {
+ ts.ChunkSize = 0
+ }
+ ts.DisallowDataDownload = true
+ _, _, err := torrentClient.AddTorrentSpec(ts)
+ if err != nil {
+ return fmt.Errorf("addTorrentFile %s: %w", ts.DisplayName, err)
+ }
+ return nil
+}
+
+func savePeerID(db kv.RwDB, peerID torrent.PeerID) error {
+ return db.Update(context.Background(), func(tx kv.RwTx) error {
+ return tx.Put(kv.BittorrentInfo, []byte(kv.BittorrentPeerID), peerID[:])
+ })
+}
+
+func readPeerID(db kv.RoDB) (peerID []byte, err error) {
+ if err = db.View(context.Background(), func(tx kv.Tx) error {
+ peerIDFromDB, err := tx.GetOne(kv.BittorrentInfo, []byte(kv.BittorrentPeerID))
+ if err != nil {
+ return fmt.Errorf("get peer id: %w", err)
+ }
+ peerID = common2.Copy(peerIDFromDB)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return peerID, nil
+}
+
+// Deprecated: use `filepath.IsLocal` after drop go1.19 support
+func IsLocal(path string) bool {
+ return isLocal(path)
+}
+
+func saveTorrent(torrentFilePath string, res []byte) error {
+ if len(res) == 0 {
+ return fmt.Errorf("try to write 0 bytes to file: %s", torrentFilePath)
+ }
+ f, err := os.Create(torrentFilePath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err = f.Write(res); err != nil {
+ return err
+ }
+ if err = f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go
new file mode 100644
index 00000000000..0c42f17b28d
--- /dev/null
+++ b/erigon-lib/downloader/webseed.go
@@ -0,0 +1,334 @@
+package downloader
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/chain/snapcfg"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/anacrolix/torrent/bencode"
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/ledgerwatch/erigon-lib/common/dir"
+ "github.com/ledgerwatch/erigon-lib/downloader/snaptype"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/pelletier/go-toml/v2"
+)
+
+// WebSeeds - allow use HTTP-based infrastrucutre to support Bittorrent network
+// it allows download .torrent files and data files from trusted url's (for example: S3 signed url)
+type WebSeeds struct {
+ lock sync.Mutex
+
+ byFileName snaptype.WebSeedUrls // HTTP urls of data files
+ torrentUrls snaptype.TorrentUrls // HTTP urls of .torrent files
+ downloadTorrentFile bool
+ torrentsWhitelist snapcfg.Preverified
+
+ logger log.Logger
+ verbosity log.Lvl
+}
+
+func (d *WebSeeds) Discover(ctx context.Context, s3tokens []string, urls []*url.URL, files []string, rootDir string) {
+ d.downloadWebseedTomlFromProviders(ctx, s3tokens, urls, files)
+ d.downloadTorrentFilesFromProviders(ctx, rootDir)
+}
+
+func (d *WebSeeds) downloadWebseedTomlFromProviders(ctx context.Context, s3Providers []string, httpProviders []*url.URL, diskProviders []string) {
+ log.Debug("[snapshots] webseed providers", "http", len(httpProviders), "s3", len(s3Providers), "disk", len(diskProviders))
+ list := make([]snaptype.WebSeedsFromProvider, 0, len(httpProviders)+len(diskProviders))
+ for _, webSeedProviderURL := range httpProviders {
+ select {
+ case <-ctx.Done():
+ break
+ default:
+ }
+ response, err := d.callHttpProvider(ctx, webSeedProviderURL)
+ if err != nil { // don't fail on error
+ d.logger.Debug("[snapshots.webseed] get from HTTP provider", "err", err, "url", webSeedProviderURL.EscapedPath())
+ continue
+ }
+ list = append(list, response)
+ }
+
+ for _, webSeedProviderURL := range s3Providers {
+ select {
+ case <-ctx.Done():
+ break
+ default:
+ }
+ response, err := d.callS3Provider(ctx, webSeedProviderURL)
+ if err != nil { // don't fail on error
+ d.logger.Debug("[snapshots.webseed] get from S3 provider", "err", err)
+ continue
+ }
+ list = append(list, response)
+ }
+ // add to list files from disk
+ for _, webSeedFile := range diskProviders {
+ response, err := d.readWebSeedsFile(webSeedFile)
+ if err != nil { // don't fail on error
+ d.logger.Debug("[snapshots.webseed] get from File provider", "err", err)
+ continue
+ }
+ list = append(list, response)
+ }
+
+ webSeedUrls, torrentUrls := snaptype.WebSeedUrls{}, snaptype.TorrentUrls{}
+ for _, urls := range list {
+ for name, wUrl := range urls {
+ if !strings.HasSuffix(name, ".torrent") {
+ webSeedUrls[name] = append(webSeedUrls[name], wUrl)
+ continue
+ }
+ if !nameWhitelisted(name, d.torrentsWhitelist) {
+ continue
+ }
+ uri, err := url.ParseRequestURI(wUrl)
+ if err != nil {
+ d.logger.Debug("[snapshots] url is invalid", "url", wUrl, "err", err)
+ continue
+ }
+ torrentUrls[name] = append(torrentUrls[name], uri)
+ }
+ }
+
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ d.byFileName = webSeedUrls
+ d.torrentUrls = torrentUrls
+}
+
+func (d *WebSeeds) TorrentUrls() snaptype.TorrentUrls {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ return d.torrentUrls
+}
+
+func (d *WebSeeds) Len() int {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ return len(d.byFileName)
+}
+
+func (d *WebSeeds) ByFileName(name string) (metainfo.UrlList, bool) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ v, ok := d.byFileName[name]
+ return v, ok
+}
+func (d *WebSeeds) callHttpProvider(ctx context.Context, webSeedProviderUrl *url.URL) (snaptype.WebSeedsFromProvider, error) {
+ baseUrl := webSeedProviderUrl.String()
+ ref, err := url.Parse("manifest.txt")
+ if err != nil {
+ return nil, err
+ }
+ u := webSeedProviderUrl.ResolveReference(ref)
+ request, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ request = request.WithContext(ctx)
+ resp, err := http.DefaultClient.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("webseed.http: %w, host=%s, url=%s", err, webSeedProviderUrl.Hostname(), webSeedProviderUrl.EscapedPath())
+ }
+ defer resp.Body.Close()
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("webseed.http: %w, host=%s, url=%s, ", err, webSeedProviderUrl.Hostname(), webSeedProviderUrl.EscapedPath())
+ }
+ response := snaptype.WebSeedsFromProvider{}
+ fileNames := strings.Split(string(b), "\n")
+ for _, f := range fileNames {
+ response[f], err = url.JoinPath(baseUrl, f)
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.logger.Debug("[snapshots.webseed] get from HTTP provider", "urls", len(response), "host", webSeedProviderUrl.Hostname(), "url", webSeedProviderUrl.EscapedPath())
+ return response, nil
+}
+func (d *WebSeeds) callS3Provider(ctx context.Context, token string) (snaptype.WebSeedsFromProvider, error) {
+ //v1:bucketName:accID:accessKeyID:accessKeySecret
+ l := strings.Split(token, ":")
+ if len(l) != 5 {
+ return nil, fmt.Errorf("token has invalid format, exepcing 'v1:tokenInBase64'")
+ }
+ version, bucketName, accountId, accessKeyId, accessKeySecret := strings.TrimSpace(l[0]), strings.TrimSpace(l[1]), strings.TrimSpace(l[2]), strings.TrimSpace(l[3]), strings.TrimSpace(l[4])
+ if version != "v1" {
+ return nil, fmt.Errorf("not supported version: %s", version)
+ }
+ var fileName = "webseeds.toml"
+
+ r2Resolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ URL: fmt.Sprintf("https://%s.r2.cloudflarestorage.com", accountId),
+ }, nil
+ })
+ cfg, err := config.LoadDefaultConfig(ctx,
+ config.WithEndpointResolverWithOptions(r2Resolver),
+ config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKeyId, accessKeySecret, "")),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ client := s3.NewFromConfig(cfg)
+ // {
+ // "ChecksumAlgorithm": null,
+ // "ETag": "\"eb2b891dc67b81755d2b726d9110af16\"",
+ // "Key": "ferriswasm.png",
+ // "LastModified": "2022-05-18T17:20:21.67Z",
+ // "Owner": null,
+ // "Size": 87671,
+ // "StorageClass": "STANDARD"
+ // }
+ resp, err := client.GetObject(ctx, &s3.GetObjectInput{Bucket: &bucketName, Key: &fileName})
+ if err != nil {
+ return nil, fmt.Errorf("webseed.s3: bucket=%s, %w", bucketName, err)
+ }
+ defer resp.Body.Close()
+ response := snaptype.WebSeedsFromProvider{}
+ if err := toml.NewDecoder(resp.Body).Decode(&response); err != nil {
+ return nil, fmt.Errorf("webseed.s3: bucket=%s, %w", bucketName, err)
+ }
+ d.logger.Debug("[snapshots.webseed] get from S3 provider", "urls", len(response), "bucket", bucketName)
+ return response, nil
+}
+func (d *WebSeeds) readWebSeedsFile(webSeedProviderPath string) (snaptype.WebSeedsFromProvider, error) {
+ _, fileName := filepath.Split(webSeedProviderPath)
+ data, err := os.ReadFile(webSeedProviderPath)
+ if err != nil {
+ return nil, fmt.Errorf("webseed.readWebSeedsFile: file=%s, %w", fileName, err)
+ }
+ response := snaptype.WebSeedsFromProvider{}
+ if err := toml.Unmarshal(data, &response); err != nil {
+ return nil, fmt.Errorf("webseed.readWebSeedsFile: file=%s, %w", fileName, err)
+ }
+ d.logger.Debug("[snapshots.webseed] get from File provider", "urls", len(response), "file", fileName)
+ return response, nil
+}
+
+// downloadTorrentFilesFromProviders - if they are not exist on file-system
+func (d *WebSeeds) downloadTorrentFilesFromProviders(ctx context.Context, rootDir string) {
+ // TODO: need more tests, need handle more forward-compatibility and backward-compatibility case
+ // - now, if add new type of .torrent files to S3 bucket - existing nodes will start downloading it. maybe need whitelist of file types
+ // - maybe need download new files if --snap.stop=true
+ if !d.downloadTorrentFile {
+ return
+ }
+ if len(d.TorrentUrls()) == 0 {
+ return
+ }
+ var addedNew int
+ e, ctx := errgroup.WithContext(ctx)
+ urlsByName := d.TorrentUrls()
+ //TODO:
+ // - what to do if node already synced?
+ for name, tUrls := range urlsByName {
+ tPath := filepath.Join(rootDir, name)
+ if dir.FileExist(tPath) {
+ continue
+ }
+ addedNew++
+ if !strings.HasSuffix(name, ".seg.torrent") {
+ _, fName := filepath.Split(name)
+ d.logger.Log(d.verbosity, "[snapshots] webseed has .torrent, but we skip it because this file-type not supported yet", "name", fName)
+ continue
+ }
+ name := name
+ tUrls := tUrls
+ e.Go(func() error {
+ for _, url := range tUrls {
+ res, err := d.callTorrentHttpProvider(ctx, url, name)
+ if err != nil {
+ d.logger.Log(d.verbosity, "[snapshots] got from webseed", "name", name, "err", err)
+ continue
+ }
+ d.logger.Log(d.verbosity, "[snapshots] got from webseed", "name", name)
+ if err := saveTorrent(tPath, res); err != nil {
+ d.logger.Debug("[snapshots] saveTorrent", "err", err)
+ continue
+ }
+ return nil
+ }
+ return nil
+ })
+ }
+ if err := e.Wait(); err != nil {
+ d.logger.Debug("[snapshots] webseed discover", "err", err)
+ }
+}
+
+func (d *WebSeeds) callTorrentHttpProvider(ctx context.Context, url *url.URL, fileName string) ([]byte, error) {
+ request, err := http.NewRequest(http.MethodGet, url.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ request = request.WithContext(ctx)
+ resp, err := http.DefaultClient.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("webseed.downloadTorrentFile: host=%s, url=%s, %w", url.Hostname(), url.EscapedPath(), err)
+ }
+ defer resp.Body.Close()
+ //protect against too small and too big data
+ if resp.ContentLength == 0 || resp.ContentLength > int64(128*datasize.MB) {
+ return nil, nil
+ }
+ res, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("webseed.downloadTorrentFile: host=%s, url=%s, %w", url.Hostname(), url.EscapedPath(), err)
+ }
+ if err = validateTorrentBytes(fileName, res, d.torrentsWhitelist); err != nil {
+ return nil, fmt.Errorf("webseed.downloadTorrentFile: host=%s, url=%s, %w", url.Hostname(), url.EscapedPath(), err)
+ }
+ return res, nil
+}
+
+func validateTorrentBytes(fileName string, b []byte, whitelist snapcfg.Preverified) error {
+ var mi metainfo.MetaInfo
+ if err := bencode.NewDecoder(bytes.NewBuffer(b)).Decode(&mi); err != nil {
+ return err
+ }
+ torrentHash := mi.HashInfoBytes()
+ // files with different names can have same hash. means need check AND name AND hash.
+ if !nameAndHashWhitelisted(fileName, torrentHash.String(), whitelist) {
+ return fmt.Errorf(".torrent file is not whitelisted")
+ }
+ return nil
+}
+
+func nameWhitelisted(fileName string, whitelist snapcfg.Preverified) bool {
+ fileName = strings.TrimSuffix(fileName, ".torrent")
+ for i := 0; i < len(whitelist); i++ {
+ if whitelist[i].Name == fileName {
+ return true
+ }
+ }
+ return false
+}
+
+func nameAndHashWhitelisted(fileName, fileHash string, whitelist snapcfg.Preverified) bool {
+ fileName = strings.TrimSuffix(fileName, ".torrent")
+ for i := 0; i < len(whitelist); i++ {
+ if whitelist[i].Name == fileName && whitelist[i].Hash == fileHash {
+ return true
+ }
+ }
+ return false
+}
diff --git a/erigon-lib/etl/ETL-collector.png b/erigon-lib/etl/ETL-collector.png
new file mode 100644
index 00000000000..2e88cb2d43b
Binary files /dev/null and b/erigon-lib/etl/ETL-collector.png differ
diff --git a/erigon-lib/etl/ETL.png b/erigon-lib/etl/ETL.png
new file mode 100644
index 00000000000..757921eeff9
Binary files /dev/null and b/erigon-lib/etl/ETL.png differ
diff --git a/erigon-lib/etl/README.md b/erigon-lib/etl/README.md
new file mode 100644
index 00000000000..9a97c270673
--- /dev/null
+++ b/erigon-lib/etl/README.md
@@ -0,0 +1,182 @@
+# ETL
+ETL framework is most commonly used in [staged sync](https://github.com/ledgerwatch/erigon/blob/devel/eth/stagedsync/README.md).
+
+It implements a pattern where we extract some data from a database, transform it,
+then put it into temp files and insert back to the database in sorted order.
+
+Inserting entries into our KV storage sorted by keys helps to minimize write
+amplification, hence it is much faster, even considering additional I/O that
+is generated by storing files.
+
+It behaves similarly to enterprise [Extract, Tranform, Load](https://en.wikipedia.org/wiki/Extract,_transform,_load) frameworks, hence the name.
+We use temporary files because that helps keep RAM usage predictable and allows
+using ETL on large amounts of data.
+
+### Example
+
+```
+func keyTransformExtractFunc(transformKey func([]byte) ([]byte, error)) etl.ExtractFunc {
+ return func(k, v []byte, next etl.ExtractNextFunc) error {
+ newK, err := transformKey(k)
+ if err != nil {
+ return err
+ }
+ return next(k, newK, v)
+ }
+}
+
+err := etl.Transform(
+ db, // database
+ dbutils.PlainStateBucket, // "from" bucket
+ dbutils.CurrentStateBucket, // "to" bucket
+ datadir, // where to store temp files
+ keyTransformExtractFunc(transformPlainStateKey), // transformFunc on extraction
+ etl.IdentityLoadFunc, // transform on load
+ etl.TransformArgs{ // additional arguments
+ Quit: quit,
+ },
+ )
+ if err != nil {
+ return err
+ }
+
+```
+
+## Data Transformation
+
+The whole flow is shown in the image
+
+![](./ETL.png)
+
+Data could be transformed in two places along the pipeline:
+
+* transform on extraction
+
+* transform on loading
+
+### Transform On Extraction
+
+`type ExtractFunc func(k []byte, v []byte, next ExtractNextFunc) error`
+
+Transform on extraction function receives the current key and value from the
+source bucket.
+
+### Transform On Loading
+
+`type LoadFunc func(k []byte, value []byte, state State, next LoadNextFunc) error`
+
+As well as the current key and value, the transform on loading function
+receives the `State` object that can receive data from the destination bucket.
+
+That is used in index generation where we want to extend index entries with new
+data instead of just adding new ones.
+
+### `<...>NextFunc` pattern
+
+Sometimes we need to produce multiple entries from a single entry when
+transforming.
+
+To do that, each of the transform function receives a next function that should
+be called to move data further. That means that each transformation can produce
+any number of outputs for a single input.
+
+It can be one output, like in `IdentityLoadFunc`:
+
+```
+func IdentityLoadFunc(k []byte, value []byte, _ State, next LoadNextFunc) error {
+ return next(k, k, value) // go to the next step
+}
+```
+
+It can be multiple outputs like when each entry is a `ChangeSet`:
+
+```
+func(dbKey, dbValue []byte, next etl.ExtractNextFunc) error {
+ blockNum, _ := dbutils.DecodeTimestamp(dbKey)
+ return bytes2walker(dbValue).Walk(func(changesetKey, changesetValue []byte) error {
+ key := common.CopyBytes(changesetKey)
+ v := make([]byte, 9)
+ binary.BigEndian.PutUint64(v, blockNum)
+ if len(changesetValue) == 0 {
+ v[8] = 1
+ }
+ return next(dbKey, key, v) // go to the next step
+ })
+ }
+```
+
+### Buffer Types
+
+Before the data is being flushed into temp files, it is getting collected into
+a buffer until if overflows (`etl.ExtractArgs.BufferSize`).
+
+There are different types of buffers available with different behaviour.
+
+* `SortableSliceBuffer` -- just append `(k, v1)`, `(k, v2)` onto a slice. Duplicate keys
+ will lead to duplicate entries: `[(k, v1) (k, v2)]`.
+
+* `SortableAppendBuffer` -- on duplicate keys: merge. `(k, v1)`, `(k, v2)`
+ will lead to `k: [v1 v2]`
+
+* `SortableOldestAppearedBuffer` -- on duplicate keys: keep the oldest. `(k,
+ v1)`, `(k v2)` will lead to `k: v1`
+
+### Transforming Structs
+
+Both transform functions and next functions allow only byte arrays.
+If you need to pass a struct, you will need to marshal it.
+
+### Loading Into Database
+
+We load data from the temp files into a database in batches, limited by
+`IdealBatchSize()` of an `ethdb.Mutation`.
+
+(for tests we can also override it)
+
+### Handling Interruptions
+
+ETL processes are long, so we need to be able to handle interruptions.
+
+#### Handing `Ctrl+C`
+
+You can pass your quit channel into `Quit` parameter into `etl.TransformArgs`.
+
+When this channel is closed, ETL will be interrupted.
+
+#### Saving & Restoring State
+
+Interrupting in the middle of loading can lead to inconsistent state in the
+database.
+
+To avoid that, the ETL framework allows storing progress by setting `OnLoadCommit` in `etl.TransformArgs`.
+
+Then we can use this data to know the progress the ETL transformation made.
+
+You can also specify `ExtractStartKey` and `ExtractEndKey` to limit the number
+of items transformed.
+
+## Ways to work with ETL framework
+
+There might be 2 scenarios on how you want to work with the ETL framework.
+
+![](./ETL-collector.png)
+
+### `etl.Transform` function
+
+The vast majority of use-cases is when we extract data from one bucket and in
+the end, load it into another bucket. That is the use-case for `etl.Transform`
+function.
+
+### `etl.Collector` struct
+
+If you want a more modular behaviour instead of just reading from the DB (like
+generating intermediate hashes in `../../core/chain_makers.go`, you can use
+`etl.Collector` struct directly.
+
+It has a `.Collect()` method that you can provide your data to.
+
+
+## Optimizations
+
+* if all data fits into a single file, we don't write anything to disk and just
+ use in-memory storage.
diff --git a/erigon-lib/etl/buffers.go b/erigon-lib/etl/buffers.go
new file mode 100644
index 00000000000..a05f1614c08
--- /dev/null
+++ b/erigon-lib/etl/buffers.go
@@ -0,0 +1,517 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/common"
+)
+
+const (
+ //SliceBuffer - just simple slice w
+ SortableSliceBuffer = iota
+ //SortableAppendBuffer - map[k] [v1 v2 v3]
+ SortableAppendBuffer
+ // SortableOldestAppearedBuffer - buffer that keeps only the oldest entries.
+ // if first v1 was added under key K, then v2; only v1 will stay
+ SortableOldestAppearedBuffer
+ SortableMergeBuffer
+
+ //BufIOSize - 128 pages | default is 1 page | increasing over `64 * 4096` doesn't show speedup on SSD/NVMe, but show speedup in cloud drives
+ BufIOSize = 128 * 4096
+)
+
+var BufferOptimalSize = 256 * datasize.MB /* var because we want to sometimes change it from tests or command-line flags */
+
+type Buffer interface {
+ Put(k, v []byte)
+ Get(i int, keyBuf, valBuf []byte) ([]byte, []byte)
+ Len() int
+ Reset()
+ SizeLimit() int
+ Prealloc(predictKeysAmount, predictDataAmount int)
+ Write(io.Writer) error
+ Sort()
+ CheckFlushSize() bool
+}
+
+type sortableBufferEntry struct {
+ key []byte
+ value []byte
+}
+
+var (
+ _ Buffer = &sortableBuffer{}
+ _ Buffer = &appendSortableBuffer{}
+ _ Buffer = &oldestEntrySortableBuffer{}
+)
+
+func NewSortableBuffer(bufferOptimalSize datasize.ByteSize) *sortableBuffer {
+ return &sortableBuffer{
+ optimalSize: int(bufferOptimalSize.Bytes()),
+ }
+}
+
+type sortableBuffer struct {
+ offsets []int
+ lens []int
+ data []byte
+ optimalSize int
+}
+
+// Put adds key and value to the buffer. These slices will not be accessed later,
+// so no copying is necessary
+func (b *sortableBuffer) Put(k, v []byte) {
+ lk, lv := len(k), len(v)
+ if k == nil {
+ lk = -1
+ }
+ if v == nil {
+ lv = -1
+ }
+ b.lens = append(b.lens, lk, lv)
+
+ b.offsets = append(b.offsets, len(b.data))
+ b.data = append(b.data, k...)
+ b.offsets = append(b.offsets, len(b.data))
+ b.data = append(b.data, v...)
+}
+
+func (b *sortableBuffer) Size() int {
+ return len(b.data) + 8*len(b.offsets) + 8*len(b.lens)
+}
+
+func (b *sortableBuffer) Len() int {
+ return len(b.offsets) / 2
+}
+
+func (b *sortableBuffer) Less(i, j int) bool {
+ i2, j2 := i*2, j*2
+ ki := b.data[b.offsets[i2] : b.offsets[i2]+b.lens[i2]]
+ kj := b.data[b.offsets[j2] : b.offsets[j2]+b.lens[j2]]
+ return bytes.Compare(ki, kj) < 0
+}
+
+func (b *sortableBuffer) Swap(i, j int) {
+ i2, j2 := i*2, j*2
+ b.offsets[i2], b.offsets[j2] = b.offsets[j2], b.offsets[i2]
+ b.offsets[i2+1], b.offsets[j2+1] = b.offsets[j2+1], b.offsets[i2+1]
+ b.lens[i2], b.lens[j2] = b.lens[j2], b.lens[i2]
+ b.lens[i2+1], b.lens[j2+1] = b.lens[j2+1], b.lens[i2+1]
+}
+
+func (b *sortableBuffer) Get(i int, keyBuf, valBuf []byte) ([]byte, []byte) {
+ i2 := i * 2
+ keyOffset, valOffset := b.offsets[i2], b.offsets[i2+1]
+ keyLen, valLen := b.lens[i2], b.lens[i2+1]
+ if keyLen > 0 {
+ keyBuf = append(keyBuf, b.data[keyOffset:keyOffset+keyLen]...)
+ } else if keyLen == 0 {
+ if keyBuf != nil {
+ keyBuf = keyBuf[:0]
+ } else {
+ keyBuf = []byte{}
+ }
+ } else {
+ keyBuf = nil
+ }
+ if valLen > 0 {
+ valBuf = append(valBuf, b.data[valOffset:valOffset+valLen]...)
+ } else if valLen == 0 {
+ if valBuf != nil {
+ valBuf = valBuf[:0]
+ } else {
+ valBuf = []byte{}
+ }
+ } else {
+ valBuf = nil
+ }
+ return keyBuf, valBuf
+}
+
+func (b *sortableBuffer) Prealloc(predictKeysAmount, predictDataSize int) {
+ b.lens = make([]int, 0, predictKeysAmount)
+ b.offsets = make([]int, 0, predictKeysAmount)
+ b.data = make([]byte, 0, predictDataSize)
+}
+
+func (b *sortableBuffer) Reset() {
+ b.offsets = b.offsets[:0]
+ b.lens = b.lens[:0]
+ b.data = b.data[:0]
+}
+func (b *sortableBuffer) SizeLimit() int { return b.optimalSize }
+func (b *sortableBuffer) Sort() {
+ if sort.IsSorted(b) {
+ return
+ }
+ sort.Stable(b)
+}
+
+func (b *sortableBuffer) CheckFlushSize() bool {
+ return b.Size() >= b.optimalSize
+}
+
+func (b *sortableBuffer) Write(w io.Writer) error {
+ var numBuf [binary.MaxVarintLen64]byte
+ for i, offset := range b.offsets {
+ l := b.lens[i]
+ n := binary.PutVarint(numBuf[:], int64(l))
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if l <= 0 {
+ continue
+ }
+ if _, err := w.Write(b.data[offset : offset+l]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func NewAppendBuffer(bufferOptimalSize datasize.ByteSize) *appendSortableBuffer {
+ return &appendSortableBuffer{
+ entries: make(map[string][]byte),
+ size: 0,
+ optimalSize: int(bufferOptimalSize.Bytes()),
+ }
+}
+
+type appendSortableBuffer struct {
+ entries map[string][]byte
+ sortedBuf []sortableBufferEntry
+ size int
+ optimalSize int
+}
+
+func (b *appendSortableBuffer) Put(k, v []byte) {
+ stored, ok := b.entries[string(k)]
+ if !ok {
+ b.size += len(k)
+ }
+ b.size += len(v)
+ fmt.Printf("put: %d, %x, %x . %x\n", b.size, k, stored, v)
+ b.entries[string(k)] = append(stored, v...)
+}
+
+func (b *appendSortableBuffer) Size() int { return b.size }
+func (b *appendSortableBuffer) SizeLimit() int { return b.optimalSize }
+
+func (b *appendSortableBuffer) Len() int {
+ return len(b.entries)
+}
+func (b *appendSortableBuffer) Sort() {
+ for key, val := range b.entries {
+ b.sortedBuf = append(b.sortedBuf, sortableBufferEntry{key: []byte(key), value: val})
+ }
+ sort.Stable(b)
+}
+
+func (b *appendSortableBuffer) Less(i, j int) bool {
+ return bytes.Compare(b.sortedBuf[i].key, b.sortedBuf[j].key) < 0
+}
+
+func (b *appendSortableBuffer) Swap(i, j int) {
+ b.sortedBuf[i], b.sortedBuf[j] = b.sortedBuf[j], b.sortedBuf[i]
+}
+
+func (b *appendSortableBuffer) Get(i int, keyBuf, valBuf []byte) ([]byte, []byte) {
+ keyBuf = append(keyBuf, b.sortedBuf[i].key...)
+ valBuf = append(valBuf, b.sortedBuf[i].value...)
+ return keyBuf, valBuf
+}
+func (b *appendSortableBuffer) Reset() {
+ b.sortedBuf = nil
+ b.entries = make(map[string][]byte)
+ b.size = 0
+}
+func (b *appendSortableBuffer) Prealloc(predictKeysAmount, predictDataSize int) {
+ b.entries = make(map[string][]byte, predictKeysAmount)
+ b.sortedBuf = make([]sortableBufferEntry, 0, predictKeysAmount*2)
+}
+
+func (b *appendSortableBuffer) Write(w io.Writer) error {
+ var numBuf [binary.MaxVarintLen64]byte
+ entries := b.sortedBuf
+ for _, entry := range entries {
+ fmt.Printf("write: %x, %x\n", entry.key, entry.value)
+ lk := int64(len(entry.key))
+ if entry.key == nil {
+ lk = -1
+ }
+ n := binary.PutVarint(numBuf[:], lk)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.key); err != nil {
+ return err
+ }
+ lv := int64(len(entry.value))
+ if entry.value == nil {
+ lv = -1
+ }
+ n = binary.PutVarint(numBuf[:], lv)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *appendSortableBuffer) CheckFlushSize() bool {
+ return b.size >= b.optimalSize
+}
+
+func NewOldestEntryBuffer(bufferOptimalSize datasize.ByteSize) *oldestEntrySortableBuffer {
+ return &oldestEntrySortableBuffer{
+ entries: make(map[string][]byte),
+ size: 0,
+ optimalSize: int(bufferOptimalSize.Bytes()),
+ }
+}
+
+type oldestEntrySortableBuffer struct {
+ entries map[string][]byte
+ sortedBuf []sortableBufferEntry
+ size int
+ optimalSize int
+}
+
+func (b *oldestEntrySortableBuffer) Put(k, v []byte) {
+ _, ok := b.entries[string(k)]
+ if ok {
+ // if we already had this entry, we are going to keep it and ignore new value
+ return
+ }
+
+ b.size += len(k)*2 + len(v)
+ b.entries[string(k)] = common.Copy(v)
+}
+
+func (b *oldestEntrySortableBuffer) Size() int { return b.size }
+func (b *oldestEntrySortableBuffer) SizeLimit() int { return b.optimalSize }
+
+func (b *oldestEntrySortableBuffer) Len() int {
+ return len(b.entries)
+}
+
+func (b *oldestEntrySortableBuffer) Sort() {
+ for k, v := range b.entries {
+ b.sortedBuf = append(b.sortedBuf, sortableBufferEntry{key: []byte(k), value: v})
+ }
+ sort.Stable(b)
+}
+
+func (b *oldestEntrySortableBuffer) Less(i, j int) bool {
+ return bytes.Compare(b.sortedBuf[i].key, b.sortedBuf[j].key) < 0
+}
+
+func (b *oldestEntrySortableBuffer) Swap(i, j int) {
+ b.sortedBuf[i], b.sortedBuf[j] = b.sortedBuf[j], b.sortedBuf[i]
+}
+
+func (b *oldestEntrySortableBuffer) Get(i int, keyBuf, valBuf []byte) ([]byte, []byte) {
+ keyBuf = append(keyBuf, b.sortedBuf[i].key...)
+ valBuf = append(valBuf, b.sortedBuf[i].value...)
+ return keyBuf, valBuf
+}
+func (b *oldestEntrySortableBuffer) Reset() {
+ b.sortedBuf = nil
+ b.entries = make(map[string][]byte)
+ b.size = 0
+}
+func (b *oldestEntrySortableBuffer) Prealloc(predictKeysAmount, predictDataSize int) {
+ b.entries = make(map[string][]byte, predictKeysAmount)
+ b.sortedBuf = make([]sortableBufferEntry, 0, predictKeysAmount*2)
+}
+
+func (b *oldestEntrySortableBuffer) Write(w io.Writer) error {
+ var numBuf [binary.MaxVarintLen64]byte
+ entries := b.sortedBuf
+ for _, entry := range entries {
+ lk := int64(len(entry.key))
+ if entry.key == nil {
+ lk = -1
+ }
+ n := binary.PutVarint(numBuf[:], lk)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.key); err != nil {
+ return err
+ }
+ lv := int64(len(entry.value))
+ if entry.value == nil {
+ lv = -1
+ }
+ n = binary.PutVarint(numBuf[:], lv)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func (b *oldestEntrySortableBuffer) CheckFlushSize() bool {
+ return b.size >= b.optimalSize
+}
+
+func getBufferByType(tp int, size datasize.ByteSize, prevBuf Buffer) Buffer {
+ switch tp {
+ case SortableSliceBuffer:
+ return NewSortableBuffer(size)
+ case SortableAppendBuffer:
+ return NewAppendBuffer(size)
+ case SortableOldestAppearedBuffer:
+ return NewOldestEntryBuffer(size)
+ case SortableMergeBuffer:
+ return NewLatestMergedEntryMergedBuffer(size, prevBuf.(*oldestMergedEntrySortableBuffer).merge)
+ default:
+ panic("unknown buffer type " + strconv.Itoa(tp))
+ }
+}
+
+func getTypeByBuffer(b Buffer) int {
+ switch b.(type) {
+ case *sortableBuffer:
+ return SortableSliceBuffer
+ case *appendSortableBuffer:
+ return SortableAppendBuffer
+ case *oldestEntrySortableBuffer:
+ return SortableOldestAppearedBuffer
+ case *oldestMergedEntrySortableBuffer:
+ return SortableMergeBuffer
+ default:
+ panic(fmt.Sprintf("unknown buffer type: %T ", b))
+ }
+}
+
+func NewLatestMergedEntryMergedBuffer(bufferOptimalSize datasize.ByteSize, merger func([]byte, []byte) []byte) *oldestMergedEntrySortableBuffer {
+ if merger == nil {
+ panic("nil merge func")
+ }
+ return &oldestMergedEntrySortableBuffer{
+ entries: make(map[string][]byte),
+ size: 0,
+ merge: merger,
+ optimalSize: int(bufferOptimalSize.Bytes()),
+ }
+}
+
+type oldestMergedEntrySortableBuffer struct {
+ entries map[string][]byte
+ merge func([]byte, []byte) []byte
+ sortedBuf []sortableBufferEntry
+ size int
+ optimalSize int
+}
+
+func (b *oldestMergedEntrySortableBuffer) Put(k, v []byte) {
+ prev, ok := b.entries[string(k)]
+ if ok {
+ b.size -= len(v)
+ // if we already had this entry, we are going to keep it and ignore new value
+ v = b.merge(prev, v)
+ b.size += len(v)
+ } else {
+ b.size += len(k) + len(v)
+ }
+ b.entries[string(k)] = common.Copy(v)
+}
+
+func (b *oldestMergedEntrySortableBuffer) Size() int { return b.size }
+func (b *oldestMergedEntrySortableBuffer) SizeLimit() int { return b.optimalSize }
+
+func (b *oldestMergedEntrySortableBuffer) Len() int {
+ return len(b.entries)
+}
+
+func (b *oldestMergedEntrySortableBuffer) Sort() {
+ for k, v := range b.entries {
+ b.sortedBuf = append(b.sortedBuf, sortableBufferEntry{key: []byte(k), value: v})
+ }
+ sort.Stable(b)
+}
+
+func (b *oldestMergedEntrySortableBuffer) Less(i, j int) bool {
+ return bytes.Compare(b.sortedBuf[i].key, b.sortedBuf[j].key) < 0
+}
+
+func (b *oldestMergedEntrySortableBuffer) Swap(i, j int) {
+ b.sortedBuf[i], b.sortedBuf[j] = b.sortedBuf[j], b.sortedBuf[i]
+}
+
+func (b *oldestMergedEntrySortableBuffer) Get(i int, keyBuf, valBuf []byte) ([]byte, []byte) {
+ keyBuf = append(keyBuf, b.sortedBuf[i].key...)
+ valBuf = append(valBuf, b.sortedBuf[i].value...)
+ return keyBuf, valBuf
+}
+func (b *oldestMergedEntrySortableBuffer) Reset() {
+ b.sortedBuf = nil
+ b.entries = make(map[string][]byte)
+ b.size = 0
+}
+func (b *oldestMergedEntrySortableBuffer) Prealloc(predictKeysAmount, predictDataSize int) {
+ b.entries = make(map[string][]byte, predictKeysAmount)
+ b.sortedBuf = make([]sortableBufferEntry, 0, predictKeysAmount*2)
+}
+
+func (b *oldestMergedEntrySortableBuffer) Write(w io.Writer) error {
+ var numBuf [binary.MaxVarintLen64]byte
+ entries := b.sortedBuf
+ for _, entry := range entries {
+ lk := int64(len(entry.key))
+ if entry.key == nil {
+ lk = -1
+ }
+ n := binary.PutVarint(numBuf[:], lk)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.key); err != nil {
+ return err
+ }
+ lv := int64(len(entry.value))
+ if entry.value == nil {
+ lv = -1
+ }
+ n = binary.PutVarint(numBuf[:], lv)
+ if _, err := w.Write(numBuf[:n]); err != nil {
+ return err
+ }
+ if _, err := w.Write(entry.value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func (b *oldestMergedEntrySortableBuffer) CheckFlushSize() bool {
+ return b.size >= b.optimalSize
+}
diff --git a/erigon-lib/etl/collector.go b/erigon-lib/etl/collector.go
new file mode 100644
index 00000000000..4a77ba2d368
--- /dev/null
+++ b/erigon-lib/etl/collector.go
@@ -0,0 +1,372 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/log/v3"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type LoadNextFunc func(originalK, k, v []byte) error
+type LoadFunc func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error
+type simpleLoadFunc func(k, v []byte) error
+
+// Collector performs the job of ETL Transform, but can also be used without "E" (Extract) part
+// as a Collect Transform Load
+type Collector struct {
+ buf Buffer
+ logPrefix string
+ tmpdir string
+ dataProviders []dataProvider
+ logLvl log.Lvl
+ bufType int
+ allFlushed bool
+ autoClean bool
+ logger log.Logger
+}
+
+// NewCollectorFromFiles creates collector from existing files (left over from previous unsuccessful loading)
+func NewCollectorFromFiles(logPrefix, tmpdir string, logger log.Logger) (*Collector, error) {
+ if _, err := os.Stat(tmpdir); os.IsNotExist(err) {
+ return nil, nil
+ }
+ dirEntries, err := os.ReadDir(tmpdir)
+ if err != nil {
+ return nil, fmt.Errorf("collector from files - reading directory %s: %w", tmpdir, err)
+ }
+ if len(dirEntries) == 0 {
+ return nil, nil
+ }
+ dataProviders := make([]dataProvider, len(dirEntries))
+ for i, dirEntry := range dirEntries {
+ fileInfo, err := dirEntry.Info()
+ if err != nil {
+ return nil, fmt.Errorf("collector from files - reading file info %s: %w", dirEntry.Name(), err)
+ }
+ var dataProvider fileDataProvider
+ dataProvider.file, err = os.Open(filepath.Join(tmpdir, fileInfo.Name()))
+ if err != nil {
+ return nil, fmt.Errorf("collector from files - opening file %s: %w", fileInfo.Name(), err)
+ }
+ dataProviders[i] = &dataProvider
+ }
+ return &Collector{dataProviders: dataProviders, allFlushed: true, autoClean: false, logPrefix: logPrefix}, nil
+}
+
+// NewCriticalCollector does not clean up temporary files if loading has failed
+func NewCriticalCollector(logPrefix, tmpdir string, sortableBuffer Buffer, logger log.Logger) *Collector {
+ c := NewCollector(logPrefix, tmpdir, sortableBuffer, logger)
+ c.autoClean = false
+ return c
+}
+
+func NewCollector(logPrefix, tmpdir string, sortableBuffer Buffer, logger log.Logger) *Collector {
+ return &Collector{autoClean: true, bufType: getTypeByBuffer(sortableBuffer), buf: sortableBuffer, logPrefix: logPrefix, tmpdir: tmpdir, logLvl: log.LvlInfo, logger: logger}
+}
+
+func (c *Collector) extractNextFunc(originalK, k []byte, v []byte) error {
+ c.buf.Put(k, v)
+ if !c.buf.CheckFlushSize() {
+ return nil
+ }
+ return c.flushBuffer(false)
+}
+
+func (c *Collector) Collect(k, v []byte) error {
+ return c.extractNextFunc(k, k, v)
+}
+
+func (c *Collector) LogLvl(v log.Lvl) { c.logLvl = v }
+
+func (c *Collector) flushBuffer(canStoreInRam bool) error {
+ if c.buf.Len() == 0 {
+ return nil
+ }
+
+ var provider dataProvider
+ if canStoreInRam && len(c.dataProviders) == 0 {
+ c.buf.Sort()
+ provider = KeepInRAM(c.buf)
+ c.allFlushed = true
+ } else {
+ fullBuf := c.buf
+ prevLen, prevSize := fullBuf.Len(), fullBuf.SizeLimit()
+ c.buf = getBufferByType(c.bufType, datasize.ByteSize(c.buf.SizeLimit()), c.buf)
+
+ doFsync := !c.autoClean /* is critical collector */
+ var err error
+ provider, err = FlushToDisk(c.logPrefix, fullBuf, c.tmpdir, doFsync, c.logLvl)
+ if err != nil {
+ return err
+ }
+ c.buf.Prealloc(prevLen/8, prevSize/8)
+ }
+ if provider != nil {
+ c.dataProviders = append(c.dataProviders, provider)
+ }
+ return nil
+}
+
+// Flush - an optional method (usually user don't need to call it) - forcing sort+flush current buffer.
+// it does trigger background sort and flush, reducing RAM-holding, etc...
+// it's useful when working with many collectors: to trigger background sort for all of them
+func (c *Collector) Flush() error {
+ if !c.allFlushed {
+ if e := c.flushBuffer(false); e != nil {
+ return e
+ }
+ }
+ return nil
+}
+
+func (c *Collector) Load(db kv.RwTx, toBucket string, loadFunc LoadFunc, args TransformArgs) error {
+ if c.autoClean {
+ defer c.Close()
+ }
+ args.BufferType = c.bufType
+
+ if !c.allFlushed {
+ if e := c.flushBuffer(true); e != nil {
+ return e
+ }
+ }
+
+ bucket := toBucket
+
+ var cursor kv.RwCursor
+ haveSortingGuaranties := isIdentityLoadFunc(loadFunc) // user-defined loadFunc may change ordering
+ var lastKey []byte
+ if bucket != "" { // passing empty bucket name is valid case for etl when DB modification is not expected
+ var err error
+ cursor, err = db.RwCursor(bucket)
+ if err != nil {
+ return err
+ }
+ var errLast error
+ lastKey, _, errLast = cursor.Last()
+ if errLast != nil {
+ return errLast
+ }
+ }
+
+ var canUseAppend bool
+ isDupSort := kv.ChaindataTablesCfg[bucket].Flags&kv.DupSort != 0 && !kv.ChaindataTablesCfg[bucket].AutoDupSortKeysConversion
+
+ logEvery := time.NewTicker(30 * time.Second)
+ defer logEvery.Stop()
+
+ i := 0
+ loadNextFunc := func(_, k, v []byte) error {
+ if i == 0 {
+ isEndOfBucket := lastKey == nil || bytes.Compare(lastKey, k) == -1
+ canUseAppend = haveSortingGuaranties && isEndOfBucket
+ }
+ i++
+
+ select {
+ default:
+ case <-logEvery.C:
+ logArs := []interface{}{"into", bucket}
+ if args.LogDetailsLoad != nil {
+ logArs = append(logArs, args.LogDetailsLoad(k, v)...)
+ } else {
+ logArs = append(logArs, "current_prefix", makeCurrentKeyStr(k))
+ }
+
+ c.logger.Log(c.logLvl, fmt.Sprintf("[%s] ETL [2/2] Loading", c.logPrefix), logArs...)
+ }
+
+ isNil := (c.bufType == SortableSliceBuffer && v == nil) ||
+ (c.bufType == SortableAppendBuffer && len(v) == 0) || //backward compatibility
+ (c.bufType == SortableOldestAppearedBuffer && len(v) == 0)
+ if isNil {
+ if canUseAppend {
+ return nil // nothing to delete after end of bucket
+ }
+ if err := cursor.Delete(k); err != nil {
+ return err
+ }
+ return nil
+ }
+ if canUseAppend {
+ if isDupSort {
+ if err := cursor.(kv.RwCursorDupSort).AppendDup(k, v); err != nil {
+ return fmt.Errorf("%s: bucket: %s, appendDup: k=%x, %w", c.logPrefix, bucket, k, err)
+ }
+ } else {
+ if err := cursor.Append(k, v); err != nil {
+ return fmt.Errorf("%s: bucket: %s, append: k=%x, v=%x, %w", c.logPrefix, bucket, k, v, err)
+ }
+ }
+
+ return nil
+ }
+ if err := cursor.Put(k, v); err != nil {
+ return fmt.Errorf("%s: put: k=%x, %w", c.logPrefix, k, err)
+ }
+ return nil
+ }
+
+ currentTable := ¤tTableReader{db, bucket}
+ simpleLoad := func(k, v []byte) error {
+ return loadFunc(k, v, currentTable, loadNextFunc)
+ }
+ if err := mergeSortFiles(c.logPrefix, c.dataProviders, simpleLoad, args, c.buf); err != nil {
+ return fmt.Errorf("loadIntoTable %s: %w", toBucket, err)
+ }
+ //logger.Trace(fmt.Sprintf("[%s] ETL Load done", c.logPrefix), "bucket", bucket, "records", i)
+ return nil
+}
+
+func (c *Collector) reset() {
+ if c.dataProviders != nil {
+ for _, p := range c.dataProviders {
+ p.Dispose()
+ }
+ c.dataProviders = nil
+ }
+ c.buf.Reset()
+ c.allFlushed = false
+}
+
+func (c *Collector) Close() {
+ c.reset()
+}
+
+// mergeSortFiles uses merge-sort to order the elements stored within the slice of providers,
+// regardless of ordering within the files the elements will be processed in order.
+// The first pass reads the first element from each of the providers and populates a heap with the key/value/provider index.
+// Later, the heap is popped to get the first element, the record is processed using the LoadFunc, and the provider is asked
+// for the next item, which is then added back to the heap.
+// The subsequent iterations pop the heap again and load up the provider associated with it to get the next element after processing LoadFunc.
+// this continues until all providers have reached their EOF.
+func mergeSortFiles(logPrefix string, providers []dataProvider, loadFunc simpleLoadFunc, args TransformArgs, buf Buffer) (err error) {
+ for _, provider := range providers {
+ if err := provider.Wait(); err != nil {
+ return err
+ }
+ }
+
+ h := &Heap{}
+ heapInit(h)
+ for i, provider := range providers {
+ if key, value, err := provider.Next(nil, nil); err == nil {
+ heapPush(h, &HeapElem{key, value, i})
+ } else /* we must have at least one entry per file */ {
+ eee := fmt.Errorf("%s: error reading first readers: n=%d current=%d provider=%s err=%w",
+ logPrefix, len(providers), i, provider, err)
+ panic(eee)
+ }
+ }
+
+ var prevK, prevV []byte
+
+ // Main loading loop
+ for h.Len() > 0 {
+ if err := common.Stopped(args.Quit); err != nil {
+ return err
+ }
+
+ element := heapPop(h)
+ provider := providers[element.TimeIdx]
+
+ // SortableOldestAppearedBuffer must guarantee that only 1 oldest value of key will appear
+ // but because size of buffer is limited - each flushed file does guarantee "oldest appeared"
+ // property, but files may overlap. files are sorted, just skip repeated keys here
+ if args.BufferType == SortableOldestAppearedBuffer {
+ if !bytes.Equal(prevK, element.Key) {
+ if err = loadFunc(element.Key, element.Value); err != nil {
+ return err
+ }
+ // Need to copy k because the underlying space will be re-used for the next key
+ prevK = common.Copy(element.Key)
+ }
+ } else if args.BufferType == SortableAppendBuffer {
+ if !bytes.Equal(prevK, element.Key) {
+ if prevK != nil {
+ if err = loadFunc(prevK, prevV); err != nil {
+ return err
+ }
+ }
+ // Need to copy k because the underlying space will be re-used for the next key
+ prevK = common.Copy(element.Key)
+ prevV = common.Copy(element.Value)
+ } else {
+ prevV = append(prevV, element.Value...)
+ }
+ } else if args.BufferType == SortableMergeBuffer {
+ if !bytes.Equal(prevK, element.Key) {
+ if prevK != nil {
+ if err = loadFunc(prevK, prevV); err != nil {
+ return err
+ }
+ }
+ // Need to copy k because the underlying space will be re-used for the next key
+ prevK = common.Copy(element.Key)
+ prevV = common.Copy(element.Value)
+ } else {
+ prevV = buf.(*oldestMergedEntrySortableBuffer).merge(prevV, element.Value)
+ }
+ } else {
+ if err = loadFunc(element.Key, element.Value); err != nil {
+ return err
+ }
+ }
+
+ if element.Key, element.Value, err = provider.Next(element.Key[:0], element.Value[:0]); err == nil {
+ heapPush(h, element)
+ } else if !errors.Is(err, io.EOF) {
+ return fmt.Errorf("%s: error while reading next element from disk: %w", logPrefix, err)
+ }
+ }
+
+ if args.BufferType == SortableAppendBuffer {
+ if prevK != nil {
+ if err = loadFunc(prevK, prevV); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func makeCurrentKeyStr(k []byte) string {
+ var currentKeyStr string
+ if k == nil {
+ currentKeyStr = "final"
+ } else if len(k) < 4 {
+ currentKeyStr = hex.EncodeToString(k)
+ } else if k[0] == 0 && k[1] == 0 && k[2] == 0 && k[3] == 0 && len(k) >= 8 { // if key has leading zeroes, show a bit more info
+ currentKeyStr = hex.EncodeToString(k)
+ } else {
+ currentKeyStr = hex.EncodeToString(k[:4])
+ }
+ return currentKeyStr
+}
diff --git a/erigon-lib/etl/dataprovider.go b/erigon-lib/etl/dataprovider.go
new file mode 100644
index 00000000000..25387da38f1
--- /dev/null
+++ b/erigon-lib/etl/dataprovider.go
@@ -0,0 +1,178 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/sync/errgroup"
+)
+
+type dataProvider interface {
+ Next(keyBuf, valBuf []byte) ([]byte, []byte, error)
+ Dispose() // Safe for repeated call, doesn't return error - means defer-friendly
+ Wait() error // join point for async providers
+}
+
+type fileDataProvider struct {
+ file *os.File
+ reader io.Reader
+ byteReader io.ByteReader // Different interface to the same object as reader
+ wg *errgroup.Group
+}
+
+// FlushToDisk - `doFsync` is true only for 'critical' collectors (which should not loose).
+func FlushToDisk(logPrefix string, b Buffer, tmpdir string, doFsync bool, lvl log.Lvl) (dataProvider, error) {
+ if b.Len() == 0 {
+ return nil, nil
+ }
+
+ provider := &fileDataProvider{reader: nil, wg: &errgroup.Group{}}
+ provider.wg.Go(func() error {
+ b.Sort()
+
+ // if we are going to create files in the system temp dir, we don't need any
+ // subfolders.
+ if tmpdir != "" {
+ if err := os.MkdirAll(tmpdir, 0755); err != nil {
+ return err
+ }
+ }
+
+ bufferFile, err := os.CreateTemp(tmpdir, "erigon-sortable-buf-")
+ if err != nil {
+ return err
+ }
+ provider.file = bufferFile
+
+ if doFsync {
+ defer bufferFile.Sync() //nolint:errcheck
+ }
+
+ w := bufio.NewWriterSize(bufferFile, BufIOSize)
+ defer w.Flush() //nolint:errcheck
+
+ _, fName := filepath.Split(bufferFile.Name())
+ if err = b.Write(w); err != nil {
+ return fmt.Errorf("error writing entries to disk: %w", err)
+ }
+ log.Log(lvl, fmt.Sprintf("[%s] Flushed buffer file", logPrefix), "name", fName)
+ return nil
+ })
+
+ return provider, nil
+}
+
+func (p *fileDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
+ if p.reader == nil {
+ _, err := p.file.Seek(0, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ r := bufio.NewReaderSize(p.file, BufIOSize)
+ p.reader = r
+ p.byteReader = r
+
+ }
+ return readElementFromDisk(p.reader, p.byteReader, keyBuf, valBuf)
+}
+
+func (p *fileDataProvider) Wait() error { return p.wg.Wait() }
+func (p *fileDataProvider) Dispose() {
+ if p.file != nil { //invariant: safe to call multiple time
+ p.Wait()
+ _ = p.file.Close()
+ _ = os.Remove(p.file.Name())
+ p.file = nil
+ }
+}
+
+func (p *fileDataProvider) String() string {
+ return fmt.Sprintf("%T(file: %s)", p, p.file.Name())
+}
+
+func readElementFromDisk(r io.Reader, br io.ByteReader, keyBuf, valBuf []byte) ([]byte, []byte, error) {
+ n, err := binary.ReadVarint(br)
+ if err != nil {
+ return nil, nil, err
+ }
+ if n >= 0 {
+ // Reallocate the slice or extend it if there is enough capacity
+ if keyBuf == nil || len(keyBuf)+int(n) > cap(keyBuf) {
+ newKeyBuf := make([]byte, len(keyBuf)+int(n))
+ copy(newKeyBuf, keyBuf)
+ keyBuf = newKeyBuf
+ } else {
+ keyBuf = keyBuf[:len(keyBuf)+int(n)]
+ }
+ if _, err = io.ReadFull(r, keyBuf[len(keyBuf)-int(n):]); err != nil {
+ return nil, nil, err
+ }
+ } else {
+ keyBuf = nil
+ }
+ if n, err = binary.ReadVarint(br); err != nil {
+ return nil, nil, err
+ }
+ if n >= 0 {
+ // Reallocate the slice or extend it if there is enough capacity
+ if valBuf == nil || len(valBuf)+int(n) > cap(valBuf) {
+ newValBuf := make([]byte, len(valBuf)+int(n))
+ copy(newValBuf, valBuf)
+ valBuf = newValBuf
+ } else {
+ valBuf = valBuf[:len(valBuf)+int(n)]
+ }
+ if _, err = io.ReadFull(r, valBuf[len(valBuf)-int(n):]); err != nil {
+ return nil, nil, err
+ }
+ } else {
+ valBuf = nil
+ }
+ return keyBuf, valBuf, err
+}
+
+type memoryDataProvider struct {
+ buffer Buffer
+ currentIndex int
+}
+
+func KeepInRAM(buffer Buffer) dataProvider {
+ return &memoryDataProvider{buffer, 0}
+}
+
+func (p *memoryDataProvider) Next(keyBuf, valBuf []byte) ([]byte, []byte, error) {
+ if p.currentIndex >= p.buffer.Len() {
+ return nil, nil, io.EOF
+ }
+ key, value := p.buffer.Get(p.currentIndex, keyBuf, valBuf)
+ p.currentIndex++
+ return key, value, nil
+}
+
+func (p *memoryDataProvider) Wait() error { return nil }
+func (p *memoryDataProvider) Dispose() {}
+
+func (p *memoryDataProvider) String() string {
+ return fmt.Sprintf("%T(buffer.Len: %d)", p, p.buffer.Len())
+}
diff --git a/erigon-lib/etl/etl.go b/erigon-lib/etl/etl.go
new file mode 100644
index 00000000000..9bac4418501
--- /dev/null
+++ b/erigon-lib/etl/etl.go
@@ -0,0 +1,174 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type CurrentTableReader interface {
+ Get([]byte) ([]byte, error)
+}
+
+type ExtractNextFunc func(originalK, k []byte, v []byte) error
+type ExtractFunc func(k []byte, v []byte, next ExtractNextFunc) error
+
+// NextKey generates the possible next key w/o changing the key length.
+// for [0x01, 0x01, 0x01] it will generate [0x01, 0x01, 0x02], etc
+func NextKey(key []byte) ([]byte, error) {
+ if len(key) == 0 {
+ return key, fmt.Errorf("could not apply NextKey for the empty key")
+ }
+ nextKey := common.Copy(key)
+ for i := len(key) - 1; i >= 0; i-- {
+ b := nextKey[i]
+ if b < 0xFF {
+ nextKey[i] = b + 1
+ return nextKey, nil
+ }
+ if b == 0xFF {
+ nextKey[i] = 0
+ }
+ }
+ return key, fmt.Errorf("overflow while applying NextKey")
+}
+
+// LoadCommitHandler is a callback called each time a new batch is being
+// loaded from files into a DB
+// * `key`: last commited key to the database (use etl.NextKey helper to use in LoadStartKey)
+// * `isDone`: true, if everything is processed
+type LoadCommitHandler func(db kv.Putter, key []byte, isDone bool) error
+type AdditionalLogArguments func(k, v []byte) (additionalLogArguments []interface{})
+
+type TransformArgs struct {
+ Quit <-chan struct{}
+ LogDetailsExtract AdditionalLogArguments
+ LogDetailsLoad AdditionalLogArguments
+ // [ExtractStartKey, ExtractEndKey)
+ ExtractStartKey []byte
+ ExtractEndKey []byte
+ BufferType int
+ BufferSize int
+}
+
+func Transform(
+ logPrefix string,
+ db kv.RwTx,
+ fromBucket string,
+ toBucket string,
+ tmpdir string,
+ extractFunc ExtractFunc,
+ loadFunc LoadFunc,
+ args TransformArgs,
+ logger log.Logger,
+) error {
+ bufferSize := BufferOptimalSize
+ if args.BufferSize > 0 {
+ bufferSize = datasize.ByteSize(args.BufferSize)
+ }
+ buffer := getBufferByType(args.BufferType, bufferSize, nil)
+ collector := NewCollector(logPrefix, tmpdir, buffer, logger)
+ defer collector.Close()
+
+ t := time.Now()
+ if err := extractBucketIntoFiles(logPrefix, db, fromBucket, args.ExtractStartKey, args.ExtractEndKey, collector, extractFunc, args.Quit, args.LogDetailsExtract, logger); err != nil {
+ return err
+ }
+ logger.Trace(fmt.Sprintf("[%s] Extraction finished", logPrefix), "took", time.Since(t))
+
+ defer func(t time.Time) {
+ logger.Trace(fmt.Sprintf("[%s] Load finished", logPrefix), "took", time.Since(t))
+ }(time.Now())
+ return collector.Load(db, toBucket, loadFunc, args)
+}
+
+// extractBucketIntoFiles - [startkey, endkey)
+func extractBucketIntoFiles(
+ logPrefix string,
+ db kv.Tx,
+ bucket string,
+ startkey []byte,
+ endkey []byte,
+ collector *Collector,
+ extractFunc ExtractFunc,
+ quit <-chan struct{},
+ additionalLogArguments AdditionalLogArguments,
+ logger log.Logger,
+) error {
+ logEvery := time.NewTicker(30 * time.Second)
+ defer logEvery.Stop()
+
+ c, err := db.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ for k, v, e := c.Seek(startkey); k != nil; k, v, e = c.Next() {
+ if e != nil {
+ return e
+ }
+ if err := common.Stopped(quit); err != nil {
+ return err
+ }
+ select {
+ default:
+ case <-logEvery.C:
+ logArs := []interface{}{"from", bucket}
+ if additionalLogArguments != nil {
+ logArs = append(logArs, additionalLogArguments(k, v)...)
+ } else {
+ logArs = append(logArs, "current_prefix", makeCurrentKeyStr(k))
+ }
+
+ logger.Info(fmt.Sprintf("[%s] ETL [1/2] Extracting", logPrefix), logArs...)
+ }
+ if endkey != nil && bytes.Compare(k, endkey) >= 0 {
+ // endKey is exclusive bound: [startkey, endkey)
+ return nil
+ }
+ if err := extractFunc(k, v, collector.extractNextFunc); err != nil {
+ return err
+ }
+ }
+ return collector.flushBuffer(true)
+}
+
+type currentTableReader struct {
+ getter kv.Tx
+ bucket string
+}
+
+func (s *currentTableReader) Get(key []byte) ([]byte, error) {
+ return s.getter.GetOne(s.bucket, key)
+}
+
+// IdentityLoadFunc loads entries as they are, without transformation
+var IdentityLoadFunc LoadFunc = func(k []byte, value []byte, _ CurrentTableReader, next LoadNextFunc) error {
+ return next(k, k, value)
+}
+
+func isIdentityLoadFunc(f LoadFunc) bool {
+ return f == nil || reflect.ValueOf(IdentityLoadFunc).Pointer() == reflect.ValueOf(f).Pointer()
+}
diff --git a/erigon-lib/etl/etl_test.go b/erigon-lib/etl/etl_test.go
new file mode 100644
index 00000000000..81b257df4b3
--- /dev/null
+++ b/erigon-lib/etl/etl_test.go
@@ -0,0 +1,647 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package etl
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func decodeHex(in string) []byte {
+ payload, err := hex.DecodeString(in)
+ if err != nil {
+ panic(err)
+ }
+ return payload
+}
+
+func TestEmptyValueIsNotANil(t *testing.T) {
+ logger := log.New()
+ t.Run("sortable", func(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewSortableBuffer(1), logger)
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ if k[0] == 1 {
+ require.Equal([]byte{}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+ })
+ t.Run("append", func(t *testing.T) {
+ // append buffer doesn't support nil values
+ collector := NewCollector(t.Name(), "", NewAppendBuffer(1), logger)
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ require.Nil(v)
+ return nil
+ }, TransformArgs{}))
+ })
+ t.Run("oldest", func(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewOldestEntryBuffer(1), logger)
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ if k[0] == 1 {
+ require.Equal([]byte{}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+ })
+ t.Run("merge", func(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewLatestMergedEntryMergedBuffer(1, func(v1 []byte, v2 []byte) []byte {
+ return append(v1, v2...)
+ }), logger)
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ if k[0] == 1 {
+ require.Equal([]byte{}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+ })
+}
+
+func TestEmptyKeyValue(t *testing.T) {
+ logger := log.New()
+ _, tx := memdb.NewTestTx(t)
+ require := require.New(t)
+ table := kv.ChaindataTables[0]
+ collector := NewCollector(t.Name(), "", NewSortableBuffer(1), logger)
+ defer collector.Close()
+ require.NoError(collector.Collect([]byte{2}, []byte{}))
+ require.NoError(collector.Collect([]byte{1}, []byte{1}))
+ require.NoError(collector.Load(tx, table, IdentityLoadFunc, TransformArgs{}))
+ v, err := tx.GetOne(table, []byte{2})
+ require.NoError(err)
+ require.Equal([]byte{}, v)
+ v, err = tx.GetOne(table, []byte{1})
+ require.NoError(err)
+ require.Equal([]byte{1}, v)
+
+ collector = NewCollector(t.Name(), "", NewSortableBuffer(1), logger)
+ defer collector.Close()
+ require.NoError(collector.Collect([]byte{}, nil))
+ require.NoError(collector.Load(tx, table, IdentityLoadFunc, TransformArgs{}))
+ v, err = tx.GetOne(table, []byte{})
+ require.NoError(err)
+ require.Nil(v)
+}
+
+func TestWriteAndReadBufferEntry(t *testing.T) {
+ b := NewSortableBuffer(128)
+ buffer := bytes.NewBuffer(make([]byte, 0))
+
+ entries := make([]sortableBufferEntry, 100)
+ for i := range entries {
+ entries[i].key = []byte(fmt.Sprintf("key-%d", i))
+ entries[i].value = []byte(fmt.Sprintf("value-%d", i))
+ b.Put(entries[i].key, entries[i].value)
+ }
+
+ if err := b.Write(buffer); err != nil {
+ t.Error(err)
+ }
+
+ bb := buffer.Bytes()
+
+ readBuffer := bytes.NewReader(bb)
+
+ for i := range entries {
+ k, v, err := readElementFromDisk(readBuffer, readBuffer, nil, nil)
+ if err != nil {
+ t.Error(err)
+ }
+ assert.Equal(t, string(entries[i].key), string(k))
+ assert.Equal(t, string(entries[i].value), string(v))
+ }
+
+ _, _, err := readElementFromDisk(readBuffer, readBuffer, nil, nil)
+ assert.Equal(t, io.EOF, err)
+}
+
+func TestNextKey(t *testing.T) {
+ for _, tc := range []string{
+ "00000001->00000002",
+ "000000FF->00000100",
+ "FEFFFFFF->FF000000",
+ } {
+ parts := strings.Split(tc, "->")
+ input := decodeHex(parts[0])
+ expectedOutput := decodeHex(parts[1])
+ actualOutput, err := NextKey(input)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedOutput, actualOutput)
+ }
+}
+
+func TestNextKeyErr(t *testing.T) {
+ for _, tc := range []string{
+ "",
+ "FFFFFF",
+ } {
+ input := decodeHex(tc)
+ _, err := NextKey(input)
+ assert.Error(t, err)
+ }
+}
+
+func TestFileDataProviders(t *testing.T) {
+ logger := log.New()
+ // test invariant when we go through files (> 1 buffer)
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+
+ generateTestData(t, tx, sourceBucket, 10)
+
+ collector := NewCollector(t.Name(), "", NewSortableBuffer(1), logger)
+
+ err := extractBucketIntoFiles("logPrefix", tx, sourceBucket, nil, nil, collector, testExtractToMapFunc, nil, nil, logger)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 10, len(collector.dataProviders))
+
+ for _, p := range collector.dataProviders {
+ fp, ok := p.(*fileDataProvider)
+ assert.True(t, ok)
+ err := fp.Wait()
+ require.NoError(t, err)
+ _, err = os.Stat(fp.file.Name())
+ assert.NoError(t, err)
+ }
+
+ collector.Close()
+
+ for _, p := range collector.dataProviders {
+ fp, ok := p.(*fileDataProvider)
+ assert.True(t, ok)
+ _, err = os.Stat(fp.file.Name())
+ assert.True(t, os.IsNotExist(err))
+ }
+}
+
+func TestRAMDataProviders(t *testing.T) {
+ logger := log.New()
+ // test invariant when we go through memory (1 buffer)
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ generateTestData(t, tx, sourceBucket, 10)
+
+ collector := NewCollector(t.Name(), "", NewSortableBuffer(BufferOptimalSize), logger)
+ err := extractBucketIntoFiles("logPrefix", tx, sourceBucket, nil, nil, collector, testExtractToMapFunc, nil, nil, logger)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 1, len(collector.dataProviders))
+
+ for _, p := range collector.dataProviders {
+ mp, ok := p.(*memoryDataProvider)
+ assert.True(t, ok)
+ assert.Equal(t, 10, mp.buffer.Len())
+ }
+}
+
+func TestTransformRAMOnly(t *testing.T) {
+ logger := log.New()
+ // test invariant when we only have one buffer and it fits into RAM (exactly 1 buffer)
+ _, tx := memdb.NewTestTx(t)
+
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ generateTestData(t, tx, sourceBucket, 20)
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractToMapFunc,
+ testLoadFromMapFunc,
+ TransformArgs{},
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBuckets(t, tx, sourceBucket, destBucket, nil)
+}
+
+func TestEmptySourceBucket(t *testing.T) {
+ logger := log.New()
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractToMapFunc,
+ testLoadFromMapFunc,
+ TransformArgs{},
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBuckets(t, tx, sourceBucket, destBucket, nil)
+}
+
+func TestTransformExtractStartKey(t *testing.T) {
+ logger := log.New()
+ // test invariant when we only have one buffer and it fits into RAM (exactly 1 buffer)
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ generateTestData(t, tx, sourceBucket, 10)
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractToMapFunc,
+ testLoadFromMapFunc,
+ TransformArgs{ExtractStartKey: []byte(fmt.Sprintf("%10d-key-%010d", 5, 5))},
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBuckets(t, tx, sourceBucket, destBucket, []byte(fmt.Sprintf("%10d-key-%010d", 5, 5)))
+}
+
+func TestTransformThroughFiles(t *testing.T) {
+ logger := log.New()
+ // test invariant when we go through files (> 1 buffer)
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ generateTestData(t, tx, sourceBucket, 10)
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractToMapFunc,
+ testLoadFromMapFunc,
+ TransformArgs{
+ BufferSize: 1,
+ },
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBuckets(t, tx, sourceBucket, destBucket, nil)
+}
+
+func TestTransformDoubleOnExtract(t *testing.T) {
+ logger := log.New()
+ // test invariant when extractFunc multiplies the data 2x
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ generateTestData(t, tx, sourceBucket, 10)
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractDoubleToMapFunc,
+ testLoadFromMapFunc,
+ TransformArgs{},
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBucketsDouble(t, tx, sourceBucket, destBucket)
+}
+
+func TestTransformDoubleOnLoad(t *testing.T) {
+ logger := log.New()
+ // test invariant when loadFunc multiplies the data 2x
+ _, tx := memdb.NewTestTx(t)
+ sourceBucket := kv.ChaindataTables[0]
+ destBucket := kv.ChaindataTables[1]
+ generateTestData(t, tx, sourceBucket, 10)
+ err := Transform(
+ "logPrefix",
+ tx,
+ sourceBucket,
+ destBucket,
+ "", // temp dir
+ testExtractToMapFunc,
+ testLoadFromMapDoubleFunc,
+ TransformArgs{},
+ logger,
+ )
+ assert.Nil(t, err)
+ compareBucketsDouble(t, tx, sourceBucket, destBucket)
+}
+
+func generateTestData(t *testing.T, db kv.Putter, bucket string, count int) {
+ t.Helper()
+ for i := 0; i < count; i++ {
+ k := []byte(fmt.Sprintf("%10d-key-%010d", i, i))
+ v := []byte(fmt.Sprintf("val-%099d", i))
+ err := db.Put(bucket, k, v)
+ assert.NoError(t, err)
+ }
+}
+
+func testExtractToMapFunc(k, v []byte, next ExtractNextFunc) error {
+ valueMap := make(map[string][]byte)
+ valueMap["value"] = v
+ out, err := json.Marshal(valueMap)
+ if err != nil {
+ return err
+ }
+ return next(k, k, out)
+}
+
+func testExtractDoubleToMapFunc(k, v []byte, next ExtractNextFunc) error {
+ var err error
+ valueMap := make(map[string][]byte)
+ valueMap["value"] = append(v, 0xAA)
+ k1 := append(k, 0xAA)
+ out, err := json.Marshal(valueMap)
+ if err != nil {
+ panic(err)
+ }
+
+ err = next(k, k1, out)
+ if err != nil {
+ return err
+ }
+
+ valueMap = make(map[string][]byte)
+ valueMap["value"] = append(v, 0xBB)
+ k2 := append(k, 0xBB)
+ out, err = json.Marshal(valueMap)
+ if err != nil {
+ panic(err)
+ }
+ return next(k, k2, out)
+}
+
+func testLoadFromMapFunc(k []byte, v []byte, _ CurrentTableReader, next LoadNextFunc) error {
+ valueMap := make(map[string][]byte)
+ err := json.Unmarshal(v, &valueMap)
+ if err != nil {
+ return err
+ }
+ realValue := valueMap["value"]
+ return next(k, k, realValue)
+}
+
+func testLoadFromMapDoubleFunc(k []byte, v []byte, _ CurrentTableReader, next LoadNextFunc) error {
+ valueMap := make(map[string][]byte)
+ err := json.Unmarshal(v, &valueMap)
+ if err != nil {
+ return err
+ }
+ realValue := valueMap["value"]
+
+ err = next(k, append(k, 0xAA), append(realValue, 0xAA))
+ if err != nil {
+ return err
+ }
+ return next(k, append(k, 0xBB), append(realValue, 0xBB))
+}
+
+func compareBuckets(t *testing.T, db kv.Tx, b1, b2 string, startKey []byte) {
+ t.Helper()
+ b1Map := make(map[string]string)
+ err := db.ForEach(b1, startKey, func(k, v []byte) error {
+ b1Map[fmt.Sprintf("%x", k)] = fmt.Sprintf("%x", v)
+ return nil
+ })
+ assert.NoError(t, err)
+ b2Map := make(map[string]string)
+ err = db.ForEach(b2, nil, func(k, v []byte) error {
+ b2Map[fmt.Sprintf("%x", k)] = fmt.Sprintf("%x", v)
+ return nil
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, b1Map, b2Map)
+}
+
+func compareBucketsDouble(t *testing.T, db kv.Tx, b1, b2 string) {
+ t.Helper()
+ b1Map := make(map[string]string)
+ err := db.ForEach(b1, nil, func(k, v []byte) error {
+ b1Map[fmt.Sprintf("%x", append(k, 0xAA))] = fmt.Sprintf("%x", append(v, 0xAA))
+ b1Map[fmt.Sprintf("%x", append(k, 0xBB))] = fmt.Sprintf("%x", append(v, 0xBB))
+ return nil
+ })
+ assert.NoError(t, err)
+ b2Map := make(map[string]string)
+ err = db.ForEach(b2, nil, func(k, v []byte) error {
+ b2Map[fmt.Sprintf("%x", k)] = fmt.Sprintf("%x", v)
+ return nil
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, b1Map, b2Map)
+}
+
+func TestReuseCollectorAfterLoad(t *testing.T) {
+ logger := log.New()
+ buf := NewSortableBuffer(128)
+ c := NewCollector("", t.TempDir(), buf, logger)
+
+ err := c.Collect([]byte{1}, []byte{2})
+ require.NoError(t, err)
+ see := 0
+ err = c.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ see++
+ return nil
+ }, TransformArgs{})
+ require.NoError(t, err)
+ require.Equal(t, 1, see)
+
+ // buffers are not lost
+ require.Zero(t, len(buf.data))
+ require.Zero(t, len(buf.lens))
+ require.Zero(t, len(buf.offsets))
+ require.NotZero(t, cap(buf.data))
+ require.NotZero(t, cap(buf.lens))
+ require.NotZero(t, cap(buf.offsets))
+
+ // teset that no data visible
+ see = 0
+ err = c.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ see++
+ return nil
+ }, TransformArgs{})
+ require.NoError(t, err)
+ require.Equal(t, 0, see)
+
+ // reuse
+ see = 0
+ err = c.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ see++
+ return nil
+ }, TransformArgs{})
+ require.NoError(t, err)
+ require.Equal(t, 0, see)
+
+ err = c.Collect([]byte{3}, []byte{4})
+ require.NoError(t, err)
+ see = 0
+ err = c.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ see++
+ return nil
+ }, TransformArgs{})
+ require.NoError(t, err)
+ require.Equal(t, 1, see)
+}
+
+func TestMerge(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewLatestMergedEntryMergedBuffer(4, func(v1 []byte, v2 []byte) []byte {
+ return append(v1, v2...)
+ }), log.New())
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{1}))
+ require.NoError(collector.Collect([]byte{1}, []byte{2}))
+ require.NoError(collector.Collect([]byte{1}, []byte{3}))
+ require.NoError(collector.Collect([]byte{1}, []byte{4}))
+ require.NoError(collector.Collect([]byte{1}, []byte{5}))
+ require.NoError(collector.Collect([]byte{1}, []byte{6}))
+ require.NoError(collector.Collect([]byte{1}, []byte{7}))
+ require.NoError(collector.Collect([]byte{2}, []byte{10}))
+ require.NoError(collector.Collect([]byte{2}, []byte{20}))
+ require.NoError(collector.Collect([]byte{2}, []byte{30}))
+ require.NoError(collector.Collect([]byte{2}, []byte{40}))
+ require.NoError(collector.Collect([]byte{2}, []byte{50}))
+ require.NoError(collector.Collect([]byte{2}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Collect([]byte{3}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ if k[0] == 1 {
+ require.Equal([]byte{1, 2, 3, 4, 5, 6, 7}, v)
+ } else if k[0] == 2 {
+ require.Equal([]byte{10, 20, 30, 40, 50}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+}
+
+func TestAppend(t *testing.T) {
+ // append buffer doesn't support nil values
+ collector := NewCollector(t.Name(), "", NewAppendBuffer(4), log.New())
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{1}))
+ require.NoError(collector.Collect([]byte{1}, []byte{2}))
+ require.NoError(collector.Collect([]byte{1}, []byte{3}))
+ require.NoError(collector.Collect([]byte{1}, []byte{4}))
+ require.NoError(collector.Collect([]byte{1}, []byte{5}))
+ require.NoError(collector.Collect([]byte{1}, []byte{6}))
+ require.NoError(collector.Collect([]byte{1}, []byte{7}))
+ require.NoError(collector.Collect([]byte{2}, []byte{10}))
+ require.NoError(collector.Collect([]byte{2}, []byte{20}))
+ require.NoError(collector.Collect([]byte{2}, []byte{30}))
+ require.NoError(collector.Collect([]byte{2}, []byte{40}))
+ require.NoError(collector.Collect([]byte{2}, []byte{50}))
+ require.NoError(collector.Collect([]byte{2}, []byte{}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Collect([]byte{3}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ fmt.Printf("%x %x\n", k, v)
+ if k[0] == 1 {
+ require.Equal([]byte{1, 2, 3, 4, 5, 6, 7}, v)
+ } else if k[0] == 2 {
+ require.Equal([]byte{10, 20, 30, 40, 50}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+}
+
+func TestOldest(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewOldestEntryBuffer(1), log.New())
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{1}))
+ require.NoError(collector.Collect([]byte{1}, []byte{2}))
+ require.NoError(collector.Collect([]byte{1}, []byte{3}))
+ require.NoError(collector.Collect([]byte{1}, []byte{4}))
+ require.NoError(collector.Collect([]byte{1}, []byte{5}))
+ require.NoError(collector.Collect([]byte{1}, []byte{6}))
+ require.NoError(collector.Collect([]byte{1}, []byte{7}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ if k[0] == 1 {
+ require.Equal([]byte{1}, v)
+ } else {
+ require.Nil(v)
+ }
+ return nil
+ }, TransformArgs{}))
+}
+
+func TestSortable(t *testing.T) {
+ collector := NewCollector(t.Name(), "", NewSortableBuffer(1), log.New())
+ defer collector.Close()
+ require := require.New(t)
+ require.NoError(collector.Collect([]byte{1}, []byte{1}))
+ require.NoError(collector.Collect([]byte{1}, []byte{2}))
+ require.NoError(collector.Collect([]byte{1}, []byte{3}))
+ require.NoError(collector.Collect([]byte{1}, []byte{4}))
+ require.NoError(collector.Collect([]byte{1}, []byte{5}))
+ require.NoError(collector.Collect([]byte{1}, []byte{6}))
+ require.NoError(collector.Collect([]byte{1}, []byte{7}))
+ require.NoError(collector.Collect([]byte{2}, []byte{1}))
+ require.NoError(collector.Collect([]byte{2}, []byte{20}))
+ require.NoError(collector.Collect([]byte{2}, nil))
+
+ keys, vals := [][]byte{}, [][]byte{}
+ require.NoError(collector.Load(nil, "", func(k, v []byte, table CurrentTableReader, next LoadNextFunc) error {
+ keys = append(keys, k)
+ vals = append(vals, v)
+ return nil
+ }, TransformArgs{}))
+
+ require.Equal([][]byte{{1}, {1}, {1}, {1}, {1}, {1}, {1}, {2}, {2}, {2}}, keys)
+ require.Equal([][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {1}, {20}, nil}, vals)
+
+}
diff --git a/erigon-lib/etl/heap.go b/erigon-lib/etl/heap.go
new file mode 100644
index 00000000000..03eea253c81
--- /dev/null
+++ b/erigon-lib/etl/heap.go
@@ -0,0 +1,122 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distwributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+import (
+ "bytes"
+)
+
+type HeapElem struct {
+ Key []byte
+ Value []byte
+ TimeIdx int
+}
+
+type Heap struct {
+ elems []*HeapElem
+}
+
+func (h *Heap) Len() int {
+ return len(h.elems)
+}
+
+func (h *Heap) Less(i, j int) bool {
+ if c := bytes.Compare(h.elems[i].Key, h.elems[j].Key); c != 0 {
+ return c < 0
+ }
+ return h.elems[i].TimeIdx < h.elems[j].TimeIdx
+}
+
+func (h *Heap) Swap(i, j int) {
+ h.elems[i], h.elems[j] = h.elems[j], h.elems[i]
+}
+
+func (h *Heap) Push(x *HeapElem) {
+ h.elems = append(h.elems, x)
+}
+
+func (h *Heap) Pop() *HeapElem {
+ old := h.elems
+ n := len(old) - 1
+ x := old[n]
+ //old[n].Key, old[n].Value, old[n].TimeIdx = nil, nil, 0
+ old[n] = nil
+ h.elems = old[0:n]
+ return x
+}
+
+// ------ Copy-Paste of `container/heap/heap.go` without interface conversion
+
+// Init establishes the heap invariants required by the other routines in this package.
+// Init is idempotent with respect to the heap invariants
+// and may be called whenever the heap invariants may have been invalidated.
+// The complexity is O(n) where n = h.Len().
+func heapInit(h *Heap) {
+ // heapify
+ n := h.Len()
+ for i := n/2 - 1; i >= 0; i-- {
+ down(h, i, n)
+ }
+}
+
+// Push pushes the element x onto the heap.
+// The complexity is O(log n) where n = h.Len().
+func heapPush(h *Heap, x *HeapElem) {
+ h.Push(x)
+ up(h, h.Len()-1)
+}
+
+// Pop removes and returns the minimum element (according to Less) from the heap.
+// The complexity is O(log n) where n = h.Len().
+// Pop is equivalent to Remove(h, 0).
+func heapPop(h *Heap) *HeapElem {
+ n := h.Len() - 1
+ h.Swap(0, n)
+ down(h, 0, n)
+ return h.Pop()
+}
+
+func up(h *Heap, j int) {
+ for {
+ i := (j - 1) / 2 // parent
+ if i == j || !h.Less(j, i) {
+ break
+ }
+ h.Swap(i, j)
+ j = i
+ }
+}
+
+func down(h *Heap, i0, n int) bool {
+ i := i0
+ for {
+ j1 := 2*i + 1
+ if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+ break
+ }
+ j := j1 // left child
+ if j2 := j1 + 1; j2 < n && h.Less(j2, j1) {
+ j = j2 // = 2*i + 2 // right child
+ }
+ if !h.Less(j, i) {
+ break
+ }
+ h.Swap(i, j)
+ i = j
+ }
+ return i > i0
+}
diff --git a/erigon-lib/etl/progress.go b/erigon-lib/etl/progress.go
new file mode 100644
index 00000000000..600af25ad94
--- /dev/null
+++ b/erigon-lib/etl/progress.go
@@ -0,0 +1,24 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package etl
+
+func ProgressFromKey(k []byte) int {
+ if len(k) < 1 {
+ return 0
+ }
+ return int(float64(k[0]>>4) * 3.3)
+}
diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod
new file mode 100644
index 00000000000..f041253f649
--- /dev/null
+++ b/erigon-lib/go.mod
@@ -0,0 +1,150 @@
+module github.com/ledgerwatch/erigon-lib
+
+go 1.20
+
+require (
+ github.com/erigontech/mdbx-go v0.27.21
+ github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60
+ github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d
+ github.com/ledgerwatch/log/v3 v3.9.0
+ github.com/ledgerwatch/secp256k1 v1.0.0
+)
+
+require (
+ github.com/RoaringBitmap/roaring v1.2.3
+ github.com/anacrolix/dht/v2 v2.20.0
+ github.com/anacrolix/go-libutp v1.3.1
+ github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4
+ github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8
+ github.com/aws/aws-sdk-go-v2 v1.21.2
+ github.com/aws/aws-sdk-go-v2/config v1.19.0
+ github.com/aws/aws-sdk-go-v2/credentials v1.13.43
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.40.2
+ github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
+ github.com/containerd/cgroups/v3 v3.0.2
+ github.com/crate-crypto/go-kzg-4844 v0.7.0
+ github.com/deckarep/golang-set/v2 v2.3.1
+ github.com/edsrzf/mmap-go v1.1.0
+ github.com/go-stack/stack v1.8.1
+ github.com/gofrs/flock v0.8.1
+ github.com/google/btree v1.1.2
+ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
+ github.com/hashicorp/golang-lru/v2 v2.0.6
+ github.com/holiman/uint256 v1.2.3
+ github.com/matryer/moq v0.3.3
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
+ github.com/pelletier/go-toml/v2 v2.1.0
+ github.com/prometheus/client_golang v1.17.0
+ github.com/prometheus/client_model v0.5.0
+ github.com/quasilyte/go-ruleguard/dsl v0.3.22
+ github.com/spaolacci/murmur3 v1.1.0
+ github.com/stretchr/testify v1.8.4
+ github.com/tidwall/btree v1.6.0
+ golang.org/x/crypto v0.16.0
+ golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+ golang.org/x/sync v0.5.0
+ golang.org/x/sys v0.15.0
+ golang.org/x/time v0.5.0
+ google.golang.org/grpc v1.59.0
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
+ google.golang.org/protobuf v1.31.0
+)
+
+require (
+ github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect
+ github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
+ github.com/anacrolix/chansync v0.3.0 // indirect
+ github.com/anacrolix/envpprof v1.3.0 // indirect
+ github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 // indirect
+ github.com/anacrolix/missinggo v1.3.0 // indirect
+ github.com/anacrolix/missinggo/perf v1.0.0 // indirect
+ github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect
+ github.com/anacrolix/mmsg v1.0.0 // indirect
+ github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 // indirect
+ github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 // indirect
+ github.com/anacrolix/sync v0.4.0 // indirect
+ github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
+ github.com/anacrolix/utp v0.1.0 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
+ github.com/aws/smithy-go v1.15.0 // indirect
+ github.com/bahlo/generic-list-go v0.2.0 // indirect
+ github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bits-and-blooms/bitset v1.7.0 // indirect
+ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cilium/ebpf v0.9.1 // indirect
+ github.com/consensys/bavard v0.1.13 // indirect
+ github.com/consensys/gnark-crypto v0.12.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/docker/go-units v0.4.0 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/go-llsqlite/adapter v0.0.0-20230912124304-94ed0e573c23 // indirect
+ github.com/go-llsqlite/crawshaw v0.0.0-20230910110433-7e901377eb6c // indirect
+ github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/godbus/dbus/v5 v5.0.4 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/uuid v1.3.1 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/huandu/xstrings v1.4.0 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.19 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/mmcloughlin/addchain v0.4.0 // indirect
+ github.com/mschoch/smat v0.2.0 // indirect
+ github.com/opencontainers/runtime-spec v1.0.2 // indirect
+ github.com/pion/datachannel v1.5.2 // indirect
+ github.com/pion/dtls/v2 v2.2.4 // indirect
+ github.com/pion/ice/v2 v2.2.6 // indirect
+ github.com/pion/interceptor v0.1.11 // indirect
+ github.com/pion/logging v0.2.2 // indirect
+ github.com/pion/mdns v0.0.5 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.9 // indirect
+ github.com/pion/rtp v1.7.13 // indirect
+ github.com/pion/sctp v1.8.2 // indirect
+ github.com/pion/sdp/v3 v3.0.5 // indirect
+ github.com/pion/srtp/v2 v2.0.9 // indirect
+ github.com/pion/stun v0.3.5 // indirect
+ github.com/pion/transport v0.13.1 // indirect
+ github.com/pion/transport/v2 v2.0.0 // indirect
+ github.com/pion/turn/v2 v2.0.8 // indirect
+ github.com/pion/udp v0.1.4 // indirect
+ github.com/pion/webrtc/v3 v3.1.42 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.11.1 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
+ github.com/sirupsen/logrus v1.9.0 // indirect
+ go.etcd.io/bbolt v1.3.6 // indirect
+ go.opentelemetry.io/otel v1.8.0 // indirect
+ go.opentelemetry.io/otel/trace v1.8.0 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.19.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.16.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ modernc.org/libc v1.24.1 // indirect
+ modernc.org/mathutil v1.6.0 // indirect
+ modernc.org/memory v1.6.0 // indirect
+ modernc.org/sqlite v1.26.0 // indirect
+ rsc.io/tmplfunc v0.0.3 // indirect
+ zombiezen.com/go/sqlite v0.13.1 // indirect
+)
diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum
new file mode 100644
index 00000000000..9cec96b6b6b
--- /dev/null
+++ b/erigon-lib/go.sum
@@ -0,0 +1,686 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw=
+crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk=
+crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
+filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
+github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
+github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
+github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
+github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0=
+github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k=
+github.com/alecthomas/assert/v2 v2.0.0-alpha3 h1:pcHeMvQ3OMstAWgaeaXIAL8uzB9xMm2zlxt+/4ml8lk=
+github.com/alecthomas/atomic v0.1.0-alpha2 h1:dqwXmax66gXvHhsOS4pGPZKqYOlTkapELkLb3MNdlH8=
+github.com/alecthomas/atomic v0.1.0-alpha2/go.mod h1:zD6QGEyw49HIq19caJDc2NMXAy8rNi9ROrxtMXATfyI=
+github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142 h1:8Uy0oSf5co/NZXje7U1z8Mpep++QJOldL2hs/sBQf48=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U=
+github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
+github.com/anacrolix/dht/v2 v2.20.0 h1:eDx9lfE9iCSf5sPK0290GToHURNhEFuUGN8iyvhvJDk=
+github.com/anacrolix/dht/v2 v2.20.0/go.mod h1:SDGC+sEs1pnO2sJGYuhvIis7T8749dDHNfcjtdH4e3g=
+github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
+github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
+github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
+github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk=
+github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0=
+github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 h1:Kmcl3I9K2+5AdnnR7hvrnVT0TLeFWWMa9bxnm55aVIg=
+github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
+github.com/anacrolix/go-libutp v1.3.1 h1:idJzreNLl+hNjGC3ZnUOjujEaryeOGgkwHLqSGoige0=
+github.com/anacrolix/go-libutp v1.3.1/go.mod h1:heF41EC8kN0qCLMokLBVkB8NXiLwx3t8R8810MTNI5o=
+github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
+github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
+github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4=
+github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68=
+github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4 h1:01OE3pdiBGIZGyQb6cIAu+QfaNhBR9k5MVmLsl+DVbE=
+github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY=
+github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM=
+github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM=
+github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
+github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
+github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
+github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
+github.com/anacrolix/missinggo v1.3.0 h1:06HlMsudotL7BAELRZs0yDZ4yVXsHXGi323QBjAVASw=
+github.com/anacrolix/missinggo v1.3.0/go.mod h1:bqHm8cE8xr+15uVfMG3BFui/TxyB6//H5fwlq/TeqMc=
+github.com/anacrolix/missinggo/perf v1.0.0 h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw=
+github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
+github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY=
+github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA=
+github.com/anacrolix/missinggo/v2 v2.5.2/go.mod h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE=
+github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 h1:W/oGeHhYwxueeiDjQfmK9G+X9M2xJgfTtow62v0TWAs=
+github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac=
+github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
+github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg=
+github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc=
+github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 h1:lOtCD+LzoD1g7bowhYJNR++uV+FyY5bTZXKwnPex9S8=
+github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7/go.mod h1:zJv1JF9AqdZiHwxqPgjuOZDGWER6nyE48WBCi/OOrMM=
+github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg=
+github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 h1:aMiRi2kOOd+nG64suAmFMVnNK2E6GsnLif7ia9tI3cA=
+github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496/go.mod h1:DBm8/1OXm4A4RZ6Xa9u/eOsjeAXCaoRYvd2JzlskXeM=
+github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
+github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
+github.com/anacrolix/sync v0.4.0 h1:T+MdO/u87ir/ijWsTFsPYw5jVm0SMm4kVpg8t4KF38o=
+github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
+github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
+github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
+github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
+github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8 h1:6EyYT2DsEOZ/WwTDsQ0HXHI996IdT0MZCGP2L6xvfNg=
+github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8/go.mod h1:Ma/WtLey9lU97u2i55LUJ8AnXaL2GfEK6pWh7/9v1hI=
+github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA=
+github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs=
+github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4=
+github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
+github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14 h1:Sc82v7tDQ/vdU1WtuSyzZ1I7y/68j//HJ6uozND1IDs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14/go.mod h1:9NCTOURS8OpxvoAVHq79LK81/zC78hfRWFn+aL0SPcY=
+github.com/aws/aws-sdk-go-v2/config v1.19.0 h1:AdzDvwH6dWuVARCl3RTLGRc4Ogy+N7yLFxVxXe1ClQ0=
+github.com/aws/aws-sdk-go-v2/config v1.19.0/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 h1:wmGLw2i8ZTlHLw7a9ULGfQbuccw8uIiNr6sol5bFzc8=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 h1:7R8uRYyXzdD71KWVCL78lJZltah6VVznXBazvKjfH58=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 h1:skaFGzv+3kA+v2BPKhuekeb1Hbb105+44r8ASC+q5SE=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 h1:9ulSU5ClouoPIYhDQdg9tpl83d5Yb91PXTKK+17q+ow=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.40.2 h1:Ll5/YVCOzRB+gxPqs2uD0R7/MyATC0w85626glSKmp4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.40.2/go.mod h1:Zjfqt7KhQK+PO1bbOsFNzKgaq7TcxzmEoDWN8lM0qzQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
+github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
+github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
+github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
+github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d h1:2qVb9bsAMtmAfnxXltm+6eBzrrS7SZ52c3SedsulaMI=
+github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
+github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
+github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
+github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
+github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
+github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
+github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
+github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
+github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
+github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/erigontech/mdbx-go v0.27.21 h1:Pv47QIiRXR8Nv+nltZteLm4xkRwuvqmOCjzZj9X0s1A=
+github.com/erigontech/mdbx-go v0.27.21/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4=
+github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-llsqlite/adapter v0.0.0-20230912124304-94ed0e573c23 h1:7krbnPREaxbmEaAkZovTNCMjmiZXEy/Gz9isFbqFK0I=
+github.com/go-llsqlite/adapter v0.0.0-20230912124304-94ed0e573c23/go.mod h1:DADrR88ONKPPeSGjFp5iEN55Arx3fi2qXZeKCYDpbmU=
+github.com/go-llsqlite/crawshaw v0.0.0-20230910110433-7e901377eb6c h1:pm7z8uwA2q3s8fAsJmKuGckNohqIrw2PRtv6yJ6z0Ro=
+github.com/go-llsqlite/crawshaw v0.0.0-20230910110433-7e901377eb6c/go.mod h1:UdTSzmN3nr5dJNuZCsbPLfhSQB76u16rWh8pn+WFx9Q=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
+github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.6 h1:3xi/Cafd1NaoEnS/yDssIiuVeDVywU0QdFGl3aQaQHM=
+github.com/hashicorp/golang-lru/v2 v2.0.6/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
+github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
+github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60 h1:bsZ6XWPJkNp1DeVHkaX9/+/Tqg7+r5/IkRPlyc4Ztq4=
+github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20231210032908-6ff6f4c91c60/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo=
+github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d h1:7aB9lKmUGAaWt4TzXnGLzJSZkhyuqREMmaao+Gn5Ky0=
+github.com/ledgerwatch/interfaces v0.0.0-20231209102305-b17e86fbe07d/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc=
+github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk=
+github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE=
+github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ=
+github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak=
+github.com/matryer/moq v0.3.3 h1:pScMH9VyrdT4S93yiLpVyU8rCDqGQr24uOyBxmktG5Q=
+github.com/matryer/moq v0.3.3/go.mod h1:RJ75ZZZD71hejp39j4crZLsEDszGk6iH4v4YsWFKH4s=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
+github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E=
+github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ=
+github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus=
+github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY=
+github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg=
+github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw=
+github.com/pion/ice/v2 v2.2.6 h1:R/vaLlI1J2gCx141L5PEwtuGAGcyS6e7E0hDeJFq5Ig=
+github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE=
+github.com/pion/interceptor v0.1.11 h1:00U6OlqxA3FFB50HSg25J/8cWi7P6FbSzw4eFn24Bvs=
+github.com/pion/interceptor v0.1.11/go.mod h1:tbtKjZY14awXd7Bq0mmWvgtHB5MDaRN7HV3OZ/uy7s8=
+github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/mdns v0.0.5 h1:Q2oj/JB3NqfzY9xGZ1fPzZzK7sDSD8rZPOvcIQ10BCw=
+github.com/pion/mdns v0.0.5/go.mod h1:UgssrvdD3mxpi8tMxAXbsppL3vJ4Jipw1mTCW+al01g=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.9 h1:1ujStwg++IOLIEoOiIQ2s+qBuJ1VN81KW+9pMPsif+U=
+github.com/pion/rtcp v1.2.9/go.mod h1:qVPhiCzAm4D/rxb6XzKeyZiQK69yJpbUDJSF7TgrqNo=
+github.com/pion/rtp v1.7.13 h1:qcHwlmtiI50t1XivvoawdCGTP4Uiypzfrsap+bijcoA=
+github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
+github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s=
+github.com/pion/sctp v1.8.2 h1:yBBCIrUMJ4yFICL3RIvR4eh/H2BTTvlligmSTy+3kiA=
+github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s=
+github.com/pion/sdp/v3 v3.0.5 h1:ouvI7IgGl+V4CrqskVtr3AaTrPvPisEOxwgpdktctkU=
+github.com/pion/sdp/v3 v3.0.5/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
+github.com/pion/srtp/v2 v2.0.9 h1:JJq3jClmDFBPX/F5roEb0U19jSU7eUhyDqR/NZ34EKQ=
+github.com/pion/srtp/v2 v2.0.9/go.mod h1:5TtM9yw6lsH0ppNCehB/EjEUli7VkUgKSPJqWVqbhQ4=
+github.com/pion/stun v0.3.5 h1:uLUCBCkQby4S1cf6CGuR9QrVOKcvUwFeemaC865QHDg=
+github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA=
+github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q=
+github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A=
+github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g=
+github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA=
+github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg=
+github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4=
+github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc=
+github.com/pion/turn/v2 v2.0.8 h1:KEstL92OUN3k5k8qxsXHpr7WWfrdp7iJZHx99ud8muw=
+github.com/pion/turn/v2 v2.0.8/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw=
+github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M=
+github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8=
+github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us=
+github.com/pion/webrtc/v3 v3.1.42 h1:wJEQFIXVanptnQcHOLTuIo4AtGB2+mG2x4OhIhnITOA=
+github.com/pion/webrtc/v3 v3.1.42/go.mod h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs=
+github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
+github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
+github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg=
+go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
+go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY=
+go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
+golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM=
+modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak=
+modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
+modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
+modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o=
+modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/sqlite v1.26.0 h1:SocQdLRSYlA8W99V8YH0NES75thx19d9sB/aFc4R8Lw=
+modernc.org/sqlite v1.26.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU=
+rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
+zombiezen.com/go/sqlite v0.13.1 h1:qDzxyWWmMtSSEH5qxamqBFmqA2BLSSbtODi3ojaE02o=
+zombiezen.com/go/sqlite v0.13.1/go.mod h1:Ht/5Rg3Ae2hoyh1I7gbWtWAl89CNocfqeb/aAMTkJr4=
diff --git a/erigon-lib/gointerfaces/downloader/downloader.pb.go b/erigon-lib/gointerfaces/downloader/downloader.pb.go
new file mode 100644
index 00000000000..3c1ec9b2d4f
--- /dev/null
+++ b/erigon-lib/gointerfaces/downloader/downloader.pb.go
@@ -0,0 +1,642 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: downloader/downloader.proto
+
+package downloader
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// DownloadItem:
+// - if Erigon created new snapshot and want seed it
+// - if Erigon wnat download files - it fills only "torrent_hash" field
+type AddItem struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ TorrentHash *types.H160 `protobuf:"bytes,2,opt,name=torrent_hash,json=torrentHash,proto3" json:"torrent_hash,omitempty"` // will be resolved as magnet link
+}
+
+func (x *AddItem) Reset() {
+ *x = AddItem{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddItem) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddItem) ProtoMessage() {}
+
+func (x *AddItem) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddItem.ProtoReflect.Descriptor instead.
+func (*AddItem) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AddItem) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *AddItem) GetTorrentHash() *types.H160 {
+ if x != nil {
+ return x.TorrentHash
+ }
+ return nil
+}
+
+type AddRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Items []*AddItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` // single hash will be resolved as magnet link
+}
+
+func (x *AddRequest) Reset() {
+ *x = AddRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddRequest) ProtoMessage() {}
+
+func (x *AddRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead.
+func (*AddRequest) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *AddRequest) GetItems() []*AddItem {
+ if x != nil {
+ return x.Items
+ }
+ return nil
+}
+
+// DeleteRequest: stop seeding, delete file, delete .torrent
+type DeleteRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+func (x *DeleteRequest) Reset() {
+ *x = DeleteRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteRequest) ProtoMessage() {}
+
+func (x *DeleteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead.
+func (*DeleteRequest) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DeleteRequest) GetPaths() []string {
+ if x != nil {
+ return x.Paths
+ }
+ return nil
+}
+
+type VerifyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *VerifyRequest) Reset() {
+ *x = VerifyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VerifyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VerifyRequest) ProtoMessage() {}
+
+func (x *VerifyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VerifyRequest.ProtoReflect.Descriptor instead.
+func (*VerifyRequest) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{3}
+}
+
+type StatsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StatsRequest) Reset() {
+ *x = StatsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatsRequest) ProtoMessage() {}
+
+func (x *StatsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatsRequest.ProtoReflect.Descriptor instead.
+func (*StatsRequest) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{4}
+}
+
+type ProhibitNewDownloadsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ProhibitNewDownloadsRequest) Reset() {
+ *x = ProhibitNewDownloadsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProhibitNewDownloadsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProhibitNewDownloadsRequest) ProtoMessage() {}
+
+func (x *ProhibitNewDownloadsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProhibitNewDownloadsRequest.ProtoReflect.Descriptor instead.
+func (*ProhibitNewDownloadsRequest) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{5}
+}
+
+type StatsReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // First step on startup - "resolve metadata":
+ // - understand total amount of data to download
+ // - ensure all pieces hashes available
+ // - validate files after crush
+ // - when all metadata ready - can start download/upload
+ MetadataReady int32 `protobuf:"varint,1,opt,name=metadata_ready,json=metadataReady,proto3" json:"metadata_ready,omitempty"`
+ FilesTotal int32 `protobuf:"varint,2,opt,name=files_total,json=filesTotal,proto3" json:"files_total,omitempty"`
+ PeersUnique int32 `protobuf:"varint,4,opt,name=peers_unique,json=peersUnique,proto3" json:"peers_unique,omitempty"`
+ ConnectionsTotal uint64 `protobuf:"varint,5,opt,name=connections_total,json=connectionsTotal,proto3" json:"connections_total,omitempty"`
+ Completed bool `protobuf:"varint,6,opt,name=completed,proto3" json:"completed,omitempty"`
+ Progress float32 `protobuf:"fixed32,7,opt,name=progress,proto3" json:"progress,omitempty"`
+ BytesCompleted uint64 `protobuf:"varint,8,opt,name=bytes_completed,json=bytesCompleted,proto3" json:"bytes_completed,omitempty"`
+ BytesTotal uint64 `protobuf:"varint,9,opt,name=bytes_total,json=bytesTotal,proto3" json:"bytes_total,omitempty"`
+ UploadRate uint64 `protobuf:"varint,10,opt,name=upload_rate,json=uploadRate,proto3" json:"upload_rate,omitempty"` // bytes/sec
+ DownloadRate uint64 `protobuf:"varint,11,opt,name=download_rate,json=downloadRate,proto3" json:"download_rate,omitempty"` // bytes/sec
+}
+
+func (x *StatsReply) Reset() {
+ *x = StatsReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_downloader_downloader_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatsReply) ProtoMessage() {}
+
+func (x *StatsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_downloader_downloader_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatsReply.ProtoReflect.Descriptor instead.
+func (*StatsReply) Descriptor() ([]byte, []int) {
+ return file_downloader_downloader_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *StatsReply) GetMetadataReady() int32 {
+ if x != nil {
+ return x.MetadataReady
+ }
+ return 0
+}
+
+func (x *StatsReply) GetFilesTotal() int32 {
+ if x != nil {
+ return x.FilesTotal
+ }
+ return 0
+}
+
+func (x *StatsReply) GetPeersUnique() int32 {
+ if x != nil {
+ return x.PeersUnique
+ }
+ return 0
+}
+
+func (x *StatsReply) GetConnectionsTotal() uint64 {
+ if x != nil {
+ return x.ConnectionsTotal
+ }
+ return 0
+}
+
+func (x *StatsReply) GetCompleted() bool {
+ if x != nil {
+ return x.Completed
+ }
+ return false
+}
+
+func (x *StatsReply) GetProgress() float32 {
+ if x != nil {
+ return x.Progress
+ }
+ return 0
+}
+
+func (x *StatsReply) GetBytesCompleted() uint64 {
+ if x != nil {
+ return x.BytesCompleted
+ }
+ return 0
+}
+
+func (x *StatsReply) GetBytesTotal() uint64 {
+ if x != nil {
+ return x.BytesTotal
+ }
+ return 0
+}
+
+func (x *StatsReply) GetUploadRate() uint64 {
+ if x != nil {
+ return x.UploadRate
+ }
+ return 0
+}
+
+func (x *StatsReply) GetDownloadRate() uint64 {
+ if x != nil {
+ return x.DownloadRate
+ }
+ return 0
+}
+
+var File_downloader_downloader_proto protoreflect.FileDescriptor
+
+var file_downloader_downloader_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x64, 0x6f, 0x77,
+ 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x64,
+ 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4d, 0x0a, 0x07, 0x41, 0x64, 0x64,
+ 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x0c, 0x74, 0x6f, 0x72, 0x72,
+ 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x0b, 0x74, 0x6f, 0x72,
+ 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x37, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
+ 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d,
+ 0x73, 0x22, 0x25, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x50, 0x72, 0x6f,
+ 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12,
+ 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x55, 0x6e, 0x69, 0x71,
+ 0x75, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12,
+ 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
+ 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f,
+ 0x74, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x61,
+ 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x52, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
+ 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x6f, 0x77,
+ 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x32, 0xdb, 0x02, 0x0a, 0x0a, 0x44, 0x6f,
+ 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x68,
+ 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x73,
+ 0x12, 0x27, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x50, 0x72,
+ 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
+ 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x16, 0x2e, 0x64, 0x6f, 0x77,
+ 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
+ 0x64, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x56,
+ 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x19, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
+ 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, 0x53, 0x74,
+ 0x61, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
+ 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x19, 0x5a, 0x17, 0x2e, 0x2f, 0x64, 0x6f, 0x77,
+ 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x3b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
+ 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_downloader_downloader_proto_rawDescOnce sync.Once
+ file_downloader_downloader_proto_rawDescData = file_downloader_downloader_proto_rawDesc
+)
+
+func file_downloader_downloader_proto_rawDescGZIP() []byte {
+ file_downloader_downloader_proto_rawDescOnce.Do(func() {
+ file_downloader_downloader_proto_rawDescData = protoimpl.X.CompressGZIP(file_downloader_downloader_proto_rawDescData)
+ })
+ return file_downloader_downloader_proto_rawDescData
+}
+
+var file_downloader_downloader_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_downloader_downloader_proto_goTypes = []interface{}{
+ (*AddItem)(nil), // 0: downloader.AddItem
+ (*AddRequest)(nil), // 1: downloader.AddRequest
+ (*DeleteRequest)(nil), // 2: downloader.DeleteRequest
+ (*VerifyRequest)(nil), // 3: downloader.VerifyRequest
+ (*StatsRequest)(nil), // 4: downloader.StatsRequest
+ (*ProhibitNewDownloadsRequest)(nil), // 5: downloader.ProhibitNewDownloadsRequest
+ (*StatsReply)(nil), // 6: downloader.StatsReply
+ (*types.H160)(nil), // 7: types.H160
+ (*emptypb.Empty)(nil), // 8: google.protobuf.Empty
+}
+var file_downloader_downloader_proto_depIdxs = []int32{
+ 7, // 0: downloader.AddItem.torrent_hash:type_name -> types.H160
+ 0, // 1: downloader.AddRequest.items:type_name -> downloader.AddItem
+ 5, // 2: downloader.Downloader.ProhibitNewDownloads:input_type -> downloader.ProhibitNewDownloadsRequest
+ 1, // 3: downloader.Downloader.Add:input_type -> downloader.AddRequest
+ 2, // 4: downloader.Downloader.Delete:input_type -> downloader.DeleteRequest
+ 3, // 5: downloader.Downloader.Verify:input_type -> downloader.VerifyRequest
+ 4, // 6: downloader.Downloader.Stats:input_type -> downloader.StatsRequest
+ 8, // 7: downloader.Downloader.ProhibitNewDownloads:output_type -> google.protobuf.Empty
+ 8, // 8: downloader.Downloader.Add:output_type -> google.protobuf.Empty
+ 8, // 9: downloader.Downloader.Delete:output_type -> google.protobuf.Empty
+ 8, // 10: downloader.Downloader.Verify:output_type -> google.protobuf.Empty
+ 6, // 11: downloader.Downloader.Stats:output_type -> downloader.StatsReply
+ 7, // [7:12] is the sub-list for method output_type
+ 2, // [2:7] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_downloader_downloader_proto_init() }
+func file_downloader_downloader_proto_init() {
+ if File_downloader_downloader_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_downloader_downloader_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddItem); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeleteRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VerifyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProhibitNewDownloadsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_downloader_downloader_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatsReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_downloader_downloader_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_downloader_downloader_proto_goTypes,
+ DependencyIndexes: file_downloader_downloader_proto_depIdxs,
+ MessageInfos: file_downloader_downloader_proto_msgTypes,
+ }.Build()
+ File_downloader_downloader_proto = out.File
+ file_downloader_downloader_proto_rawDesc = nil
+ file_downloader_downloader_proto_goTypes = nil
+ file_downloader_downloader_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go
new file mode 100644
index 00000000000..d3a0468ff2c
--- /dev/null
+++ b/erigon-lib/gointerfaces/downloader/downloader_grpc.pb.go
@@ -0,0 +1,270 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: downloader/downloader.proto
+
+package downloader
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Downloader_ProhibitNewDownloads_FullMethodName = "/downloader.Downloader/ProhibitNewDownloads"
+ Downloader_Add_FullMethodName = "/downloader.Downloader/Add"
+ Downloader_Delete_FullMethodName = "/downloader.Downloader/Delete"
+ Downloader_Verify_FullMethodName = "/downloader.Downloader/Verify"
+ Downloader_Stats_FullMethodName = "/downloader.Downloader/Stats"
+)
+
+// DownloaderClient is the client API for Downloader service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type DownloaderClient interface {
+ // Erigon "download once" - means restart/upgrade will not download files (and will be fast)
+ // After "download once" - Erigon will produce and seed new files
+ // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts)
+ ProhibitNewDownloads(ctx context.Context, in *ProhibitNewDownloadsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Adding new file to downloader: non-existing files it will download, existing - seed
+ Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Trigger verification of files
+ // If some part of file is bad - such part will be re-downloaded (without returning error)
+ Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsReply, error)
+}
+
+type downloaderClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewDownloaderClient(cc grpc.ClientConnInterface) DownloaderClient {
+ return &downloaderClient{cc}
+}
+
+func (c *downloaderClient) ProhibitNewDownloads(ctx context.Context, in *ProhibitNewDownloadsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Downloader_ProhibitNewDownloads_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *downloaderClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Downloader_Add_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *downloaderClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Downloader_Delete_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *downloaderClient) Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Downloader_Verify_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *downloaderClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsReply, error) {
+ out := new(StatsReply)
+ err := c.cc.Invoke(ctx, Downloader_Stats_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// DownloaderServer is the server API for Downloader service.
+// All implementations must embed UnimplementedDownloaderServer
+// for forward compatibility
+type DownloaderServer interface {
+ // Erigon "download once" - means restart/upgrade will not download files (and will be fast)
+ // After "download once" - Erigon will produce and seed new files
+ // Downloader will able: seed new files (already existing on FS), download uncomplete parts of existing files (if Verify found some bad parts)
+ ProhibitNewDownloads(context.Context, *ProhibitNewDownloadsRequest) (*emptypb.Empty, error)
+ // Adding new file to downloader: non-existing files it will download, existing - seed
+ Add(context.Context, *AddRequest) (*emptypb.Empty, error)
+ Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error)
+ // Trigger verification of files
+ // If some part of file is bad - such part will be re-downloaded (without returning error)
+ Verify(context.Context, *VerifyRequest) (*emptypb.Empty, error)
+ Stats(context.Context, *StatsRequest) (*StatsReply, error)
+ mustEmbedUnimplementedDownloaderServer()
+}
+
+// UnimplementedDownloaderServer must be embedded to have forward compatible implementations.
+type UnimplementedDownloaderServer struct {
+}
+
+func (UnimplementedDownloaderServer) ProhibitNewDownloads(context.Context, *ProhibitNewDownloadsRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ProhibitNewDownloads not implemented")
+}
+func (UnimplementedDownloaderServer) Add(context.Context, *AddRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Add not implemented")
+}
+func (UnimplementedDownloaderServer) Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (UnimplementedDownloaderServer) Verify(context.Context, *VerifyRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Verify not implemented")
+}
+func (UnimplementedDownloaderServer) Stats(context.Context, *StatsRequest) (*StatsReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Stats not implemented")
+}
+func (UnimplementedDownloaderServer) mustEmbedUnimplementedDownloaderServer() {}
+
+// UnsafeDownloaderServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to DownloaderServer will
+// result in compilation errors.
+type UnsafeDownloaderServer interface {
+ mustEmbedUnimplementedDownloaderServer()
+}
+
+func RegisterDownloaderServer(s grpc.ServiceRegistrar, srv DownloaderServer) {
+ s.RegisterService(&Downloader_ServiceDesc, srv)
+}
+
+func _Downloader_ProhibitNewDownloads_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProhibitNewDownloadsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DownloaderServer).ProhibitNewDownloads(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Downloader_ProhibitNewDownloads_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DownloaderServer).ProhibitNewDownloads(ctx, req.(*ProhibitNewDownloadsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Downloader_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DownloaderServer).Add(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Downloader_Add_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DownloaderServer).Add(ctx, req.(*AddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Downloader_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DownloaderServer).Delete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Downloader_Delete_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DownloaderServer).Delete(ctx, req.(*DeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Downloader_Verify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VerifyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DownloaderServer).Verify(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Downloader_Verify_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DownloaderServer).Verify(ctx, req.(*VerifyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Downloader_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DownloaderServer).Stats(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Downloader_Stats_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DownloaderServer).Stats(ctx, req.(*StatsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Downloader_ServiceDesc is the grpc.ServiceDesc for Downloader service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Downloader_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "downloader.Downloader",
+ HandlerType: (*DownloaderServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ProhibitNewDownloads",
+ Handler: _Downloader_ProhibitNewDownloads_Handler,
+ },
+ {
+ MethodName: "Add",
+ Handler: _Downloader_Add_Handler,
+ },
+ {
+ MethodName: "Delete",
+ Handler: _Downloader_Delete_Handler,
+ },
+ {
+ MethodName: "Verify",
+ Handler: _Downloader_Verify_Handler,
+ },
+ {
+ MethodName: "Stats",
+ Handler: _Downloader_Stats_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "downloader/downloader.proto",
+}
diff --git a/erigon-lib/gointerfaces/execution/execution.pb.go b/erigon-lib/gointerfaces/execution/execution.pb.go
new file mode 100644
index 00000000000..c331568493b
--- /dev/null
+++ b/erigon-lib/gointerfaces/execution/execution.pb.go
@@ -0,0 +1,2417 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: execution/execution.proto
+
+package execution
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExecutionStatus int32
+
+const (
+ ExecutionStatus_Success ExecutionStatus = 0
+ ExecutionStatus_BadBlock ExecutionStatus = 1
+ ExecutionStatus_TooFarAway ExecutionStatus = 2
+ ExecutionStatus_MissingSegment ExecutionStatus = 3
+ ExecutionStatus_InvalidForkchoice ExecutionStatus = 4
+ ExecutionStatus_Busy ExecutionStatus = 5
+)
+
+// Enum value maps for ExecutionStatus.
+var (
+ ExecutionStatus_name = map[int32]string{
+ 0: "Success",
+ 1: "BadBlock",
+ 2: "TooFarAway",
+ 3: "MissingSegment",
+ 4: "InvalidForkchoice",
+ 5: "Busy",
+ }
+ ExecutionStatus_value = map[string]int32{
+ "Success": 0,
+ "BadBlock": 1,
+ "TooFarAway": 2,
+ "MissingSegment": 3,
+ "InvalidForkchoice": 4,
+ "Busy": 5,
+ }
+)
+
+func (x ExecutionStatus) Enum() *ExecutionStatus {
+ p := new(ExecutionStatus)
+ *p = x
+ return p
+}
+
+func (x ExecutionStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ExecutionStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_execution_execution_proto_enumTypes[0].Descriptor()
+}
+
+func (ExecutionStatus) Type() protoreflect.EnumType {
+ return &file_execution_execution_proto_enumTypes[0]
+}
+
+func (x ExecutionStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ExecutionStatus.Descriptor instead.
+func (ExecutionStatus) EnumDescriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{0}
+}
+
+type ForkChoiceReceipt struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Status ExecutionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=execution.ExecutionStatus" json:"status,omitempty"`
+ LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latest_valid_hash,json=latestValidHash,proto3" json:"latest_valid_hash,omitempty"` // Return latest valid hash in case of halt of execution.
+}
+
+func (x *ForkChoiceReceipt) Reset() {
+ *x = ForkChoiceReceipt{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ForkChoiceReceipt) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ForkChoiceReceipt) ProtoMessage() {}
+
+func (x *ForkChoiceReceipt) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ForkChoiceReceipt.ProtoReflect.Descriptor instead.
+func (*ForkChoiceReceipt) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ForkChoiceReceipt) GetStatus() ExecutionStatus {
+ if x != nil {
+ return x.Status
+ }
+ return ExecutionStatus_Success
+}
+
+func (x *ForkChoiceReceipt) GetLatestValidHash() *types.H256 {
+ if x != nil {
+ return x.LatestValidHash
+ }
+ return nil
+}
+
+// Result we receive after validation
+type ValidationReceipt struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ValidationStatus ExecutionStatus `protobuf:"varint,1,opt,name=validation_status,json=validationStatus,proto3,enum=execution.ExecutionStatus" json:"validation_status,omitempty"`
+ LatestValidHash *types.H256 `protobuf:"bytes,2,opt,name=latest_valid_hash,json=latestValidHash,proto3" json:"latest_valid_hash,omitempty"`
+}
+
+func (x *ValidationReceipt) Reset() {
+ *x = ValidationReceipt{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidationReceipt) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidationReceipt) ProtoMessage() {}
+
+func (x *ValidationReceipt) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidationReceipt.ProtoReflect.Descriptor instead.
+func (*ValidationReceipt) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ValidationReceipt) GetValidationStatus() ExecutionStatus {
+ if x != nil {
+ return x.ValidationStatus
+ }
+ return ExecutionStatus_Success
+}
+
+func (x *ValidationReceipt) GetLatestValidHash() *types.H256 {
+ if x != nil {
+ return x.LatestValidHash
+ }
+ return nil
+}
+
+type IsCanonicalResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Canonical bool `protobuf:"varint,1,opt,name=canonical,proto3" json:"canonical,omitempty"` // Whether hash is canonical or not.
+}
+
+func (x *IsCanonicalResponse) Reset() {
+ *x = IsCanonicalResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IsCanonicalResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IsCanonicalResponse) ProtoMessage() {}
+
+func (x *IsCanonicalResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IsCanonicalResponse.ProtoReflect.Descriptor instead.
+func (*IsCanonicalResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *IsCanonicalResponse) GetCanonical() bool {
+ if x != nil {
+ return x.Canonical
+ }
+ return false
+}
+
+// Header is a header for execution
+type Header struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ParentHash *types.H256 `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"`
+ Coinbase *types.H160 `protobuf:"bytes,2,opt,name=coinbase,proto3" json:"coinbase,omitempty"`
+ StateRoot *types.H256 `protobuf:"bytes,3,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"`
+ ReceiptRoot *types.H256 `protobuf:"bytes,4,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"`
+ LogsBloom *types.H2048 `protobuf:"bytes,5,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"`
+ PrevRandao *types.H256 `protobuf:"bytes,6,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,7,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ GasLimit uint64 `protobuf:"varint,8,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"`
+ GasUsed uint64 `protobuf:"varint,9,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
+ Timestamp uint64 `protobuf:"varint,10,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Nonce uint64 `protobuf:"varint,11,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"`
+ Difficulty *types.H256 `protobuf:"bytes,13,opt,name=difficulty,proto3" json:"difficulty,omitempty"`
+ BlockHash *types.H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` // We keep this so that we can validate it
+ OmmerHash *types.H256 `protobuf:"bytes,15,opt,name=ommer_hash,json=ommerHash,proto3" json:"ommer_hash,omitempty"`
+ TransactionHash *types.H256 `protobuf:"bytes,16,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"`
+ BaseFeePerGas *types.H256 `protobuf:"bytes,17,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3,oneof" json:"base_fee_per_gas,omitempty"`
+ WithdrawalHash *types.H256 `protobuf:"bytes,18,opt,name=withdrawal_hash,json=withdrawalHash,proto3,oneof" json:"withdrawal_hash,omitempty"` // added in Shapella (EIP-4895)
+ BlobGasUsed *uint64 `protobuf:"varint,19,opt,name=blob_gas_used,json=blobGasUsed,proto3,oneof" json:"blob_gas_used,omitempty"` // added in Dencun (EIP-4844)
+ ExcessBlobGas *uint64 `protobuf:"varint,20,opt,name=excess_blob_gas,json=excessBlobGas,proto3,oneof" json:"excess_blob_gas,omitempty"` // added in Dencun (EIP-4844)
+ ParentBeaconBlockRoot *types.H256 `protobuf:"bytes,21,opt,name=parent_beacon_block_root,json=parentBeaconBlockRoot,proto3,oneof" json:"parent_beacon_block_root,omitempty"` // added in Dencun (EIP-4788)
+ // AuRa
+ AuraStep *uint64 `protobuf:"varint,22,opt,name=aura_step,json=auraStep,proto3,oneof" json:"aura_step,omitempty"`
+ AuraSeal []byte `protobuf:"bytes,23,opt,name=aura_seal,json=auraSeal,proto3,oneof" json:"aura_seal,omitempty"`
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Header.ProtoReflect.Descriptor instead.
+func (*Header) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Header) GetParentHash() *types.H256 {
+ if x != nil {
+ return x.ParentHash
+ }
+ return nil
+}
+
+func (x *Header) GetCoinbase() *types.H160 {
+ if x != nil {
+ return x.Coinbase
+ }
+ return nil
+}
+
+func (x *Header) GetStateRoot() *types.H256 {
+ if x != nil {
+ return x.StateRoot
+ }
+ return nil
+}
+
+func (x *Header) GetReceiptRoot() *types.H256 {
+ if x != nil {
+ return x.ReceiptRoot
+ }
+ return nil
+}
+
+func (x *Header) GetLogsBloom() *types.H2048 {
+ if x != nil {
+ return x.LogsBloom
+ }
+ return nil
+}
+
+func (x *Header) GetPrevRandao() *types.H256 {
+ if x != nil {
+ return x.PrevRandao
+ }
+ return nil
+}
+
+func (x *Header) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+func (x *Header) GetGasLimit() uint64 {
+ if x != nil {
+ return x.GasLimit
+ }
+ return 0
+}
+
+func (x *Header) GetGasUsed() uint64 {
+ if x != nil {
+ return x.GasUsed
+ }
+ return 0
+}
+
+func (x *Header) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *Header) GetNonce() uint64 {
+ if x != nil {
+ return x.Nonce
+ }
+ return 0
+}
+
+func (x *Header) GetExtraData() []byte {
+ if x != nil {
+ return x.ExtraData
+ }
+ return nil
+}
+
+func (x *Header) GetDifficulty() *types.H256 {
+ if x != nil {
+ return x.Difficulty
+ }
+ return nil
+}
+
+func (x *Header) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+func (x *Header) GetOmmerHash() *types.H256 {
+ if x != nil {
+ return x.OmmerHash
+ }
+ return nil
+}
+
+func (x *Header) GetTransactionHash() *types.H256 {
+ if x != nil {
+ return x.TransactionHash
+ }
+ return nil
+}
+
+func (x *Header) GetBaseFeePerGas() *types.H256 {
+ if x != nil {
+ return x.BaseFeePerGas
+ }
+ return nil
+}
+
+func (x *Header) GetWithdrawalHash() *types.H256 {
+ if x != nil {
+ return x.WithdrawalHash
+ }
+ return nil
+}
+
+func (x *Header) GetBlobGasUsed() uint64 {
+ if x != nil && x.BlobGasUsed != nil {
+ return *x.BlobGasUsed
+ }
+ return 0
+}
+
+func (x *Header) GetExcessBlobGas() uint64 {
+ if x != nil && x.ExcessBlobGas != nil {
+ return *x.ExcessBlobGas
+ }
+ return 0
+}
+
+func (x *Header) GetParentBeaconBlockRoot() *types.H256 {
+ if x != nil {
+ return x.ParentBeaconBlockRoot
+ }
+ return nil
+}
+
+func (x *Header) GetAuraStep() uint64 {
+ if x != nil && x.AuraStep != nil {
+ return *x.AuraStep
+ }
+ return 0
+}
+
+func (x *Header) GetAuraSeal() []byte {
+ if x != nil {
+ return x.AuraSeal
+ }
+ return nil
+}
+
+// Body is a block body for execution
+type BlockBody struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockHash *types.H256 `protobuf:"bytes,1,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,2,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ // Raw transactions in byte format.
+ Transactions [][]byte `protobuf:"bytes,3,rep,name=transactions,proto3" json:"transactions,omitempty"`
+ Uncles []*Header `protobuf:"bytes,4,rep,name=uncles,proto3" json:"uncles,omitempty"`
+ Withdrawals []*types.Withdrawal `protobuf:"bytes,5,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"`
+}
+
+func (x *BlockBody) Reset() {
+ *x = BlockBody{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlockBody) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlockBody) ProtoMessage() {}
+
+func (x *BlockBody) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlockBody.ProtoReflect.Descriptor instead.
+func (*BlockBody) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *BlockBody) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+func (x *BlockBody) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+func (x *BlockBody) GetTransactions() [][]byte {
+ if x != nil {
+ return x.Transactions
+ }
+ return nil
+}
+
+func (x *BlockBody) GetUncles() []*Header {
+ if x != nil {
+ return x.Uncles
+ }
+ return nil
+}
+
+func (x *BlockBody) GetWithdrawals() []*types.Withdrawal {
+ if x != nil {
+ return x.Withdrawals
+ }
+ return nil
+}
+
+type Block struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Body *BlockBody `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
+}
+
+func (x *Block) Reset() {
+ *x = Block{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Block) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Block) ProtoMessage() {}
+
+func (x *Block) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Block.ProtoReflect.Descriptor instead.
+func (*Block) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Block) GetHeader() *Header {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+func (x *Block) GetBody() *BlockBody {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+type GetHeaderResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Header *Header `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"`
+}
+
+func (x *GetHeaderResponse) Reset() {
+ *x = GetHeaderResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetHeaderResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetHeaderResponse) ProtoMessage() {}
+
+func (x *GetHeaderResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetHeaderResponse.ProtoReflect.Descriptor instead.
+func (*GetHeaderResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *GetHeaderResponse) GetHeader() *Header {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+type GetTDResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Td *types.H256 `protobuf:"bytes,1,opt,name=td,proto3,oneof" json:"td,omitempty"`
+}
+
+func (x *GetTDResponse) Reset() {
+ *x = GetTDResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTDResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTDResponse) ProtoMessage() {}
+
+func (x *GetTDResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTDResponse.ProtoReflect.Descriptor instead.
+func (*GetTDResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetTDResponse) GetTd() *types.H256 {
+ if x != nil {
+ return x.Td
+ }
+ return nil
+}
+
+type GetBodyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *BlockBody `protobuf:"bytes,1,opt,name=body,proto3,oneof" json:"body,omitempty"`
+}
+
+func (x *GetBodyResponse) Reset() {
+ *x = GetBodyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBodyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBodyResponse) ProtoMessage() {}
+
+func (x *GetBodyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBodyResponse.ProtoReflect.Descriptor instead.
+func (*GetBodyResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GetBodyResponse) GetBody() *BlockBody {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+type GetHeaderHashNumberResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockNumber *uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3,oneof" json:"block_number,omitempty"` // null if not found.
+}
+
+func (x *GetHeaderHashNumberResponse) Reset() {
+ *x = GetHeaderHashNumberResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetHeaderHashNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetHeaderHashNumberResponse) ProtoMessage() {}
+
+func (x *GetHeaderHashNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetHeaderHashNumberResponse.ProtoReflect.Descriptor instead.
+func (*GetHeaderHashNumberResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *GetHeaderHashNumberResponse) GetBlockNumber() uint64 {
+ if x != nil && x.BlockNumber != nil {
+ return *x.BlockNumber
+ }
+ return 0
+}
+
+type GetSegmentRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Get headers/body by number or hash, invalid if none set.
+ BlockNumber *uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3,oneof" json:"block_number,omitempty"`
+ BlockHash *types.H256 `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3,oneof" json:"block_hash,omitempty"`
+}
+
+func (x *GetSegmentRequest) Reset() {
+ *x = GetSegmentRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSegmentRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSegmentRequest) ProtoMessage() {}
+
+func (x *GetSegmentRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSegmentRequest.ProtoReflect.Descriptor instead.
+func (*GetSegmentRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetSegmentRequest) GetBlockNumber() uint64 {
+ if x != nil && x.BlockNumber != nil {
+ return *x.BlockNumber
+ }
+ return 0
+}
+
+func (x *GetSegmentRequest) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+type InsertBlocksRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Blocks []*Block `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"`
+}
+
+func (x *InsertBlocksRequest) Reset() {
+ *x = InsertBlocksRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InsertBlocksRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InsertBlocksRequest) ProtoMessage() {}
+
+func (x *InsertBlocksRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InsertBlocksRequest.ProtoReflect.Descriptor instead.
+func (*InsertBlocksRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *InsertBlocksRequest) GetBlocks() []*Block {
+ if x != nil {
+ return x.Blocks
+ }
+ return nil
+}
+
+type ForkChoice struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ HeadBlockHash *types.H256 `protobuf:"bytes,1,opt,name=head_block_hash,json=headBlockHash,proto3" json:"head_block_hash,omitempty"`
+ Timeout uint64 `protobuf:"varint,2,opt,name=timeout,proto3" json:"timeout,omitempty"` // Timeout in milliseconds for fcu before it becomes async.
+ FinalizedBlockHash *types.H256 `protobuf:"bytes,3,opt,name=finalized_block_hash,json=finalizedBlockHash,proto3,oneof" json:"finalized_block_hash,omitempty"`
+ SafeBlockHash *types.H256 `protobuf:"bytes,4,opt,name=safe_block_hash,json=safeBlockHash,proto3,oneof" json:"safe_block_hash,omitempty"`
+}
+
+func (x *ForkChoice) Reset() {
+ *x = ForkChoice{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ForkChoice) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ForkChoice) ProtoMessage() {}
+
+func (x *ForkChoice) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ForkChoice.ProtoReflect.Descriptor instead.
+func (*ForkChoice) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *ForkChoice) GetHeadBlockHash() *types.H256 {
+ if x != nil {
+ return x.HeadBlockHash
+ }
+ return nil
+}
+
+func (x *ForkChoice) GetTimeout() uint64 {
+ if x != nil {
+ return x.Timeout
+ }
+ return 0
+}
+
+func (x *ForkChoice) GetFinalizedBlockHash() *types.H256 {
+ if x != nil {
+ return x.FinalizedBlockHash
+ }
+ return nil
+}
+
+func (x *ForkChoice) GetSafeBlockHash() *types.H256 {
+ if x != nil {
+ return x.SafeBlockHash
+ }
+ return nil
+}
+
+type InsertionResult struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Result ExecutionStatus `protobuf:"varint,1,opt,name=result,proto3,enum=execution.ExecutionStatus" json:"result,omitempty"`
+}
+
+func (x *InsertionResult) Reset() {
+ *x = InsertionResult{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InsertionResult) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InsertionResult) ProtoMessage() {}
+
+func (x *InsertionResult) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InsertionResult.ProtoReflect.Descriptor instead.
+func (*InsertionResult) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *InsertionResult) GetResult() ExecutionStatus {
+ if x != nil {
+ return x.Result
+ }
+ return ExecutionStatus_Success
+}
+
+type ValidationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hash *types.H256 `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+ Number uint64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"`
+}
+
+func (x *ValidationRequest) Reset() {
+ *x = ValidationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidationRequest) ProtoMessage() {}
+
+func (x *ValidationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidationRequest.ProtoReflect.Descriptor instead.
+func (*ValidationRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *ValidationRequest) GetHash() *types.H256 {
+ if x != nil {
+ return x.Hash
+ }
+ return nil
+}
+
+func (x *ValidationRequest) GetNumber() uint64 {
+ if x != nil {
+ return x.Number
+ }
+ return 0
+}
+
+type AssembleBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ParentHash *types.H256 `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ PrevRandao *types.H256 `protobuf:"bytes,3,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"`
+ SuggestedFeeRecipient *types.H160 `protobuf:"bytes,4,opt,name=suggested_fee_recipient,json=suggestedFeeRecipient,proto3" json:"suggested_fee_recipient,omitempty"`
+ Withdrawals []*types.Withdrawal `protobuf:"bytes,5,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"` // added in Shapella (EIP-4895)
+ ParentBeaconBlockRoot *types.H256 `protobuf:"bytes,6,opt,name=parent_beacon_block_root,json=parentBeaconBlockRoot,proto3,oneof" json:"parent_beacon_block_root,omitempty"` // added in Dencun (EIP-4788)
+}
+
+func (x *AssembleBlockRequest) Reset() {
+ *x = AssembleBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssembleBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssembleBlockRequest) ProtoMessage() {}
+
+func (x *AssembleBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssembleBlockRequest.ProtoReflect.Descriptor instead.
+func (*AssembleBlockRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *AssembleBlockRequest) GetParentHash() *types.H256 {
+ if x != nil {
+ return x.ParentHash
+ }
+ return nil
+}
+
+func (x *AssembleBlockRequest) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *AssembleBlockRequest) GetPrevRandao() *types.H256 {
+ if x != nil {
+ return x.PrevRandao
+ }
+ return nil
+}
+
+func (x *AssembleBlockRequest) GetSuggestedFeeRecipient() *types.H160 {
+ if x != nil {
+ return x.SuggestedFeeRecipient
+ }
+ return nil
+}
+
+func (x *AssembleBlockRequest) GetWithdrawals() []*types.Withdrawal {
+ if x != nil {
+ return x.Withdrawals
+ }
+ return nil
+}
+
+func (x *AssembleBlockRequest) GetParentBeaconBlockRoot() *types.H256 {
+ if x != nil {
+ return x.ParentBeaconBlockRoot
+ }
+ return nil
+}
+
+type AssembleBlockResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Busy bool `protobuf:"varint,2,opt,name=busy,proto3" json:"busy,omitempty"`
+}
+
+func (x *AssembleBlockResponse) Reset() {
+ *x = AssembleBlockResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssembleBlockResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssembleBlockResponse) ProtoMessage() {}
+
+func (x *AssembleBlockResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssembleBlockResponse.ProtoReflect.Descriptor instead.
+func (*AssembleBlockResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *AssembleBlockResponse) GetId() uint64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *AssembleBlockResponse) GetBusy() bool {
+ if x != nil {
+ return x.Busy
+ }
+ return false
+}
+
+type GetAssembledBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *GetAssembledBlockRequest) Reset() {
+ *x = GetAssembledBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetAssembledBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAssembledBlockRequest) ProtoMessage() {}
+
+func (x *GetAssembledBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAssembledBlockRequest.ProtoReflect.Descriptor instead.
+func (*GetAssembledBlockRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *GetAssembledBlockRequest) GetId() uint64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type AssembledBlockData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ExecutionPayload *types.ExecutionPayload `protobuf:"bytes,1,opt,name=execution_payload,json=executionPayload,proto3" json:"execution_payload,omitempty"`
+ BlockValue *types.H256 `protobuf:"bytes,2,opt,name=block_value,json=blockValue,proto3" json:"block_value,omitempty"`
+ BlobsBundle *types.BlobsBundleV1 `protobuf:"bytes,3,opt,name=blobs_bundle,json=blobsBundle,proto3" json:"blobs_bundle,omitempty"`
+}
+
+func (x *AssembledBlockData) Reset() {
+ *x = AssembledBlockData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AssembledBlockData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AssembledBlockData) ProtoMessage() {}
+
+func (x *AssembledBlockData) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AssembledBlockData.ProtoReflect.Descriptor instead.
+func (*AssembledBlockData) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *AssembledBlockData) GetExecutionPayload() *types.ExecutionPayload {
+ if x != nil {
+ return x.ExecutionPayload
+ }
+ return nil
+}
+
+func (x *AssembledBlockData) GetBlockValue() *types.H256 {
+ if x != nil {
+ return x.BlockValue
+ }
+ return nil
+}
+
+func (x *AssembledBlockData) GetBlobsBundle() *types.BlobsBundleV1 {
+ if x != nil {
+ return x.BlobsBundle
+ }
+ return nil
+}
+
+type GetAssembledBlockResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *AssembledBlockData `protobuf:"bytes,1,opt,name=data,proto3,oneof" json:"data,omitempty"`
+ Busy bool `protobuf:"varint,2,opt,name=busy,proto3" json:"busy,omitempty"`
+}
+
+func (x *GetAssembledBlockResponse) Reset() {
+ *x = GetAssembledBlockResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetAssembledBlockResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAssembledBlockResponse) ProtoMessage() {}
+
+func (x *GetAssembledBlockResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAssembledBlockResponse.ProtoReflect.Descriptor instead.
+func (*GetAssembledBlockResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *GetAssembledBlockResponse) GetData() *AssembledBlockData {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *GetAssembledBlockResponse) GetBusy() bool {
+ if x != nil {
+ return x.Busy
+ }
+ return false
+}
+
+type GetBodiesBatchResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Bodies []*BlockBody `protobuf:"bytes,1,rep,name=bodies,proto3" json:"bodies,omitempty"`
+}
+
+func (x *GetBodiesBatchResponse) Reset() {
+ *x = GetBodiesBatchResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBodiesBatchResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBodiesBatchResponse) ProtoMessage() {}
+
+func (x *GetBodiesBatchResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBodiesBatchResponse.ProtoReflect.Descriptor instead.
+func (*GetBodiesBatchResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *GetBodiesBatchResponse) GetBodies() []*BlockBody {
+ if x != nil {
+ return x.Bodies
+ }
+ return nil
+}
+
+type GetBodiesByHashesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hashes []*types.H256 `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+}
+
+func (x *GetBodiesByHashesRequest) Reset() {
+ *x = GetBodiesByHashesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBodiesByHashesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBodiesByHashesRequest) ProtoMessage() {}
+
+func (x *GetBodiesByHashesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBodiesByHashesRequest.ProtoReflect.Descriptor instead.
+func (*GetBodiesByHashesRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *GetBodiesByHashesRequest) GetHashes() []*types.H256 {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+type GetBodiesByRangeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ Count uint64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *GetBodiesByRangeRequest) Reset() {
+ *x = GetBodiesByRangeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBodiesByRangeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBodiesByRangeRequest) ProtoMessage() {}
+
+func (x *GetBodiesByRangeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBodiesByRangeRequest.ProtoReflect.Descriptor instead.
+func (*GetBodiesByRangeRequest) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *GetBodiesByRangeRequest) GetStart() uint64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *GetBodiesByRangeRequest) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type ReadyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
+}
+
+func (x *ReadyResponse) Reset() {
+ *x = ReadyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReadyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadyResponse) ProtoMessage() {}
+
+func (x *ReadyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadyResponse.ProtoReflect.Descriptor instead.
+func (*ReadyResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *ReadyResponse) GetReady() bool {
+ if x != nil {
+ return x.Ready
+ }
+ return false
+}
+
+type FrozenBlocksResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ FrozenBlocks uint64 `protobuf:"varint,1,opt,name=frozen_blocks,json=frozenBlocks,proto3" json:"frozen_blocks,omitempty"`
+}
+
+func (x *FrozenBlocksResponse) Reset() {
+ *x = FrozenBlocksResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_execution_execution_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FrozenBlocksResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FrozenBlocksResponse) ProtoMessage() {}
+
+func (x *FrozenBlocksResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_execution_execution_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FrozenBlocksResponse.ProtoReflect.Descriptor instead.
+func (*FrozenBlocksResponse) Descriptor() ([]byte, []int) {
+ return file_execution_execution_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *FrozenBlocksResponse) GetFrozenBlocks() uint64 {
+ if x != nil {
+ return x.FrozenBlocks
+ }
+ return 0
+}
+
+var File_execution_execution_proto protoreflect.FileDescriptor
+
+var file_execution_execution_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x65, 0x78, 0x65,
+ 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x01, 0x0a, 0x11, 0x46, 0x6f, 0x72, 0x6b, 0x43,
+ 0x68, 0x6f, 0x69, 0x63, 0x65, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x32, 0x0a, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x65,
+ 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x37, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73, 0x68, 0x22, 0x95, 0x01, 0x0a, 0x11, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12,
+ 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x65, 0x78, 0x65,
+ 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x11, 0x6c, 0x61, 0x74, 0x65,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36,
+ 0x52, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x61, 0x73,
+ 0x68, 0x22, 0x33, 0x0a, 0x13, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x6f,
+ 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x61, 0x6e,
+ 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x22, 0xe4, 0x08, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12,
+ 0x27, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x08,
+ 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f,
+ 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74,
+ 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f,
+ 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f,
+ 0x6d, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x61, 0x6f,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12,
+ 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12,
+ 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63,
+ 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x0a,
+ 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a,
+ 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2a, 0x0a, 0x0a, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x5f,
+ 0x68, 0x61, 0x73, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x6f, 0x6d, 0x6d, 0x65, 0x72, 0x48, 0x61,
+ 0x73, 0x68, 0x12, 0x36, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x39, 0x0a, 0x10, 0x62, 0x61,
+ 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x11,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35,
+ 0x36, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47,
+ 0x61, 0x73, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, 0x0f, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61,
+ 0x77, 0x61, 0x6c, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x01, 0x52, 0x0e, 0x77,
+ 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65,
+ 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x47,
+ 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x65, 0x78, 0x63,
+ 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x04, 0x48, 0x03, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x42, 0x6c, 0x6f, 0x62,
+ 0x47, 0x61, 0x73, 0x88, 0x01, 0x01, 0x12, 0x49, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f,
+ 0x6f, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x04, 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42,
+ 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01,
+ 0x01, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x75, 0x72, 0x61, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x18, 0x16,
+ 0x20, 0x01, 0x28, 0x04, 0x48, 0x05, 0x52, 0x08, 0x61, 0x75, 0x72, 0x61, 0x53, 0x74, 0x65, 0x70,
+ 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x75, 0x72, 0x61, 0x5f, 0x73, 0x65, 0x61, 0x6c,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x06, 0x52, 0x08, 0x61, 0x75, 0x72, 0x61, 0x53, 0x65,
+ 0x61, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66,
+ 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x77,
+ 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x10,
+ 0x0a, 0x0e, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64,
+ 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62,
+ 0x5f, 0x67, 0x61, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f,
+ 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f,
+ 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x61, 0x75, 0x72, 0x61, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x42,
+ 0x0c, 0x0a, 0x0a, 0x5f, 0x61, 0x75, 0x72, 0x61, 0x5f, 0x73, 0x65, 0x61, 0x6c, 0x22, 0xde, 0x01,
+ 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x0a, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29,
+ 0x0a, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11,
+ 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x52, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74,
+ 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61,
+ 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x22, 0x5c,
+ 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x12, 0x28, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x4e, 0x0a, 0x11,
+ 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x11, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01,
+ 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x38, 0x0a, 0x0d,
+ 0x47, 0x65, 0x74, 0x54, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a,
+ 0x02, 0x74, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, 0x02, 0x74, 0x64, 0x88, 0x01, 0x01, 0x42,
+ 0x05, 0x0a, 0x03, 0x5f, 0x74, 0x64, 0x22, 0x49, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64,
+ 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x62, 0x6f, 0x64,
+ 0x79, 0x22, 0x56, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61,
+ 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x26, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x47, 0x65,
+ 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x26, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x48, 0x01, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x22, 0x3f, 0x0a, 0x13, 0x49, 0x6e, 0x73, 0x65,
+ 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x28, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x10, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x86, 0x02, 0x0a, 0x0a, 0x46, 0x6f,
+ 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64,
+ 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d,
+ 0x68, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x18, 0x0a,
+ 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
+ 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x14, 0x66, 0x69, 0x6e, 0x61, 0x6c,
+ 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32,
+ 0x35, 0x36, 0x48, 0x00, 0x52, 0x12, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0f, 0x73,
+ 0x61, 0x66, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35,
+ 0x36, 0x48, 0x01, 0x52, 0x0d, 0x73, 0x61, 0x66, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61,
+ 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69,
+ 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x12,
+ 0x0a, 0x10, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61,
+ 0x73, 0x68, 0x22, 0x45, 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x4c, 0x0a, 0x11, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12,
+ 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xf2, 0x02, 0x0a, 0x14, 0x41, 0x73, 0x73, 0x65,
+ 0x6d, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32,
+ 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c,
+ 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x0b,
+ 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a,
+ 0x70, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x43, 0x0a, 0x17, 0x73, 0x75,
+ 0x67, 0x67, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x69,
+ 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x15, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73,
+ 0x74, 0x65, 0x64, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12,
+ 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74,
+ 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61,
+ 0x77, 0x61, 0x6c, 0x73, 0x12, 0x49, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62,
+ 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x48, 0x00, 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x65, 0x61,
+ 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x42,
+ 0x1b, 0x0a, 0x19, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x65, 0x61, 0x63, 0x6f,
+ 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x3b, 0x0a, 0x15,
+ 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x75, 0x73, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x04, 0x62, 0x75, 0x73, 0x79, 0x22, 0x2a, 0x0a, 0x18, 0x47, 0x65, 0x74,
+ 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0xc1, 0x01, 0x0a, 0x12, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62,
+ 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x11,
+ 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x12, 0x2c, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x37, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42,
+ 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x52, 0x0b, 0x62, 0x6c,
+ 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x70, 0x0a, 0x19, 0x47, 0x65, 0x74,
+ 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44,
+ 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x12,
+ 0x0a, 0x04, 0x62, 0x75, 0x73, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x62, 0x75,
+ 0x73, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x22, 0x46, 0x0a, 0x16, 0x47,
+ 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x06, 0x62, 0x6f, 0x64,
+ 0x69, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73,
+ 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61,
+ 0x73, 0x68, 0x65, 0x73, 0x22, 0x45, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65,
+ 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x25, 0x0a, 0x0d, 0x52,
+ 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61,
+ 0x64, 0x79, 0x22, 0x3b, 0x0a, 0x14, 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72,
+ 0x6f, 0x7a, 0x65, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x2a,
+ 0x71, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x10, 0x00, 0x12,
+ 0x0c, 0x0a, 0x08, 0x42, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x10, 0x01, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x54, 0x6f, 0x6f, 0x46, 0x61, 0x72, 0x41, 0x77, 0x61, 0x79, 0x10, 0x02, 0x12, 0x12, 0x0a,
+ 0x0e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x10,
+ 0x03, 0x12, 0x15, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x6b,
+ 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x75, 0x73, 0x79,
+ 0x10, 0x05, 0x32, 0xbf, 0x09, 0x0a, 0x09, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4a, 0x0a, 0x0c, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
+ 0x12, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x73,
+ 0x65, 0x72, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x73,
+ 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x4b, 0x0a, 0x0d,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x1c, 0x2e,
+ 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x65, 0x78,
+ 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x12, 0x47, 0x0a, 0x10, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x12, 0x15, 0x2e,
+ 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68,
+ 0x6f, 0x69, 0x63, 0x65, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x52, 0x65, 0x63, 0x65, 0x69,
+ 0x70, 0x74, 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x12, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73,
+ 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x23, 0x2e, 0x65, 0x78,
+ 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, 0x65, 0x6d,
+ 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x24, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x41, 0x73, 0x73, 0x65, 0x6d, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e,
+ 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a,
+ 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a,
+ 0x05, 0x47, 0x65, 0x74, 0x54, 0x44, 0x12, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47,
+ 0x0a, 0x09, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x65, 0x78,
+ 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65,
+ 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x1c, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47,
+ 0x65, 0x74, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1a, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x42, 0x6f, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x10,
+ 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x12, 0x22, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x6f,
+ 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x65,
+ 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6f, 0x64, 0x69,
+ 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65,
+ 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69,
+ 0x63, 0x61, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x12, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x48, 0x32, 0x35, 0x36, 0x1a, 0x1e, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x49, 0x73, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x1a, 0x26, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61,
+ 0x73, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x3e, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63,
+ 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x65, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65,
+ 0x12, 0x39, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x1a, 0x18, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65,
+ 0x61, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x46,
+ 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x3b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_execution_execution_proto_rawDescOnce sync.Once
+ file_execution_execution_proto_rawDescData = file_execution_execution_proto_rawDesc
+)
+
+func file_execution_execution_proto_rawDescGZIP() []byte {
+ file_execution_execution_proto_rawDescOnce.Do(func() {
+ file_execution_execution_proto_rawDescData = protoimpl.X.CompressGZIP(file_execution_execution_proto_rawDescData)
+ })
+ return file_execution_execution_proto_rawDescData
+}
+
+var file_execution_execution_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_execution_execution_proto_msgTypes = make([]protoimpl.MessageInfo, 25)
+var file_execution_execution_proto_goTypes = []interface{}{
+ (ExecutionStatus)(0), // 0: execution.ExecutionStatus
+ (*ForkChoiceReceipt)(nil), // 1: execution.ForkChoiceReceipt
+ (*ValidationReceipt)(nil), // 2: execution.ValidationReceipt
+ (*IsCanonicalResponse)(nil), // 3: execution.IsCanonicalResponse
+ (*Header)(nil), // 4: execution.Header
+ (*BlockBody)(nil), // 5: execution.BlockBody
+ (*Block)(nil), // 6: execution.Block
+ (*GetHeaderResponse)(nil), // 7: execution.GetHeaderResponse
+ (*GetTDResponse)(nil), // 8: execution.GetTDResponse
+ (*GetBodyResponse)(nil), // 9: execution.GetBodyResponse
+ (*GetHeaderHashNumberResponse)(nil), // 10: execution.GetHeaderHashNumberResponse
+ (*GetSegmentRequest)(nil), // 11: execution.GetSegmentRequest
+ (*InsertBlocksRequest)(nil), // 12: execution.InsertBlocksRequest
+ (*ForkChoice)(nil), // 13: execution.ForkChoice
+ (*InsertionResult)(nil), // 14: execution.InsertionResult
+ (*ValidationRequest)(nil), // 15: execution.ValidationRequest
+ (*AssembleBlockRequest)(nil), // 16: execution.AssembleBlockRequest
+ (*AssembleBlockResponse)(nil), // 17: execution.AssembleBlockResponse
+ (*GetAssembledBlockRequest)(nil), // 18: execution.GetAssembledBlockRequest
+ (*AssembledBlockData)(nil), // 19: execution.AssembledBlockData
+ (*GetAssembledBlockResponse)(nil), // 20: execution.GetAssembledBlockResponse
+ (*GetBodiesBatchResponse)(nil), // 21: execution.GetBodiesBatchResponse
+ (*GetBodiesByHashesRequest)(nil), // 22: execution.GetBodiesByHashesRequest
+ (*GetBodiesByRangeRequest)(nil), // 23: execution.GetBodiesByRangeRequest
+ (*ReadyResponse)(nil), // 24: execution.ReadyResponse
+ (*FrozenBlocksResponse)(nil), // 25: execution.FrozenBlocksResponse
+ (*types.H256)(nil), // 26: types.H256
+ (*types.H160)(nil), // 27: types.H160
+ (*types.H2048)(nil), // 28: types.H2048
+ (*types.Withdrawal)(nil), // 29: types.Withdrawal
+ (*types.ExecutionPayload)(nil), // 30: types.ExecutionPayload
+ (*types.BlobsBundleV1)(nil), // 31: types.BlobsBundleV1
+ (*emptypb.Empty)(nil), // 32: google.protobuf.Empty
+}
+var file_execution_execution_proto_depIdxs = []int32{
+ 0, // 0: execution.ForkChoiceReceipt.status:type_name -> execution.ExecutionStatus
+ 26, // 1: execution.ForkChoiceReceipt.latest_valid_hash:type_name -> types.H256
+ 0, // 2: execution.ValidationReceipt.validation_status:type_name -> execution.ExecutionStatus
+ 26, // 3: execution.ValidationReceipt.latest_valid_hash:type_name -> types.H256
+ 26, // 4: execution.Header.parent_hash:type_name -> types.H256
+ 27, // 5: execution.Header.coinbase:type_name -> types.H160
+ 26, // 6: execution.Header.state_root:type_name -> types.H256
+ 26, // 7: execution.Header.receipt_root:type_name -> types.H256
+ 28, // 8: execution.Header.logs_bloom:type_name -> types.H2048
+ 26, // 9: execution.Header.prev_randao:type_name -> types.H256
+ 26, // 10: execution.Header.difficulty:type_name -> types.H256
+ 26, // 11: execution.Header.block_hash:type_name -> types.H256
+ 26, // 12: execution.Header.ommer_hash:type_name -> types.H256
+ 26, // 13: execution.Header.transaction_hash:type_name -> types.H256
+ 26, // 14: execution.Header.base_fee_per_gas:type_name -> types.H256
+ 26, // 15: execution.Header.withdrawal_hash:type_name -> types.H256
+ 26, // 16: execution.Header.parent_beacon_block_root:type_name -> types.H256
+ 26, // 17: execution.BlockBody.block_hash:type_name -> types.H256
+ 4, // 18: execution.BlockBody.uncles:type_name -> execution.Header
+ 29, // 19: execution.BlockBody.withdrawals:type_name -> types.Withdrawal
+ 4, // 20: execution.Block.header:type_name -> execution.Header
+ 5, // 21: execution.Block.body:type_name -> execution.BlockBody
+ 4, // 22: execution.GetHeaderResponse.header:type_name -> execution.Header
+ 26, // 23: execution.GetTDResponse.td:type_name -> types.H256
+ 5, // 24: execution.GetBodyResponse.body:type_name -> execution.BlockBody
+ 26, // 25: execution.GetSegmentRequest.block_hash:type_name -> types.H256
+ 6, // 26: execution.InsertBlocksRequest.blocks:type_name -> execution.Block
+ 26, // 27: execution.ForkChoice.head_block_hash:type_name -> types.H256
+ 26, // 28: execution.ForkChoice.finalized_block_hash:type_name -> types.H256
+ 26, // 29: execution.ForkChoice.safe_block_hash:type_name -> types.H256
+ 0, // 30: execution.InsertionResult.result:type_name -> execution.ExecutionStatus
+ 26, // 31: execution.ValidationRequest.hash:type_name -> types.H256
+ 26, // 32: execution.AssembleBlockRequest.parent_hash:type_name -> types.H256
+ 26, // 33: execution.AssembleBlockRequest.prev_randao:type_name -> types.H256
+ 27, // 34: execution.AssembleBlockRequest.suggested_fee_recipient:type_name -> types.H160
+ 29, // 35: execution.AssembleBlockRequest.withdrawals:type_name -> types.Withdrawal
+ 26, // 36: execution.AssembleBlockRequest.parent_beacon_block_root:type_name -> types.H256
+ 30, // 37: execution.AssembledBlockData.execution_payload:type_name -> types.ExecutionPayload
+ 26, // 38: execution.AssembledBlockData.block_value:type_name -> types.H256
+ 31, // 39: execution.AssembledBlockData.blobs_bundle:type_name -> types.BlobsBundleV1
+ 19, // 40: execution.GetAssembledBlockResponse.data:type_name -> execution.AssembledBlockData
+ 5, // 41: execution.GetBodiesBatchResponse.bodies:type_name -> execution.BlockBody
+ 26, // 42: execution.GetBodiesByHashesRequest.hashes:type_name -> types.H256
+ 12, // 43: execution.Execution.InsertBlocks:input_type -> execution.InsertBlocksRequest
+ 15, // 44: execution.Execution.ValidateChain:input_type -> execution.ValidationRequest
+ 13, // 45: execution.Execution.UpdateForkChoice:input_type -> execution.ForkChoice
+ 16, // 46: execution.Execution.AssembleBlock:input_type -> execution.AssembleBlockRequest
+ 18, // 47: execution.Execution.GetAssembledBlock:input_type -> execution.GetAssembledBlockRequest
+ 32, // 48: execution.Execution.CurrentHeader:input_type -> google.protobuf.Empty
+ 11, // 49: execution.Execution.GetTD:input_type -> execution.GetSegmentRequest
+ 11, // 50: execution.Execution.GetHeader:input_type -> execution.GetSegmentRequest
+ 11, // 51: execution.Execution.GetBody:input_type -> execution.GetSegmentRequest
+ 23, // 52: execution.Execution.GetBodiesByRange:input_type -> execution.GetBodiesByRangeRequest
+ 22, // 53: execution.Execution.GetBodiesByHashes:input_type -> execution.GetBodiesByHashesRequest
+ 26, // 54: execution.Execution.IsCanonicalHash:input_type -> types.H256
+ 26, // 55: execution.Execution.GetHeaderHashNumber:input_type -> types.H256
+ 32, // 56: execution.Execution.GetForkChoice:input_type -> google.protobuf.Empty
+ 32, // 57: execution.Execution.Ready:input_type -> google.protobuf.Empty
+ 32, // 58: execution.Execution.FrozenBlocks:input_type -> google.protobuf.Empty
+ 14, // 59: execution.Execution.InsertBlocks:output_type -> execution.InsertionResult
+ 2, // 60: execution.Execution.ValidateChain:output_type -> execution.ValidationReceipt
+ 1, // 61: execution.Execution.UpdateForkChoice:output_type -> execution.ForkChoiceReceipt
+ 17, // 62: execution.Execution.AssembleBlock:output_type -> execution.AssembleBlockResponse
+ 20, // 63: execution.Execution.GetAssembledBlock:output_type -> execution.GetAssembledBlockResponse
+ 7, // 64: execution.Execution.CurrentHeader:output_type -> execution.GetHeaderResponse
+ 8, // 65: execution.Execution.GetTD:output_type -> execution.GetTDResponse
+ 7, // 66: execution.Execution.GetHeader:output_type -> execution.GetHeaderResponse
+ 9, // 67: execution.Execution.GetBody:output_type -> execution.GetBodyResponse
+ 21, // 68: execution.Execution.GetBodiesByRange:output_type -> execution.GetBodiesBatchResponse
+ 21, // 69: execution.Execution.GetBodiesByHashes:output_type -> execution.GetBodiesBatchResponse
+ 3, // 70: execution.Execution.IsCanonicalHash:output_type -> execution.IsCanonicalResponse
+ 10, // 71: execution.Execution.GetHeaderHashNumber:output_type -> execution.GetHeaderHashNumberResponse
+ 13, // 72: execution.Execution.GetForkChoice:output_type -> execution.ForkChoice
+ 24, // 73: execution.Execution.Ready:output_type -> execution.ReadyResponse
+ 25, // 74: execution.Execution.FrozenBlocks:output_type -> execution.FrozenBlocksResponse
+ 59, // [59:75] is the sub-list for method output_type
+ 43, // [43:59] is the sub-list for method input_type
+ 43, // [43:43] is the sub-list for extension type_name
+ 43, // [43:43] is the sub-list for extension extendee
+ 0, // [0:43] is the sub-list for field type_name
+}
+
+func init() { file_execution_execution_proto_init() }
+func file_execution_execution_proto_init() {
+ if File_execution_execution_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_execution_execution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ForkChoiceReceipt); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidationReceipt); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IsCanonicalResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Header); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlockBody); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Block); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetHeaderResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTDResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBodyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetHeaderHashNumberResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSegmentRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InsertBlocksRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ForkChoice); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InsertionResult); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssembleBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssembleBlockResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetAssembledBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AssembledBlockData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetAssembledBlockResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBodiesBatchResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBodiesByHashesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBodiesByRangeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReadyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_execution_execution_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FrozenBlocksResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_execution_execution_proto_msgTypes[3].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[6].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[7].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[8].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[9].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[10].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[12].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[15].OneofWrappers = []interface{}{}
+ file_execution_execution_proto_msgTypes[19].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_execution_execution_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 25,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_execution_execution_proto_goTypes,
+ DependencyIndexes: file_execution_execution_proto_depIdxs,
+ EnumInfos: file_execution_execution_proto_enumTypes,
+ MessageInfos: file_execution_execution_proto_msgTypes,
+ }.Build()
+ File_execution_execution_proto = out.File
+ file_execution_execution_proto_rawDesc = nil
+ file_execution_execution_proto_goTypes = nil
+ file_execution_execution_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/execution/execution_grpc.pb.go b/erigon-lib/gointerfaces/execution/execution_grpc.pb.go
new file mode 100644
index 00000000000..065a305e8fc
--- /dev/null
+++ b/erigon-lib/gointerfaces/execution/execution_grpc.pb.go
@@ -0,0 +1,686 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: execution/execution.proto
+
+package execution
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Execution_InsertBlocks_FullMethodName = "/execution.Execution/InsertBlocks"
+ Execution_ValidateChain_FullMethodName = "/execution.Execution/ValidateChain"
+ Execution_UpdateForkChoice_FullMethodName = "/execution.Execution/UpdateForkChoice"
+ Execution_AssembleBlock_FullMethodName = "/execution.Execution/AssembleBlock"
+ Execution_GetAssembledBlock_FullMethodName = "/execution.Execution/GetAssembledBlock"
+ Execution_CurrentHeader_FullMethodName = "/execution.Execution/CurrentHeader"
+ Execution_GetTD_FullMethodName = "/execution.Execution/GetTD"
+ Execution_GetHeader_FullMethodName = "/execution.Execution/GetHeader"
+ Execution_GetBody_FullMethodName = "/execution.Execution/GetBody"
+ Execution_GetBodiesByRange_FullMethodName = "/execution.Execution/GetBodiesByRange"
+ Execution_GetBodiesByHashes_FullMethodName = "/execution.Execution/GetBodiesByHashes"
+ Execution_IsCanonicalHash_FullMethodName = "/execution.Execution/IsCanonicalHash"
+ Execution_GetHeaderHashNumber_FullMethodName = "/execution.Execution/GetHeaderHashNumber"
+ Execution_GetForkChoice_FullMethodName = "/execution.Execution/GetForkChoice"
+ Execution_Ready_FullMethodName = "/execution.Execution/Ready"
+ Execution_FrozenBlocks_FullMethodName = "/execution.Execution/FrozenBlocks"
+)
+
+// ExecutionClient is the client API for Execution service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ExecutionClient interface {
+ // Chain Putters.
+ InsertBlocks(ctx context.Context, in *InsertBlocksRequest, opts ...grpc.CallOption) (*InsertionResult, error)
+ // Chain Validation and ForkChoice.
+ ValidateChain(ctx context.Context, in *ValidationRequest, opts ...grpc.CallOption) (*ValidationReceipt, error)
+ UpdateForkChoice(ctx context.Context, in *ForkChoice, opts ...grpc.CallOption) (*ForkChoiceReceipt, error)
+ // Block Assembly
+ // EAGAIN design here, AssembleBlock initiates the asynchronous request, and GetAssembleBlock just return it if ready.
+ AssembleBlock(ctx context.Context, in *AssembleBlockRequest, opts ...grpc.CallOption) (*AssembleBlockResponse, error)
+ GetAssembledBlock(ctx context.Context, in *GetAssembledBlockRequest, opts ...grpc.CallOption) (*GetAssembledBlockResponse, error)
+ // Chain Getters.
+ CurrentHeader(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetHeaderResponse, error)
+ GetTD(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetTDResponse, error)
+ GetHeader(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetHeaderResponse, error)
+ GetBody(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetBodyResponse, error)
+ // Ranges
+ GetBodiesByRange(ctx context.Context, in *GetBodiesByRangeRequest, opts ...grpc.CallOption) (*GetBodiesBatchResponse, error)
+ GetBodiesByHashes(ctx context.Context, in *GetBodiesByHashesRequest, opts ...grpc.CallOption) (*GetBodiesBatchResponse, error)
+ // Chain checkers
+ IsCanonicalHash(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*IsCanonicalResponse, error)
+ GetHeaderHashNumber(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*GetHeaderHashNumberResponse, error)
+ GetForkChoice(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ForkChoice, error)
+ // Misc
+ // We want to figure out whether we processed snapshots and cleanup sync cycles.
+ Ready(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ReadyResponse, error)
+ // Frozen blocks are how many blocks are in snapshots .seg files.
+ FrozenBlocks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*FrozenBlocksResponse, error)
+}
+
+type executionClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewExecutionClient(cc grpc.ClientConnInterface) ExecutionClient {
+ return &executionClient{cc}
+}
+
+func (c *executionClient) InsertBlocks(ctx context.Context, in *InsertBlocksRequest, opts ...grpc.CallOption) (*InsertionResult, error) {
+ out := new(InsertionResult)
+ err := c.cc.Invoke(ctx, Execution_InsertBlocks_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) ValidateChain(ctx context.Context, in *ValidationRequest, opts ...grpc.CallOption) (*ValidationReceipt, error) {
+ out := new(ValidationReceipt)
+ err := c.cc.Invoke(ctx, Execution_ValidateChain_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) UpdateForkChoice(ctx context.Context, in *ForkChoice, opts ...grpc.CallOption) (*ForkChoiceReceipt, error) {
+ out := new(ForkChoiceReceipt)
+ err := c.cc.Invoke(ctx, Execution_UpdateForkChoice_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) AssembleBlock(ctx context.Context, in *AssembleBlockRequest, opts ...grpc.CallOption) (*AssembleBlockResponse, error) {
+ out := new(AssembleBlockResponse)
+ err := c.cc.Invoke(ctx, Execution_AssembleBlock_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetAssembledBlock(ctx context.Context, in *GetAssembledBlockRequest, opts ...grpc.CallOption) (*GetAssembledBlockResponse, error) {
+ out := new(GetAssembledBlockResponse)
+ err := c.cc.Invoke(ctx, Execution_GetAssembledBlock_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) CurrentHeader(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetHeaderResponse, error) {
+ out := new(GetHeaderResponse)
+ err := c.cc.Invoke(ctx, Execution_CurrentHeader_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetTD(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetTDResponse, error) {
+ out := new(GetTDResponse)
+ err := c.cc.Invoke(ctx, Execution_GetTD_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetHeader(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetHeaderResponse, error) {
+ out := new(GetHeaderResponse)
+ err := c.cc.Invoke(ctx, Execution_GetHeader_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetBody(ctx context.Context, in *GetSegmentRequest, opts ...grpc.CallOption) (*GetBodyResponse, error) {
+ out := new(GetBodyResponse)
+ err := c.cc.Invoke(ctx, Execution_GetBody_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetBodiesByRange(ctx context.Context, in *GetBodiesByRangeRequest, opts ...grpc.CallOption) (*GetBodiesBatchResponse, error) {
+ out := new(GetBodiesBatchResponse)
+ err := c.cc.Invoke(ctx, Execution_GetBodiesByRange_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetBodiesByHashes(ctx context.Context, in *GetBodiesByHashesRequest, opts ...grpc.CallOption) (*GetBodiesBatchResponse, error) {
+ out := new(GetBodiesBatchResponse)
+ err := c.cc.Invoke(ctx, Execution_GetBodiesByHashes_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) IsCanonicalHash(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*IsCanonicalResponse, error) {
+ out := new(IsCanonicalResponse)
+ err := c.cc.Invoke(ctx, Execution_IsCanonicalHash_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetHeaderHashNumber(ctx context.Context, in *types.H256, opts ...grpc.CallOption) (*GetHeaderHashNumberResponse, error) {
+ out := new(GetHeaderHashNumberResponse)
+ err := c.cc.Invoke(ctx, Execution_GetHeaderHashNumber_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) GetForkChoice(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ForkChoice, error) {
+ out := new(ForkChoice)
+ err := c.cc.Invoke(ctx, Execution_GetForkChoice_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) Ready(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ReadyResponse, error) {
+ out := new(ReadyResponse)
+ err := c.cc.Invoke(ctx, Execution_Ready_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *executionClient) FrozenBlocks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*FrozenBlocksResponse, error) {
+ out := new(FrozenBlocksResponse)
+ err := c.cc.Invoke(ctx, Execution_FrozenBlocks_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ExecutionServer is the server API for Execution service.
+// All implementations must embed UnimplementedExecutionServer
+// for forward compatibility
+type ExecutionServer interface {
+ // Chain Putters.
+ InsertBlocks(context.Context, *InsertBlocksRequest) (*InsertionResult, error)
+ // Chain Validation and ForkChoice.
+ ValidateChain(context.Context, *ValidationRequest) (*ValidationReceipt, error)
+ UpdateForkChoice(context.Context, *ForkChoice) (*ForkChoiceReceipt, error)
+ // Block Assembly
+ // EAGAIN design here, AssembleBlock initiates the asynchronous request, and GetAssembleBlock just return it if ready.
+ AssembleBlock(context.Context, *AssembleBlockRequest) (*AssembleBlockResponse, error)
+ GetAssembledBlock(context.Context, *GetAssembledBlockRequest) (*GetAssembledBlockResponse, error)
+ // Chain Getters.
+ CurrentHeader(context.Context, *emptypb.Empty) (*GetHeaderResponse, error)
+ GetTD(context.Context, *GetSegmentRequest) (*GetTDResponse, error)
+ GetHeader(context.Context, *GetSegmentRequest) (*GetHeaderResponse, error)
+ GetBody(context.Context, *GetSegmentRequest) (*GetBodyResponse, error)
+ // Ranges
+ GetBodiesByRange(context.Context, *GetBodiesByRangeRequest) (*GetBodiesBatchResponse, error)
+ GetBodiesByHashes(context.Context, *GetBodiesByHashesRequest) (*GetBodiesBatchResponse, error)
+ // Chain checkers
+ IsCanonicalHash(context.Context, *types.H256) (*IsCanonicalResponse, error)
+ GetHeaderHashNumber(context.Context, *types.H256) (*GetHeaderHashNumberResponse, error)
+ GetForkChoice(context.Context, *emptypb.Empty) (*ForkChoice, error)
+ // Misc
+ // We want to figure out whether we processed snapshots and cleanup sync cycles.
+ Ready(context.Context, *emptypb.Empty) (*ReadyResponse, error)
+ // Frozen blocks are how many blocks are in snapshots .seg files.
+ FrozenBlocks(context.Context, *emptypb.Empty) (*FrozenBlocksResponse, error)
+ mustEmbedUnimplementedExecutionServer()
+}
+
+// UnimplementedExecutionServer must be embedded to have forward compatible implementations.
+type UnimplementedExecutionServer struct {
+}
+
+func (UnimplementedExecutionServer) InsertBlocks(context.Context, *InsertBlocksRequest) (*InsertionResult, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method InsertBlocks not implemented")
+}
+func (UnimplementedExecutionServer) ValidateChain(context.Context, *ValidationRequest) (*ValidationReceipt, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateChain not implemented")
+}
+func (UnimplementedExecutionServer) UpdateForkChoice(context.Context, *ForkChoice) (*ForkChoiceReceipt, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateForkChoice not implemented")
+}
+func (UnimplementedExecutionServer) AssembleBlock(context.Context, *AssembleBlockRequest) (*AssembleBlockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AssembleBlock not implemented")
+}
+func (UnimplementedExecutionServer) GetAssembledBlock(context.Context, *GetAssembledBlockRequest) (*GetAssembledBlockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAssembledBlock not implemented")
+}
+func (UnimplementedExecutionServer) CurrentHeader(context.Context, *emptypb.Empty) (*GetHeaderResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CurrentHeader not implemented")
+}
+func (UnimplementedExecutionServer) GetTD(context.Context, *GetSegmentRequest) (*GetTDResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTD not implemented")
+}
+func (UnimplementedExecutionServer) GetHeader(context.Context, *GetSegmentRequest) (*GetHeaderResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetHeader not implemented")
+}
+func (UnimplementedExecutionServer) GetBody(context.Context, *GetSegmentRequest) (*GetBodyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBody not implemented")
+}
+func (UnimplementedExecutionServer) GetBodiesByRange(context.Context, *GetBodiesByRangeRequest) (*GetBodiesBatchResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBodiesByRange not implemented")
+}
+func (UnimplementedExecutionServer) GetBodiesByHashes(context.Context, *GetBodiesByHashesRequest) (*GetBodiesBatchResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBodiesByHashes not implemented")
+}
+func (UnimplementedExecutionServer) IsCanonicalHash(context.Context, *types.H256) (*IsCanonicalResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method IsCanonicalHash not implemented")
+}
+func (UnimplementedExecutionServer) GetHeaderHashNumber(context.Context, *types.H256) (*GetHeaderHashNumberResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetHeaderHashNumber not implemented")
+}
+func (UnimplementedExecutionServer) GetForkChoice(context.Context, *emptypb.Empty) (*ForkChoice, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetForkChoice not implemented")
+}
+func (UnimplementedExecutionServer) Ready(context.Context, *emptypb.Empty) (*ReadyResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Ready not implemented")
+}
+func (UnimplementedExecutionServer) FrozenBlocks(context.Context, *emptypb.Empty) (*FrozenBlocksResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FrozenBlocks not implemented")
+}
+func (UnimplementedExecutionServer) mustEmbedUnimplementedExecutionServer() {}
+
+// UnsafeExecutionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ExecutionServer will
+// result in compilation errors.
+type UnsafeExecutionServer interface {
+ mustEmbedUnimplementedExecutionServer()
+}
+
+func RegisterExecutionServer(s grpc.ServiceRegistrar, srv ExecutionServer) {
+ s.RegisterService(&Execution_ServiceDesc, srv)
+}
+
+func _Execution_InsertBlocks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(InsertBlocksRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).InsertBlocks(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_InsertBlocks_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).InsertBlocks(ctx, req.(*InsertBlocksRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_ValidateChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).ValidateChain(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_ValidateChain_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).ValidateChain(ctx, req.(*ValidationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_UpdateForkChoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ForkChoice)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).UpdateForkChoice(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_UpdateForkChoice_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).UpdateForkChoice(ctx, req.(*ForkChoice))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_AssembleBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AssembleBlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).AssembleBlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_AssembleBlock_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).AssembleBlock(ctx, req.(*AssembleBlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetAssembledBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAssembledBlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetAssembledBlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetAssembledBlock_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetAssembledBlock(ctx, req.(*GetAssembledBlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_CurrentHeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).CurrentHeader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_CurrentHeader_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).CurrentHeader(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetTD_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetSegmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetTD(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetTD_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetTD(ctx, req.(*GetSegmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetHeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetSegmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetHeader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetHeader_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetHeader(ctx, req.(*GetSegmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetBody_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetSegmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetBody(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetBody_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetBody(ctx, req.(*GetSegmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetBodiesByRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBodiesByRangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetBodiesByRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetBodiesByRange_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetBodiesByRange(ctx, req.(*GetBodiesByRangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetBodiesByHashes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBodiesByHashesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetBodiesByHashes(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetBodiesByHashes_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetBodiesByHashes(ctx, req.(*GetBodiesByHashesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_IsCanonicalHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(types.H256)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).IsCanonicalHash(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_IsCanonicalHash_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).IsCanonicalHash(ctx, req.(*types.H256))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetHeaderHashNumber_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(types.H256)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetHeaderHashNumber(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetHeaderHashNumber_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetHeaderHashNumber(ctx, req.(*types.H256))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_GetForkChoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).GetForkChoice(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_GetForkChoice_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).GetForkChoice(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_Ready_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).Ready(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_Ready_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).Ready(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Execution_FrozenBlocks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExecutionServer).FrozenBlocks(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Execution_FrozenBlocks_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExecutionServer).FrozenBlocks(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Execution_ServiceDesc is the grpc.ServiceDesc for Execution service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Execution_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "execution.Execution",
+ HandlerType: (*ExecutionServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "InsertBlocks",
+ Handler: _Execution_InsertBlocks_Handler,
+ },
+ {
+ MethodName: "ValidateChain",
+ Handler: _Execution_ValidateChain_Handler,
+ },
+ {
+ MethodName: "UpdateForkChoice",
+ Handler: _Execution_UpdateForkChoice_Handler,
+ },
+ {
+ MethodName: "AssembleBlock",
+ Handler: _Execution_AssembleBlock_Handler,
+ },
+ {
+ MethodName: "GetAssembledBlock",
+ Handler: _Execution_GetAssembledBlock_Handler,
+ },
+ {
+ MethodName: "CurrentHeader",
+ Handler: _Execution_CurrentHeader_Handler,
+ },
+ {
+ MethodName: "GetTD",
+ Handler: _Execution_GetTD_Handler,
+ },
+ {
+ MethodName: "GetHeader",
+ Handler: _Execution_GetHeader_Handler,
+ },
+ {
+ MethodName: "GetBody",
+ Handler: _Execution_GetBody_Handler,
+ },
+ {
+ MethodName: "GetBodiesByRange",
+ Handler: _Execution_GetBodiesByRange_Handler,
+ },
+ {
+ MethodName: "GetBodiesByHashes",
+ Handler: _Execution_GetBodiesByHashes_Handler,
+ },
+ {
+ MethodName: "IsCanonicalHash",
+ Handler: _Execution_IsCanonicalHash_Handler,
+ },
+ {
+ MethodName: "GetHeaderHashNumber",
+ Handler: _Execution_GetHeaderHashNumber_Handler,
+ },
+ {
+ MethodName: "GetForkChoice",
+ Handler: _Execution_GetForkChoice_Handler,
+ },
+ {
+ MethodName: "Ready",
+ Handler: _Execution_Ready_Handler,
+ },
+ {
+ MethodName: "FrozenBlocks",
+ Handler: _Execution_FrozenBlocks_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "execution/execution.proto",
+}
diff --git a/erigon-lib/gointerfaces/grpcutil/utils.go b/erigon-lib/gointerfaces/grpcutil/utils.go
new file mode 100644
index 00000000000..a0f7c2883da
--- /dev/null
+++ b/erigon-lib/gointerfaces/grpcutil/utils.go
@@ -0,0 +1,148 @@
+package grpcutil
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+ grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/reflection"
+ "google.golang.org/grpc/status"
+)
+
+func TLS(tlsCACert, tlsCertFile, tlsKeyFile string) (credentials.TransportCredentials, error) {
+ // load peer cert/key, ca cert
+ if tlsCACert == "" {
+ if tlsCertFile == "" && tlsKeyFile == "" {
+ return nil, nil
+ }
+ return credentials.NewServerTLSFromFile(tlsCertFile, tlsKeyFile)
+ }
+ var caCert []byte
+ peerCert, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("load peer cert/key error:%w", err)
+ }
+ caCert, err = os.ReadFile(tlsCACert)
+ if err != nil {
+ return nil, fmt.Errorf("read ca cert file error:%w", err)
+ }
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+ return credentials.NewTLS(&tls.Config{
+ Certificates: []tls.Certificate{peerCert},
+ ClientCAs: caCertPool,
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ MinVersion: tls.VersionTLS12,
+ //nolint:gosec
+ InsecureSkipVerify: true, // This is to make it work when Common Name does not match - remove when procedure is updated for common name
+ }), nil
+}
+
+func NewServer(rateLimit uint32, creds credentials.TransportCredentials) *grpc.Server {
+ var (
+ streamInterceptors []grpc.StreamServerInterceptor
+ unaryInterceptors []grpc.UnaryServerInterceptor
+ )
+ streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor())
+ unaryInterceptors = append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor())
+
+ //if metrics.Enabled {
+ // streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor)
+ // unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor)
+ //}
+
+ //cpus := uint32(runtime.GOMAXPROCS(-1))
+ opts := []grpc.ServerOption{
+ //grpc.NumStreamWorkers(cpus), // reduce amount of goroutines
+ grpc.MaxConcurrentStreams(rateLimit), // to force clients reduce concurrency level
+ // Don't drop the connection, settings accordign to this comment on GitHub
+ // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779
+ grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: 10 * time.Second,
+ PermitWithoutStream: true,
+ }),
+ grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)),
+ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)),
+ grpc.Creds(creds),
+ }
+ grpcServer := grpc.NewServer(opts...)
+ reflection.Register(grpcServer)
+
+ //if metrics.Enabled {
+ // grpc_prometheus.Register(grpcServer)
+ //}
+
+ return grpcServer
+}
+
+func Connect(creds credentials.TransportCredentials, dialAddress string) (*grpc.ClientConn, error) {
+ var dialOpts []grpc.DialOption
+
+ backoffCfg := backoff.DefaultConfig
+ backoffCfg.BaseDelay = 500 * time.Millisecond
+ backoffCfg.MaxDelay = 10 * time.Second
+ dialOpts = []grpc.DialOption{
+ grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoffCfg, MinConnectTimeout: 10 * time.Minute}),
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(200 * datasize.MB))),
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{}),
+ }
+ if creds == nil {
+ dialOpts = append(dialOpts, grpc.WithInsecure())
+ } else {
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds))
+ }
+
+ //if opts.inMemConn != nil {
+ // dialOpts = append(dialOpts, grpc.WithContextDialer(func(ctx context.Context, url string) (net.Conn, error) {
+ // return opts.inMemConn.Dial()
+ // }))
+ //}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ return grpc.DialContext(ctx, dialAddress, dialOpts...)
+}
+
+func IsRetryLater(err error) bool {
+ if s, ok := status.FromError(err); ok {
+ code := s.Code()
+ return code == codes.Unavailable || code == codes.Canceled || code == codes.ResourceExhausted
+ }
+ return false
+}
+
+func IsEndOfStream(err error) bool {
+ if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) {
+ return true
+ }
+ if s, ok := status.FromError(err); ok {
+ return s.Code() == codes.Canceled || strings.Contains(s.Message(), context.Canceled.Error())
+ }
+ return false
+}
+
+// ErrIs - like `errors.Is` but for grpc errors
+func ErrIs(err, target error) bool {
+ if errors.Is(err, target) { // direct clients do return Go-style errors
+ return true
+ }
+ if s, ok := status.FromError(err); ok { // remote clients do return GRPC-style errors
+ return strings.Contains(s.Message(), target.Error())
+ }
+ return false
+}
diff --git a/erigon-lib/gointerfaces/remote/ethbackend.pb.go b/erigon-lib/gointerfaces/remote/ethbackend.pb.go
new file mode 100644
index 00000000000..118a3f7637d
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/ethbackend.pb.go
@@ -0,0 +1,2150 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: remote/ethbackend.proto
+
+package remote
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Event int32
+
+const (
+ Event_HEADER Event = 0
+ Event_PENDING_LOGS Event = 1
+ Event_PENDING_BLOCK Event = 2
+ // NEW_SNAPSHOT - one or many new snapshots (of snapshot sync) were created,
+ // client need to close old file descriptors and open new (on new segments),
+ // then server can remove old files
+ Event_NEW_SNAPSHOT Event = 3
+)
+
+// Enum value maps for Event.
+var (
+ Event_name = map[int32]string{
+ 0: "HEADER",
+ 1: "PENDING_LOGS",
+ 2: "PENDING_BLOCK",
+ 3: "NEW_SNAPSHOT",
+ }
+ Event_value = map[string]int32{
+ "HEADER": 0,
+ "PENDING_LOGS": 1,
+ "PENDING_BLOCK": 2,
+ "NEW_SNAPSHOT": 3,
+ }
+)
+
+func (x Event) Enum() *Event {
+ p := new(Event)
+ *p = x
+ return p
+}
+
+func (x Event) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Event) Descriptor() protoreflect.EnumDescriptor {
+ return file_remote_ethbackend_proto_enumTypes[0].Descriptor()
+}
+
+func (Event) Type() protoreflect.EnumType {
+ return &file_remote_ethbackend_proto_enumTypes[0]
+}
+
+func (x Event) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Event.Descriptor instead.
+func (Event) EnumDescriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{0}
+}
+
+type EtherbaseRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *EtherbaseRequest) Reset() {
+ *x = EtherbaseRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EtherbaseRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EtherbaseRequest) ProtoMessage() {}
+
+func (x *EtherbaseRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EtherbaseRequest.ProtoReflect.Descriptor instead.
+func (*EtherbaseRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{0}
+}
+
+type EtherbaseReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Address *types.H160 `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+}
+
+func (x *EtherbaseReply) Reset() {
+ *x = EtherbaseReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EtherbaseReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EtherbaseReply) ProtoMessage() {}
+
+func (x *EtherbaseReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EtherbaseReply.ProtoReflect.Descriptor instead.
+func (*EtherbaseReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EtherbaseReply) GetAddress() *types.H160 {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+type NetVersionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *NetVersionRequest) Reset() {
+ *x = NetVersionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetVersionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetVersionRequest) ProtoMessage() {}
+
+func (x *NetVersionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetVersionRequest.ProtoReflect.Descriptor instead.
+func (*NetVersionRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{2}
+}
+
+type NetVersionReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *NetVersionReply) Reset() {
+ *x = NetVersionReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetVersionReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetVersionReply) ProtoMessage() {}
+
+func (x *NetVersionReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetVersionReply.ProtoReflect.Descriptor instead.
+func (*NetVersionReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *NetVersionReply) GetId() uint64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type NetPeerCountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *NetPeerCountRequest) Reset() {
+ *x = NetPeerCountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetPeerCountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetPeerCountRequest) ProtoMessage() {}
+
+func (x *NetPeerCountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetPeerCountRequest.ProtoReflect.Descriptor instead.
+func (*NetPeerCountRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{4}
+}
+
+type NetPeerCountReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *NetPeerCountReply) Reset() {
+ *x = NetPeerCountReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetPeerCountReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetPeerCountReply) ProtoMessage() {}
+
+func (x *NetPeerCountReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetPeerCountReply.ProtoReflect.Descriptor instead.
+func (*NetPeerCountReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *NetPeerCountReply) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type ProtocolVersionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ProtocolVersionRequest) Reset() {
+ *x = ProtocolVersionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProtocolVersionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProtocolVersionRequest) ProtoMessage() {}
+
+func (x *ProtocolVersionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProtocolVersionRequest.ProtoReflect.Descriptor instead.
+func (*ProtocolVersionRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{6}
+}
+
+type ProtocolVersionReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *ProtocolVersionReply) Reset() {
+ *x = ProtocolVersionReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProtocolVersionReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProtocolVersionReply) ProtoMessage() {}
+
+func (x *ProtocolVersionReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProtocolVersionReply.ProtoReflect.Descriptor instead.
+func (*ProtocolVersionReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ProtocolVersionReply) GetId() uint64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type ClientVersionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ClientVersionRequest) Reset() {
+ *x = ClientVersionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientVersionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientVersionRequest) ProtoMessage() {}
+
+func (x *ClientVersionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientVersionRequest.ProtoReflect.Descriptor instead.
+func (*ClientVersionRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{8}
+}
+
+type ClientVersionReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+}
+
+func (x *ClientVersionReply) Reset() {
+ *x = ClientVersionReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientVersionReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientVersionReply) ProtoMessage() {}
+
+func (x *ClientVersionReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientVersionReply.ProtoReflect.Descriptor instead.
+func (*ClientVersionReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ClientVersionReply) GetNodeName() string {
+ if x != nil {
+ return x.NodeName
+ }
+ return ""
+}
+
+type SubscribeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type Event `protobuf:"varint,1,opt,name=type,proto3,enum=remote.Event" json:"type,omitempty"`
+}
+
+func (x *SubscribeRequest) Reset() {
+ *x = SubscribeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeRequest) ProtoMessage() {}
+
+func (x *SubscribeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead.
+func (*SubscribeRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *SubscribeRequest) GetType() Event {
+ if x != nil {
+ return x.Type
+ }
+ return Event_HEADER
+}
+
+type SubscribeReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type Event `protobuf:"varint,1,opt,name=type,proto3,enum=remote.Event" json:"type,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // serialized data
+}
+
+func (x *SubscribeReply) Reset() {
+ *x = SubscribeReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeReply) ProtoMessage() {}
+
+func (x *SubscribeReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeReply.ProtoReflect.Descriptor instead.
+func (*SubscribeReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *SubscribeReply) GetType() Event {
+ if x != nil {
+ return x.Type
+ }
+ return Event_HEADER
+}
+
+func (x *SubscribeReply) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type LogsFilterRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AllAddresses bool `protobuf:"varint,1,opt,name=all_addresses,json=allAddresses,proto3" json:"all_addresses,omitempty"`
+ Addresses []*types.H160 `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"`
+ AllTopics bool `protobuf:"varint,3,opt,name=all_topics,json=allTopics,proto3" json:"all_topics,omitempty"`
+ Topics []*types.H256 `protobuf:"bytes,4,rep,name=topics,proto3" json:"topics,omitempty"`
+}
+
+func (x *LogsFilterRequest) Reset() {
+ *x = LogsFilterRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogsFilterRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogsFilterRequest) ProtoMessage() {}
+
+func (x *LogsFilterRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogsFilterRequest.ProtoReflect.Descriptor instead.
+func (*LogsFilterRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *LogsFilterRequest) GetAllAddresses() bool {
+ if x != nil {
+ return x.AllAddresses
+ }
+ return false
+}
+
+func (x *LogsFilterRequest) GetAddresses() []*types.H160 {
+ if x != nil {
+ return x.Addresses
+ }
+ return nil
+}
+
+func (x *LogsFilterRequest) GetAllTopics() bool {
+ if x != nil {
+ return x.AllTopics
+ }
+ return false
+}
+
+func (x *LogsFilterRequest) GetTopics() []*types.H256 {
+ if x != nil {
+ return x.Topics
+ }
+ return nil
+}
+
+type SubscribeLogsReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Address *types.H160 `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ BlockHash *types.H256 `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,3,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+ LogIndex uint64 `protobuf:"varint,5,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+ Topics []*types.H256 `protobuf:"bytes,6,rep,name=topics,proto3" json:"topics,omitempty"`
+ TransactionHash *types.H256 `protobuf:"bytes,7,opt,name=transaction_hash,json=transactionHash,proto3" json:"transaction_hash,omitempty"`
+ TransactionIndex uint64 `protobuf:"varint,8,opt,name=transaction_index,json=transactionIndex,proto3" json:"transaction_index,omitempty"`
+ Removed bool `protobuf:"varint,9,opt,name=removed,proto3" json:"removed,omitempty"`
+}
+
+func (x *SubscribeLogsReply) Reset() {
+ *x = SubscribeLogsReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubscribeLogsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubscribeLogsReply) ProtoMessage() {}
+
+func (x *SubscribeLogsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubscribeLogsReply.ProtoReflect.Descriptor instead.
+func (*SubscribeLogsReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *SubscribeLogsReply) GetAddress() *types.H160 {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *SubscribeLogsReply) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+func (x *SubscribeLogsReply) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+func (x *SubscribeLogsReply) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *SubscribeLogsReply) GetLogIndex() uint64 {
+ if x != nil {
+ return x.LogIndex
+ }
+ return 0
+}
+
+func (x *SubscribeLogsReply) GetTopics() []*types.H256 {
+ if x != nil {
+ return x.Topics
+ }
+ return nil
+}
+
+func (x *SubscribeLogsReply) GetTransactionHash() *types.H256 {
+ if x != nil {
+ return x.TransactionHash
+ }
+ return nil
+}
+
+func (x *SubscribeLogsReply) GetTransactionIndex() uint64 {
+ if x != nil {
+ return x.TransactionIndex
+ }
+ return 0
+}
+
+func (x *SubscribeLogsReply) GetRemoved() bool {
+ if x != nil {
+ return x.Removed
+ }
+ return false
+}
+
+type BlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"`
+ BlockHash *types.H256 `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+}
+
+func (x *BlockRequest) Reset() {
+ *x = BlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlockRequest) ProtoMessage() {}
+
+func (x *BlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlockRequest.ProtoReflect.Descriptor instead.
+func (*BlockRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *BlockRequest) GetBlockHeight() uint64 {
+ if x != nil {
+ return x.BlockHeight
+ }
+ return 0
+}
+
+func (x *BlockRequest) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+type BlockReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockRlp []byte `protobuf:"bytes,1,opt,name=block_rlp,json=blockRlp,proto3" json:"block_rlp,omitempty"`
+ Senders []byte `protobuf:"bytes,2,opt,name=senders,proto3" json:"senders,omitempty"`
+}
+
+func (x *BlockReply) Reset() {
+ *x = BlockReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlockReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlockReply) ProtoMessage() {}
+
+func (x *BlockReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlockReply.ProtoReflect.Descriptor instead.
+func (*BlockReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *BlockReply) GetBlockRlp() []byte {
+ if x != nil {
+ return x.BlockRlp
+ }
+ return nil
+}
+
+func (x *BlockReply) GetSenders() []byte {
+ if x != nil {
+ return x.Senders
+ }
+ return nil
+}
+
+type TxnLookupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxnHash *types.H256 `protobuf:"bytes,1,opt,name=txn_hash,json=txnHash,proto3" json:"txn_hash,omitempty"`
+}
+
+func (x *TxnLookupRequest) Reset() {
+ *x = TxnLookupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TxnLookupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TxnLookupRequest) ProtoMessage() {}
+
+func (x *TxnLookupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TxnLookupRequest.ProtoReflect.Descriptor instead.
+func (*TxnLookupRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *TxnLookupRequest) GetTxnHash() *types.H256 {
+ if x != nil {
+ return x.TxnHash
+ }
+ return nil
+}
+
+type TxnLookupReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockNumber uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+}
+
+func (x *TxnLookupReply) Reset() {
+ *x = TxnLookupReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TxnLookupReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TxnLookupReply) ProtoMessage() {}
+
+func (x *TxnLookupReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TxnLookupReply.ProtoReflect.Descriptor instead.
+func (*TxnLookupReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *TxnLookupReply) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+type NodesInfoRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Limit uint32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *NodesInfoRequest) Reset() {
+ *x = NodesInfoRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodesInfoRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodesInfoRequest) ProtoMessage() {}
+
+func (x *NodesInfoRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodesInfoRequest.ProtoReflect.Descriptor instead.
+func (*NodesInfoRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *NodesInfoRequest) GetLimit() uint32 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+type AddPeerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *AddPeerRequest) Reset() {
+ *x = AddPeerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPeerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPeerRequest) ProtoMessage() {}
+
+func (x *AddPeerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPeerRequest.ProtoReflect.Descriptor instead.
+func (*AddPeerRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *AddPeerRequest) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+type NodesInfoReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NodesInfo []*types.NodeInfoReply `protobuf:"bytes,1,rep,name=nodes_info,json=nodesInfo,proto3" json:"nodes_info,omitempty"`
+}
+
+func (x *NodesInfoReply) Reset() {
+ *x = NodesInfoReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodesInfoReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodesInfoReply) ProtoMessage() {}
+
+func (x *NodesInfoReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodesInfoReply.ProtoReflect.Descriptor instead.
+func (*NodesInfoReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *NodesInfoReply) GetNodesInfo() []*types.NodeInfoReply {
+ if x != nil {
+ return x.NodesInfo
+ }
+ return nil
+}
+
+type PeersReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Peers []*types.PeerInfo `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"`
+}
+
+func (x *PeersReply) Reset() {
+ *x = PeersReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeersReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeersReply) ProtoMessage() {}
+
+func (x *PeersReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeersReply.ProtoReflect.Descriptor instead.
+func (*PeersReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *PeersReply) GetPeers() []*types.PeerInfo {
+ if x != nil {
+ return x.Peers
+ }
+ return nil
+}
+
+type AddPeerReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
+}
+
+func (x *AddPeerReply) Reset() {
+ *x = AddPeerReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPeerReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPeerReply) ProtoMessage() {}
+
+func (x *AddPeerReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPeerReply.ProtoReflect.Descriptor instead.
+func (*AddPeerReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *AddPeerReply) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+
+type PendingBlockReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockRlp []byte `protobuf:"bytes,1,opt,name=block_rlp,json=blockRlp,proto3" json:"block_rlp,omitempty"`
+}
+
+func (x *PendingBlockReply) Reset() {
+ *x = PendingBlockReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PendingBlockReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PendingBlockReply) ProtoMessage() {}
+
+func (x *PendingBlockReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PendingBlockReply.ProtoReflect.Descriptor instead.
+func (*PendingBlockReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *PendingBlockReply) GetBlockRlp() []byte {
+ if x != nil {
+ return x.BlockRlp
+ }
+ return nil
+}
+
+type EngineGetPayloadBodiesByHashV1Request struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hashes []*types.H256 `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+}
+
+func (x *EngineGetPayloadBodiesByHashV1Request) Reset() {
+ *x = EngineGetPayloadBodiesByHashV1Request{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EngineGetPayloadBodiesByHashV1Request) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EngineGetPayloadBodiesByHashV1Request) ProtoMessage() {}
+
+func (x *EngineGetPayloadBodiesByHashV1Request) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EngineGetPayloadBodiesByHashV1Request.ProtoReflect.Descriptor instead.
+func (*EngineGetPayloadBodiesByHashV1Request) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *EngineGetPayloadBodiesByHashV1Request) GetHashes() []*types.H256 {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+type EngineGetPayloadBodiesByRangeV1Request struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ Count uint64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *EngineGetPayloadBodiesByRangeV1Request) Reset() {
+ *x = EngineGetPayloadBodiesByRangeV1Request{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EngineGetPayloadBodiesByRangeV1Request) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EngineGetPayloadBodiesByRangeV1Request) ProtoMessage() {}
+
+func (x *EngineGetPayloadBodiesByRangeV1Request) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EngineGetPayloadBodiesByRangeV1Request.ProtoReflect.Descriptor instead.
+func (*EngineGetPayloadBodiesByRangeV1Request) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *EngineGetPayloadBodiesByRangeV1Request) GetStart() uint64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *EngineGetPayloadBodiesByRangeV1Request) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type BorEventRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BorTxHash *types.H256 `protobuf:"bytes,1,opt,name=bor_tx_hash,json=borTxHash,proto3" json:"bor_tx_hash,omitempty"`
+}
+
+func (x *BorEventRequest) Reset() {
+ *x = BorEventRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BorEventRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BorEventRequest) ProtoMessage() {}
+
+func (x *BorEventRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BorEventRequest.ProtoReflect.Descriptor instead.
+func (*BorEventRequest) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *BorEventRequest) GetBorTxHash() *types.H256 {
+ if x != nil {
+ return x.BorTxHash
+ }
+ return nil
+}
+
+type BorEventReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Present bool `protobuf:"varint,1,opt,name=present,proto3" json:"present,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,2,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ EventRlps [][]byte `protobuf:"bytes,3,rep,name=event_rlps,json=eventRlps,proto3" json:"event_rlps,omitempty"`
+}
+
+func (x *BorEventReply) Reset() {
+ *x = BorEventReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_ethbackend_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BorEventReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BorEventReply) ProtoMessage() {}
+
+func (x *BorEventReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_ethbackend_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BorEventReply.ProtoReflect.Descriptor instead.
+func (*BorEventReply) Descriptor() ([]byte, []int) {
+ return file_remote_ethbackend_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *BorEventReply) GetPresent() bool {
+ if x != nil {
+ return x.Present
+ }
+ return false
+}
+
+func (x *BorEventReply) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+func (x *BorEventReply) GetEventRlps() [][]byte {
+ if x != nil {
+ return x.EventRlps
+ }
+ return nil
+}
+
+var File_remote_ethbackend_proto protoreflect.FileDescriptor
+
+var file_remote_ethbackend_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x65, 0x74, 0x68, 0x62, 0x61, 0x63, 0x6b,
+ 0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0x12, 0x0a, 0x10, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x37, 0x0a, 0x0e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, 0x61,
+ 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x13,
+ 0x0a, 0x11, 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65,
+ 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a,
+ 0x11, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, 0x0e,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21,
+ 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xa7, 0x01, 0x0a, 0x11, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61,
+ 0x6c, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
+ 0x12, 0x29, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30,
+ 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61,
+ 0x6c, 0x6c, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22,
+ 0xdf, 0x02, 0x0a, 0x12, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67,
+ 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a,
+ 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09,
+ 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04,
+ 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a,
+ 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x73, 0x12, 0x36, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x64, 0x22, 0x5d, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65,
+ 0x69, 0x67, 0x68, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61,
+ 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68,
+ 0x22, 0x43, 0x0a, 0x0a, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b,
+ 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6c, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6c, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x73,
+ 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x65,
+ 0x6e, 0x64, 0x65, 0x72, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x08, 0x74, 0x78, 0x6e,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, 0x74, 0x78, 0x6e, 0x48, 0x61, 0x73,
+ 0x68, 0x22, 0x33, 0x0a, 0x0e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d,
+ 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49,
+ 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x22, 0x22, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x75, 0x72, 0x6c, 0x22, 0x45, 0x0a, 0x0e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x33, 0x0a, 0x0a, 0x50,
+ 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x65, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73,
+ 0x22, 0x28, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x30, 0x0a, 0x11, 0x50, 0x65,
+ 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12,
+ 0x1b, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6c, 0x70, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6c, 0x70, 0x22, 0x4c, 0x0a, 0x25,
+ 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x56, 0x31, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32,
+ 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x26, 0x45, 0x6e,
+ 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f,
+ 0x64, 0x69, 0x65, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x56, 0x31, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x22, 0x3e, 0x0a, 0x0f, 0x42, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x0b, 0x62, 0x6f, 0x72, 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61,
+ 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6f, 0x72, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68,
+ 0x22, 0x6b, 0x0a, 0x0d, 0x42, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6c, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0c, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x6c, 0x70, 0x73, 0x2a, 0x4a, 0x0a,
+ 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52,
+ 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f,
+ 0x47, 0x53, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f,
+ 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x53,
+ 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x03, 0x32, 0xd3, 0x07, 0x0a, 0x0a, 0x45, 0x54,
+ 0x48, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x12, 0x3d, 0x0a, 0x09, 0x45, 0x74, 0x68, 0x65,
+ 0x72, 0x62, 0x61, 0x73, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45,
+ 0x74, 0x68, 0x65, 0x72, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x62, 0x61,
+ 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e,
+ 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x46, 0x0a, 0x0c, 0x4e, 0x65, 0x74,
+ 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e,
+ 0x4e, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4f, 0x0a, 0x0f, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x0d, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x62, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x4a, 0x0a, 0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01,
+ 0x30, 0x01, 0x12, 0x31, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x2e, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x09, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x54, 0x78, 0x6e, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x54, 0x78, 0x6e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49,
+ 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x65, 0x65,
+ 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x50, 0x65,
+ 0x65, 0x72, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x50,
+ 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x41, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x42, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x42, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x42, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42,
+ 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_remote_ethbackend_proto_rawDescOnce sync.Once
+ file_remote_ethbackend_proto_rawDescData = file_remote_ethbackend_proto_rawDesc
+)
+
+func file_remote_ethbackend_proto_rawDescGZIP() []byte {
+ file_remote_ethbackend_proto_rawDescOnce.Do(func() {
+ file_remote_ethbackend_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_ethbackend_proto_rawDescData)
+ })
+ return file_remote_ethbackend_proto_rawDescData
+}
+
+var file_remote_ethbackend_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_remote_ethbackend_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
+var file_remote_ethbackend_proto_goTypes = []interface{}{
+ (Event)(0), // 0: remote.Event
+ (*EtherbaseRequest)(nil), // 1: remote.EtherbaseRequest
+ (*EtherbaseReply)(nil), // 2: remote.EtherbaseReply
+ (*NetVersionRequest)(nil), // 3: remote.NetVersionRequest
+ (*NetVersionReply)(nil), // 4: remote.NetVersionReply
+ (*NetPeerCountRequest)(nil), // 5: remote.NetPeerCountRequest
+ (*NetPeerCountReply)(nil), // 6: remote.NetPeerCountReply
+ (*ProtocolVersionRequest)(nil), // 7: remote.ProtocolVersionRequest
+ (*ProtocolVersionReply)(nil), // 8: remote.ProtocolVersionReply
+ (*ClientVersionRequest)(nil), // 9: remote.ClientVersionRequest
+ (*ClientVersionReply)(nil), // 10: remote.ClientVersionReply
+ (*SubscribeRequest)(nil), // 11: remote.SubscribeRequest
+ (*SubscribeReply)(nil), // 12: remote.SubscribeReply
+ (*LogsFilterRequest)(nil), // 13: remote.LogsFilterRequest
+ (*SubscribeLogsReply)(nil), // 14: remote.SubscribeLogsReply
+ (*BlockRequest)(nil), // 15: remote.BlockRequest
+ (*BlockReply)(nil), // 16: remote.BlockReply
+ (*TxnLookupRequest)(nil), // 17: remote.TxnLookupRequest
+ (*TxnLookupReply)(nil), // 18: remote.TxnLookupReply
+ (*NodesInfoRequest)(nil), // 19: remote.NodesInfoRequest
+ (*AddPeerRequest)(nil), // 20: remote.AddPeerRequest
+ (*NodesInfoReply)(nil), // 21: remote.NodesInfoReply
+ (*PeersReply)(nil), // 22: remote.PeersReply
+ (*AddPeerReply)(nil), // 23: remote.AddPeerReply
+ (*PendingBlockReply)(nil), // 24: remote.PendingBlockReply
+ (*EngineGetPayloadBodiesByHashV1Request)(nil), // 25: remote.EngineGetPayloadBodiesByHashV1Request
+ (*EngineGetPayloadBodiesByRangeV1Request)(nil), // 26: remote.EngineGetPayloadBodiesByRangeV1Request
+ (*BorEventRequest)(nil), // 27: remote.BorEventRequest
+ (*BorEventReply)(nil), // 28: remote.BorEventReply
+ (*types.H160)(nil), // 29: types.H160
+ (*types.H256)(nil), // 30: types.H256
+ (*types.NodeInfoReply)(nil), // 31: types.NodeInfoReply
+ (*types.PeerInfo)(nil), // 32: types.PeerInfo
+ (*emptypb.Empty)(nil), // 33: google.protobuf.Empty
+ (*types.VersionReply)(nil), // 34: types.VersionReply
+}
+var file_remote_ethbackend_proto_depIdxs = []int32{
+ 29, // 0: remote.EtherbaseReply.address:type_name -> types.H160
+ 0, // 1: remote.SubscribeRequest.type:type_name -> remote.Event
+ 0, // 2: remote.SubscribeReply.type:type_name -> remote.Event
+ 29, // 3: remote.LogsFilterRequest.addresses:type_name -> types.H160
+ 30, // 4: remote.LogsFilterRequest.topics:type_name -> types.H256
+ 29, // 5: remote.SubscribeLogsReply.address:type_name -> types.H160
+ 30, // 6: remote.SubscribeLogsReply.block_hash:type_name -> types.H256
+ 30, // 7: remote.SubscribeLogsReply.topics:type_name -> types.H256
+ 30, // 8: remote.SubscribeLogsReply.transaction_hash:type_name -> types.H256
+ 30, // 9: remote.BlockRequest.block_hash:type_name -> types.H256
+ 30, // 10: remote.TxnLookupRequest.txn_hash:type_name -> types.H256
+ 31, // 11: remote.NodesInfoReply.nodes_info:type_name -> types.NodeInfoReply
+ 32, // 12: remote.PeersReply.peers:type_name -> types.PeerInfo
+ 30, // 13: remote.EngineGetPayloadBodiesByHashV1Request.hashes:type_name -> types.H256
+ 30, // 14: remote.BorEventRequest.bor_tx_hash:type_name -> types.H256
+ 1, // 15: remote.ETHBACKEND.Etherbase:input_type -> remote.EtherbaseRequest
+ 3, // 16: remote.ETHBACKEND.NetVersion:input_type -> remote.NetVersionRequest
+ 5, // 17: remote.ETHBACKEND.NetPeerCount:input_type -> remote.NetPeerCountRequest
+ 33, // 18: remote.ETHBACKEND.Version:input_type -> google.protobuf.Empty
+ 7, // 19: remote.ETHBACKEND.ProtocolVersion:input_type -> remote.ProtocolVersionRequest
+ 9, // 20: remote.ETHBACKEND.ClientVersion:input_type -> remote.ClientVersionRequest
+ 11, // 21: remote.ETHBACKEND.Subscribe:input_type -> remote.SubscribeRequest
+ 13, // 22: remote.ETHBACKEND.SubscribeLogs:input_type -> remote.LogsFilterRequest
+ 15, // 23: remote.ETHBACKEND.Block:input_type -> remote.BlockRequest
+ 17, // 24: remote.ETHBACKEND.TxnLookup:input_type -> remote.TxnLookupRequest
+ 19, // 25: remote.ETHBACKEND.NodeInfo:input_type -> remote.NodesInfoRequest
+ 33, // 26: remote.ETHBACKEND.Peers:input_type -> google.protobuf.Empty
+ 20, // 27: remote.ETHBACKEND.AddPeer:input_type -> remote.AddPeerRequest
+ 33, // 28: remote.ETHBACKEND.PendingBlock:input_type -> google.protobuf.Empty
+ 27, // 29: remote.ETHBACKEND.BorEvent:input_type -> remote.BorEventRequest
+ 2, // 30: remote.ETHBACKEND.Etherbase:output_type -> remote.EtherbaseReply
+ 4, // 31: remote.ETHBACKEND.NetVersion:output_type -> remote.NetVersionReply
+ 6, // 32: remote.ETHBACKEND.NetPeerCount:output_type -> remote.NetPeerCountReply
+ 34, // 33: remote.ETHBACKEND.Version:output_type -> types.VersionReply
+ 8, // 34: remote.ETHBACKEND.ProtocolVersion:output_type -> remote.ProtocolVersionReply
+ 10, // 35: remote.ETHBACKEND.ClientVersion:output_type -> remote.ClientVersionReply
+ 12, // 36: remote.ETHBACKEND.Subscribe:output_type -> remote.SubscribeReply
+ 14, // 37: remote.ETHBACKEND.SubscribeLogs:output_type -> remote.SubscribeLogsReply
+ 16, // 38: remote.ETHBACKEND.Block:output_type -> remote.BlockReply
+ 18, // 39: remote.ETHBACKEND.TxnLookup:output_type -> remote.TxnLookupReply
+ 21, // 40: remote.ETHBACKEND.NodeInfo:output_type -> remote.NodesInfoReply
+ 22, // 41: remote.ETHBACKEND.Peers:output_type -> remote.PeersReply
+ 23, // 42: remote.ETHBACKEND.AddPeer:output_type -> remote.AddPeerReply
+ 24, // 43: remote.ETHBACKEND.PendingBlock:output_type -> remote.PendingBlockReply
+ 28, // 44: remote.ETHBACKEND.BorEvent:output_type -> remote.BorEventReply
+ 30, // [30:45] is the sub-list for method output_type
+ 15, // [15:30] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
+}
+
+func init() { file_remote_ethbackend_proto_init() }
+func file_remote_ethbackend_proto_init() {
+ if File_remote_ethbackend_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_remote_ethbackend_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EtherbaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EtherbaseReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetVersionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetVersionReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetPeerCountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetPeerCountReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProtocolVersionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProtocolVersionReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClientVersionRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClientVersionReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogsFilterRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubscribeLogsReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlockReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TxnLookupRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TxnLookupReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodesInfoRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPeerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodesInfoReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeersReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPeerReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PendingBlockReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EngineGetPayloadBodiesByHashV1Request); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EngineGetPayloadBodiesByRangeV1Request); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BorEventRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_ethbackend_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BorEventReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_remote_ethbackend_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 28,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_remote_ethbackend_proto_goTypes,
+ DependencyIndexes: file_remote_ethbackend_proto_depIdxs,
+ EnumInfos: file_remote_ethbackend_proto_enumTypes,
+ MessageInfos: file_remote_ethbackend_proto_msgTypes,
+ }.Build()
+ File_remote_ethbackend_proto = out.File
+ file_remote_ethbackend_proto_rawDesc = nil
+ file_remote_ethbackend_proto_goTypes = nil
+ file_remote_ethbackend_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go b/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go
new file mode 100644
index 00000000000..4a410a32b86
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/ethbackend_grpc.pb.go
@@ -0,0 +1,713 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: remote/ethbackend.proto
+
+package remote
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ ETHBACKEND_Etherbase_FullMethodName = "/remote.ETHBACKEND/Etherbase"
+ ETHBACKEND_NetVersion_FullMethodName = "/remote.ETHBACKEND/NetVersion"
+ ETHBACKEND_NetPeerCount_FullMethodName = "/remote.ETHBACKEND/NetPeerCount"
+ ETHBACKEND_Version_FullMethodName = "/remote.ETHBACKEND/Version"
+ ETHBACKEND_ProtocolVersion_FullMethodName = "/remote.ETHBACKEND/ProtocolVersion"
+ ETHBACKEND_ClientVersion_FullMethodName = "/remote.ETHBACKEND/ClientVersion"
+ ETHBACKEND_Subscribe_FullMethodName = "/remote.ETHBACKEND/Subscribe"
+ ETHBACKEND_SubscribeLogs_FullMethodName = "/remote.ETHBACKEND/SubscribeLogs"
+ ETHBACKEND_Block_FullMethodName = "/remote.ETHBACKEND/Block"
+ ETHBACKEND_TxnLookup_FullMethodName = "/remote.ETHBACKEND/TxnLookup"
+ ETHBACKEND_NodeInfo_FullMethodName = "/remote.ETHBACKEND/NodeInfo"
+ ETHBACKEND_Peers_FullMethodName = "/remote.ETHBACKEND/Peers"
+ ETHBACKEND_AddPeer_FullMethodName = "/remote.ETHBACKEND/AddPeer"
+ ETHBACKEND_PendingBlock_FullMethodName = "/remote.ETHBACKEND/PendingBlock"
+ ETHBACKEND_BorEvent_FullMethodName = "/remote.ETHBACKEND/BorEvent"
+)
+
+// ETHBACKENDClient is the client API for ETHBACKEND service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ETHBACKENDClient interface {
+ Etherbase(ctx context.Context, in *EtherbaseRequest, opts ...grpc.CallOption) (*EtherbaseReply, error)
+ NetVersion(ctx context.Context, in *NetVersionRequest, opts ...grpc.CallOption) (*NetVersionReply, error)
+ NetPeerCount(ctx context.Context, in *NetPeerCountRequest, opts ...grpc.CallOption) (*NetPeerCountReply, error)
+ // Version returns the service version number
+ Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error)
+ // ProtocolVersion returns the Ethereum protocol version number (e.g. 66 for ETH66).
+ ProtocolVersion(ctx context.Context, in *ProtocolVersionRequest, opts ...grpc.CallOption) (*ProtocolVersionReply, error)
+ // ClientVersion returns the Ethereum client version string using node name convention (e.g. TurboGeth/v2021.03.2-alpha/Linux).
+ ClientVersion(ctx context.Context, in *ClientVersionRequest, opts ...grpc.CallOption) (*ClientVersionReply, error)
+ Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (ETHBACKEND_SubscribeClient, error)
+ // Only one subscription is needed to serve all the users, LogsFilterRequest allows to dynamically modifying the subscription
+ SubscribeLogs(ctx context.Context, opts ...grpc.CallOption) (ETHBACKEND_SubscribeLogsClient, error)
+ // High-level method - can read block from db, snapshots or apply any other logic
+ // it doesn't provide consistency
+ // Request fields are optional - it's ok to request block only by hash or only by number
+ Block(ctx context.Context, in *BlockRequest, opts ...grpc.CallOption) (*BlockReply, error)
+ // High-level method - can find block number by txn hash
+ // it doesn't provide consistency
+ TxnLookup(ctx context.Context, in *TxnLookupRequest, opts ...grpc.CallOption) (*TxnLookupReply, error)
+ // NodeInfo collects and returns NodeInfo from all running sentry instances.
+ NodeInfo(ctx context.Context, in *NodesInfoRequest, opts ...grpc.CallOption) (*NodesInfoReply, error)
+ // Peers collects and returns peers information from all running sentry instances.
+ Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error)
+ AddPeer(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error)
+ // PendingBlock returns latest built block.
+ PendingBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingBlockReply, error)
+ BorEvent(ctx context.Context, in *BorEventRequest, opts ...grpc.CallOption) (*BorEventReply, error)
+}
+
+type eTHBACKENDClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewETHBACKENDClient(cc grpc.ClientConnInterface) ETHBACKENDClient {
+ return &eTHBACKENDClient{cc}
+}
+
+func (c *eTHBACKENDClient) Etherbase(ctx context.Context, in *EtherbaseRequest, opts ...grpc.CallOption) (*EtherbaseReply, error) {
+ out := new(EtherbaseReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_Etherbase_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) NetVersion(ctx context.Context, in *NetVersionRequest, opts ...grpc.CallOption) (*NetVersionReply, error) {
+ out := new(NetVersionReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_NetVersion_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) NetPeerCount(ctx context.Context, in *NetPeerCountRequest, opts ...grpc.CallOption) (*NetPeerCountReply, error) {
+ out := new(NetPeerCountReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_NetPeerCount_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ out := new(types.VersionReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_Version_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) ProtocolVersion(ctx context.Context, in *ProtocolVersionRequest, opts ...grpc.CallOption) (*ProtocolVersionReply, error) {
+ out := new(ProtocolVersionReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_ProtocolVersion_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) ClientVersion(ctx context.Context, in *ClientVersionRequest, opts ...grpc.CallOption) (*ClientVersionReply, error) {
+ out := new(ClientVersionReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_ClientVersion_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (ETHBACKEND_SubscribeClient, error) {
+ stream, err := c.cc.NewStream(ctx, ÐBACKEND_ServiceDesc.Streams[0], ETHBACKEND_Subscribe_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &eTHBACKENDSubscribeClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type ETHBACKEND_SubscribeClient interface {
+ Recv() (*SubscribeReply, error)
+ grpc.ClientStream
+}
+
+type eTHBACKENDSubscribeClient struct {
+ grpc.ClientStream
+}
+
+func (x *eTHBACKENDSubscribeClient) Recv() (*SubscribeReply, error) {
+ m := new(SubscribeReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *eTHBACKENDClient) SubscribeLogs(ctx context.Context, opts ...grpc.CallOption) (ETHBACKEND_SubscribeLogsClient, error) {
+ stream, err := c.cc.NewStream(ctx, ÐBACKEND_ServiceDesc.Streams[1], ETHBACKEND_SubscribeLogs_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &eTHBACKENDSubscribeLogsClient{stream}
+ return x, nil
+}
+
+type ETHBACKEND_SubscribeLogsClient interface {
+ Send(*LogsFilterRequest) error
+ Recv() (*SubscribeLogsReply, error)
+ grpc.ClientStream
+}
+
+type eTHBACKENDSubscribeLogsClient struct {
+ grpc.ClientStream
+}
+
+func (x *eTHBACKENDSubscribeLogsClient) Send(m *LogsFilterRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *eTHBACKENDSubscribeLogsClient) Recv() (*SubscribeLogsReply, error) {
+ m := new(SubscribeLogsReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *eTHBACKENDClient) Block(ctx context.Context, in *BlockRequest, opts ...grpc.CallOption) (*BlockReply, error) {
+ out := new(BlockReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_Block_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) TxnLookup(ctx context.Context, in *TxnLookupRequest, opts ...grpc.CallOption) (*TxnLookupReply, error) {
+ out := new(TxnLookupReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_TxnLookup_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) NodeInfo(ctx context.Context, in *NodesInfoRequest, opts ...grpc.CallOption) (*NodesInfoReply, error) {
+ out := new(NodesInfoReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_NodeInfo_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) {
+ out := new(PeersReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_Peers_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) AddPeer(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error) {
+ out := new(AddPeerReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_AddPeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) PendingBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingBlockReply, error) {
+ out := new(PendingBlockReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_PendingBlock_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *eTHBACKENDClient) BorEvent(ctx context.Context, in *BorEventRequest, opts ...grpc.CallOption) (*BorEventReply, error) {
+ out := new(BorEventReply)
+ err := c.cc.Invoke(ctx, ETHBACKEND_BorEvent_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ETHBACKENDServer is the server API for ETHBACKEND service.
+// All implementations must embed UnimplementedETHBACKENDServer
+// for forward compatibility
+type ETHBACKENDServer interface {
+ Etherbase(context.Context, *EtherbaseRequest) (*EtherbaseReply, error)
+ NetVersion(context.Context, *NetVersionRequest) (*NetVersionReply, error)
+ NetPeerCount(context.Context, *NetPeerCountRequest) (*NetPeerCountReply, error)
+ // Version returns the service version number
+ Version(context.Context, *emptypb.Empty) (*types.VersionReply, error)
+ // ProtocolVersion returns the Ethereum protocol version number (e.g. 66 for ETH66).
+ ProtocolVersion(context.Context, *ProtocolVersionRequest) (*ProtocolVersionReply, error)
+ // ClientVersion returns the Ethereum client version string using node name convention (e.g. TurboGeth/v2021.03.2-alpha/Linux).
+ ClientVersion(context.Context, *ClientVersionRequest) (*ClientVersionReply, error)
+ Subscribe(*SubscribeRequest, ETHBACKEND_SubscribeServer) error
+ // Only one subscription is needed to serve all the users, LogsFilterRequest allows to dynamically modifying the subscription
+ SubscribeLogs(ETHBACKEND_SubscribeLogsServer) error
+ // High-level method - can read block from db, snapshots or apply any other logic
+ // it doesn't provide consistency
+ // Request fields are optional - it's ok to request block only by hash or only by number
+ Block(context.Context, *BlockRequest) (*BlockReply, error)
+ // High-level method - can find block number by txn hash
+ // it doesn't provide consistency
+ TxnLookup(context.Context, *TxnLookupRequest) (*TxnLookupReply, error)
+ // NodeInfo collects and returns NodeInfo from all running sentry instances.
+ NodeInfo(context.Context, *NodesInfoRequest) (*NodesInfoReply, error)
+ // Peers collects and returns peers information from all running sentry instances.
+ Peers(context.Context, *emptypb.Empty) (*PeersReply, error)
+ AddPeer(context.Context, *AddPeerRequest) (*AddPeerReply, error)
+ // PendingBlock returns latest built block.
+ PendingBlock(context.Context, *emptypb.Empty) (*PendingBlockReply, error)
+ BorEvent(context.Context, *BorEventRequest) (*BorEventReply, error)
+ mustEmbedUnimplementedETHBACKENDServer()
+}
+
+// UnimplementedETHBACKENDServer must be embedded to have forward compatible implementations.
+type UnimplementedETHBACKENDServer struct {
+}
+
+func (UnimplementedETHBACKENDServer) Etherbase(context.Context, *EtherbaseRequest) (*EtherbaseReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Etherbase not implemented")
+}
+func (UnimplementedETHBACKENDServer) NetVersion(context.Context, *NetVersionRequest) (*NetVersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NetVersion not implemented")
+}
+func (UnimplementedETHBACKENDServer) NetPeerCount(context.Context, *NetPeerCountRequest) (*NetPeerCountReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NetPeerCount not implemented")
+}
+func (UnimplementedETHBACKENDServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
+}
+func (UnimplementedETHBACKENDServer) ProtocolVersion(context.Context, *ProtocolVersionRequest) (*ProtocolVersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ProtocolVersion not implemented")
+}
+func (UnimplementedETHBACKENDServer) ClientVersion(context.Context, *ClientVersionRequest) (*ClientVersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientVersion not implemented")
+}
+func (UnimplementedETHBACKENDServer) Subscribe(*SubscribeRequest, ETHBACKEND_SubscribeServer) error {
+ return status.Errorf(codes.Unimplemented, "method Subscribe not implemented")
+}
+func (UnimplementedETHBACKENDServer) SubscribeLogs(ETHBACKEND_SubscribeLogsServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeLogs not implemented")
+}
+func (UnimplementedETHBACKENDServer) Block(context.Context, *BlockRequest) (*BlockReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Block not implemented")
+}
+func (UnimplementedETHBACKENDServer) TxnLookup(context.Context, *TxnLookupRequest) (*TxnLookupReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method TxnLookup not implemented")
+}
+func (UnimplementedETHBACKENDServer) NodeInfo(context.Context, *NodesInfoRequest) (*NodesInfoReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented")
+}
+func (UnimplementedETHBACKENDServer) Peers(context.Context, *emptypb.Empty) (*PeersReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Peers not implemented")
+}
+func (UnimplementedETHBACKENDServer) AddPeer(context.Context, *AddPeerRequest) (*AddPeerReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddPeer not implemented")
+}
+func (UnimplementedETHBACKENDServer) PendingBlock(context.Context, *emptypb.Empty) (*PendingBlockReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PendingBlock not implemented")
+}
+func (UnimplementedETHBACKENDServer) BorEvent(context.Context, *BorEventRequest) (*BorEventReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method BorEvent not implemented")
+}
+func (UnimplementedETHBACKENDServer) mustEmbedUnimplementedETHBACKENDServer() {}
+
+// UnsafeETHBACKENDServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ETHBACKENDServer will
+// result in compilation errors.
+type UnsafeETHBACKENDServer interface {
+ mustEmbedUnimplementedETHBACKENDServer()
+}
+
+func RegisterETHBACKENDServer(s grpc.ServiceRegistrar, srv ETHBACKENDServer) {
+ s.RegisterService(ÐBACKEND_ServiceDesc, srv)
+}
+
+func _ETHBACKEND_Etherbase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EtherbaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).Etherbase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_Etherbase_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).Etherbase(ctx, req.(*EtherbaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_NetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NetVersionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).NetVersion(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_NetVersion_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).NetVersion(ctx, req.(*NetVersionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_NetPeerCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NetPeerCountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).NetPeerCount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_NetPeerCount_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).NetPeerCount(ctx, req.(*NetPeerCountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).Version(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_Version_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).Version(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_ProtocolVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProtocolVersionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).ProtocolVersion(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_ProtocolVersion_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).ProtocolVersion(ctx, req.(*ProtocolVersionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_ClientVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ClientVersionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).ClientVersion(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_ClientVersion_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).ClientVersion(ctx, req.(*ClientVersionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ETHBACKENDServer).Subscribe(m, &eTHBACKENDSubscribeServer{stream})
+}
+
+type ETHBACKEND_SubscribeServer interface {
+ Send(*SubscribeReply) error
+ grpc.ServerStream
+}
+
+type eTHBACKENDSubscribeServer struct {
+ grpc.ServerStream
+}
+
+func (x *eTHBACKENDSubscribeServer) Send(m *SubscribeReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _ETHBACKEND_SubscribeLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(ETHBACKENDServer).SubscribeLogs(&eTHBACKENDSubscribeLogsServer{stream})
+}
+
+type ETHBACKEND_SubscribeLogsServer interface {
+ Send(*SubscribeLogsReply) error
+ Recv() (*LogsFilterRequest, error)
+ grpc.ServerStream
+}
+
+type eTHBACKENDSubscribeLogsServer struct {
+ grpc.ServerStream
+}
+
+func (x *eTHBACKENDSubscribeLogsServer) Send(m *SubscribeLogsReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *eTHBACKENDSubscribeLogsServer) Recv() (*LogsFilterRequest, error) {
+ m := new(LogsFilterRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _ETHBACKEND_Block_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(BlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).Block(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_Block_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).Block(ctx, req.(*BlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_TxnLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TxnLookupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).TxnLookup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_TxnLookup_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).TxnLookup(ctx, req.(*TxnLookupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NodesInfoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).NodeInfo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_NodeInfo_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).NodeInfo(ctx, req.(*NodesInfoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_Peers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).Peers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_Peers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).Peers(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_AddPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddPeerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).AddPeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_AddPeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).AddPeer(ctx, req.(*AddPeerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_PendingBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).PendingBlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_PendingBlock_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).PendingBlock(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ETHBACKEND_BorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(BorEventRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ETHBACKENDServer).BorEvent(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ETHBACKEND_BorEvent_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ETHBACKENDServer).BorEvent(ctx, req.(*BorEventRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// ETHBACKEND_ServiceDesc is the grpc.ServiceDesc for ETHBACKEND service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ETHBACKEND_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "remote.ETHBACKEND",
+ HandlerType: (*ETHBACKENDServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Etherbase",
+ Handler: _ETHBACKEND_Etherbase_Handler,
+ },
+ {
+ MethodName: "NetVersion",
+ Handler: _ETHBACKEND_NetVersion_Handler,
+ },
+ {
+ MethodName: "NetPeerCount",
+ Handler: _ETHBACKEND_NetPeerCount_Handler,
+ },
+ {
+ MethodName: "Version",
+ Handler: _ETHBACKEND_Version_Handler,
+ },
+ {
+ MethodName: "ProtocolVersion",
+ Handler: _ETHBACKEND_ProtocolVersion_Handler,
+ },
+ {
+ MethodName: "ClientVersion",
+ Handler: _ETHBACKEND_ClientVersion_Handler,
+ },
+ {
+ MethodName: "Block",
+ Handler: _ETHBACKEND_Block_Handler,
+ },
+ {
+ MethodName: "TxnLookup",
+ Handler: _ETHBACKEND_TxnLookup_Handler,
+ },
+ {
+ MethodName: "NodeInfo",
+ Handler: _ETHBACKEND_NodeInfo_Handler,
+ },
+ {
+ MethodName: "Peers",
+ Handler: _ETHBACKEND_Peers_Handler,
+ },
+ {
+ MethodName: "AddPeer",
+ Handler: _ETHBACKEND_AddPeer_Handler,
+ },
+ {
+ MethodName: "PendingBlock",
+ Handler: _ETHBACKEND_PendingBlock_Handler,
+ },
+ {
+ MethodName: "BorEvent",
+ Handler: _ETHBACKEND_BorEvent_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Subscribe",
+ Handler: _ETHBACKEND_Subscribe_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "SubscribeLogs",
+ Handler: _ETHBACKEND_SubscribeLogs_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "remote/ethbackend.proto",
+}
diff --git a/erigon-lib/gointerfaces/remote/kv.pb.go b/erigon-lib/gointerfaces/remote/kv.pb.go
new file mode 100644
index 00000000000..a7f659b68a7
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/kv.pb.go
@@ -0,0 +1,2392 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: remote/kv.proto
+
+package remote
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Op int32
+
+const (
+ Op_FIRST Op = 0
+ Op_FIRST_DUP Op = 1
+ Op_SEEK Op = 2
+ Op_SEEK_BOTH Op = 3
+ Op_CURRENT Op = 4
+ Op_LAST Op = 6
+ Op_LAST_DUP Op = 7
+ Op_NEXT Op = 8
+ Op_NEXT_DUP Op = 9
+ Op_NEXT_NO_DUP Op = 11
+ Op_PREV Op = 12
+ Op_PREV_DUP Op = 13
+ Op_PREV_NO_DUP Op = 14
+ Op_SEEK_EXACT Op = 15
+ Op_SEEK_BOTH_EXACT Op = 16
+ Op_OPEN Op = 30
+ Op_CLOSE Op = 31
+ Op_OPEN_DUP_SORT Op = 32
+ Op_COUNT Op = 33
+)
+
+// Enum value maps for Op.
+var (
+ Op_name = map[int32]string{
+ 0: "FIRST",
+ 1: "FIRST_DUP",
+ 2: "SEEK",
+ 3: "SEEK_BOTH",
+ 4: "CURRENT",
+ 6: "LAST",
+ 7: "LAST_DUP",
+ 8: "NEXT",
+ 9: "NEXT_DUP",
+ 11: "NEXT_NO_DUP",
+ 12: "PREV",
+ 13: "PREV_DUP",
+ 14: "PREV_NO_DUP",
+ 15: "SEEK_EXACT",
+ 16: "SEEK_BOTH_EXACT",
+ 30: "OPEN",
+ 31: "CLOSE",
+ 32: "OPEN_DUP_SORT",
+ 33: "COUNT",
+ }
+ Op_value = map[string]int32{
+ "FIRST": 0,
+ "FIRST_DUP": 1,
+ "SEEK": 2,
+ "SEEK_BOTH": 3,
+ "CURRENT": 4,
+ "LAST": 6,
+ "LAST_DUP": 7,
+ "NEXT": 8,
+ "NEXT_DUP": 9,
+ "NEXT_NO_DUP": 11,
+ "PREV": 12,
+ "PREV_DUP": 13,
+ "PREV_NO_DUP": 14,
+ "SEEK_EXACT": 15,
+ "SEEK_BOTH_EXACT": 16,
+ "OPEN": 30,
+ "CLOSE": 31,
+ "OPEN_DUP_SORT": 32,
+ "COUNT": 33,
+ }
+)
+
+func (x Op) Enum() *Op {
+ p := new(Op)
+ *p = x
+ return p
+}
+
+func (x Op) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Op) Descriptor() protoreflect.EnumDescriptor {
+ return file_remote_kv_proto_enumTypes[0].Descriptor()
+}
+
+func (Op) Type() protoreflect.EnumType {
+ return &file_remote_kv_proto_enumTypes[0]
+}
+
+func (x Op) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Op.Descriptor instead.
+func (Op) EnumDescriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{0}
+}
+
+type Action int32
+
+const (
+ Action_STORAGE Action = 0 // Change only in the storage
+ Action_UPSERT Action = 1 // Change of balance or nonce (and optionally storage)
+ Action_CODE Action = 2 // Change of code (and optionally storage)
+ Action_UPSERT_CODE Action = 3 // Change in (balance or nonce) and code (and optionally storage)
+ Action_REMOVE Action = 4 // Account is deleted
+)
+
+// Enum value maps for Action.
+var (
+ Action_name = map[int32]string{
+ 0: "STORAGE",
+ 1: "UPSERT",
+ 2: "CODE",
+ 3: "UPSERT_CODE",
+ 4: "REMOVE",
+ }
+ Action_value = map[string]int32{
+ "STORAGE": 0,
+ "UPSERT": 1,
+ "CODE": 2,
+ "UPSERT_CODE": 3,
+ "REMOVE": 4,
+ }
+)
+
+func (x Action) Enum() *Action {
+ p := new(Action)
+ *p = x
+ return p
+}
+
+func (x Action) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Action) Descriptor() protoreflect.EnumDescriptor {
+ return file_remote_kv_proto_enumTypes[1].Descriptor()
+}
+
+func (Action) Type() protoreflect.EnumType {
+ return &file_remote_kv_proto_enumTypes[1]
+}
+
+func (x Action) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Action.Descriptor instead.
+func (Action) EnumDescriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{1}
+}
+
+type Direction int32
+
+const (
+ Direction_FORWARD Direction = 0
+ Direction_UNWIND Direction = 1
+)
+
+// Enum value maps for Direction.
+var (
+ Direction_name = map[int32]string{
+ 0: "FORWARD",
+ 1: "UNWIND",
+ }
+ Direction_value = map[string]int32{
+ "FORWARD": 0,
+ "UNWIND": 1,
+ }
+)
+
+func (x Direction) Enum() *Direction {
+ p := new(Direction)
+ *p = x
+ return p
+}
+
+func (x Direction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Direction) Descriptor() protoreflect.EnumDescriptor {
+ return file_remote_kv_proto_enumTypes[2].Descriptor()
+}
+
+func (Direction) Type() protoreflect.EnumType {
+ return &file_remote_kv_proto_enumTypes[2]
+}
+
+func (x Direction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Direction.Descriptor instead.
+func (Direction) EnumDescriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{2}
+}
+
+type Cursor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=remote.Op" json:"op,omitempty"`
+ BucketName string `protobuf:"bytes,2,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
+ Cursor uint32 `protobuf:"varint,3,opt,name=cursor,proto3" json:"cursor,omitempty"`
+ K []byte `protobuf:"bytes,4,opt,name=k,proto3" json:"k,omitempty"`
+ V []byte `protobuf:"bytes,5,opt,name=v,proto3" json:"v,omitempty"`
+}
+
+func (x *Cursor) Reset() {
+ *x = Cursor{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Cursor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Cursor) ProtoMessage() {}
+
+func (x *Cursor) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Cursor.ProtoReflect.Descriptor instead.
+func (*Cursor) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Cursor) GetOp() Op {
+ if x != nil {
+ return x.Op
+ }
+ return Op_FIRST
+}
+
+func (x *Cursor) GetBucketName() string {
+ if x != nil {
+ return x.BucketName
+ }
+ return ""
+}
+
+func (x *Cursor) GetCursor() uint32 {
+ if x != nil {
+ return x.Cursor
+ }
+ return 0
+}
+
+func (x *Cursor) GetK() []byte {
+ if x != nil {
+ return x.K
+ }
+ return nil
+}
+
+func (x *Cursor) GetV() []byte {
+ if x != nil {
+ return x.V
+ }
+ return nil
+}
+
+type Pair struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ K []byte `protobuf:"bytes,1,opt,name=k,proto3" json:"k,omitempty"`
+ V []byte `protobuf:"bytes,2,opt,name=v,proto3" json:"v,omitempty"`
+ CursorId uint32 `protobuf:"varint,3,opt,name=cursor_id,json=cursorId,proto3" json:"cursor_id,omitempty"` // send once after new cursor open
+ ViewId uint64 `protobuf:"varint,4,opt,name=view_id,json=viewId,proto3" json:"view_id,omitempty"` // return once after tx open. mdbx's tx.ViewID() - id of write transaction in db
+ TxId uint64 `protobuf:"varint,5,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // return once after tx open. internal identifier - use it in other methods - to achieve consistent DB view (to read data from same DB tx on server).
+}
+
+func (x *Pair) Reset() {
+ *x = Pair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Pair) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Pair) ProtoMessage() {}
+
+func (x *Pair) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Pair.ProtoReflect.Descriptor instead.
+func (*Pair) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Pair) GetK() []byte {
+ if x != nil {
+ return x.K
+ }
+ return nil
+}
+
+func (x *Pair) GetV() []byte {
+ if x != nil {
+ return x.V
+ }
+ return nil
+}
+
+func (x *Pair) GetCursorId() uint32 {
+ if x != nil {
+ return x.CursorId
+ }
+ return 0
+}
+
+func (x *Pair) GetViewId() uint64 {
+ if x != nil {
+ return x.ViewId
+ }
+ return 0
+}
+
+func (x *Pair) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+type StorageChange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Location *types.H256 `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *StorageChange) Reset() {
+ *x = StorageChange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StorageChange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StorageChange) ProtoMessage() {}
+
+func (x *StorageChange) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StorageChange.ProtoReflect.Descriptor instead.
+func (*StorageChange) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *StorageChange) GetLocation() *types.H256 {
+ if x != nil {
+ return x.Location
+ }
+ return nil
+}
+
+func (x *StorageChange) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type AccountChange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Address *types.H160 `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ Incarnation uint64 `protobuf:"varint,2,opt,name=incarnation,proto3" json:"incarnation,omitempty"`
+ Action Action `protobuf:"varint,3,opt,name=action,proto3,enum=remote.Action" json:"action,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // nil if there is no UPSERT in action
+ Code []byte `protobuf:"bytes,5,opt,name=code,proto3" json:"code,omitempty"` // nil if there is no CODE in action
+ StorageChanges []*StorageChange `protobuf:"bytes,6,rep,name=storage_changes,json=storageChanges,proto3" json:"storage_changes,omitempty"`
+}
+
+func (x *AccountChange) Reset() {
+ *x = AccountChange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccountChange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccountChange) ProtoMessage() {}
+
+func (x *AccountChange) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccountChange.ProtoReflect.Descriptor instead.
+func (*AccountChange) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *AccountChange) GetAddress() *types.H160 {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *AccountChange) GetIncarnation() uint64 {
+ if x != nil {
+ return x.Incarnation
+ }
+ return 0
+}
+
+func (x *AccountChange) GetAction() Action {
+ if x != nil {
+ return x.Action
+ }
+ return Action_STORAGE
+}
+
+func (x *AccountChange) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *AccountChange) GetCode() []byte {
+ if x != nil {
+ return x.Code
+ }
+ return nil
+}
+
+func (x *AccountChange) GetStorageChanges() []*StorageChange {
+ if x != nil {
+ return x.StorageChanges
+ }
+ return nil
+}
+
+// StateChangeBatch - list of StateDiff done in one DB transaction
+type StateChangeBatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ StateVersionId uint64 `protobuf:"varint,1,opt,name=state_version_id,json=stateVersionId,proto3" json:"state_version_id,omitempty"` // mdbx's tx.ID() - id of write transaction in db - where this changes happened
+ ChangeBatch []*StateChange `protobuf:"bytes,2,rep,name=change_batch,json=changeBatch,proto3" json:"change_batch,omitempty"`
+ PendingBlockBaseFee uint64 `protobuf:"varint,3,opt,name=pending_block_base_fee,json=pendingBlockBaseFee,proto3" json:"pending_block_base_fee,omitempty"` // BaseFee of the next block to be produced
+ BlockGasLimit uint64 `protobuf:"varint,4,opt,name=block_gas_limit,json=blockGasLimit,proto3" json:"block_gas_limit,omitempty"` // GasLimit of the latest block - proxy for the gas limit of the next block to be produced
+ FinalizedBlock uint64 `protobuf:"varint,5,opt,name=finalized_block,json=finalizedBlock,proto3" json:"finalized_block,omitempty"`
+ PendingBlobFeePerGas uint64 `protobuf:"varint,6,opt,name=pending_blob_fee_per_gas,json=pendingBlobFeePerGas,proto3" json:"pending_blob_fee_per_gas,omitempty"` // Base Blob Fee for the next block to be produced
+}
+
+func (x *StateChangeBatch) Reset() {
+ *x = StateChangeBatch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StateChangeBatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StateChangeBatch) ProtoMessage() {}
+
+func (x *StateChangeBatch) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StateChangeBatch.ProtoReflect.Descriptor instead.
+func (*StateChangeBatch) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *StateChangeBatch) GetStateVersionId() uint64 {
+ if x != nil {
+ return x.StateVersionId
+ }
+ return 0
+}
+
+func (x *StateChangeBatch) GetChangeBatch() []*StateChange {
+ if x != nil {
+ return x.ChangeBatch
+ }
+ return nil
+}
+
+func (x *StateChangeBatch) GetPendingBlockBaseFee() uint64 {
+ if x != nil {
+ return x.PendingBlockBaseFee
+ }
+ return 0
+}
+
+func (x *StateChangeBatch) GetBlockGasLimit() uint64 {
+ if x != nil {
+ return x.BlockGasLimit
+ }
+ return 0
+}
+
+func (x *StateChangeBatch) GetFinalizedBlock() uint64 {
+ if x != nil {
+ return x.FinalizedBlock
+ }
+ return 0
+}
+
+func (x *StateChangeBatch) GetPendingBlobFeePerGas() uint64 {
+ if x != nil {
+ return x.PendingBlobFeePerGas
+ }
+ return 0
+}
+
+// StateChange - changes done by 1 block or by 1 unwind
+type StateChange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Direction Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=remote.Direction" json:"direction,omitempty"`
+ BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"`
+ BlockHash *types.H256 `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+ Changes []*AccountChange `protobuf:"bytes,4,rep,name=changes,proto3" json:"changes,omitempty"`
+ Txs [][]byte `protobuf:"bytes,5,rep,name=txs,proto3" json:"txs,omitempty"` // enable by withTransactions=true
+}
+
+func (x *StateChange) Reset() {
+ *x = StateChange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StateChange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StateChange) ProtoMessage() {}
+
+func (x *StateChange) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StateChange.ProtoReflect.Descriptor instead.
+func (*StateChange) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *StateChange) GetDirection() Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return Direction_FORWARD
+}
+
+func (x *StateChange) GetBlockHeight() uint64 {
+ if x != nil {
+ return x.BlockHeight
+ }
+ return 0
+}
+
+func (x *StateChange) GetBlockHash() *types.H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+func (x *StateChange) GetChanges() []*AccountChange {
+ if x != nil {
+ return x.Changes
+ }
+ return nil
+}
+
+func (x *StateChange) GetTxs() [][]byte {
+ if x != nil {
+ return x.Txs
+ }
+ return nil
+}
+
+type StateChangeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WithStorage bool `protobuf:"varint,1,opt,name=with_storage,json=withStorage,proto3" json:"with_storage,omitempty"`
+ WithTransactions bool `protobuf:"varint,2,opt,name=with_transactions,json=withTransactions,proto3" json:"with_transactions,omitempty"`
+}
+
+func (x *StateChangeRequest) Reset() {
+ *x = StateChangeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StateChangeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StateChangeRequest) ProtoMessage() {}
+
+func (x *StateChangeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StateChangeRequest.ProtoReflect.Descriptor instead.
+func (*StateChangeRequest) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *StateChangeRequest) GetWithStorage() bool {
+ if x != nil {
+ return x.WithStorage
+ }
+ return false
+}
+
+func (x *StateChangeRequest) GetWithTransactions() bool {
+ if x != nil {
+ return x.WithTransactions
+ }
+ return false
+}
+
+type SnapshotsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *SnapshotsRequest) Reset() {
+ *x = SnapshotsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SnapshotsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SnapshotsRequest) ProtoMessage() {}
+
+func (x *SnapshotsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SnapshotsRequest.ProtoReflect.Descriptor instead.
+func (*SnapshotsRequest) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{7}
+}
+
+type SnapshotsReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlocksFiles []string `protobuf:"bytes,1,rep,name=blocks_files,json=blocksFiles,proto3" json:"blocks_files,omitempty"`
+ HistoryFiles []string `protobuf:"bytes,2,rep,name=history_files,json=historyFiles,proto3" json:"history_files,omitempty"`
+}
+
+func (x *SnapshotsReply) Reset() {
+ *x = SnapshotsReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SnapshotsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SnapshotsReply) ProtoMessage() {}
+
+func (x *SnapshotsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SnapshotsReply.ProtoReflect.Descriptor instead.
+func (*SnapshotsReply) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *SnapshotsReply) GetBlocksFiles() []string {
+ if x != nil {
+ return x.BlocksFiles
+ }
+ return nil
+}
+
+func (x *SnapshotsReply) GetHistoryFiles() []string {
+ if x != nil {
+ return x.HistoryFiles
+ }
+ return nil
+}
+
+type RangeReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ FromPrefix []byte `protobuf:"bytes,3,opt,name=from_prefix,json=fromPrefix,proto3" json:"from_prefix,omitempty"`
+ ToPrefix []byte `protobuf:"bytes,4,opt,name=to_prefix,json=toPrefix,proto3" json:"to_prefix,omitempty"`
+ OrderAscend bool `protobuf:"varint,5,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"`
+ Limit int64 `protobuf:"zigzag64,6,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit
+ // pagination params
+ PageSize int32 `protobuf:"varint,7,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose
+ PageToken string `protobuf:"bytes,8,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *RangeReq) Reset() {
+ *x = RangeReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RangeReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RangeReq) ProtoMessage() {}
+
+func (x *RangeReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RangeReq.ProtoReflect.Descriptor instead.
+func (*RangeReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *RangeReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *RangeReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *RangeReq) GetFromPrefix() []byte {
+ if x != nil {
+ return x.FromPrefix
+ }
+ return nil
+}
+
+func (x *RangeReq) GetToPrefix() []byte {
+ if x != nil {
+ return x.ToPrefix
+ }
+ return nil
+}
+
+func (x *RangeReq) GetOrderAscend() bool {
+ if x != nil {
+ return x.OrderAscend
+ }
+ return false
+}
+
+func (x *RangeReq) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *RangeReq) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *RangeReq) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// Temporal methods
+type DomainGetReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"`
+ Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"`
+ K2 []byte `protobuf:"bytes,5,opt,name=k2,proto3" json:"k2,omitempty"`
+ Latest bool `protobuf:"varint,6,opt,name=latest,proto3" json:"latest,omitempty"` // if true, then `ts` ignored and return latest state (without history lookup)
+}
+
+func (x *DomainGetReq) Reset() {
+ *x = DomainGetReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DomainGetReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DomainGetReq) ProtoMessage() {}
+
+func (x *DomainGetReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DomainGetReq.ProtoReflect.Descriptor instead.
+func (*DomainGetReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *DomainGetReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *DomainGetReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *DomainGetReq) GetK() []byte {
+ if x != nil {
+ return x.K
+ }
+ return nil
+}
+
+func (x *DomainGetReq) GetTs() uint64 {
+ if x != nil {
+ return x.Ts
+ }
+ return 0
+}
+
+func (x *DomainGetReq) GetK2() []byte {
+ if x != nil {
+ return x.K2
+ }
+ return nil
+}
+
+func (x *DomainGetReq) GetLatest() bool {
+ if x != nil {
+ return x.Latest
+ }
+ return false
+}
+
+type DomainGetReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ V []byte `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
+ Ok bool `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"`
+}
+
+func (x *DomainGetReply) Reset() {
+ *x = DomainGetReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DomainGetReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DomainGetReply) ProtoMessage() {}
+
+func (x *DomainGetReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DomainGetReply.ProtoReflect.Descriptor instead.
+func (*DomainGetReply) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *DomainGetReply) GetV() []byte {
+ if x != nil {
+ return x.V
+ }
+ return nil
+}
+
+func (x *DomainGetReply) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+type HistoryGetReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"`
+ Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"`
+}
+
+func (x *HistoryGetReq) Reset() {
+ *x = HistoryGetReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HistoryGetReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistoryGetReq) ProtoMessage() {}
+
+func (x *HistoryGetReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistoryGetReq.ProtoReflect.Descriptor instead.
+func (*HistoryGetReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *HistoryGetReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *HistoryGetReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *HistoryGetReq) GetK() []byte {
+ if x != nil {
+ return x.K
+ }
+ return nil
+}
+
+func (x *HistoryGetReq) GetTs() uint64 {
+ if x != nil {
+ return x.Ts
+ }
+ return 0
+}
+
+type HistoryGetReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ V []byte `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
+ Ok bool `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"`
+}
+
+func (x *HistoryGetReply) Reset() {
+ *x = HistoryGetReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HistoryGetReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistoryGetReply) ProtoMessage() {}
+
+func (x *HistoryGetReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistoryGetReply.ProtoReflect.Descriptor instead.
+func (*HistoryGetReply) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *HistoryGetReply) GetV() []byte {
+ if x != nil {
+ return x.V
+ }
+ return nil
+}
+
+func (x *HistoryGetReply) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+type IndexRangeReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"`
+ FromTs int64 `protobuf:"zigzag64,4,opt,name=from_ts,json=fromTs,proto3" json:"from_ts,omitempty"` // -1 means Inf
+ ToTs int64 `protobuf:"zigzag64,5,opt,name=to_ts,json=toTs,proto3" json:"to_ts,omitempty"` // -1 means Inf
+ OrderAscend bool `protobuf:"varint,6,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"`
+ Limit int64 `protobuf:"zigzag64,7,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit
+ // pagination params
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *IndexRangeReq) Reset() {
+ *x = IndexRangeReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IndexRangeReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IndexRangeReq) ProtoMessage() {}
+
+func (x *IndexRangeReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IndexRangeReq.ProtoReflect.Descriptor instead.
+func (*IndexRangeReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *IndexRangeReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *IndexRangeReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *IndexRangeReq) GetK() []byte {
+ if x != nil {
+ return x.K
+ }
+ return nil
+}
+
+func (x *IndexRangeReq) GetFromTs() int64 {
+ if x != nil {
+ return x.FromTs
+ }
+ return 0
+}
+
+func (x *IndexRangeReq) GetToTs() int64 {
+ if x != nil {
+ return x.ToTs
+ }
+ return 0
+}
+
+func (x *IndexRangeReq) GetOrderAscend() bool {
+ if x != nil {
+ return x.OrderAscend
+ }
+ return false
+}
+
+func (x *IndexRangeReq) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *IndexRangeReq) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *IndexRangeReq) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+type IndexRangeReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Timestamps []uint64 `protobuf:"varint,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` //TODO: it can be a bitmap
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *IndexRangeReply) Reset() {
+ *x = IndexRangeReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IndexRangeReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IndexRangeReply) ProtoMessage() {}
+
+func (x *IndexRangeReply) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IndexRangeReply.ProtoReflect.Descriptor instead.
+func (*IndexRangeReply) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *IndexRangeReply) GetTimestamps() []uint64 {
+ if x != nil {
+ return x.Timestamps
+ }
+ return nil
+}
+
+func (x *IndexRangeReply) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+type HistoryRangeReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ FromTs int64 `protobuf:"zigzag64,4,opt,name=from_ts,json=fromTs,proto3" json:"from_ts,omitempty"` // -1 means Inf
+ ToTs int64 `protobuf:"zigzag64,5,opt,name=to_ts,json=toTs,proto3" json:"to_ts,omitempty"` // -1 means Inf
+ OrderAscend bool `protobuf:"varint,6,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"`
+ Limit int64 `protobuf:"zigzag64,7,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit
+ // pagination params
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *HistoryRangeReq) Reset() {
+ *x = HistoryRangeReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HistoryRangeReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistoryRangeReq) ProtoMessage() {}
+
+func (x *HistoryRangeReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistoryRangeReq.ProtoReflect.Descriptor instead.
+func (*HistoryRangeReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *HistoryRangeReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *HistoryRangeReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *HistoryRangeReq) GetFromTs() int64 {
+ if x != nil {
+ return x.FromTs
+ }
+ return 0
+}
+
+func (x *HistoryRangeReq) GetToTs() int64 {
+ if x != nil {
+ return x.ToTs
+ }
+ return 0
+}
+
+func (x *HistoryRangeReq) GetOrderAscend() bool {
+ if x != nil {
+ return x.OrderAscend
+ }
+ return false
+}
+
+func (x *HistoryRangeReq) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *HistoryRangeReq) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *HistoryRangeReq) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+type DomainRangeReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxId uint64 `protobuf:"varint,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` // returned by .Tx()
+ // query params
+ Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
+ FromKey []byte `protobuf:"bytes,3,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` // nil means Inf
+ ToKey []byte `protobuf:"bytes,4,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` // nil means Inf
+ Ts uint64 `protobuf:"varint,5,opt,name=ts,proto3" json:"ts,omitempty"`
+ Latest bool `protobuf:"varint,6,opt,name=latest,proto3" json:"latest,omitempty"` // if true, then `ts` ignored and return latest state (without history lookup)
+ OrderAscend bool `protobuf:"varint,7,opt,name=order_ascend,json=orderAscend,proto3" json:"order_ascend,omitempty"`
+ Limit int64 `protobuf:"zigzag64,8,opt,name=limit,proto3" json:"limit,omitempty"` // <= 0 means no limit
+ // pagination params
+ PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // <= 0 means server will choose
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *DomainRangeReq) Reset() {
+ *x = DomainRangeReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DomainRangeReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DomainRangeReq) ProtoMessage() {}
+
+func (x *DomainRangeReq) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DomainRangeReq.ProtoReflect.Descriptor instead.
+func (*DomainRangeReq) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *DomainRangeReq) GetTxId() uint64 {
+ if x != nil {
+ return x.TxId
+ }
+ return 0
+}
+
+func (x *DomainRangeReq) GetTable() string {
+ if x != nil {
+ return x.Table
+ }
+ return ""
+}
+
+func (x *DomainRangeReq) GetFromKey() []byte {
+ if x != nil {
+ return x.FromKey
+ }
+ return nil
+}
+
+func (x *DomainRangeReq) GetToKey() []byte {
+ if x != nil {
+ return x.ToKey
+ }
+ return nil
+}
+
+func (x *DomainRangeReq) GetTs() uint64 {
+ if x != nil {
+ return x.Ts
+ }
+ return 0
+}
+
+func (x *DomainRangeReq) GetLatest() bool {
+ if x != nil {
+ return x.Latest
+ }
+ return false
+}
+
+func (x *DomainRangeReq) GetOrderAscend() bool {
+ if x != nil {
+ return x.OrderAscend
+ }
+ return false
+}
+
+func (x *DomainRangeReq) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *DomainRangeReq) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *DomainRangeReq) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+type Pairs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Keys [][]byte `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` // TODO: replace by lengtsh+arena? Anyway on server we need copy (serialization happening outside tx)
+ Values [][]byte `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` // uint32 estimateTotal = 3; // send once after stream creation
+}
+
+func (x *Pairs) Reset() {
+ *x = Pairs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Pairs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Pairs) ProtoMessage() {}
+
+func (x *Pairs) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Pairs.ProtoReflect.Descriptor instead.
+func (*Pairs) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *Pairs) GetKeys() [][]byte {
+ if x != nil {
+ return x.Keys
+ }
+ return nil
+}
+
+func (x *Pairs) GetValues() [][]byte {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Pairs) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+type ParisPagination struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NextKey []byte `protobuf:"bytes,1,opt,name=next_key,json=nextKey,proto3" json:"next_key,omitempty"`
+ Limit int64 `protobuf:"zigzag64,2,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *ParisPagination) Reset() {
+ *x = ParisPagination{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParisPagination) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParisPagination) ProtoMessage() {}
+
+func (x *ParisPagination) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParisPagination.ProtoReflect.Descriptor instead.
+func (*ParisPagination) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *ParisPagination) GetNextKey() []byte {
+ if x != nil {
+ return x.NextKey
+ }
+ return nil
+}
+
+func (x *ParisPagination) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+type IndexPagination struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NextTimeStamp int64 `protobuf:"zigzag64,1,opt,name=next_time_stamp,json=nextTimeStamp,proto3" json:"next_time_stamp,omitempty"`
+ Limit int64 `protobuf:"zigzag64,2,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *IndexPagination) Reset() {
+ *x = IndexPagination{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_remote_kv_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IndexPagination) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IndexPagination) ProtoMessage() {}
+
+func (x *IndexPagination) ProtoReflect() protoreflect.Message {
+ mi := &file_remote_kv_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IndexPagination.ProtoReflect.Descriptor instead.
+func (*IndexPagination) Descriptor() ([]byte, []int) {
+ return file_remote_kv_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *IndexPagination) GetNextTimeStamp() int64 {
+ if x != nil {
+ return x.NextTimeStamp
+ }
+ return 0
+}
+
+func (x *IndexPagination) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+var File_remote_kv_proto protoreflect.FileDescriptor
+
+var file_remote_kv_proto_rawDesc = []byte{
+ 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x6b, 0x76, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x06, 0x43, 0x75, 0x72,
+ 0x73, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x0a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x01, 0x76, 0x22, 0x6d, 0x0a, 0x04, 0x50, 0x61, 0x69, 0x72, 0x12, 0x0c, 0x0a, 0x01,
+ 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x75, 0x72, 0x73,
+ 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x75, 0x72,
+ 0x73, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x76, 0x69, 0x65, 0x77, 0x49, 0x64, 0x12, 0x13,
+ 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74,
+ 0x78, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74,
+ 0x61, 0x22, 0xe8, 0x01, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x68, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36,
+ 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x6e,
+ 0x63, 0x61, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x0b, 0x69, 0x6e, 0x63, 0x61, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x06,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x72,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3e, 0x0a, 0x0f,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xb2, 0x02, 0x0a,
+ 0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63,
+ 0x68, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0c, 0x63,
+ 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61,
+ 0x74, 0x63, 0x68, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x42, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c,
+ 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x18, 0x70, 0x65, 0x6e,
+ 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65,
+ 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x70, 0x65, 0x6e,
+ 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61,
+ 0x73, 0x22, 0xd0, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67,
+ 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
+ 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68,
+ 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
+ 0x68, 0x12, 0x2f, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67,
+ 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52,
+ 0x03, 0x74, 0x78, 0x73, 0x22, 0x64, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61,
+ 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x69,
+ 0x74, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x0a,
+ 0x11, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x77, 0x69, 0x74, 0x68, 0x54, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x6e,
+ 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58,
+ 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, 0x69,
+ 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x69, 0x73, 0x74,
+ 0x6f, 0x72, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x74, 0x6f, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e,
+ 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x12,
+ 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x22, 0x7f, 0x0a, 0x0c, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c,
+ 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, 0x0a, 0x02,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x0e, 0x0a, 0x02,
+ 0x6b, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6b, 0x32, 0x12, 0x16, 0x0a, 0x06,
+ 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65,
+ 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x58, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e,
+ 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, 0x2f,
+ 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12,
+ 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22,
+ 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65,
+ 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01,
+ 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72,
+ 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f,
+ 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65,
+ 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x59, 0x0a,
+ 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05,
+ 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49,
+ 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x5f,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x73,
+ 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52,
+ 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61,
+ 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64,
+ 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, 0x0e, 0x44,
+ 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a,
+ 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78,
+ 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d,
+ 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d,
+ 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65,
+ 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65,
+ 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41,
+ 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
+ 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73,
+ 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04,
+ 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f,
+ 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, 0x61, 0x67,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65,
+ 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
+ 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74,
+ 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, 0x4f, 0x70,
+ 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46,
+ 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45,
+ 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54,
+ 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04,
+ 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41,
+ 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54,
+ 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10,
+ 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50,
+ 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45,
+ 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45,
+ 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45,
+ 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12,
+ 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f,
+ 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50,
+ 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, 0x4e, 0x54,
+ 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07,
+ 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53,
+ 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12,
+ 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x03,
+ 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, 0x0a, 0x09,
+ 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52,
+ 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, 0x4e, 0x44,
+ 0x10, 0x01, 0x32, 0xba, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x30,
+ 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x18,
+ 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x44, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e,
+ 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79,
+ 0x47, 0x65, 0x74, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f,
+ 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x0b, 0x44, 0x6f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71,
+ 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42,
+ 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_remote_kv_proto_rawDescOnce sync.Once
+ file_remote_kv_proto_rawDescData = file_remote_kv_proto_rawDesc
+)
+
+func file_remote_kv_proto_rawDescGZIP() []byte {
+ file_remote_kv_proto_rawDescOnce.Do(func() {
+ file_remote_kv_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_kv_proto_rawDescData)
+ })
+ return file_remote_kv_proto_rawDescData
+}
+
+var file_remote_kv_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_remote_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
+var file_remote_kv_proto_goTypes = []interface{}{
+ (Op)(0), // 0: remote.Op
+ (Action)(0), // 1: remote.Action
+ (Direction)(0), // 2: remote.Direction
+ (*Cursor)(nil), // 3: remote.Cursor
+ (*Pair)(nil), // 4: remote.Pair
+ (*StorageChange)(nil), // 5: remote.StorageChange
+ (*AccountChange)(nil), // 6: remote.AccountChange
+ (*StateChangeBatch)(nil), // 7: remote.StateChangeBatch
+ (*StateChange)(nil), // 8: remote.StateChange
+ (*StateChangeRequest)(nil), // 9: remote.StateChangeRequest
+ (*SnapshotsRequest)(nil), // 10: remote.SnapshotsRequest
+ (*SnapshotsReply)(nil), // 11: remote.SnapshotsReply
+ (*RangeReq)(nil), // 12: remote.RangeReq
+ (*DomainGetReq)(nil), // 13: remote.DomainGetReq
+ (*DomainGetReply)(nil), // 14: remote.DomainGetReply
+ (*HistoryGetReq)(nil), // 15: remote.HistoryGetReq
+ (*HistoryGetReply)(nil), // 16: remote.HistoryGetReply
+ (*IndexRangeReq)(nil), // 17: remote.IndexRangeReq
+ (*IndexRangeReply)(nil), // 18: remote.IndexRangeReply
+ (*HistoryRangeReq)(nil), // 19: remote.HistoryRangeReq
+ (*DomainRangeReq)(nil), // 20: remote.DomainRangeReq
+ (*Pairs)(nil), // 21: remote.Pairs
+ (*ParisPagination)(nil), // 22: remote.ParisPagination
+ (*IndexPagination)(nil), // 23: remote.IndexPagination
+ (*types.H256)(nil), // 24: types.H256
+ (*types.H160)(nil), // 25: types.H160
+ (*emptypb.Empty)(nil), // 26: google.protobuf.Empty
+ (*types.VersionReply)(nil), // 27: types.VersionReply
+}
+var file_remote_kv_proto_depIdxs = []int32{
+ 0, // 0: remote.Cursor.op:type_name -> remote.Op
+ 24, // 1: remote.StorageChange.location:type_name -> types.H256
+ 25, // 2: remote.AccountChange.address:type_name -> types.H160
+ 1, // 3: remote.AccountChange.action:type_name -> remote.Action
+ 5, // 4: remote.AccountChange.storage_changes:type_name -> remote.StorageChange
+ 8, // 5: remote.StateChangeBatch.change_batch:type_name -> remote.StateChange
+ 2, // 6: remote.StateChange.direction:type_name -> remote.Direction
+ 24, // 7: remote.StateChange.block_hash:type_name -> types.H256
+ 6, // 8: remote.StateChange.changes:type_name -> remote.AccountChange
+ 26, // 9: remote.KV.Version:input_type -> google.protobuf.Empty
+ 3, // 10: remote.KV.Tx:input_type -> remote.Cursor
+ 9, // 11: remote.KV.StateChanges:input_type -> remote.StateChangeRequest
+ 10, // 12: remote.KV.Snapshots:input_type -> remote.SnapshotsRequest
+ 12, // 13: remote.KV.Range:input_type -> remote.RangeReq
+ 13, // 14: remote.KV.DomainGet:input_type -> remote.DomainGetReq
+ 15, // 15: remote.KV.HistoryGet:input_type -> remote.HistoryGetReq
+ 17, // 16: remote.KV.IndexRange:input_type -> remote.IndexRangeReq
+ 19, // 17: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq
+ 20, // 18: remote.KV.DomainRange:input_type -> remote.DomainRangeReq
+ 27, // 19: remote.KV.Version:output_type -> types.VersionReply
+ 4, // 20: remote.KV.Tx:output_type -> remote.Pair
+ 7, // 21: remote.KV.StateChanges:output_type -> remote.StateChangeBatch
+ 11, // 22: remote.KV.Snapshots:output_type -> remote.SnapshotsReply
+ 21, // 23: remote.KV.Range:output_type -> remote.Pairs
+ 14, // 24: remote.KV.DomainGet:output_type -> remote.DomainGetReply
+ 16, // 25: remote.KV.HistoryGet:output_type -> remote.HistoryGetReply
+ 18, // 26: remote.KV.IndexRange:output_type -> remote.IndexRangeReply
+ 21, // 27: remote.KV.HistoryRange:output_type -> remote.Pairs
+ 21, // 28: remote.KV.DomainRange:output_type -> remote.Pairs
+ 19, // [19:29] is the sub-list for method output_type
+ 9, // [9:19] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_remote_kv_proto_init() }
+func file_remote_kv_proto_init() {
+ if File_remote_kv_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_remote_kv_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Cursor); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Pair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StorageChange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccountChange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StateChangeBatch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StateChange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StateChangeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SnapshotsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SnapshotsReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RangeReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DomainGetReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DomainGetReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HistoryGetReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HistoryGetReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IndexRangeReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IndexRangeReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HistoryRangeReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DomainRangeReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Pairs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParisPagination); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_remote_kv_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IndexPagination); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_remote_kv_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 21,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_remote_kv_proto_goTypes,
+ DependencyIndexes: file_remote_kv_proto_depIdxs,
+ EnumInfos: file_remote_kv_proto_enumTypes,
+ MessageInfos: file_remote_kv_proto_msgTypes,
+ }.Build()
+ File_remote_kv_proto = out.File
+ file_remote_kv_proto_rawDesc = nil
+ file_remote_kv_proto_goTypes = nil
+ file_remote_kv_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/remote/kv_grpc.pb.go b/erigon-lib/gointerfaces/remote/kv_grpc.pb.go
new file mode 100644
index 00000000000..d0305cb0fb4
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/kv_grpc.pb.go
@@ -0,0 +1,528 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: remote/kv.proto
+
+package remote
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ KV_Version_FullMethodName = "/remote.KV/Version"
+ KV_Tx_FullMethodName = "/remote.KV/Tx"
+ KV_StateChanges_FullMethodName = "/remote.KV/StateChanges"
+ KV_Snapshots_FullMethodName = "/remote.KV/Snapshots"
+ KV_Range_FullMethodName = "/remote.KV/Range"
+ KV_DomainGet_FullMethodName = "/remote.KV/DomainGet"
+ KV_HistoryGet_FullMethodName = "/remote.KV/HistoryGet"
+ KV_IndexRange_FullMethodName = "/remote.KV/IndexRange"
+ KV_HistoryRange_FullMethodName = "/remote.KV/HistoryRange"
+ KV_DomainRange_FullMethodName = "/remote.KV/DomainRange"
+)
+
+// KVClient is the client API for KV service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type KVClient interface {
+ // Version returns the service version number
+ Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error)
+ // Tx exposes read-only transactions for the key-value store
+ //
+ // When tx open, client must receive 1 message from server with txID
+ // When cursor open, client must receive 1 message from server with cursorID
+ // Then only client can initiate messages from server
+ Tx(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error)
+ StateChanges(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error)
+ // Snapshots returns list of current snapshot files. Then client can just open all of them.
+ Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error)
+ // Range [from, to)
+ // Range(from, nil) means [from, EndOfTable)
+ // Range(nil, to) means [StartOfTable, to)
+ // If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A")
+ Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error)
+ // Temporal methods
+ DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error)
+ HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error)
+ IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error)
+ HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error)
+ DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error)
+}
+
+type kVClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewKVClient(cc grpc.ClientConnInterface) KVClient {
+ return &kVClient{cc}
+}
+
+func (c *kVClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ out := new(types.VersionReply)
+ err := c.cc.Invoke(ctx, KV_Version_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Tx(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error) {
+ stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[0], KV_Tx_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &kVTxClient{stream}
+ return x, nil
+}
+
+type KV_TxClient interface {
+ Send(*Cursor) error
+ Recv() (*Pair, error)
+ grpc.ClientStream
+}
+
+type kVTxClient struct {
+ grpc.ClientStream
+}
+
+func (x *kVTxClient) Send(m *Cursor) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *kVTxClient) Recv() (*Pair, error) {
+ m := new(Pair)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *kVClient) StateChanges(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &KV_ServiceDesc.Streams[1], KV_StateChanges_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &kVStateChangesClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type KV_StateChangesClient interface {
+ Recv() (*StateChangeBatch, error)
+ grpc.ClientStream
+}
+
+type kVStateChangesClient struct {
+ grpc.ClientStream
+}
+
+func (x *kVStateChangesClient) Recv() (*StateChangeBatch, error) {
+ m := new(StateChangeBatch)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *kVClient) Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) {
+ out := new(SnapshotsReply)
+ err := c.cc.Invoke(ctx, KV_Snapshots_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ out := new(Pairs)
+ err := c.cc.Invoke(ctx, KV_Range_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) {
+ out := new(DomainGetReply)
+ err := c.cc.Invoke(ctx, KV_DomainGet_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) {
+ out := new(HistoryGetReply)
+ err := c.cc.Invoke(ctx, KV_HistoryGet_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) {
+ out := new(IndexRangeReply)
+ err := c.cc.Invoke(ctx, KV_IndexRange_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ out := new(Pairs)
+ err := c.cc.Invoke(ctx, KV_HistoryRange_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ out := new(Pairs)
+ err := c.cc.Invoke(ctx, KV_DomainRange_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// KVServer is the server API for KV service.
+// All implementations must embed UnimplementedKVServer
+// for forward compatibility
+type KVServer interface {
+ // Version returns the service version number
+ Version(context.Context, *emptypb.Empty) (*types.VersionReply, error)
+ // Tx exposes read-only transactions for the key-value store
+ //
+ // When tx open, client must receive 1 message from server with txID
+ // When cursor open, client must receive 1 message from server with cursorID
+ // Then only client can initiate messages from server
+ Tx(KV_TxServer) error
+ StateChanges(*StateChangeRequest, KV_StateChangesServer) error
+ // Snapshots returns list of current snapshot files. Then client can just open all of them.
+ Snapshots(context.Context, *SnapshotsRequest) (*SnapshotsReply, error)
+ // Range [from, to)
+ // Range(from, nil) means [from, EndOfTable)
+ // Range(nil, to) means [StartOfTable, to)
+ // If orderAscend=false server expecting `from`<`to`. Example: Range("B", "A")
+ Range(context.Context, *RangeReq) (*Pairs, error)
+ // Temporal methods
+ DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error)
+ HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error)
+ IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error)
+ HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error)
+ DomainRange(context.Context, *DomainRangeReq) (*Pairs, error)
+ mustEmbedUnimplementedKVServer()
+}
+
+// UnimplementedKVServer must be embedded to have forward compatible implementations.
+type UnimplementedKVServer struct {
+}
+
+func (UnimplementedKVServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
+}
+func (UnimplementedKVServer) Tx(KV_TxServer) error {
+ return status.Errorf(codes.Unimplemented, "method Tx not implemented")
+}
+func (UnimplementedKVServer) StateChanges(*StateChangeRequest, KV_StateChangesServer) error {
+ return status.Errorf(codes.Unimplemented, "method StateChanges not implemented")
+}
+func (UnimplementedKVServer) Snapshots(context.Context, *SnapshotsRequest) (*SnapshotsReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Snapshots not implemented")
+}
+func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
+}
+func (UnimplementedKVServer) DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DomainGet not implemented")
+}
+func (UnimplementedKVServer) HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HistoryGet not implemented")
+}
+func (UnimplementedKVServer) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method IndexRange not implemented")
+}
+func (UnimplementedKVServer) HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HistoryRange not implemented")
+}
+func (UnimplementedKVServer) DomainRange(context.Context, *DomainRangeReq) (*Pairs, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DomainRange not implemented")
+}
+func (UnimplementedKVServer) mustEmbedUnimplementedKVServer() {}
+
+// UnsafeKVServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to KVServer will
+// result in compilation errors.
+type UnsafeKVServer interface {
+ mustEmbedUnimplementedKVServer()
+}
+
+func RegisterKVServer(s grpc.ServiceRegistrar, srv KVServer) {
+ s.RegisterService(&KV_ServiceDesc, srv)
+}
+
+func _KV_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Version(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_Version_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Version(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Tx_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(KVServer).Tx(&kVTxServer{stream})
+}
+
+type KV_TxServer interface {
+ Send(*Pair) error
+ Recv() (*Cursor, error)
+ grpc.ServerStream
+}
+
+type kVTxServer struct {
+ grpc.ServerStream
+}
+
+func (x *kVTxServer) Send(m *Pair) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *kVTxServer) Recv() (*Cursor, error) {
+ m := new(Cursor)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _KV_StateChanges_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(StateChangeRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(KVServer).StateChanges(m, &kVStateChangesServer{stream})
+}
+
+type KV_StateChangesServer interface {
+ Send(*StateChangeBatch) error
+ grpc.ServerStream
+}
+
+type kVStateChangesServer struct {
+ grpc.ServerStream
+}
+
+func (x *kVStateChangesServer) Send(m *StateChangeBatch) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _KV_Snapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SnapshotsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Snapshots(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_Snapshots_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Snapshots(ctx, req.(*SnapshotsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RangeReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Range(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_Range_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Range(ctx, req.(*RangeReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DomainGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DomainGetReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).DomainGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_DomainGet_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).DomainGet(ctx, req.(*DomainGetReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_HistoryGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HistoryGetReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).HistoryGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_HistoryGet_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).HistoryGet(ctx, req.(*HistoryGetReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_IndexRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(IndexRangeReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).IndexRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_IndexRange_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).IndexRange(ctx, req.(*IndexRangeReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_HistoryRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HistoryRangeReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).HistoryRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_HistoryRange_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).HistoryRange(ctx, req.(*HistoryRangeReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DomainRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DomainRangeReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).DomainRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: KV_DomainRange_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).DomainRange(ctx, req.(*DomainRangeReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// KV_ServiceDesc is the grpc.ServiceDesc for KV service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var KV_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "remote.KV",
+ HandlerType: (*KVServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Version",
+ Handler: _KV_Version_Handler,
+ },
+ {
+ MethodName: "Snapshots",
+ Handler: _KV_Snapshots_Handler,
+ },
+ {
+ MethodName: "Range",
+ Handler: _KV_Range_Handler,
+ },
+ {
+ MethodName: "DomainGet",
+ Handler: _KV_DomainGet_Handler,
+ },
+ {
+ MethodName: "HistoryGet",
+ Handler: _KV_HistoryGet_Handler,
+ },
+ {
+ MethodName: "IndexRange",
+ Handler: _KV_IndexRange_Handler,
+ },
+ {
+ MethodName: "HistoryRange",
+ Handler: _KV_HistoryRange_Handler,
+ },
+ {
+ MethodName: "DomainRange",
+ Handler: _KV_DomainRange_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Tx",
+ Handler: _KV_Tx_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "StateChanges",
+ Handler: _KV_StateChanges_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "remote/kv.proto",
+}
diff --git a/erigon-lib/gointerfaces/remote/mocks.go b/erigon-lib/gointerfaces/remote/mocks.go
new file mode 100644
index 00000000000..8300eb434d2
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/mocks.go
@@ -0,0 +1,947 @@
+// Code generated by moq; DO NOT EDIT.
+// github.com/matryer/moq
+
+package remote
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ sync "sync"
+)
+
+// Ensure, that KVClientMock does implement KVClient.
+// If this is not the case, regenerate this file with moq.
+var _ KVClient = &KVClientMock{}
+
+// KVClientMock is a mock implementation of KVClient.
+//
+// func TestSomethingThatUsesKVClient(t *testing.T) {
+//
+// // make and configure a mocked KVClient
+// mockedKVClient := &KVClientMock{
+// DomainGetFunc: func(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) {
+// panic("mock out the DomainGet method")
+// },
+// DomainRangeFunc: func(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+// panic("mock out the DomainRange method")
+// },
+// HistoryGetFunc: func(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) {
+// panic("mock out the HistoryGet method")
+// },
+// HistoryRangeFunc: func(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+// panic("mock out the HistoryRange method")
+// },
+// IndexRangeFunc: func(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) {
+// panic("mock out the IndexRange method")
+// },
+// RangeFunc: func(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+// panic("mock out the Range method")
+// },
+// SnapshotsFunc: func(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) {
+// panic("mock out the Snapshots method")
+// },
+// StateChangesFunc: func(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error) {
+// panic("mock out the StateChanges method")
+// },
+// TxFunc: func(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error) {
+// panic("mock out the Tx method")
+// },
+// VersionFunc: func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+// panic("mock out the Version method")
+// },
+// }
+//
+// // use mockedKVClient in code that requires KVClient
+// // and then make assertions.
+//
+// }
+type KVClientMock struct {
+ // DomainGetFunc mocks the DomainGet method.
+ DomainGetFunc func(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error)
+
+ // DomainRangeFunc mocks the DomainRange method.
+ DomainRangeFunc func(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error)
+
+ // HistoryGetFunc mocks the HistoryGet method.
+ HistoryGetFunc func(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error)
+
+ // HistoryRangeFunc mocks the HistoryRange method.
+ HistoryRangeFunc func(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error)
+
+ // IndexRangeFunc mocks the IndexRange method.
+ IndexRangeFunc func(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error)
+
+ // RangeFunc mocks the Range method.
+ RangeFunc func(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error)
+
+ // SnapshotsFunc mocks the Snapshots method.
+ SnapshotsFunc func(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error)
+
+ // StateChangesFunc mocks the StateChanges method.
+ StateChangesFunc func(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error)
+
+ // TxFunc mocks the Tx method.
+ TxFunc func(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error)
+
+ // VersionFunc mocks the Version method.
+ VersionFunc func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error)
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // DomainGet holds details about calls to the DomainGet method.
+ DomainGet []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *DomainGetReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // DomainRange holds details about calls to the DomainRange method.
+ DomainRange []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *DomainRangeReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // HistoryGet holds details about calls to the HistoryGet method.
+ HistoryGet []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *HistoryGetReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // HistoryRange holds details about calls to the HistoryRange method.
+ HistoryRange []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *HistoryRangeReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // IndexRange holds details about calls to the IndexRange method.
+ IndexRange []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *IndexRangeReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Range holds details about calls to the Range method.
+ Range []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *RangeReq
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Snapshots holds details about calls to the Snapshots method.
+ Snapshots []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *SnapshotsRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // StateChanges holds details about calls to the StateChanges method.
+ StateChanges []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *StateChangeRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Tx holds details about calls to the Tx method.
+ Tx []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Version holds details about calls to the Version method.
+ Version []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *emptypb.Empty
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ }
+ lockDomainGet sync.RWMutex
+ lockDomainRange sync.RWMutex
+ lockHistoryGet sync.RWMutex
+ lockHistoryRange sync.RWMutex
+ lockIndexRange sync.RWMutex
+ lockRange sync.RWMutex
+ lockSnapshots sync.RWMutex
+ lockStateChanges sync.RWMutex
+ lockTx sync.RWMutex
+ lockVersion sync.RWMutex
+}
+
+// DomainGet calls DomainGetFunc.
+func (mock *KVClientMock) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *DomainGetReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockDomainGet.Lock()
+ mock.calls.DomainGet = append(mock.calls.DomainGet, callInfo)
+ mock.lockDomainGet.Unlock()
+ if mock.DomainGetFunc == nil {
+ var (
+ domainGetReplyOut *DomainGetReply
+ errOut error
+ )
+ return domainGetReplyOut, errOut
+ }
+ return mock.DomainGetFunc(ctx, in, opts...)
+}
+
+// DomainGetCalls gets all the calls that were made to DomainGet.
+// Check the length with:
+//
+// len(mockedKVClient.DomainGetCalls())
+func (mock *KVClientMock) DomainGetCalls() []struct {
+ Ctx context.Context
+ In *DomainGetReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *DomainGetReq
+ Opts []grpc.CallOption
+ }
+ mock.lockDomainGet.RLock()
+ calls = mock.calls.DomainGet
+ mock.lockDomainGet.RUnlock()
+ return calls
+}
+
+// DomainRange calls DomainRangeFunc.
+func (mock *KVClientMock) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *DomainRangeReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockDomainRange.Lock()
+ mock.calls.DomainRange = append(mock.calls.DomainRange, callInfo)
+ mock.lockDomainRange.Unlock()
+ if mock.DomainRangeFunc == nil {
+ var (
+ pairsOut *Pairs
+ errOut error
+ )
+ return pairsOut, errOut
+ }
+ return mock.DomainRangeFunc(ctx, in, opts...)
+}
+
+// DomainRangeCalls gets all the calls that were made to DomainRange.
+// Check the length with:
+//
+// len(mockedKVClient.DomainRangeCalls())
+func (mock *KVClientMock) DomainRangeCalls() []struct {
+ Ctx context.Context
+ In *DomainRangeReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *DomainRangeReq
+ Opts []grpc.CallOption
+ }
+ mock.lockDomainRange.RLock()
+ calls = mock.calls.DomainRange
+ mock.lockDomainRange.RUnlock()
+ return calls
+}
+
+// HistoryGet calls HistoryGetFunc.
+func (mock *KVClientMock) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *HistoryGetReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockHistoryGet.Lock()
+ mock.calls.HistoryGet = append(mock.calls.HistoryGet, callInfo)
+ mock.lockHistoryGet.Unlock()
+ if mock.HistoryGetFunc == nil {
+ var (
+ historyGetReplyOut *HistoryGetReply
+ errOut error
+ )
+ return historyGetReplyOut, errOut
+ }
+ return mock.HistoryGetFunc(ctx, in, opts...)
+}
+
+// HistoryGetCalls gets all the calls that were made to HistoryGet.
+// Check the length with:
+//
+// len(mockedKVClient.HistoryGetCalls())
+func (mock *KVClientMock) HistoryGetCalls() []struct {
+ Ctx context.Context
+ In *HistoryGetReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *HistoryGetReq
+ Opts []grpc.CallOption
+ }
+ mock.lockHistoryGet.RLock()
+ calls = mock.calls.HistoryGet
+ mock.lockHistoryGet.RUnlock()
+ return calls
+}
+
+// HistoryRange calls HistoryRangeFunc.
+func (mock *KVClientMock) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *HistoryRangeReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockHistoryRange.Lock()
+ mock.calls.HistoryRange = append(mock.calls.HistoryRange, callInfo)
+ mock.lockHistoryRange.Unlock()
+ if mock.HistoryRangeFunc == nil {
+ var (
+ pairsOut *Pairs
+ errOut error
+ )
+ return pairsOut, errOut
+ }
+ return mock.HistoryRangeFunc(ctx, in, opts...)
+}
+
+// HistoryRangeCalls gets all the calls that were made to HistoryRange.
+// Check the length with:
+//
+// len(mockedKVClient.HistoryRangeCalls())
+func (mock *KVClientMock) HistoryRangeCalls() []struct {
+ Ctx context.Context
+ In *HistoryRangeReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *HistoryRangeReq
+ Opts []grpc.CallOption
+ }
+ mock.lockHistoryRange.RLock()
+ calls = mock.calls.HistoryRange
+ mock.lockHistoryRange.RUnlock()
+ return calls
+}
+
+// IndexRange calls IndexRangeFunc.
+func (mock *KVClientMock) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *IndexRangeReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockIndexRange.Lock()
+ mock.calls.IndexRange = append(mock.calls.IndexRange, callInfo)
+ mock.lockIndexRange.Unlock()
+ if mock.IndexRangeFunc == nil {
+ var (
+ indexRangeReplyOut *IndexRangeReply
+ errOut error
+ )
+ return indexRangeReplyOut, errOut
+ }
+ return mock.IndexRangeFunc(ctx, in, opts...)
+}
+
+// IndexRangeCalls gets all the calls that were made to IndexRange.
+// Check the length with:
+//
+// len(mockedKVClient.IndexRangeCalls())
+func (mock *KVClientMock) IndexRangeCalls() []struct {
+ Ctx context.Context
+ In *IndexRangeReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *IndexRangeReq
+ Opts []grpc.CallOption
+ }
+ mock.lockIndexRange.RLock()
+ calls = mock.calls.IndexRange
+ mock.lockIndexRange.RUnlock()
+ return calls
+}
+
+// Range calls RangeFunc.
+func (mock *KVClientMock) Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *RangeReq
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockRange.Lock()
+ mock.calls.Range = append(mock.calls.Range, callInfo)
+ mock.lockRange.Unlock()
+ if mock.RangeFunc == nil {
+ var (
+ pairsOut *Pairs
+ errOut error
+ )
+ return pairsOut, errOut
+ }
+ return mock.RangeFunc(ctx, in, opts...)
+}
+
+// RangeCalls gets all the calls that were made to Range.
+// Check the length with:
+//
+// len(mockedKVClient.RangeCalls())
+func (mock *KVClientMock) RangeCalls() []struct {
+ Ctx context.Context
+ In *RangeReq
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *RangeReq
+ Opts []grpc.CallOption
+ }
+ mock.lockRange.RLock()
+ calls = mock.calls.Range
+ mock.lockRange.RUnlock()
+ return calls
+}
+
+// Snapshots calls SnapshotsFunc.
+func (mock *KVClientMock) Snapshots(ctx context.Context, in *SnapshotsRequest, opts ...grpc.CallOption) (*SnapshotsReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *SnapshotsRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSnapshots.Lock()
+ mock.calls.Snapshots = append(mock.calls.Snapshots, callInfo)
+ mock.lockSnapshots.Unlock()
+ if mock.SnapshotsFunc == nil {
+ var (
+ snapshotsReplyOut *SnapshotsReply
+ errOut error
+ )
+ return snapshotsReplyOut, errOut
+ }
+ return mock.SnapshotsFunc(ctx, in, opts...)
+}
+
+// SnapshotsCalls gets all the calls that were made to Snapshots.
+// Check the length with:
+//
+// len(mockedKVClient.SnapshotsCalls())
+func (mock *KVClientMock) SnapshotsCalls() []struct {
+ Ctx context.Context
+ In *SnapshotsRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *SnapshotsRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockSnapshots.RLock()
+ calls = mock.calls.Snapshots
+ mock.lockSnapshots.RUnlock()
+ return calls
+}
+
+// StateChanges calls StateChangesFunc.
+func (mock *KVClientMock) StateChanges(ctx context.Context, in *StateChangeRequest, opts ...grpc.CallOption) (KV_StateChangesClient, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *StateChangeRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockStateChanges.Lock()
+ mock.calls.StateChanges = append(mock.calls.StateChanges, callInfo)
+ mock.lockStateChanges.Unlock()
+ if mock.StateChangesFunc == nil {
+ var (
+ kV_StateChangesClientOut KV_StateChangesClient
+ errOut error
+ )
+ return kV_StateChangesClientOut, errOut
+ }
+ return mock.StateChangesFunc(ctx, in, opts...)
+}
+
+// StateChangesCalls gets all the calls that were made to StateChanges.
+// Check the length with:
+//
+// len(mockedKVClient.StateChangesCalls())
+func (mock *KVClientMock) StateChangesCalls() []struct {
+ Ctx context.Context
+ In *StateChangeRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *StateChangeRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockStateChanges.RLock()
+ calls = mock.calls.StateChanges
+ mock.lockStateChanges.RUnlock()
+ return calls
+}
+
+// Tx calls TxFunc.
+func (mock *KVClientMock) Tx(ctx context.Context, opts ...grpc.CallOption) (KV_TxClient, error) {
+ callInfo := struct {
+ Ctx context.Context
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ Opts: opts,
+ }
+ mock.lockTx.Lock()
+ mock.calls.Tx = append(mock.calls.Tx, callInfo)
+ mock.lockTx.Unlock()
+ if mock.TxFunc == nil {
+ var (
+ kV_TxClientOut KV_TxClient
+ errOut error
+ )
+ return kV_TxClientOut, errOut
+ }
+ return mock.TxFunc(ctx, opts...)
+}
+
+// TxCalls gets all the calls that were made to Tx.
+// Check the length with:
+//
+// len(mockedKVClient.TxCalls())
+func (mock *KVClientMock) TxCalls() []struct {
+ Ctx context.Context
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ Opts []grpc.CallOption
+ }
+ mock.lockTx.RLock()
+ calls = mock.calls.Tx
+ mock.lockTx.RUnlock()
+ return calls
+}
+
+// Version calls VersionFunc.
+func (mock *KVClientMock) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockVersion.Lock()
+ mock.calls.Version = append(mock.calls.Version, callInfo)
+ mock.lockVersion.Unlock()
+ if mock.VersionFunc == nil {
+ var (
+ versionReplyOut *types.VersionReply
+ errOut error
+ )
+ return versionReplyOut, errOut
+ }
+ return mock.VersionFunc(ctx, in, opts...)
+}
+
+// VersionCalls gets all the calls that were made to Version.
+// Check the length with:
+//
+// len(mockedKVClient.VersionCalls())
+func (mock *KVClientMock) VersionCalls() []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }
+ mock.lockVersion.RLock()
+ calls = mock.calls.Version
+ mock.lockVersion.RUnlock()
+ return calls
+}
+
+// Ensure, that KV_StateChangesClientMock does implement KV_StateChangesClient.
+// If this is not the case, regenerate this file with moq.
+var _ KV_StateChangesClient = &KV_StateChangesClientMock{}
+
+// KV_StateChangesClientMock is a mock implementation of KV_StateChangesClient.
+//
+// func TestSomethingThatUsesKV_StateChangesClient(t *testing.T) {
+//
+// // make and configure a mocked KV_StateChangesClient
+// mockedKV_StateChangesClient := &KV_StateChangesClientMock{
+// CloseSendFunc: func() error {
+// panic("mock out the CloseSend method")
+// },
+// ContextFunc: func() context.Context {
+// panic("mock out the Context method")
+// },
+// HeaderFunc: func() (metadata.MD, error) {
+// panic("mock out the Header method")
+// },
+// RecvFunc: func() (*StateChangeBatch, error) {
+// panic("mock out the Recv method")
+// },
+// RecvMsgFunc: func(m any) error {
+// panic("mock out the RecvMsg method")
+// },
+// SendMsgFunc: func(m any) error {
+// panic("mock out the SendMsg method")
+// },
+// TrailerFunc: func() metadata.MD {
+// panic("mock out the Trailer method")
+// },
+// }
+//
+// // use mockedKV_StateChangesClient in code that requires KV_StateChangesClient
+// // and then make assertions.
+//
+// }
+type KV_StateChangesClientMock struct {
+ // CloseSendFunc mocks the CloseSend method.
+ CloseSendFunc func() error
+
+ // ContextFunc mocks the Context method.
+ ContextFunc func() context.Context
+
+ // HeaderFunc mocks the Header method.
+ HeaderFunc func() (metadata.MD, error)
+
+ // RecvFunc mocks the Recv method.
+ RecvFunc func() (*StateChangeBatch, error)
+
+ // RecvMsgFunc mocks the RecvMsg method.
+ RecvMsgFunc func(m any) error
+
+ // SendMsgFunc mocks the SendMsg method.
+ SendMsgFunc func(m any) error
+
+ // TrailerFunc mocks the Trailer method.
+ TrailerFunc func() metadata.MD
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // CloseSend holds details about calls to the CloseSend method.
+ CloseSend []struct {
+ }
+ // Context holds details about calls to the Context method.
+ Context []struct {
+ }
+ // Header holds details about calls to the Header method.
+ Header []struct {
+ }
+ // Recv holds details about calls to the Recv method.
+ Recv []struct {
+ }
+ // RecvMsg holds details about calls to the RecvMsg method.
+ RecvMsg []struct {
+ // M is the m argument value.
+ M any
+ }
+ // SendMsg holds details about calls to the SendMsg method.
+ SendMsg []struct {
+ // M is the m argument value.
+ M any
+ }
+ // Trailer holds details about calls to the Trailer method.
+ Trailer []struct {
+ }
+ }
+ lockCloseSend sync.RWMutex
+ lockContext sync.RWMutex
+ lockHeader sync.RWMutex
+ lockRecv sync.RWMutex
+ lockRecvMsg sync.RWMutex
+ lockSendMsg sync.RWMutex
+ lockTrailer sync.RWMutex
+}
+
+// CloseSend calls CloseSendFunc.
+func (mock *KV_StateChangesClientMock) CloseSend() error {
+ callInfo := struct {
+ }{}
+ mock.lockCloseSend.Lock()
+ mock.calls.CloseSend = append(mock.calls.CloseSend, callInfo)
+ mock.lockCloseSend.Unlock()
+ if mock.CloseSendFunc == nil {
+ var (
+ errOut error
+ )
+ return errOut
+ }
+ return mock.CloseSendFunc()
+}
+
+// CloseSendCalls gets all the calls that were made to CloseSend.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.CloseSendCalls())
+func (mock *KV_StateChangesClientMock) CloseSendCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockCloseSend.RLock()
+ calls = mock.calls.CloseSend
+ mock.lockCloseSend.RUnlock()
+ return calls
+}
+
+// Context calls ContextFunc.
+func (mock *KV_StateChangesClientMock) Context() context.Context {
+ callInfo := struct {
+ }{}
+ mock.lockContext.Lock()
+ mock.calls.Context = append(mock.calls.Context, callInfo)
+ mock.lockContext.Unlock()
+ if mock.ContextFunc == nil {
+ var (
+ contextOut context.Context
+ )
+ return contextOut
+ }
+ return mock.ContextFunc()
+}
+
+// ContextCalls gets all the calls that were made to Context.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.ContextCalls())
+func (mock *KV_StateChangesClientMock) ContextCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockContext.RLock()
+ calls = mock.calls.Context
+ mock.lockContext.RUnlock()
+ return calls
+}
+
+// Header calls HeaderFunc.
+func (mock *KV_StateChangesClientMock) Header() (metadata.MD, error) {
+ callInfo := struct {
+ }{}
+ mock.lockHeader.Lock()
+ mock.calls.Header = append(mock.calls.Header, callInfo)
+ mock.lockHeader.Unlock()
+ if mock.HeaderFunc == nil {
+ var (
+ mDOut metadata.MD
+ errOut error
+ )
+ return mDOut, errOut
+ }
+ return mock.HeaderFunc()
+}
+
+// HeaderCalls gets all the calls that were made to Header.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.HeaderCalls())
+func (mock *KV_StateChangesClientMock) HeaderCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockHeader.RLock()
+ calls = mock.calls.Header
+ mock.lockHeader.RUnlock()
+ return calls
+}
+
+// Recv calls RecvFunc.
+func (mock *KV_StateChangesClientMock) Recv() (*StateChangeBatch, error) {
+ callInfo := struct {
+ }{}
+ mock.lockRecv.Lock()
+ mock.calls.Recv = append(mock.calls.Recv, callInfo)
+ mock.lockRecv.Unlock()
+ if mock.RecvFunc == nil {
+ var (
+ stateChangeBatchOut *StateChangeBatch
+ errOut error
+ )
+ return stateChangeBatchOut, errOut
+ }
+ return mock.RecvFunc()
+}
+
+// RecvCalls gets all the calls that were made to Recv.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.RecvCalls())
+func (mock *KV_StateChangesClientMock) RecvCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockRecv.RLock()
+ calls = mock.calls.Recv
+ mock.lockRecv.RUnlock()
+ return calls
+}
+
+// RecvMsg calls RecvMsgFunc.
+func (mock *KV_StateChangesClientMock) RecvMsg(m any) error {
+ callInfo := struct {
+ M any
+ }{
+ M: m,
+ }
+ mock.lockRecvMsg.Lock()
+ mock.calls.RecvMsg = append(mock.calls.RecvMsg, callInfo)
+ mock.lockRecvMsg.Unlock()
+ if mock.RecvMsgFunc == nil {
+ var (
+ errOut error
+ )
+ return errOut
+ }
+ return mock.RecvMsgFunc(m)
+}
+
+// RecvMsgCalls gets all the calls that were made to RecvMsg.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.RecvMsgCalls())
+func (mock *KV_StateChangesClientMock) RecvMsgCalls() []struct {
+ M any
+} {
+ var calls []struct {
+ M any
+ }
+ mock.lockRecvMsg.RLock()
+ calls = mock.calls.RecvMsg
+ mock.lockRecvMsg.RUnlock()
+ return calls
+}
+
+// SendMsg calls SendMsgFunc.
+func (mock *KV_StateChangesClientMock) SendMsg(m any) error {
+ callInfo := struct {
+ M any
+ }{
+ M: m,
+ }
+ mock.lockSendMsg.Lock()
+ mock.calls.SendMsg = append(mock.calls.SendMsg, callInfo)
+ mock.lockSendMsg.Unlock()
+ if mock.SendMsgFunc == nil {
+ var (
+ errOut error
+ )
+ return errOut
+ }
+ return mock.SendMsgFunc(m)
+}
+
+// SendMsgCalls gets all the calls that were made to SendMsg.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.SendMsgCalls())
+func (mock *KV_StateChangesClientMock) SendMsgCalls() []struct {
+ M any
+} {
+ var calls []struct {
+ M any
+ }
+ mock.lockSendMsg.RLock()
+ calls = mock.calls.SendMsg
+ mock.lockSendMsg.RUnlock()
+ return calls
+}
+
+// Trailer calls TrailerFunc.
+func (mock *KV_StateChangesClientMock) Trailer() metadata.MD {
+ callInfo := struct {
+ }{}
+ mock.lockTrailer.Lock()
+ mock.calls.Trailer = append(mock.calls.Trailer, callInfo)
+ mock.lockTrailer.Unlock()
+ if mock.TrailerFunc == nil {
+ var (
+ mDOut metadata.MD
+ )
+ return mDOut
+ }
+ return mock.TrailerFunc()
+}
+
+// TrailerCalls gets all the calls that were made to Trailer.
+// Check the length with:
+//
+// len(mockedKV_StateChangesClient.TrailerCalls())
+func (mock *KV_StateChangesClientMock) TrailerCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockTrailer.RLock()
+ calls = mock.calls.Trailer
+ mock.lockTrailer.RUnlock()
+ return calls
+}
diff --git a/erigon-lib/gointerfaces/remote/sort.go b/erigon-lib/gointerfaces/remote/sort.go
new file mode 100644
index 00000000000..f61407bf0fd
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/sort.go
@@ -0,0 +1,14 @@
+package remote
+
+import (
+ "strings"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+)
+
+func NodeInfoReplyCmp(i, j *types.NodeInfoReply) int {
+ if cmp := strings.Compare(i.Name, j.Name); cmp != 0 {
+ return cmp
+ }
+ return strings.Compare(i.Enode, j.Enode)
+}
diff --git a/erigon-lib/gointerfaces/remote/sort_test.go b/erigon-lib/gointerfaces/remote/sort_test.go
new file mode 100644
index 00000000000..8a32e5a6e17
--- /dev/null
+++ b/erigon-lib/gointerfaces/remote/sort_test.go
@@ -0,0 +1,56 @@
+package remote_test
+
+import (
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/exp/slices"
+)
+
+func TestSort(t *testing.T) {
+ tests := []struct {
+ name string
+ got *remote.NodesInfoReply
+ want *remote.NodesInfoReply
+ }{
+ {
+ name: "sort by name",
+ got: &remote.NodesInfoReply{
+ NodesInfo: []*types.NodeInfoReply{
+ {Name: "b", Enode: "c"},
+ {Name: "a", Enode: "d"},
+ },
+ },
+ want: &remote.NodesInfoReply{
+ NodesInfo: []*types.NodeInfoReply{
+ {Name: "a", Enode: "d"},
+ {Name: "b", Enode: "c"},
+ },
+ },
+ },
+ {
+ name: "sort by enode",
+ got: &remote.NodesInfoReply{
+ NodesInfo: []*types.NodeInfoReply{
+ {Name: "a", Enode: "d"},
+ {Name: "a", Enode: "c"},
+ },
+ },
+ want: &remote.NodesInfoReply{
+ NodesInfo: []*types.NodeInfoReply{
+ {Name: "a", Enode: "c"},
+ {Name: "a", Enode: "d"},
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ slices.SortFunc(tt.got.NodesInfo, remote.NodeInfoReplyCmp)
+ assert.Equal(t, tt.want, tt.got)
+ })
+ }
+}
diff --git a/erigon-lib/gointerfaces/sentinel/sentinel.pb.go b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go
new file mode 100644
index 00000000000..0fc32fe89d8
--- /dev/null
+++ b/erigon-lib/gointerfaces/sentinel/sentinel.pb.go
@@ -0,0 +1,750 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: p2psentinel/sentinel.proto
+
+package sentinel
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type GossipType int32
+
+const (
+ // Global gossip topics.
+ GossipType_BeaconBlockGossipType GossipType = 0
+ GossipType_AggregateAndProofGossipType GossipType = 1
+ GossipType_VoluntaryExitGossipType GossipType = 2
+ GossipType_ProposerSlashingGossipType GossipType = 3
+ GossipType_AttesterSlashingGossipType GossipType = 4
+ GossipType_BlobSidecarType GossipType = 5
+ GossipType_BlsToExecutionChangeGossipType GossipType = 6
+)
+
+// Enum value maps for GossipType.
+var (
+ GossipType_name = map[int32]string{
+ 0: "BeaconBlockGossipType",
+ 1: "AggregateAndProofGossipType",
+ 2: "VoluntaryExitGossipType",
+ 3: "ProposerSlashingGossipType",
+ 4: "AttesterSlashingGossipType",
+ 5: "BlobSidecarType",
+ 6: "BlsToExecutionChangeGossipType",
+ }
+ GossipType_value = map[string]int32{
+ "BeaconBlockGossipType": 0,
+ "AggregateAndProofGossipType": 1,
+ "VoluntaryExitGossipType": 2,
+ "ProposerSlashingGossipType": 3,
+ "AttesterSlashingGossipType": 4,
+ "BlobSidecarType": 5,
+ "BlsToExecutionChangeGossipType": 6,
+ }
+)
+
+func (x GossipType) Enum() *GossipType {
+ p := new(GossipType)
+ *p = x
+ return p
+}
+
+func (x GossipType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GossipType) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2psentinel_sentinel_proto_enumTypes[0].Descriptor()
+}
+
+func (GossipType) Type() protoreflect.EnumType {
+ return &file_p2psentinel_sentinel_proto_enumTypes[0]
+}
+
+func (x GossipType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GossipType.Descriptor instead.
+func (GossipType) EnumDescriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0}
+}
+
+type EmptyMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *EmptyMessage) Reset() {
+ *x = EmptyMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EmptyMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EmptyMessage) ProtoMessage() {}
+
+func (x *EmptyMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EmptyMessage.ProtoReflect.Descriptor instead.
+func (*EmptyMessage) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{0}
+}
+
+type Peer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Pid string `protobuf:"bytes,1,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (x *Peer) Reset() {
+ *x = Peer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Peer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Peer) ProtoMessage() {}
+
+func (x *Peer) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
+func (*Peer) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Peer) GetPid() string {
+ if x != nil {
+ return x.Pid
+ }
+ return ""
+}
+
+type GossipData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data
+ Type GossipType `protobuf:"varint,2,opt,name=type,proto3,enum=sentinel.GossipType" json:"type,omitempty"`
+ Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ BlobIndex *uint32 `protobuf:"varint,4,opt,name=blob_index,json=blobIndex,proto3,oneof" json:"blob_index,omitempty"` // Blob identifier for EIP4844
+}
+
+func (x *GossipData) Reset() {
+ *x = GossipData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GossipData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GossipData) ProtoMessage() {}
+
+func (x *GossipData) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GossipData.ProtoReflect.Descriptor instead.
+func (*GossipData) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GossipData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *GossipData) GetType() GossipType {
+ if x != nil {
+ return x.Type
+ }
+ return GossipType_BeaconBlockGossipType
+}
+
+func (x *GossipData) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+func (x *GossipData) GetBlobIndex() uint32 {
+ if x != nil && x.BlobIndex != nil {
+ return *x.BlobIndex
+ }
+ return 0
+}
+
+type Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ForkDigest uint32 `protobuf:"varint,1,opt,name=fork_digest,json=forkDigest,proto3" json:"fork_digest,omitempty"` // 4 bytes can be repressented in uint32.
+ FinalizedRoot *types.H256 `protobuf:"bytes,2,opt,name=finalized_root,json=finalizedRoot,proto3" json:"finalized_root,omitempty"`
+ FinalizedEpoch uint64 `protobuf:"varint,3,opt,name=finalized_epoch,json=finalizedEpoch,proto3" json:"finalized_epoch,omitempty"`
+ HeadRoot *types.H256 `protobuf:"bytes,4,opt,name=head_root,json=headRoot,proto3" json:"head_root,omitempty"`
+ HeadSlot uint64 `protobuf:"varint,5,opt,name=head_slot,json=headSlot,proto3" json:"head_slot,omitempty"`
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Status) GetForkDigest() uint32 {
+ if x != nil {
+ return x.ForkDigest
+ }
+ return 0
+}
+
+func (x *Status) GetFinalizedRoot() *types.H256 {
+ if x != nil {
+ return x.FinalizedRoot
+ }
+ return nil
+}
+
+func (x *Status) GetFinalizedEpoch() uint64 {
+ if x != nil {
+ return x.FinalizedEpoch
+ }
+ return 0
+}
+
+func (x *Status) GetHeadRoot() *types.H256 {
+ if x != nil {
+ return x.HeadRoot
+ }
+ return nil
+}
+
+func (x *Status) GetHeadSlot() uint64 {
+ if x != nil {
+ return x.HeadSlot
+ }
+ return 0
+}
+
+type PeerCount struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"`
+}
+
+func (x *PeerCount) Reset() {
+ *x = PeerCount{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerCount) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerCount) ProtoMessage() {}
+
+func (x *PeerCount) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerCount.ProtoReflect.Descriptor instead.
+func (*PeerCount) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PeerCount) GetAmount() uint64 {
+ if x != nil {
+ return x.Amount
+ }
+ return 0
+}
+
+type RequestData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *RequestData) Reset() {
+ *x = RequestData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RequestData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestData) ProtoMessage() {}
+
+func (x *RequestData) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestData.ProtoReflect.Descriptor instead.
+func (*RequestData) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *RequestData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *RequestData) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+type ResponseData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // prefix-stripped SSZ encoded data
+ Error bool `protobuf:"varint,2,opt,name=error,proto3" json:"error,omitempty"` // did the peer encounter an error
+ Peer *Peer `protobuf:"bytes,3,opt,name=peer,proto3" json:"peer,omitempty"`
+}
+
+func (x *ResponseData) Reset() {
+ *x = ResponseData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResponseData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResponseData) ProtoMessage() {}
+
+func (x *ResponseData) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentinel_sentinel_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResponseData.ProtoReflect.Descriptor instead.
+func (*ResponseData) Descriptor() ([]byte, []int) {
+ return file_p2psentinel_sentinel_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ResponseData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *ResponseData) GetError() bool {
+ if x != nil {
+ return x.Error
+ }
+ return false
+}
+
+func (x *ResponseData) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+var File_p2psentinel_sentinel_proto protoreflect.FileDescriptor
+
+var file_p2psentinel_sentinel_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x70, 0x32, 0x70, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2f, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x18, 0x0a, 0x04, 0x50, 0x65, 0x65,
+ 0x72, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x70, 0x69, 0x64, 0x22, 0xaf, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e,
+ 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x27, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00,
+ 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x62, 0x6c, 0x6f,
+ 0x62, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52,
+ 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a,
+ 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f,
+ 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xcd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x67, 0x65, 0x73,
+ 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x72,
+ 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65,
+ 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x64, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e,
+ 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x28,
+ 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08,
+ 0x68, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x64,
+ 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61,
+ 0x64, 0x53, 0x6c, 0x6f, 0x74, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x37, 0x0a, 0x0b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x22, 0x5c, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44,
+ 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x22, 0x0a,
+ 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65,
+ 0x72, 0x2a, 0xde, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x19, 0x0a, 0x15, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47,
+ 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x41,
+ 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66,
+ 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17,
+ 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x47, 0x6f, 0x73,
+ 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x72, 0x6f,
+ 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73,
+ 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x74, 0x74,
+ 0x65, 0x73, 0x74, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x47, 0x6f, 0x73,
+ 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x6c, 0x6f,
+ 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x54, 0x79, 0x70, 0x65, 0x10, 0x05, 0x12, 0x22,
+ 0x0a, 0x1e, 0x42, 0x6c, 0x73, 0x54, 0x6f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65,
+ 0x10, 0x06, 0x32, 0x90, 0x04, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x12,
+ 0x41, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x47, 0x6f, 0x73, 0x73,
+ 0x69, 0x70, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x14, 0x2e, 0x73, 0x65, 0x6e,
+ 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61,
+ 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69,
+ 0x6e, 0x65, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61,
+ 0x12, 0x35, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x2e,
+ 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a,
+ 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65,
+ 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x13, 0x2e, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x31, 0x0a, 0x07, 0x42, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65,
+ 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x55, 0x6e, 0x62, 0x61, 0x6e, 0x50, 0x65, 0x65, 0x72,
+ 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72,
+ 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61,
+ 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69,
+ 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69,
+ 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x77, 0x61, 0x72, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x16,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73,
+ 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e,
+ 0x65, 0x6c, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e,
+ 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x69,
+ 0x6e, 0x65, 0x6c, 0x3b, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_p2psentinel_sentinel_proto_rawDescOnce sync.Once
+ file_p2psentinel_sentinel_proto_rawDescData = file_p2psentinel_sentinel_proto_rawDesc
+)
+
+func file_p2psentinel_sentinel_proto_rawDescGZIP() []byte {
+ file_p2psentinel_sentinel_proto_rawDescOnce.Do(func() {
+ file_p2psentinel_sentinel_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2psentinel_sentinel_proto_rawDescData)
+ })
+ return file_p2psentinel_sentinel_proto_rawDescData
+}
+
+var file_p2psentinel_sentinel_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_p2psentinel_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_p2psentinel_sentinel_proto_goTypes = []interface{}{
+ (GossipType)(0), // 0: sentinel.GossipType
+ (*EmptyMessage)(nil), // 1: sentinel.EmptyMessage
+ (*Peer)(nil), // 2: sentinel.Peer
+ (*GossipData)(nil), // 3: sentinel.GossipData
+ (*Status)(nil), // 4: sentinel.Status
+ (*PeerCount)(nil), // 5: sentinel.PeerCount
+ (*RequestData)(nil), // 6: sentinel.RequestData
+ (*ResponseData)(nil), // 7: sentinel.ResponseData
+ (*types.H256)(nil), // 8: types.H256
+}
+var file_p2psentinel_sentinel_proto_depIdxs = []int32{
+ 0, // 0: sentinel.GossipData.type:type_name -> sentinel.GossipType
+ 2, // 1: sentinel.GossipData.peer:type_name -> sentinel.Peer
+ 8, // 2: sentinel.Status.finalized_root:type_name -> types.H256
+ 8, // 3: sentinel.Status.head_root:type_name -> types.H256
+ 2, // 4: sentinel.ResponseData.peer:type_name -> sentinel.Peer
+ 1, // 5: sentinel.Sentinel.SubscribeGossip:input_type -> sentinel.EmptyMessage
+ 6, // 6: sentinel.Sentinel.SendRequest:input_type -> sentinel.RequestData
+ 4, // 7: sentinel.Sentinel.SetStatus:input_type -> sentinel.Status
+ 1, // 8: sentinel.Sentinel.GetPeers:input_type -> sentinel.EmptyMessage
+ 2, // 9: sentinel.Sentinel.BanPeer:input_type -> sentinel.Peer
+ 2, // 10: sentinel.Sentinel.UnbanPeer:input_type -> sentinel.Peer
+ 2, // 11: sentinel.Sentinel.PenalizePeer:input_type -> sentinel.Peer
+ 2, // 12: sentinel.Sentinel.RewardPeer:input_type -> sentinel.Peer
+ 3, // 13: sentinel.Sentinel.PublishGossip:input_type -> sentinel.GossipData
+ 3, // 14: sentinel.Sentinel.SubscribeGossip:output_type -> sentinel.GossipData
+ 7, // 15: sentinel.Sentinel.SendRequest:output_type -> sentinel.ResponseData
+ 1, // 16: sentinel.Sentinel.SetStatus:output_type -> sentinel.EmptyMessage
+ 5, // 17: sentinel.Sentinel.GetPeers:output_type -> sentinel.PeerCount
+ 1, // 18: sentinel.Sentinel.BanPeer:output_type -> sentinel.EmptyMessage
+ 1, // 19: sentinel.Sentinel.UnbanPeer:output_type -> sentinel.EmptyMessage
+ 1, // 20: sentinel.Sentinel.PenalizePeer:output_type -> sentinel.EmptyMessage
+ 1, // 21: sentinel.Sentinel.RewardPeer:output_type -> sentinel.EmptyMessage
+ 1, // 22: sentinel.Sentinel.PublishGossip:output_type -> sentinel.EmptyMessage
+ 14, // [14:23] is the sub-list for method output_type
+ 5, // [5:14] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_p2psentinel_sentinel_proto_init() }
+func file_p2psentinel_sentinel_proto_init() {
+ if File_p2psentinel_sentinel_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_p2psentinel_sentinel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EmptyMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Peer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GossipData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerCount); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RequestData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResponseData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_p2psentinel_sentinel_proto_msgTypes[2].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_p2psentinel_sentinel_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_p2psentinel_sentinel_proto_goTypes,
+ DependencyIndexes: file_p2psentinel_sentinel_proto_depIdxs,
+ EnumInfos: file_p2psentinel_sentinel_proto_enumTypes,
+ MessageInfos: file_p2psentinel_sentinel_proto_msgTypes,
+ }.Build()
+ File_p2psentinel_sentinel_proto = out.File
+ file_p2psentinel_sentinel_proto_rawDesc = nil
+ file_p2psentinel_sentinel_proto_goTypes = nil
+ file_p2psentinel_sentinel_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go b/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go
new file mode 100644
index 00000000000..a62786b600f
--- /dev/null
+++ b/erigon-lib/gointerfaces/sentinel/sentinel_grpc.pb.go
@@ -0,0 +1,433 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: p2psentinel/sentinel.proto
+
+package sentinel
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Sentinel_SubscribeGossip_FullMethodName = "/sentinel.Sentinel/SubscribeGossip"
+ Sentinel_SendRequest_FullMethodName = "/sentinel.Sentinel/SendRequest"
+ Sentinel_SetStatus_FullMethodName = "/sentinel.Sentinel/SetStatus"
+ Sentinel_GetPeers_FullMethodName = "/sentinel.Sentinel/GetPeers"
+ Sentinel_BanPeer_FullMethodName = "/sentinel.Sentinel/BanPeer"
+ Sentinel_UnbanPeer_FullMethodName = "/sentinel.Sentinel/UnbanPeer"
+ Sentinel_PenalizePeer_FullMethodName = "/sentinel.Sentinel/PenalizePeer"
+ Sentinel_RewardPeer_FullMethodName = "/sentinel.Sentinel/RewardPeer"
+ Sentinel_PublishGossip_FullMethodName = "/sentinel.Sentinel/PublishGossip"
+)
+
+// SentinelClient is the client API for Sentinel service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type SentinelClient interface {
+ SubscribeGossip(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error)
+ SendRequest(ctx context.Context, in *RequestData, opts ...grpc.CallOption) (*ResponseData, error)
+ SetStatus(ctx context.Context, in *Status, opts ...grpc.CallOption) (*EmptyMessage, error)
+ GetPeers(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*PeerCount, error)
+ BanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error)
+ UnbanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error)
+ PenalizePeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error)
+ RewardPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error)
+ PublishGossip(ctx context.Context, in *GossipData, opts ...grpc.CallOption) (*EmptyMessage, error)
+}
+
+type sentinelClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSentinelClient(cc grpc.ClientConnInterface) SentinelClient {
+ return &sentinelClient{cc}
+}
+
+func (c *sentinelClient) SubscribeGossip(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Sentinel_ServiceDesc.Streams[0], Sentinel_SubscribeGossip_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &sentinelSubscribeGossipClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Sentinel_SubscribeGossipClient interface {
+ Recv() (*GossipData, error)
+ grpc.ClientStream
+}
+
+type sentinelSubscribeGossipClient struct {
+ grpc.ClientStream
+}
+
+func (x *sentinelSubscribeGossipClient) Recv() (*GossipData, error) {
+ m := new(GossipData)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *sentinelClient) SendRequest(ctx context.Context, in *RequestData, opts ...grpc.CallOption) (*ResponseData, error) {
+ out := new(ResponseData)
+ err := c.cc.Invoke(ctx, Sentinel_SendRequest_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) SetStatus(ctx context.Context, in *Status, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_SetStatus_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) GetPeers(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (*PeerCount, error) {
+ out := new(PeerCount)
+ err := c.cc.Invoke(ctx, Sentinel_GetPeers_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) BanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_BanPeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) UnbanPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_UnbanPeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) PenalizePeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_PenalizePeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) RewardPeer(ctx context.Context, in *Peer, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_RewardPeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentinelClient) PublishGossip(ctx context.Context, in *GossipData, opts ...grpc.CallOption) (*EmptyMessage, error) {
+ out := new(EmptyMessage)
+ err := c.cc.Invoke(ctx, Sentinel_PublishGossip_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SentinelServer is the server API for Sentinel service.
+// All implementations must embed UnimplementedSentinelServer
+// for forward compatibility
+type SentinelServer interface {
+ SubscribeGossip(*EmptyMessage, Sentinel_SubscribeGossipServer) error
+ SendRequest(context.Context, *RequestData) (*ResponseData, error)
+ SetStatus(context.Context, *Status) (*EmptyMessage, error)
+ GetPeers(context.Context, *EmptyMessage) (*PeerCount, error)
+ BanPeer(context.Context, *Peer) (*EmptyMessage, error)
+ UnbanPeer(context.Context, *Peer) (*EmptyMessage, error)
+ PenalizePeer(context.Context, *Peer) (*EmptyMessage, error)
+ RewardPeer(context.Context, *Peer) (*EmptyMessage, error)
+ PublishGossip(context.Context, *GossipData) (*EmptyMessage, error)
+ mustEmbedUnimplementedSentinelServer()
+}
+
+// UnimplementedSentinelServer must be embedded to have forward compatible implementations.
+type UnimplementedSentinelServer struct {
+}
+
+func (UnimplementedSentinelServer) SubscribeGossip(*EmptyMessage, Sentinel_SubscribeGossipServer) error {
+ return status.Errorf(codes.Unimplemented, "method SubscribeGossip not implemented")
+}
+func (UnimplementedSentinelServer) SendRequest(context.Context, *RequestData) (*ResponseData, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendRequest not implemented")
+}
+func (UnimplementedSentinelServer) SetStatus(context.Context, *Status) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetStatus not implemented")
+}
+func (UnimplementedSentinelServer) GetPeers(context.Context, *EmptyMessage) (*PeerCount, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPeers not implemented")
+}
+func (UnimplementedSentinelServer) BanPeer(context.Context, *Peer) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method BanPeer not implemented")
+}
+func (UnimplementedSentinelServer) UnbanPeer(context.Context, *Peer) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UnbanPeer not implemented")
+}
+func (UnimplementedSentinelServer) PenalizePeer(context.Context, *Peer) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PenalizePeer not implemented")
+}
+func (UnimplementedSentinelServer) RewardPeer(context.Context, *Peer) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RewardPeer not implemented")
+}
+func (UnimplementedSentinelServer) PublishGossip(context.Context, *GossipData) (*EmptyMessage, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PublishGossip not implemented")
+}
+func (UnimplementedSentinelServer) mustEmbedUnimplementedSentinelServer() {}
+
+// UnsafeSentinelServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to SentinelServer will
+// result in compilation errors.
+type UnsafeSentinelServer interface {
+ mustEmbedUnimplementedSentinelServer()
+}
+
+func RegisterSentinelServer(s grpc.ServiceRegistrar, srv SentinelServer) {
+ s.RegisterService(&Sentinel_ServiceDesc, srv)
+}
+
+func _Sentinel_SubscribeGossip_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(EmptyMessage)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SentinelServer).SubscribeGossip(m, &sentinelSubscribeGossipServer{stream})
+}
+
+type Sentinel_SubscribeGossipServer interface {
+ Send(*GossipData) error
+ grpc.ServerStream
+}
+
+type sentinelSubscribeGossipServer struct {
+ grpc.ServerStream
+}
+
+func (x *sentinelSubscribeGossipServer) Send(m *GossipData) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Sentinel_SendRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RequestData)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).SendRequest(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_SendRequest_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).SendRequest(ctx, req.(*RequestData))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_SetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Status)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).SetStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_SetStatus_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).SetStatus(ctx, req.(*Status))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_GetPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EmptyMessage)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).GetPeers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_GetPeers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).GetPeers(ctx, req.(*EmptyMessage))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_BanPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Peer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).BanPeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_BanPeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).BanPeer(ctx, req.(*Peer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_UnbanPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Peer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).UnbanPeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_UnbanPeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).UnbanPeer(ctx, req.(*Peer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_PenalizePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Peer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).PenalizePeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_PenalizePeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).PenalizePeer(ctx, req.(*Peer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_RewardPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Peer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).RewardPeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_RewardPeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).RewardPeer(ctx, req.(*Peer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentinel_PublishGossip_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GossipData)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentinelServer).PublishGossip(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentinel_PublishGossip_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentinelServer).PublishGossip(ctx, req.(*GossipData))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Sentinel_ServiceDesc is the grpc.ServiceDesc for Sentinel service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Sentinel_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "sentinel.Sentinel",
+ HandlerType: (*SentinelServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "SendRequest",
+ Handler: _Sentinel_SendRequest_Handler,
+ },
+ {
+ MethodName: "SetStatus",
+ Handler: _Sentinel_SetStatus_Handler,
+ },
+ {
+ MethodName: "GetPeers",
+ Handler: _Sentinel_GetPeers_Handler,
+ },
+ {
+ MethodName: "BanPeer",
+ Handler: _Sentinel_BanPeer_Handler,
+ },
+ {
+ MethodName: "UnbanPeer",
+ Handler: _Sentinel_UnbanPeer_Handler,
+ },
+ {
+ MethodName: "PenalizePeer",
+ Handler: _Sentinel_PenalizePeer_Handler,
+ },
+ {
+ MethodName: "RewardPeer",
+ Handler: _Sentinel_RewardPeer_Handler,
+ },
+ {
+ MethodName: "PublishGossip",
+ Handler: _Sentinel_PublishGossip_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "SubscribeGossip",
+ Handler: _Sentinel_SubscribeGossip_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "p2psentinel/sentinel.proto",
+}
diff --git a/erigon-lib/gointerfaces/sentry/mocks.go b/erigon-lib/gointerfaces/sentry/mocks.go
new file mode 100644
index 00000000000..37a446b4470
--- /dev/null
+++ b/erigon-lib/gointerfaces/sentry/mocks.go
@@ -0,0 +1,1801 @@
+// Code generated by moq; DO NOT EDIT.
+// github.com/matryer/moq
+
+package sentry
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ sync "sync"
+)
+
+// Ensure, that SentryServerMock does implement SentryServer.
+// If this is not the case, regenerate this file with moq.
+var _ SentryServer = &SentryServerMock{}
+
+// SentryServerMock is a mock implementation of SentryServer.
+//
+// func TestSomethingThatUsesSentryServer(t *testing.T) {
+//
+// // make and configure a mocked SentryServer
+// mockedSentryServer := &SentryServerMock{
+// AddPeerFunc: func(contextMoqParam context.Context, addPeerRequest *AddPeerRequest) (*AddPeerReply, error) {
+// panic("mock out the AddPeer method")
+// },
+// HandShakeFunc: func(contextMoqParam context.Context, empty *emptypb.Empty) (*HandShakeReply, error) {
+// panic("mock out the HandShake method")
+// },
+// MessagesFunc: func(messagesRequest *MessagesRequest, sentry_MessagesServer Sentry_MessagesServer) error {
+// panic("mock out the Messages method")
+// },
+// NodeInfoFunc: func(contextMoqParam context.Context, empty *emptypb.Empty) (*types.NodeInfoReply, error) {
+// panic("mock out the NodeInfo method")
+// },
+// PeerByIdFunc: func(contextMoqParam context.Context, peerByIdRequest *PeerByIdRequest) (*PeerByIdReply, error) {
+// panic("mock out the PeerById method")
+// },
+// PeerCountFunc: func(contextMoqParam context.Context, peerCountRequest *PeerCountRequest) (*PeerCountReply, error) {
+// panic("mock out the PeerCount method")
+// },
+// PeerEventsFunc: func(peerEventsRequest *PeerEventsRequest, sentry_PeerEventsServer Sentry_PeerEventsServer) error {
+// panic("mock out the PeerEvents method")
+// },
+// PeerMinBlockFunc: func(contextMoqParam context.Context, peerMinBlockRequest *PeerMinBlockRequest) (*emptypb.Empty, error) {
+// panic("mock out the PeerMinBlock method")
+// },
+// PeersFunc: func(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) {
+// panic("mock out the Peers method")
+// },
+// PenalizePeerFunc: func(contextMoqParam context.Context, penalizePeerRequest *PenalizePeerRequest) (*emptypb.Empty, error) {
+// panic("mock out the PenalizePeer method")
+// },
+// SendMessageByIdFunc: func(contextMoqParam context.Context, sendMessageByIdRequest *SendMessageByIdRequest) (*SentPeers, error) {
+// panic("mock out the SendMessageById method")
+// },
+// SendMessageByMinBlockFunc: func(contextMoqParam context.Context, sendMessageByMinBlockRequest *SendMessageByMinBlockRequest) (*SentPeers, error) {
+// panic("mock out the SendMessageByMinBlock method")
+// },
+// SendMessageToAllFunc: func(contextMoqParam context.Context, outboundMessageData *OutboundMessageData) (*SentPeers, error) {
+// panic("mock out the SendMessageToAll method")
+// },
+// SendMessageToRandomPeersFunc: func(contextMoqParam context.Context, sendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest) (*SentPeers, error) {
+// panic("mock out the SendMessageToRandomPeers method")
+// },
+// SetStatusFunc: func(contextMoqParam context.Context, statusData *StatusData) (*SetStatusReply, error) {
+// panic("mock out the SetStatus method")
+// },
+// mustEmbedUnimplementedSentryServerFunc: func() {
+// panic("mock out the mustEmbedUnimplementedSentryServer method")
+// },
+// }
+//
+// // use mockedSentryServer in code that requires SentryServer
+// // and then make assertions.
+//
+// }
+type SentryServerMock struct {
+ // AddPeerFunc mocks the AddPeer method.
+ AddPeerFunc func(contextMoqParam context.Context, addPeerRequest *AddPeerRequest) (*AddPeerReply, error)
+
+ // HandShakeFunc mocks the HandShake method.
+ HandShakeFunc func(contextMoqParam context.Context, empty *emptypb.Empty) (*HandShakeReply, error)
+
+ // MessagesFunc mocks the Messages method.
+ MessagesFunc func(messagesRequest *MessagesRequest, sentry_MessagesServer Sentry_MessagesServer) error
+
+ // NodeInfoFunc mocks the NodeInfo method.
+ NodeInfoFunc func(contextMoqParam context.Context, empty *emptypb.Empty) (*types.NodeInfoReply, error)
+
+ // PeerByIdFunc mocks the PeerById method.
+ PeerByIdFunc func(contextMoqParam context.Context, peerByIdRequest *PeerByIdRequest) (*PeerByIdReply, error)
+
+ // PeerCountFunc mocks the PeerCount method.
+ PeerCountFunc func(contextMoqParam context.Context, peerCountRequest *PeerCountRequest) (*PeerCountReply, error)
+
+ // PeerEventsFunc mocks the PeerEvents method.
+ PeerEventsFunc func(peerEventsRequest *PeerEventsRequest, sentry_PeerEventsServer Sentry_PeerEventsServer) error
+
+ // PeerMinBlockFunc mocks the PeerMinBlock method.
+ PeerMinBlockFunc func(contextMoqParam context.Context, peerMinBlockRequest *PeerMinBlockRequest) (*emptypb.Empty, error)
+
+ // PeersFunc mocks the Peers method.
+ PeersFunc func(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error)
+
+ // PenalizePeerFunc mocks the PenalizePeer method.
+ PenalizePeerFunc func(contextMoqParam context.Context, penalizePeerRequest *PenalizePeerRequest) (*emptypb.Empty, error)
+
+ // SendMessageByIdFunc mocks the SendMessageById method.
+ SendMessageByIdFunc func(contextMoqParam context.Context, sendMessageByIdRequest *SendMessageByIdRequest) (*SentPeers, error)
+
+ // SendMessageByMinBlockFunc mocks the SendMessageByMinBlock method.
+ SendMessageByMinBlockFunc func(contextMoqParam context.Context, sendMessageByMinBlockRequest *SendMessageByMinBlockRequest) (*SentPeers, error)
+
+ // SendMessageToAllFunc mocks the SendMessageToAll method.
+ SendMessageToAllFunc func(contextMoqParam context.Context, outboundMessageData *OutboundMessageData) (*SentPeers, error)
+
+ // SendMessageToRandomPeersFunc mocks the SendMessageToRandomPeers method.
+ SendMessageToRandomPeersFunc func(contextMoqParam context.Context, sendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest) (*SentPeers, error)
+
+ // SetStatusFunc mocks the SetStatus method.
+ SetStatusFunc func(contextMoqParam context.Context, statusData *StatusData) (*SetStatusReply, error)
+
+ // mustEmbedUnimplementedSentryServerFunc mocks the mustEmbedUnimplementedSentryServer method.
+ mustEmbedUnimplementedSentryServerFunc func()
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // AddPeer holds details about calls to the AddPeer method.
+ AddPeer []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // AddPeerRequest is the addPeerRequest argument value.
+ AddPeerRequest *AddPeerRequest
+ }
+ // HandShake holds details about calls to the HandShake method.
+ HandShake []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // Empty is the empty argument value.
+ Empty *emptypb.Empty
+ }
+ // Messages holds details about calls to the Messages method.
+ Messages []struct {
+ // MessagesRequest is the messagesRequest argument value.
+ MessagesRequest *MessagesRequest
+ // Sentry_MessagesServer is the sentry_MessagesServer argument value.
+ Sentry_MessagesServer Sentry_MessagesServer
+ }
+ // NodeInfo holds details about calls to the NodeInfo method.
+ NodeInfo []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // Empty is the empty argument value.
+ Empty *emptypb.Empty
+ }
+ // PeerById holds details about calls to the PeerById method.
+ PeerById []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // PeerByIdRequest is the peerByIdRequest argument value.
+ PeerByIdRequest *PeerByIdRequest
+ }
+ // PeerCount holds details about calls to the PeerCount method.
+ PeerCount []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // PeerCountRequest is the peerCountRequest argument value.
+ PeerCountRequest *PeerCountRequest
+ }
+ // PeerEvents holds details about calls to the PeerEvents method.
+ PeerEvents []struct {
+ // PeerEventsRequest is the peerEventsRequest argument value.
+ PeerEventsRequest *PeerEventsRequest
+ // Sentry_PeerEventsServer is the sentry_PeerEventsServer argument value.
+ Sentry_PeerEventsServer Sentry_PeerEventsServer
+ }
+ // PeerMinBlock holds details about calls to the PeerMinBlock method.
+ PeerMinBlock []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // PeerMinBlockRequest is the peerMinBlockRequest argument value.
+ PeerMinBlockRequest *PeerMinBlockRequest
+ }
+ // Peers holds details about calls to the Peers method.
+ Peers []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // Empty is the empty argument value.
+ Empty *emptypb.Empty
+ }
+ // PenalizePeer holds details about calls to the PenalizePeer method.
+ PenalizePeer []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // PenalizePeerRequest is the penalizePeerRequest argument value.
+ PenalizePeerRequest *PenalizePeerRequest
+ }
+ // SendMessageById holds details about calls to the SendMessageById method.
+ SendMessageById []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // SendMessageByIdRequest is the sendMessageByIdRequest argument value.
+ SendMessageByIdRequest *SendMessageByIdRequest
+ }
+ // SendMessageByMinBlock holds details about calls to the SendMessageByMinBlock method.
+ SendMessageByMinBlock []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // SendMessageByMinBlockRequest is the sendMessageByMinBlockRequest argument value.
+ SendMessageByMinBlockRequest *SendMessageByMinBlockRequest
+ }
+ // SendMessageToAll holds details about calls to the SendMessageToAll method.
+ SendMessageToAll []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // OutboundMessageData is the outboundMessageData argument value.
+ OutboundMessageData *OutboundMessageData
+ }
+ // SendMessageToRandomPeers holds details about calls to the SendMessageToRandomPeers method.
+ SendMessageToRandomPeers []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // SendMessageToRandomPeersRequest is the sendMessageToRandomPeersRequest argument value.
+ SendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest
+ }
+ // SetStatus holds details about calls to the SetStatus method.
+ SetStatus []struct {
+ // ContextMoqParam is the contextMoqParam argument value.
+ ContextMoqParam context.Context
+ // StatusData is the statusData argument value.
+ StatusData *StatusData
+ }
+ // mustEmbedUnimplementedSentryServer holds details about calls to the mustEmbedUnimplementedSentryServer method.
+ mustEmbedUnimplementedSentryServer []struct {
+ }
+ }
+ lockAddPeer sync.RWMutex
+ lockHandShake sync.RWMutex
+ lockMessages sync.RWMutex
+ lockNodeInfo sync.RWMutex
+ lockPeerById sync.RWMutex
+ lockPeerCount sync.RWMutex
+ lockPeerEvents sync.RWMutex
+ lockPeerMinBlock sync.RWMutex
+ lockPeers sync.RWMutex
+ lockPenalizePeer sync.RWMutex
+ lockSendMessageById sync.RWMutex
+ lockSendMessageByMinBlock sync.RWMutex
+ lockSendMessageToAll sync.RWMutex
+ lockSendMessageToRandomPeers sync.RWMutex
+ lockSetStatus sync.RWMutex
+ lockmustEmbedUnimplementedSentryServer sync.RWMutex
+}
+
+// AddPeer calls AddPeerFunc.
+func (mock *SentryServerMock) AddPeer(contextMoqParam context.Context, addPeerRequest *AddPeerRequest) (*AddPeerReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ AddPeerRequest *AddPeerRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ AddPeerRequest: addPeerRequest,
+ }
+ mock.lockAddPeer.Lock()
+ mock.calls.AddPeer = append(mock.calls.AddPeer, callInfo)
+ mock.lockAddPeer.Unlock()
+ if mock.AddPeerFunc == nil {
+ var (
+ addPeerReplyOut *AddPeerReply
+ errOut error
+ )
+ return addPeerReplyOut, errOut
+ }
+ return mock.AddPeerFunc(contextMoqParam, addPeerRequest)
+}
+
+// AddPeerCalls gets all the calls that were made to AddPeer.
+// Check the length with:
+//
+// len(mockedSentryServer.AddPeerCalls())
+func (mock *SentryServerMock) AddPeerCalls() []struct {
+ ContextMoqParam context.Context
+ AddPeerRequest *AddPeerRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ AddPeerRequest *AddPeerRequest
+ }
+ mock.lockAddPeer.RLock()
+ calls = mock.calls.AddPeer
+ mock.lockAddPeer.RUnlock()
+ return calls
+}
+
+// HandShake calls HandShakeFunc.
+func (mock *SentryServerMock) HandShake(contextMoqParam context.Context, empty *emptypb.Empty) (*HandShakeReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }{
+ ContextMoqParam: contextMoqParam,
+ Empty: empty,
+ }
+ mock.lockHandShake.Lock()
+ mock.calls.HandShake = append(mock.calls.HandShake, callInfo)
+ mock.lockHandShake.Unlock()
+ if mock.HandShakeFunc == nil {
+ var (
+ handShakeReplyOut *HandShakeReply
+ errOut error
+ )
+ return handShakeReplyOut, errOut
+ }
+ return mock.HandShakeFunc(contextMoqParam, empty)
+}
+
+// HandShakeCalls gets all the calls that were made to HandShake.
+// Check the length with:
+//
+// len(mockedSentryServer.HandShakeCalls())
+func (mock *SentryServerMock) HandShakeCalls() []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }
+ mock.lockHandShake.RLock()
+ calls = mock.calls.HandShake
+ mock.lockHandShake.RUnlock()
+ return calls
+}
+
+// Messages calls MessagesFunc.
+func (mock *SentryServerMock) Messages(messagesRequest *MessagesRequest, sentry_MessagesServer Sentry_MessagesServer) error {
+ callInfo := struct {
+ MessagesRequest *MessagesRequest
+ Sentry_MessagesServer Sentry_MessagesServer
+ }{
+ MessagesRequest: messagesRequest,
+ Sentry_MessagesServer: sentry_MessagesServer,
+ }
+ mock.lockMessages.Lock()
+ mock.calls.Messages = append(mock.calls.Messages, callInfo)
+ mock.lockMessages.Unlock()
+ if mock.MessagesFunc == nil {
+ var (
+ errOut error
+ )
+ return errOut
+ }
+ return mock.MessagesFunc(messagesRequest, sentry_MessagesServer)
+}
+
+// MessagesCalls gets all the calls that were made to Messages.
+// Check the length with:
+//
+// len(mockedSentryServer.MessagesCalls())
+func (mock *SentryServerMock) MessagesCalls() []struct {
+ MessagesRequest *MessagesRequest
+ Sentry_MessagesServer Sentry_MessagesServer
+} {
+ var calls []struct {
+ MessagesRequest *MessagesRequest
+ Sentry_MessagesServer Sentry_MessagesServer
+ }
+ mock.lockMessages.RLock()
+ calls = mock.calls.Messages
+ mock.lockMessages.RUnlock()
+ return calls
+}
+
+// NodeInfo calls NodeInfoFunc.
+func (mock *SentryServerMock) NodeInfo(contextMoqParam context.Context, empty *emptypb.Empty) (*types.NodeInfoReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }{
+ ContextMoqParam: contextMoqParam,
+ Empty: empty,
+ }
+ mock.lockNodeInfo.Lock()
+ mock.calls.NodeInfo = append(mock.calls.NodeInfo, callInfo)
+ mock.lockNodeInfo.Unlock()
+ if mock.NodeInfoFunc == nil {
+ var (
+ nodeInfoReplyOut *types.NodeInfoReply
+ errOut error
+ )
+ return nodeInfoReplyOut, errOut
+ }
+ return mock.NodeInfoFunc(contextMoqParam, empty)
+}
+
+// NodeInfoCalls gets all the calls that were made to NodeInfo.
+// Check the length with:
+//
+// len(mockedSentryServer.NodeInfoCalls())
+func (mock *SentryServerMock) NodeInfoCalls() []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }
+ mock.lockNodeInfo.RLock()
+ calls = mock.calls.NodeInfo
+ mock.lockNodeInfo.RUnlock()
+ return calls
+}
+
+// PeerById calls PeerByIdFunc.
+func (mock *SentryServerMock) PeerById(contextMoqParam context.Context, peerByIdRequest *PeerByIdRequest) (*PeerByIdReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ PeerByIdRequest *PeerByIdRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ PeerByIdRequest: peerByIdRequest,
+ }
+ mock.lockPeerById.Lock()
+ mock.calls.PeerById = append(mock.calls.PeerById, callInfo)
+ mock.lockPeerById.Unlock()
+ if mock.PeerByIdFunc == nil {
+ var (
+ peerByIdReplyOut *PeerByIdReply
+ errOut error
+ )
+ return peerByIdReplyOut, errOut
+ }
+ return mock.PeerByIdFunc(contextMoqParam, peerByIdRequest)
+}
+
+// PeerByIdCalls gets all the calls that were made to PeerById.
+// Check the length with:
+//
+// len(mockedSentryServer.PeerByIdCalls())
+func (mock *SentryServerMock) PeerByIdCalls() []struct {
+ ContextMoqParam context.Context
+ PeerByIdRequest *PeerByIdRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ PeerByIdRequest *PeerByIdRequest
+ }
+ mock.lockPeerById.RLock()
+ calls = mock.calls.PeerById
+ mock.lockPeerById.RUnlock()
+ return calls
+}
+
+// PeerCount calls PeerCountFunc.
+func (mock *SentryServerMock) PeerCount(contextMoqParam context.Context, peerCountRequest *PeerCountRequest) (*PeerCountReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ PeerCountRequest *PeerCountRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ PeerCountRequest: peerCountRequest,
+ }
+ mock.lockPeerCount.Lock()
+ mock.calls.PeerCount = append(mock.calls.PeerCount, callInfo)
+ mock.lockPeerCount.Unlock()
+ if mock.PeerCountFunc == nil {
+ var (
+ peerCountReplyOut *PeerCountReply
+ errOut error
+ )
+ return peerCountReplyOut, errOut
+ }
+ return mock.PeerCountFunc(contextMoqParam, peerCountRequest)
+}
+
+// PeerCountCalls gets all the calls that were made to PeerCount.
+// Check the length with:
+//
+// len(mockedSentryServer.PeerCountCalls())
+func (mock *SentryServerMock) PeerCountCalls() []struct {
+ ContextMoqParam context.Context
+ PeerCountRequest *PeerCountRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ PeerCountRequest *PeerCountRequest
+ }
+ mock.lockPeerCount.RLock()
+ calls = mock.calls.PeerCount
+ mock.lockPeerCount.RUnlock()
+ return calls
+}
+
+// PeerEvents calls PeerEventsFunc.
+func (mock *SentryServerMock) PeerEvents(peerEventsRequest *PeerEventsRequest, sentry_PeerEventsServer Sentry_PeerEventsServer) error {
+ callInfo := struct {
+ PeerEventsRequest *PeerEventsRequest
+ Sentry_PeerEventsServer Sentry_PeerEventsServer
+ }{
+ PeerEventsRequest: peerEventsRequest,
+ Sentry_PeerEventsServer: sentry_PeerEventsServer,
+ }
+ mock.lockPeerEvents.Lock()
+ mock.calls.PeerEvents = append(mock.calls.PeerEvents, callInfo)
+ mock.lockPeerEvents.Unlock()
+ if mock.PeerEventsFunc == nil {
+ var (
+ errOut error
+ )
+ return errOut
+ }
+ return mock.PeerEventsFunc(peerEventsRequest, sentry_PeerEventsServer)
+}
+
+// PeerEventsCalls gets all the calls that were made to PeerEvents.
+// Check the length with:
+//
+// len(mockedSentryServer.PeerEventsCalls())
+func (mock *SentryServerMock) PeerEventsCalls() []struct {
+ PeerEventsRequest *PeerEventsRequest
+ Sentry_PeerEventsServer Sentry_PeerEventsServer
+} {
+ var calls []struct {
+ PeerEventsRequest *PeerEventsRequest
+ Sentry_PeerEventsServer Sentry_PeerEventsServer
+ }
+ mock.lockPeerEvents.RLock()
+ calls = mock.calls.PeerEvents
+ mock.lockPeerEvents.RUnlock()
+ return calls
+}
+
+// PeerMinBlock calls PeerMinBlockFunc.
+func (mock *SentryServerMock) PeerMinBlock(contextMoqParam context.Context, peerMinBlockRequest *PeerMinBlockRequest) (*emptypb.Empty, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ PeerMinBlockRequest *PeerMinBlockRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ PeerMinBlockRequest: peerMinBlockRequest,
+ }
+ mock.lockPeerMinBlock.Lock()
+ mock.calls.PeerMinBlock = append(mock.calls.PeerMinBlock, callInfo)
+ mock.lockPeerMinBlock.Unlock()
+ if mock.PeerMinBlockFunc == nil {
+ var (
+ emptyOut *emptypb.Empty
+ errOut error
+ )
+ return emptyOut, errOut
+ }
+ return mock.PeerMinBlockFunc(contextMoqParam, peerMinBlockRequest)
+}
+
+// PeerMinBlockCalls gets all the calls that were made to PeerMinBlock.
+// Check the length with:
+//
+// len(mockedSentryServer.PeerMinBlockCalls())
+func (mock *SentryServerMock) PeerMinBlockCalls() []struct {
+ ContextMoqParam context.Context
+ PeerMinBlockRequest *PeerMinBlockRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ PeerMinBlockRequest *PeerMinBlockRequest
+ }
+ mock.lockPeerMinBlock.RLock()
+ calls = mock.calls.PeerMinBlock
+ mock.lockPeerMinBlock.RUnlock()
+ return calls
+}
+
+// Peers calls PeersFunc.
+func (mock *SentryServerMock) Peers(contextMoqParam context.Context, empty *emptypb.Empty) (*PeersReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }{
+ ContextMoqParam: contextMoqParam,
+ Empty: empty,
+ }
+ mock.lockPeers.Lock()
+ mock.calls.Peers = append(mock.calls.Peers, callInfo)
+ mock.lockPeers.Unlock()
+ if mock.PeersFunc == nil {
+ var (
+ peersReplyOut *PeersReply
+ errOut error
+ )
+ return peersReplyOut, errOut
+ }
+ return mock.PeersFunc(contextMoqParam, empty)
+}
+
+// PeersCalls gets all the calls that were made to Peers.
+// Check the length with:
+//
+// len(mockedSentryServer.PeersCalls())
+func (mock *SentryServerMock) PeersCalls() []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ Empty *emptypb.Empty
+ }
+ mock.lockPeers.RLock()
+ calls = mock.calls.Peers
+ mock.lockPeers.RUnlock()
+ return calls
+}
+
+// PenalizePeer calls PenalizePeerFunc.
+func (mock *SentryServerMock) PenalizePeer(contextMoqParam context.Context, penalizePeerRequest *PenalizePeerRequest) (*emptypb.Empty, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ PenalizePeerRequest *PenalizePeerRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ PenalizePeerRequest: penalizePeerRequest,
+ }
+ mock.lockPenalizePeer.Lock()
+ mock.calls.PenalizePeer = append(mock.calls.PenalizePeer, callInfo)
+ mock.lockPenalizePeer.Unlock()
+ if mock.PenalizePeerFunc == nil {
+ var (
+ emptyOut *emptypb.Empty
+ errOut error
+ )
+ return emptyOut, errOut
+ }
+ return mock.PenalizePeerFunc(contextMoqParam, penalizePeerRequest)
+}
+
+// PenalizePeerCalls gets all the calls that were made to PenalizePeer.
+// Check the length with:
+//
+// len(mockedSentryServer.PenalizePeerCalls())
+func (mock *SentryServerMock) PenalizePeerCalls() []struct {
+ ContextMoqParam context.Context
+ PenalizePeerRequest *PenalizePeerRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ PenalizePeerRequest *PenalizePeerRequest
+ }
+ mock.lockPenalizePeer.RLock()
+ calls = mock.calls.PenalizePeer
+ mock.lockPenalizePeer.RUnlock()
+ return calls
+}
+
+// SendMessageById calls SendMessageByIdFunc.
+func (mock *SentryServerMock) SendMessageById(contextMoqParam context.Context, sendMessageByIdRequest *SendMessageByIdRequest) (*SentPeers, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ SendMessageByIdRequest *SendMessageByIdRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ SendMessageByIdRequest: sendMessageByIdRequest,
+ }
+ mock.lockSendMessageById.Lock()
+ mock.calls.SendMessageById = append(mock.calls.SendMessageById, callInfo)
+ mock.lockSendMessageById.Unlock()
+ if mock.SendMessageByIdFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageByIdFunc(contextMoqParam, sendMessageByIdRequest)
+}
+
+// SendMessageByIdCalls gets all the calls that were made to SendMessageById.
+// Check the length with:
+//
+// len(mockedSentryServer.SendMessageByIdCalls())
+func (mock *SentryServerMock) SendMessageByIdCalls() []struct {
+ ContextMoqParam context.Context
+ SendMessageByIdRequest *SendMessageByIdRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ SendMessageByIdRequest *SendMessageByIdRequest
+ }
+ mock.lockSendMessageById.RLock()
+ calls = mock.calls.SendMessageById
+ mock.lockSendMessageById.RUnlock()
+ return calls
+}
+
+// SendMessageByMinBlock calls SendMessageByMinBlockFunc.
+func (mock *SentryServerMock) SendMessageByMinBlock(contextMoqParam context.Context, sendMessageByMinBlockRequest *SendMessageByMinBlockRequest) (*SentPeers, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ SendMessageByMinBlockRequest *SendMessageByMinBlockRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ SendMessageByMinBlockRequest: sendMessageByMinBlockRequest,
+ }
+ mock.lockSendMessageByMinBlock.Lock()
+ mock.calls.SendMessageByMinBlock = append(mock.calls.SendMessageByMinBlock, callInfo)
+ mock.lockSendMessageByMinBlock.Unlock()
+ if mock.SendMessageByMinBlockFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageByMinBlockFunc(contextMoqParam, sendMessageByMinBlockRequest)
+}
+
+// SendMessageByMinBlockCalls gets all the calls that were made to SendMessageByMinBlock.
+// Check the length with:
+//
+// len(mockedSentryServer.SendMessageByMinBlockCalls())
+func (mock *SentryServerMock) SendMessageByMinBlockCalls() []struct {
+ ContextMoqParam context.Context
+ SendMessageByMinBlockRequest *SendMessageByMinBlockRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ SendMessageByMinBlockRequest *SendMessageByMinBlockRequest
+ }
+ mock.lockSendMessageByMinBlock.RLock()
+ calls = mock.calls.SendMessageByMinBlock
+ mock.lockSendMessageByMinBlock.RUnlock()
+ return calls
+}
+
+// SendMessageToAll calls SendMessageToAllFunc.
+func (mock *SentryServerMock) SendMessageToAll(contextMoqParam context.Context, outboundMessageData *OutboundMessageData) (*SentPeers, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ OutboundMessageData *OutboundMessageData
+ }{
+ ContextMoqParam: contextMoqParam,
+ OutboundMessageData: outboundMessageData,
+ }
+ mock.lockSendMessageToAll.Lock()
+ mock.calls.SendMessageToAll = append(mock.calls.SendMessageToAll, callInfo)
+ mock.lockSendMessageToAll.Unlock()
+ if mock.SendMessageToAllFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageToAllFunc(contextMoqParam, outboundMessageData)
+}
+
+// SendMessageToAllCalls gets all the calls that were made to SendMessageToAll.
+// Check the length with:
+//
+// len(mockedSentryServer.SendMessageToAllCalls())
+func (mock *SentryServerMock) SendMessageToAllCalls() []struct {
+ ContextMoqParam context.Context
+ OutboundMessageData *OutboundMessageData
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ OutboundMessageData *OutboundMessageData
+ }
+ mock.lockSendMessageToAll.RLock()
+ calls = mock.calls.SendMessageToAll
+ mock.lockSendMessageToAll.RUnlock()
+ return calls
+}
+
+// SendMessageToRandomPeers calls SendMessageToRandomPeersFunc.
+func (mock *SentryServerMock) SendMessageToRandomPeers(contextMoqParam context.Context, sendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest) (*SentPeers, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ SendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest
+ }{
+ ContextMoqParam: contextMoqParam,
+ SendMessageToRandomPeersRequest: sendMessageToRandomPeersRequest,
+ }
+ mock.lockSendMessageToRandomPeers.Lock()
+ mock.calls.SendMessageToRandomPeers = append(mock.calls.SendMessageToRandomPeers, callInfo)
+ mock.lockSendMessageToRandomPeers.Unlock()
+ if mock.SendMessageToRandomPeersFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageToRandomPeersFunc(contextMoqParam, sendMessageToRandomPeersRequest)
+}
+
+// SendMessageToRandomPeersCalls gets all the calls that were made to SendMessageToRandomPeers.
+// Check the length with:
+//
+// len(mockedSentryServer.SendMessageToRandomPeersCalls())
+func (mock *SentryServerMock) SendMessageToRandomPeersCalls() []struct {
+ ContextMoqParam context.Context
+ SendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ SendMessageToRandomPeersRequest *SendMessageToRandomPeersRequest
+ }
+ mock.lockSendMessageToRandomPeers.RLock()
+ calls = mock.calls.SendMessageToRandomPeers
+ mock.lockSendMessageToRandomPeers.RUnlock()
+ return calls
+}
+
+// SetStatus calls SetStatusFunc.
+func (mock *SentryServerMock) SetStatus(contextMoqParam context.Context, statusData *StatusData) (*SetStatusReply, error) {
+ callInfo := struct {
+ ContextMoqParam context.Context
+ StatusData *StatusData
+ }{
+ ContextMoqParam: contextMoqParam,
+ StatusData: statusData,
+ }
+ mock.lockSetStatus.Lock()
+ mock.calls.SetStatus = append(mock.calls.SetStatus, callInfo)
+ mock.lockSetStatus.Unlock()
+ if mock.SetStatusFunc == nil {
+ var (
+ setStatusReplyOut *SetStatusReply
+ errOut error
+ )
+ return setStatusReplyOut, errOut
+ }
+ return mock.SetStatusFunc(contextMoqParam, statusData)
+}
+
+// SetStatusCalls gets all the calls that were made to SetStatus.
+// Check the length with:
+//
+// len(mockedSentryServer.SetStatusCalls())
+func (mock *SentryServerMock) SetStatusCalls() []struct {
+ ContextMoqParam context.Context
+ StatusData *StatusData
+} {
+ var calls []struct {
+ ContextMoqParam context.Context
+ StatusData *StatusData
+ }
+ mock.lockSetStatus.RLock()
+ calls = mock.calls.SetStatus
+ mock.lockSetStatus.RUnlock()
+ return calls
+}
+
+// mustEmbedUnimplementedSentryServer calls mustEmbedUnimplementedSentryServerFunc.
+func (mock *SentryServerMock) mustEmbedUnimplementedSentryServer() {
+ callInfo := struct {
+ }{}
+ mock.lockmustEmbedUnimplementedSentryServer.Lock()
+ mock.calls.mustEmbedUnimplementedSentryServer = append(mock.calls.mustEmbedUnimplementedSentryServer, callInfo)
+ mock.lockmustEmbedUnimplementedSentryServer.Unlock()
+ if mock.mustEmbedUnimplementedSentryServerFunc == nil {
+ return
+ }
+ mock.mustEmbedUnimplementedSentryServerFunc()
+}
+
+// mustEmbedUnimplementedSentryServerCalls gets all the calls that were made to mustEmbedUnimplementedSentryServer.
+// Check the length with:
+//
+// len(mockedSentryServer.mustEmbedUnimplementedSentryServerCalls())
+func (mock *SentryServerMock) mustEmbedUnimplementedSentryServerCalls() []struct {
+} {
+ var calls []struct {
+ }
+ mock.lockmustEmbedUnimplementedSentryServer.RLock()
+ calls = mock.calls.mustEmbedUnimplementedSentryServer
+ mock.lockmustEmbedUnimplementedSentryServer.RUnlock()
+ return calls
+}
+
+// Ensure, that SentryClientMock does implement SentryClient.
+// If this is not the case, regenerate this file with moq.
+var _ SentryClient = &SentryClientMock{}
+
+// SentryClientMock is a mock implementation of SentryClient.
+//
+// func TestSomethingThatUsesSentryClient(t *testing.T) {
+//
+// // make and configure a mocked SentryClient
+// mockedSentryClient := &SentryClientMock{
+// AddPeerFunc: func(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error) {
+// panic("mock out the AddPeer method")
+// },
+// HandShakeFunc: func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error) {
+// panic("mock out the HandShake method")
+// },
+// MessagesFunc: func(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error) {
+// panic("mock out the Messages method")
+// },
+// NodeInfoFunc: func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error) {
+// panic("mock out the NodeInfo method")
+// },
+// PeerByIdFunc: func(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error) {
+// panic("mock out the PeerById method")
+// },
+// PeerCountFunc: func(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error) {
+// panic("mock out the PeerCount method")
+// },
+// PeerEventsFunc: func(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error) {
+// panic("mock out the PeerEvents method")
+// },
+// PeerMinBlockFunc: func(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+// panic("mock out the PeerMinBlock method")
+// },
+// PeersFunc: func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) {
+// panic("mock out the Peers method")
+// },
+// PenalizePeerFunc: func(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+// panic("mock out the PenalizePeer method")
+// },
+// SendMessageByIdFunc: func(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+// panic("mock out the SendMessageById method")
+// },
+// SendMessageByMinBlockFunc: func(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+// panic("mock out the SendMessageByMinBlock method")
+// },
+// SendMessageToAllFunc: func(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error) {
+// panic("mock out the SendMessageToAll method")
+// },
+// SendMessageToRandomPeersFunc: func(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+// panic("mock out the SendMessageToRandomPeers method")
+// },
+// SetStatusFunc: func(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error) {
+// panic("mock out the SetStatus method")
+// },
+// }
+//
+// // use mockedSentryClient in code that requires SentryClient
+// // and then make assertions.
+//
+// }
+type SentryClientMock struct {
+ // AddPeerFunc mocks the AddPeer method.
+ AddPeerFunc func(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error)
+
+ // HandShakeFunc mocks the HandShake method.
+ HandShakeFunc func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error)
+
+ // MessagesFunc mocks the Messages method.
+ MessagesFunc func(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error)
+
+ // NodeInfoFunc mocks the NodeInfo method.
+ NodeInfoFunc func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error)
+
+ // PeerByIdFunc mocks the PeerById method.
+ PeerByIdFunc func(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error)
+
+ // PeerCountFunc mocks the PeerCount method.
+ PeerCountFunc func(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error)
+
+ // PeerEventsFunc mocks the PeerEvents method.
+ PeerEventsFunc func(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error)
+
+ // PeerMinBlockFunc mocks the PeerMinBlock method.
+ PeerMinBlockFunc func(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+
+ // PeersFunc mocks the Peers method.
+ PeersFunc func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error)
+
+ // PenalizePeerFunc mocks the PenalizePeer method.
+ PenalizePeerFunc func(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+
+ // SendMessageByIdFunc mocks the SendMessageById method.
+ SendMessageByIdFunc func(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error)
+
+ // SendMessageByMinBlockFunc mocks the SendMessageByMinBlock method.
+ SendMessageByMinBlockFunc func(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error)
+
+ // SendMessageToAllFunc mocks the SendMessageToAll method.
+ SendMessageToAllFunc func(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error)
+
+ // SendMessageToRandomPeersFunc mocks the SendMessageToRandomPeers method.
+ SendMessageToRandomPeersFunc func(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error)
+
+ // SetStatusFunc mocks the SetStatus method.
+ SetStatusFunc func(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error)
+
+ // calls tracks calls to the methods.
+ calls struct {
+ // AddPeer holds details about calls to the AddPeer method.
+ AddPeer []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *AddPeerRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // HandShake holds details about calls to the HandShake method.
+ HandShake []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *emptypb.Empty
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Messages holds details about calls to the Messages method.
+ Messages []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *MessagesRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // NodeInfo holds details about calls to the NodeInfo method.
+ NodeInfo []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *emptypb.Empty
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // PeerById holds details about calls to the PeerById method.
+ PeerById []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *PeerByIdRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // PeerCount holds details about calls to the PeerCount method.
+ PeerCount []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *PeerCountRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // PeerEvents holds details about calls to the PeerEvents method.
+ PeerEvents []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *PeerEventsRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // PeerMinBlock holds details about calls to the PeerMinBlock method.
+ PeerMinBlock []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *PeerMinBlockRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // Peers holds details about calls to the Peers method.
+ Peers []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *emptypb.Empty
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // PenalizePeer holds details about calls to the PenalizePeer method.
+ PenalizePeer []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *PenalizePeerRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // SendMessageById holds details about calls to the SendMessageById method.
+ SendMessageById []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *SendMessageByIdRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // SendMessageByMinBlock holds details about calls to the SendMessageByMinBlock method.
+ SendMessageByMinBlock []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *SendMessageByMinBlockRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // SendMessageToAll holds details about calls to the SendMessageToAll method.
+ SendMessageToAll []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *OutboundMessageData
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // SendMessageToRandomPeers holds details about calls to the SendMessageToRandomPeers method.
+ SendMessageToRandomPeers []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *SendMessageToRandomPeersRequest
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ // SetStatus holds details about calls to the SetStatus method.
+ SetStatus []struct {
+ // Ctx is the ctx argument value.
+ Ctx context.Context
+ // In is the in argument value.
+ In *StatusData
+ // Opts is the opts argument value.
+ Opts []grpc.CallOption
+ }
+ }
+ lockAddPeer sync.RWMutex
+ lockHandShake sync.RWMutex
+ lockMessages sync.RWMutex
+ lockNodeInfo sync.RWMutex
+ lockPeerById sync.RWMutex
+ lockPeerCount sync.RWMutex
+ lockPeerEvents sync.RWMutex
+ lockPeerMinBlock sync.RWMutex
+ lockPeers sync.RWMutex
+ lockPenalizePeer sync.RWMutex
+ lockSendMessageById sync.RWMutex
+ lockSendMessageByMinBlock sync.RWMutex
+ lockSendMessageToAll sync.RWMutex
+ lockSendMessageToRandomPeers sync.RWMutex
+ lockSetStatus sync.RWMutex
+}
+
+// AddPeer calls AddPeerFunc.
+func (mock *SentryClientMock) AddPeer(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *AddPeerRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockAddPeer.Lock()
+ mock.calls.AddPeer = append(mock.calls.AddPeer, callInfo)
+ mock.lockAddPeer.Unlock()
+ if mock.AddPeerFunc == nil {
+ var (
+ addPeerReplyOut *AddPeerReply
+ errOut error
+ )
+ return addPeerReplyOut, errOut
+ }
+ return mock.AddPeerFunc(ctx, in, opts...)
+}
+
+// AddPeerCalls gets all the calls that were made to AddPeer.
+// Check the length with:
+//
+// len(mockedSentryClient.AddPeerCalls())
+func (mock *SentryClientMock) AddPeerCalls() []struct {
+ Ctx context.Context
+ In *AddPeerRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *AddPeerRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockAddPeer.RLock()
+ calls = mock.calls.AddPeer
+ mock.lockAddPeer.RUnlock()
+ return calls
+}
+
+// HandShake calls HandShakeFunc.
+func (mock *SentryClientMock) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockHandShake.Lock()
+ mock.calls.HandShake = append(mock.calls.HandShake, callInfo)
+ mock.lockHandShake.Unlock()
+ if mock.HandShakeFunc == nil {
+ var (
+ handShakeReplyOut *HandShakeReply
+ errOut error
+ )
+ return handShakeReplyOut, errOut
+ }
+ return mock.HandShakeFunc(ctx, in, opts...)
+}
+
+// HandShakeCalls gets all the calls that were made to HandShake.
+// Check the length with:
+//
+// len(mockedSentryClient.HandShakeCalls())
+func (mock *SentryClientMock) HandShakeCalls() []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }
+ mock.lockHandShake.RLock()
+ calls = mock.calls.HandShake
+ mock.lockHandShake.RUnlock()
+ return calls
+}
+
+// Messages calls MessagesFunc.
+func (mock *SentryClientMock) Messages(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *MessagesRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockMessages.Lock()
+ mock.calls.Messages = append(mock.calls.Messages, callInfo)
+ mock.lockMessages.Unlock()
+ if mock.MessagesFunc == nil {
+ var (
+ sentry_MessagesClientOut Sentry_MessagesClient
+ errOut error
+ )
+ return sentry_MessagesClientOut, errOut
+ }
+ return mock.MessagesFunc(ctx, in, opts...)
+}
+
+// MessagesCalls gets all the calls that were made to Messages.
+// Check the length with:
+//
+// len(mockedSentryClient.MessagesCalls())
+func (mock *SentryClientMock) MessagesCalls() []struct {
+ Ctx context.Context
+ In *MessagesRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *MessagesRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockMessages.RLock()
+ calls = mock.calls.Messages
+ mock.lockMessages.RUnlock()
+ return calls
+}
+
+// NodeInfo calls NodeInfoFunc.
+func (mock *SentryClientMock) NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockNodeInfo.Lock()
+ mock.calls.NodeInfo = append(mock.calls.NodeInfo, callInfo)
+ mock.lockNodeInfo.Unlock()
+ if mock.NodeInfoFunc == nil {
+ var (
+ nodeInfoReplyOut *types.NodeInfoReply
+ errOut error
+ )
+ return nodeInfoReplyOut, errOut
+ }
+ return mock.NodeInfoFunc(ctx, in, opts...)
+}
+
+// NodeInfoCalls gets all the calls that were made to NodeInfo.
+// Check the length with:
+//
+// len(mockedSentryClient.NodeInfoCalls())
+func (mock *SentryClientMock) NodeInfoCalls() []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }
+ mock.lockNodeInfo.RLock()
+ calls = mock.calls.NodeInfo
+ mock.lockNodeInfo.RUnlock()
+ return calls
+}
+
+// PeerById calls PeerByIdFunc.
+func (mock *SentryClientMock) PeerById(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *PeerByIdRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPeerById.Lock()
+ mock.calls.PeerById = append(mock.calls.PeerById, callInfo)
+ mock.lockPeerById.Unlock()
+ if mock.PeerByIdFunc == nil {
+ var (
+ peerByIdReplyOut *PeerByIdReply
+ errOut error
+ )
+ return peerByIdReplyOut, errOut
+ }
+ return mock.PeerByIdFunc(ctx, in, opts...)
+}
+
+// PeerByIdCalls gets all the calls that were made to PeerById.
+// Check the length with:
+//
+// len(mockedSentryClient.PeerByIdCalls())
+func (mock *SentryClientMock) PeerByIdCalls() []struct {
+ Ctx context.Context
+ In *PeerByIdRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *PeerByIdRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockPeerById.RLock()
+ calls = mock.calls.PeerById
+ mock.lockPeerById.RUnlock()
+ return calls
+}
+
+// PeerCount calls PeerCountFunc.
+func (mock *SentryClientMock) PeerCount(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *PeerCountRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPeerCount.Lock()
+ mock.calls.PeerCount = append(mock.calls.PeerCount, callInfo)
+ mock.lockPeerCount.Unlock()
+ if mock.PeerCountFunc == nil {
+ var (
+ peerCountReplyOut *PeerCountReply
+ errOut error
+ )
+ return peerCountReplyOut, errOut
+ }
+ return mock.PeerCountFunc(ctx, in, opts...)
+}
+
+// PeerCountCalls gets all the calls that were made to PeerCount.
+// Check the length with:
+//
+// len(mockedSentryClient.PeerCountCalls())
+func (mock *SentryClientMock) PeerCountCalls() []struct {
+ Ctx context.Context
+ In *PeerCountRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *PeerCountRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockPeerCount.RLock()
+ calls = mock.calls.PeerCount
+ mock.lockPeerCount.RUnlock()
+ return calls
+}
+
+// PeerEvents calls PeerEventsFunc.
+func (mock *SentryClientMock) PeerEvents(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *PeerEventsRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPeerEvents.Lock()
+ mock.calls.PeerEvents = append(mock.calls.PeerEvents, callInfo)
+ mock.lockPeerEvents.Unlock()
+ if mock.PeerEventsFunc == nil {
+ var (
+ sentry_PeerEventsClientOut Sentry_PeerEventsClient
+ errOut error
+ )
+ return sentry_PeerEventsClientOut, errOut
+ }
+ return mock.PeerEventsFunc(ctx, in, opts...)
+}
+
+// PeerEventsCalls gets all the calls that were made to PeerEvents.
+// Check the length with:
+//
+// len(mockedSentryClient.PeerEventsCalls())
+func (mock *SentryClientMock) PeerEventsCalls() []struct {
+ Ctx context.Context
+ In *PeerEventsRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *PeerEventsRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockPeerEvents.RLock()
+ calls = mock.calls.PeerEvents
+ mock.lockPeerEvents.RUnlock()
+ return calls
+}
+
+// PeerMinBlock calls PeerMinBlockFunc.
+func (mock *SentryClientMock) PeerMinBlock(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *PeerMinBlockRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPeerMinBlock.Lock()
+ mock.calls.PeerMinBlock = append(mock.calls.PeerMinBlock, callInfo)
+ mock.lockPeerMinBlock.Unlock()
+ if mock.PeerMinBlockFunc == nil {
+ var (
+ emptyOut *emptypb.Empty
+ errOut error
+ )
+ return emptyOut, errOut
+ }
+ return mock.PeerMinBlockFunc(ctx, in, opts...)
+}
+
+// PeerMinBlockCalls gets all the calls that were made to PeerMinBlock.
+// Check the length with:
+//
+// len(mockedSentryClient.PeerMinBlockCalls())
+func (mock *SentryClientMock) PeerMinBlockCalls() []struct {
+ Ctx context.Context
+ In *PeerMinBlockRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *PeerMinBlockRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockPeerMinBlock.RLock()
+ calls = mock.calls.PeerMinBlock
+ mock.lockPeerMinBlock.RUnlock()
+ return calls
+}
+
+// Peers calls PeersFunc.
+func (mock *SentryClientMock) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPeers.Lock()
+ mock.calls.Peers = append(mock.calls.Peers, callInfo)
+ mock.lockPeers.Unlock()
+ if mock.PeersFunc == nil {
+ var (
+ peersReplyOut *PeersReply
+ errOut error
+ )
+ return peersReplyOut, errOut
+ }
+ return mock.PeersFunc(ctx, in, opts...)
+}
+
+// PeersCalls gets all the calls that were made to Peers.
+// Check the length with:
+//
+// len(mockedSentryClient.PeersCalls())
+func (mock *SentryClientMock) PeersCalls() []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *emptypb.Empty
+ Opts []grpc.CallOption
+ }
+ mock.lockPeers.RLock()
+ calls = mock.calls.Peers
+ mock.lockPeers.RUnlock()
+ return calls
+}
+
+// PenalizePeer calls PenalizePeerFunc.
+func (mock *SentryClientMock) PenalizePeer(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *PenalizePeerRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockPenalizePeer.Lock()
+ mock.calls.PenalizePeer = append(mock.calls.PenalizePeer, callInfo)
+ mock.lockPenalizePeer.Unlock()
+ if mock.PenalizePeerFunc == nil {
+ var (
+ emptyOut *emptypb.Empty
+ errOut error
+ )
+ return emptyOut, errOut
+ }
+ return mock.PenalizePeerFunc(ctx, in, opts...)
+}
+
+// PenalizePeerCalls gets all the calls that were made to PenalizePeer.
+// Check the length with:
+//
+// len(mockedSentryClient.PenalizePeerCalls())
+func (mock *SentryClientMock) PenalizePeerCalls() []struct {
+ Ctx context.Context
+ In *PenalizePeerRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *PenalizePeerRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockPenalizePeer.RLock()
+ calls = mock.calls.PenalizePeer
+ mock.lockPenalizePeer.RUnlock()
+ return calls
+}
+
+// SendMessageById calls SendMessageByIdFunc.
+func (mock *SentryClientMock) SendMessageById(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *SendMessageByIdRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSendMessageById.Lock()
+ mock.calls.SendMessageById = append(mock.calls.SendMessageById, callInfo)
+ mock.lockSendMessageById.Unlock()
+ if mock.SendMessageByIdFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageByIdFunc(ctx, in, opts...)
+}
+
+// SendMessageByIdCalls gets all the calls that were made to SendMessageById.
+// Check the length with:
+//
+// len(mockedSentryClient.SendMessageByIdCalls())
+func (mock *SentryClientMock) SendMessageByIdCalls() []struct {
+ Ctx context.Context
+ In *SendMessageByIdRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *SendMessageByIdRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockSendMessageById.RLock()
+ calls = mock.calls.SendMessageById
+ mock.lockSendMessageById.RUnlock()
+ return calls
+}
+
+// SendMessageByMinBlock calls SendMessageByMinBlockFunc.
+func (mock *SentryClientMock) SendMessageByMinBlock(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *SendMessageByMinBlockRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSendMessageByMinBlock.Lock()
+ mock.calls.SendMessageByMinBlock = append(mock.calls.SendMessageByMinBlock, callInfo)
+ mock.lockSendMessageByMinBlock.Unlock()
+ if mock.SendMessageByMinBlockFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageByMinBlockFunc(ctx, in, opts...)
+}
+
+// SendMessageByMinBlockCalls gets all the calls that were made to SendMessageByMinBlock.
+// Check the length with:
+//
+// len(mockedSentryClient.SendMessageByMinBlockCalls())
+func (mock *SentryClientMock) SendMessageByMinBlockCalls() []struct {
+ Ctx context.Context
+ In *SendMessageByMinBlockRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *SendMessageByMinBlockRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockSendMessageByMinBlock.RLock()
+ calls = mock.calls.SendMessageByMinBlock
+ mock.lockSendMessageByMinBlock.RUnlock()
+ return calls
+}
+
+// SendMessageToAll calls SendMessageToAllFunc.
+func (mock *SentryClientMock) SendMessageToAll(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *OutboundMessageData
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSendMessageToAll.Lock()
+ mock.calls.SendMessageToAll = append(mock.calls.SendMessageToAll, callInfo)
+ mock.lockSendMessageToAll.Unlock()
+ if mock.SendMessageToAllFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageToAllFunc(ctx, in, opts...)
+}
+
+// SendMessageToAllCalls gets all the calls that were made to SendMessageToAll.
+// Check the length with:
+//
+// len(mockedSentryClient.SendMessageToAllCalls())
+func (mock *SentryClientMock) SendMessageToAllCalls() []struct {
+ Ctx context.Context
+ In *OutboundMessageData
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *OutboundMessageData
+ Opts []grpc.CallOption
+ }
+ mock.lockSendMessageToAll.RLock()
+ calls = mock.calls.SendMessageToAll
+ mock.lockSendMessageToAll.RUnlock()
+ return calls
+}
+
+// SendMessageToRandomPeers calls SendMessageToRandomPeersFunc.
+func (mock *SentryClientMock) SendMessageToRandomPeers(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *SendMessageToRandomPeersRequest
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSendMessageToRandomPeers.Lock()
+ mock.calls.SendMessageToRandomPeers = append(mock.calls.SendMessageToRandomPeers, callInfo)
+ mock.lockSendMessageToRandomPeers.Unlock()
+ if mock.SendMessageToRandomPeersFunc == nil {
+ var (
+ sentPeersOut *SentPeers
+ errOut error
+ )
+ return sentPeersOut, errOut
+ }
+ return mock.SendMessageToRandomPeersFunc(ctx, in, opts...)
+}
+
+// SendMessageToRandomPeersCalls gets all the calls that were made to SendMessageToRandomPeers.
+// Check the length with:
+//
+// len(mockedSentryClient.SendMessageToRandomPeersCalls())
+func (mock *SentryClientMock) SendMessageToRandomPeersCalls() []struct {
+ Ctx context.Context
+ In *SendMessageToRandomPeersRequest
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *SendMessageToRandomPeersRequest
+ Opts []grpc.CallOption
+ }
+ mock.lockSendMessageToRandomPeers.RLock()
+ calls = mock.calls.SendMessageToRandomPeers
+ mock.lockSendMessageToRandomPeers.RUnlock()
+ return calls
+}
+
+// SetStatus calls SetStatusFunc.
+func (mock *SentryClientMock) SetStatus(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error) {
+ callInfo := struct {
+ Ctx context.Context
+ In *StatusData
+ Opts []grpc.CallOption
+ }{
+ Ctx: ctx,
+ In: in,
+ Opts: opts,
+ }
+ mock.lockSetStatus.Lock()
+ mock.calls.SetStatus = append(mock.calls.SetStatus, callInfo)
+ mock.lockSetStatus.Unlock()
+ if mock.SetStatusFunc == nil {
+ var (
+ setStatusReplyOut *SetStatusReply
+ errOut error
+ )
+ return setStatusReplyOut, errOut
+ }
+ return mock.SetStatusFunc(ctx, in, opts...)
+}
+
+// SetStatusCalls gets all the calls that were made to SetStatus.
+// Check the length with:
+//
+// len(mockedSentryClient.SetStatusCalls())
+func (mock *SentryClientMock) SetStatusCalls() []struct {
+ Ctx context.Context
+ In *StatusData
+ Opts []grpc.CallOption
+} {
+ var calls []struct {
+ Ctx context.Context
+ In *StatusData
+ Opts []grpc.CallOption
+ }
+ mock.lockSetStatus.RLock()
+ calls = mock.calls.SetStatus
+ mock.lockSetStatus.RUnlock()
+ return calls
+}
diff --git a/erigon-lib/gointerfaces/sentry/sentry.pb.go b/erigon-lib/gointerfaces/sentry/sentry.pb.go
new file mode 100644
index 00000000000..87710f44292
--- /dev/null
+++ b/erigon-lib/gointerfaces/sentry/sentry.pb.go
@@ -0,0 +1,2177 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: p2psentry/sentry.proto
+
+package sentry
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MessageId int32
+
+const (
+ MessageId_STATUS_65 MessageId = 0
+ MessageId_GET_BLOCK_HEADERS_65 MessageId = 1
+ MessageId_BLOCK_HEADERS_65 MessageId = 2
+ MessageId_BLOCK_HASHES_65 MessageId = 3
+ MessageId_GET_BLOCK_BODIES_65 MessageId = 4
+ MessageId_BLOCK_BODIES_65 MessageId = 5
+ MessageId_GET_NODE_DATA_65 MessageId = 6
+ MessageId_NODE_DATA_65 MessageId = 7
+ MessageId_GET_RECEIPTS_65 MessageId = 8
+ MessageId_RECEIPTS_65 MessageId = 9
+ MessageId_NEW_BLOCK_HASHES_65 MessageId = 10
+ MessageId_NEW_BLOCK_65 MessageId = 11
+ MessageId_TRANSACTIONS_65 MessageId = 12
+ MessageId_NEW_POOLED_TRANSACTION_HASHES_65 MessageId = 13
+ MessageId_GET_POOLED_TRANSACTIONS_65 MessageId = 14
+ MessageId_POOLED_TRANSACTIONS_65 MessageId = 15
+ // eth64 announcement messages (no id)
+ MessageId_STATUS_66 MessageId = 17
+ MessageId_NEW_BLOCK_HASHES_66 MessageId = 18
+ MessageId_NEW_BLOCK_66 MessageId = 19
+ MessageId_TRANSACTIONS_66 MessageId = 20
+ // eth65 announcement messages (no id)
+ MessageId_NEW_POOLED_TRANSACTION_HASHES_66 MessageId = 21
+ // eth66 messages with request-id
+ MessageId_GET_BLOCK_HEADERS_66 MessageId = 22
+ MessageId_GET_BLOCK_BODIES_66 MessageId = 23
+ MessageId_GET_NODE_DATA_66 MessageId = 24
+ MessageId_GET_RECEIPTS_66 MessageId = 25
+ MessageId_GET_POOLED_TRANSACTIONS_66 MessageId = 26
+ MessageId_BLOCK_HEADERS_66 MessageId = 27
+ MessageId_BLOCK_BODIES_66 MessageId = 28
+ MessageId_NODE_DATA_66 MessageId = 29
+ MessageId_RECEIPTS_66 MessageId = 30
+ MessageId_POOLED_TRANSACTIONS_66 MessageId = 31
+ // ======= eth 68 protocol ===========
+ MessageId_NEW_POOLED_TRANSACTION_HASHES_68 MessageId = 32
+)
+
+// Enum value maps for MessageId.
+var (
+ MessageId_name = map[int32]string{
+ 0: "STATUS_65",
+ 1: "GET_BLOCK_HEADERS_65",
+ 2: "BLOCK_HEADERS_65",
+ 3: "BLOCK_HASHES_65",
+ 4: "GET_BLOCK_BODIES_65",
+ 5: "BLOCK_BODIES_65",
+ 6: "GET_NODE_DATA_65",
+ 7: "NODE_DATA_65",
+ 8: "GET_RECEIPTS_65",
+ 9: "RECEIPTS_65",
+ 10: "NEW_BLOCK_HASHES_65",
+ 11: "NEW_BLOCK_65",
+ 12: "TRANSACTIONS_65",
+ 13: "NEW_POOLED_TRANSACTION_HASHES_65",
+ 14: "GET_POOLED_TRANSACTIONS_65",
+ 15: "POOLED_TRANSACTIONS_65",
+ 17: "STATUS_66",
+ 18: "NEW_BLOCK_HASHES_66",
+ 19: "NEW_BLOCK_66",
+ 20: "TRANSACTIONS_66",
+ 21: "NEW_POOLED_TRANSACTION_HASHES_66",
+ 22: "GET_BLOCK_HEADERS_66",
+ 23: "GET_BLOCK_BODIES_66",
+ 24: "GET_NODE_DATA_66",
+ 25: "GET_RECEIPTS_66",
+ 26: "GET_POOLED_TRANSACTIONS_66",
+ 27: "BLOCK_HEADERS_66",
+ 28: "BLOCK_BODIES_66",
+ 29: "NODE_DATA_66",
+ 30: "RECEIPTS_66",
+ 31: "POOLED_TRANSACTIONS_66",
+ 32: "NEW_POOLED_TRANSACTION_HASHES_68",
+ }
+ MessageId_value = map[string]int32{
+ "STATUS_65": 0,
+ "GET_BLOCK_HEADERS_65": 1,
+ "BLOCK_HEADERS_65": 2,
+ "BLOCK_HASHES_65": 3,
+ "GET_BLOCK_BODIES_65": 4,
+ "BLOCK_BODIES_65": 5,
+ "GET_NODE_DATA_65": 6,
+ "NODE_DATA_65": 7,
+ "GET_RECEIPTS_65": 8,
+ "RECEIPTS_65": 9,
+ "NEW_BLOCK_HASHES_65": 10,
+ "NEW_BLOCK_65": 11,
+ "TRANSACTIONS_65": 12,
+ "NEW_POOLED_TRANSACTION_HASHES_65": 13,
+ "GET_POOLED_TRANSACTIONS_65": 14,
+ "POOLED_TRANSACTIONS_65": 15,
+ "STATUS_66": 17,
+ "NEW_BLOCK_HASHES_66": 18,
+ "NEW_BLOCK_66": 19,
+ "TRANSACTIONS_66": 20,
+ "NEW_POOLED_TRANSACTION_HASHES_66": 21,
+ "GET_BLOCK_HEADERS_66": 22,
+ "GET_BLOCK_BODIES_66": 23,
+ "GET_NODE_DATA_66": 24,
+ "GET_RECEIPTS_66": 25,
+ "GET_POOLED_TRANSACTIONS_66": 26,
+ "BLOCK_HEADERS_66": 27,
+ "BLOCK_BODIES_66": 28,
+ "NODE_DATA_66": 29,
+ "RECEIPTS_66": 30,
+ "POOLED_TRANSACTIONS_66": 31,
+ "NEW_POOLED_TRANSACTION_HASHES_68": 32,
+ }
+)
+
+func (x MessageId) Enum() *MessageId {
+ p := new(MessageId)
+ *p = x
+ return p
+}
+
+func (x MessageId) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MessageId) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2psentry_sentry_proto_enumTypes[0].Descriptor()
+}
+
+func (MessageId) Type() protoreflect.EnumType {
+ return &file_p2psentry_sentry_proto_enumTypes[0]
+}
+
+func (x MessageId) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MessageId.Descriptor instead.
+func (MessageId) EnumDescriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{0}
+}
+
+type PenaltyKind int32
+
+const (
+ PenaltyKind_Kick PenaltyKind = 0
+)
+
+// Enum value maps for PenaltyKind.
+var (
+ PenaltyKind_name = map[int32]string{
+ 0: "Kick",
+ }
+ PenaltyKind_value = map[string]int32{
+ "Kick": 0,
+ }
+)
+
+func (x PenaltyKind) Enum() *PenaltyKind {
+ p := new(PenaltyKind)
+ *p = x
+ return p
+}
+
+func (x PenaltyKind) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PenaltyKind) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2psentry_sentry_proto_enumTypes[1].Descriptor()
+}
+
+func (PenaltyKind) Type() protoreflect.EnumType {
+ return &file_p2psentry_sentry_proto_enumTypes[1]
+}
+
+func (x PenaltyKind) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PenaltyKind.Descriptor instead.
+func (PenaltyKind) EnumDescriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{1}
+}
+
+type Protocol int32
+
+const (
+ Protocol_ETH65 Protocol = 0
+ Protocol_ETH66 Protocol = 1
+ Protocol_ETH67 Protocol = 2
+ Protocol_ETH68 Protocol = 3
+)
+
+// Enum value maps for Protocol.
+var (
+ Protocol_name = map[int32]string{
+ 0: "ETH65",
+ 1: "ETH66",
+ 2: "ETH67",
+ 3: "ETH68",
+ }
+ Protocol_value = map[string]int32{
+ "ETH65": 0,
+ "ETH66": 1,
+ "ETH67": 2,
+ "ETH68": 3,
+ }
+)
+
+func (x Protocol) Enum() *Protocol {
+ p := new(Protocol)
+ *p = x
+ return p
+}
+
+func (x Protocol) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Protocol) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2psentry_sentry_proto_enumTypes[2].Descriptor()
+}
+
+func (Protocol) Type() protoreflect.EnumType {
+ return &file_p2psentry_sentry_proto_enumTypes[2]
+}
+
+func (x Protocol) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Protocol.Descriptor instead.
+func (Protocol) EnumDescriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{2}
+}
+
+type PeerEvent_PeerEventId int32
+
+const (
+ // Happens after after a successful sub-protocol handshake.
+ PeerEvent_Connect PeerEvent_PeerEventId = 0
+ PeerEvent_Disconnect PeerEvent_PeerEventId = 1
+)
+
+// Enum value maps for PeerEvent_PeerEventId.
+var (
+ PeerEvent_PeerEventId_name = map[int32]string{
+ 0: "Connect",
+ 1: "Disconnect",
+ }
+ PeerEvent_PeerEventId_value = map[string]int32{
+ "Connect": 0,
+ "Disconnect": 1,
+ }
+)
+
+func (x PeerEvent_PeerEventId) Enum() *PeerEvent_PeerEventId {
+ p := new(PeerEvent_PeerEventId)
+ *p = x
+ return p
+}
+
+func (x PeerEvent_PeerEventId) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PeerEvent_PeerEventId) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2psentry_sentry_proto_enumTypes[3].Descriptor()
+}
+
+func (PeerEvent_PeerEventId) Type() protoreflect.EnumType {
+ return &file_p2psentry_sentry_proto_enumTypes[3]
+}
+
+func (x PeerEvent_PeerEventId) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PeerEvent_PeerEventId.Descriptor instead.
+func (PeerEvent_PeerEventId) EnumDescriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{21, 0}
+}
+
+type OutboundMessageData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id MessageId `protobuf:"varint,1,opt,name=id,proto3,enum=sentry.MessageId" json:"id,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *OutboundMessageData) Reset() {
+ *x = OutboundMessageData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OutboundMessageData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OutboundMessageData) ProtoMessage() {}
+
+func (x *OutboundMessageData) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OutboundMessageData.ProtoReflect.Descriptor instead.
+func (*OutboundMessageData) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OutboundMessageData) GetId() MessageId {
+ if x != nil {
+ return x.Id
+ }
+ return MessageId_STATUS_65
+}
+
+func (x *OutboundMessageData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type SendMessageByMinBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *OutboundMessageData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ MinBlock uint64 `protobuf:"varint,2,opt,name=min_block,json=minBlock,proto3" json:"min_block,omitempty"`
+ MaxPeers uint64 `protobuf:"varint,3,opt,name=max_peers,json=maxPeers,proto3" json:"max_peers,omitempty"`
+}
+
+func (x *SendMessageByMinBlockRequest) Reset() {
+ *x = SendMessageByMinBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SendMessageByMinBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SendMessageByMinBlockRequest) ProtoMessage() {}
+
+func (x *SendMessageByMinBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SendMessageByMinBlockRequest.ProtoReflect.Descriptor instead.
+func (*SendMessageByMinBlockRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *SendMessageByMinBlockRequest) GetData() *OutboundMessageData {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *SendMessageByMinBlockRequest) GetMinBlock() uint64 {
+ if x != nil {
+ return x.MinBlock
+ }
+ return 0
+}
+
+func (x *SendMessageByMinBlockRequest) GetMaxPeers() uint64 {
+ if x != nil {
+ return x.MaxPeers
+ }
+ return 0
+}
+
+type SendMessageByIdRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *OutboundMessageData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ PeerId *types.H512 `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+}
+
+func (x *SendMessageByIdRequest) Reset() {
+ *x = SendMessageByIdRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SendMessageByIdRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SendMessageByIdRequest) ProtoMessage() {}
+
+func (x *SendMessageByIdRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SendMessageByIdRequest.ProtoReflect.Descriptor instead.
+func (*SendMessageByIdRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SendMessageByIdRequest) GetData() *OutboundMessageData {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *SendMessageByIdRequest) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+type SendMessageToRandomPeersRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Data *OutboundMessageData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ MaxPeers uint64 `protobuf:"varint,2,opt,name=max_peers,json=maxPeers,proto3" json:"max_peers,omitempty"`
+}
+
+func (x *SendMessageToRandomPeersRequest) Reset() {
+ *x = SendMessageToRandomPeersRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SendMessageToRandomPeersRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SendMessageToRandomPeersRequest) ProtoMessage() {}
+
+func (x *SendMessageToRandomPeersRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SendMessageToRandomPeersRequest.ProtoReflect.Descriptor instead.
+func (*SendMessageToRandomPeersRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SendMessageToRandomPeersRequest) GetData() *OutboundMessageData {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *SendMessageToRandomPeersRequest) GetMaxPeers() uint64 {
+ if x != nil {
+ return x.MaxPeers
+ }
+ return 0
+}
+
+type SentPeers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Peers []*types.H512 `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"`
+}
+
+func (x *SentPeers) Reset() {
+ *x = SentPeers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SentPeers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SentPeers) ProtoMessage() {}
+
+func (x *SentPeers) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SentPeers.ProtoReflect.Descriptor instead.
+func (*SentPeers) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *SentPeers) GetPeers() []*types.H512 {
+ if x != nil {
+ return x.Peers
+ }
+ return nil
+}
+
+type PenalizePeerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PeerId *types.H512 `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+ Penalty PenaltyKind `protobuf:"varint,2,opt,name=penalty,proto3,enum=sentry.PenaltyKind" json:"penalty,omitempty"`
+}
+
+func (x *PenalizePeerRequest) Reset() {
+ *x = PenalizePeerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PenalizePeerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PenalizePeerRequest) ProtoMessage() {}
+
+func (x *PenalizePeerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PenalizePeerRequest.ProtoReflect.Descriptor instead.
+func (*PenalizePeerRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *PenalizePeerRequest) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+func (x *PenalizePeerRequest) GetPenalty() PenaltyKind {
+ if x != nil {
+ return x.Penalty
+ }
+ return PenaltyKind_Kick
+}
+
+type PeerMinBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PeerId *types.H512 `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+ MinBlock uint64 `protobuf:"varint,2,opt,name=min_block,json=minBlock,proto3" json:"min_block,omitempty"`
+}
+
+func (x *PeerMinBlockRequest) Reset() {
+ *x = PeerMinBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerMinBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerMinBlockRequest) ProtoMessage() {}
+
+func (x *PeerMinBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerMinBlockRequest.ProtoReflect.Descriptor instead.
+func (*PeerMinBlockRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *PeerMinBlockRequest) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+func (x *PeerMinBlockRequest) GetMinBlock() uint64 {
+ if x != nil {
+ return x.MinBlock
+ }
+ return 0
+}
+
+type AddPeerRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *AddPeerRequest) Reset() {
+ *x = AddPeerRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPeerRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPeerRequest) ProtoMessage() {}
+
+func (x *AddPeerRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPeerRequest.ProtoReflect.Descriptor instead.
+func (*AddPeerRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *AddPeerRequest) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+type InboundMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id MessageId `protobuf:"varint,1,opt,name=id,proto3,enum=sentry.MessageId" json:"id,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ PeerId *types.H512 `protobuf:"bytes,3,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+}
+
+func (x *InboundMessage) Reset() {
+ *x = InboundMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InboundMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InboundMessage) ProtoMessage() {}
+
+func (x *InboundMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InboundMessage.ProtoReflect.Descriptor instead.
+func (*InboundMessage) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *InboundMessage) GetId() MessageId {
+ if x != nil {
+ return x.Id
+ }
+ return MessageId_STATUS_65
+}
+
+func (x *InboundMessage) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *InboundMessage) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+type Forks struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Genesis *types.H256 `protobuf:"bytes,1,opt,name=genesis,proto3" json:"genesis,omitempty"`
+ HeightForks []uint64 `protobuf:"varint,2,rep,packed,name=height_forks,json=heightForks,proto3" json:"height_forks,omitempty"`
+ TimeForks []uint64 `protobuf:"varint,3,rep,packed,name=time_forks,json=timeForks,proto3" json:"time_forks,omitempty"`
+}
+
+func (x *Forks) Reset() {
+ *x = Forks{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Forks) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Forks) ProtoMessage() {}
+
+func (x *Forks) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Forks.ProtoReflect.Descriptor instead.
+func (*Forks) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Forks) GetGenesis() *types.H256 {
+ if x != nil {
+ return x.Genesis
+ }
+ return nil
+}
+
+func (x *Forks) GetHeightForks() []uint64 {
+ if x != nil {
+ return x.HeightForks
+ }
+ return nil
+}
+
+func (x *Forks) GetTimeForks() []uint64 {
+ if x != nil {
+ return x.TimeForks
+ }
+ return nil
+}
+
+type StatusData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NetworkId uint64 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
+ TotalDifficulty *types.H256 `protobuf:"bytes,2,opt,name=total_difficulty,json=totalDifficulty,proto3" json:"total_difficulty,omitempty"`
+ BestHash *types.H256 `protobuf:"bytes,3,opt,name=best_hash,json=bestHash,proto3" json:"best_hash,omitempty"`
+ ForkData *Forks `protobuf:"bytes,4,opt,name=fork_data,json=forkData,proto3" json:"fork_data,omitempty"`
+ MaxBlockHeight uint64 `protobuf:"varint,5,opt,name=max_block_height,json=maxBlockHeight,proto3" json:"max_block_height,omitempty"`
+ MaxBlockTime uint64 `protobuf:"varint,6,opt,name=max_block_time,json=maxBlockTime,proto3" json:"max_block_time,omitempty"`
+}
+
+func (x *StatusData) Reset() {
+ *x = StatusData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusData) ProtoMessage() {}
+
+func (x *StatusData) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusData.ProtoReflect.Descriptor instead.
+func (*StatusData) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *StatusData) GetNetworkId() uint64 {
+ if x != nil {
+ return x.NetworkId
+ }
+ return 0
+}
+
+func (x *StatusData) GetTotalDifficulty() *types.H256 {
+ if x != nil {
+ return x.TotalDifficulty
+ }
+ return nil
+}
+
+func (x *StatusData) GetBestHash() *types.H256 {
+ if x != nil {
+ return x.BestHash
+ }
+ return nil
+}
+
+func (x *StatusData) GetForkData() *Forks {
+ if x != nil {
+ return x.ForkData
+ }
+ return nil
+}
+
+func (x *StatusData) GetMaxBlockHeight() uint64 {
+ if x != nil {
+ return x.MaxBlockHeight
+ }
+ return 0
+}
+
+func (x *StatusData) GetMaxBlockTime() uint64 {
+ if x != nil {
+ return x.MaxBlockTime
+ }
+ return 0
+}
+
+type SetStatusReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *SetStatusReply) Reset() {
+ *x = SetStatusReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetStatusReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetStatusReply) ProtoMessage() {}
+
+func (x *SetStatusReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetStatusReply.ProtoReflect.Descriptor instead.
+func (*SetStatusReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{11}
+}
+
+type HandShakeReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Protocol Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=sentry.Protocol" json:"protocol,omitempty"`
+}
+
+func (x *HandShakeReply) Reset() {
+ *x = HandShakeReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HandShakeReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HandShakeReply) ProtoMessage() {}
+
+func (x *HandShakeReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HandShakeReply.ProtoReflect.Descriptor instead.
+func (*HandShakeReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *HandShakeReply) GetProtocol() Protocol {
+ if x != nil {
+ return x.Protocol
+ }
+ return Protocol_ETH65
+}
+
+type MessagesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ids []MessageId `protobuf:"varint,1,rep,packed,name=ids,proto3,enum=sentry.MessageId" json:"ids,omitempty"`
+}
+
+func (x *MessagesRequest) Reset() {
+ *x = MessagesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MessagesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessagesRequest) ProtoMessage() {}
+
+func (x *MessagesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessagesRequest.ProtoReflect.Descriptor instead.
+func (*MessagesRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *MessagesRequest) GetIds() []MessageId {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type PeersReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Peers []*types.PeerInfo `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"`
+}
+
+func (x *PeersReply) Reset() {
+ *x = PeersReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeersReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeersReply) ProtoMessage() {}
+
+func (x *PeersReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeersReply.ProtoReflect.Descriptor instead.
+func (*PeersReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *PeersReply) GetPeers() []*types.PeerInfo {
+ if x != nil {
+ return x.Peers
+ }
+ return nil
+}
+
+type PeerCountRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *PeerCountRequest) Reset() {
+ *x = PeerCountRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerCountRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerCountRequest) ProtoMessage() {}
+
+func (x *PeerCountRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerCountRequest.ProtoReflect.Descriptor instead.
+func (*PeerCountRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{15}
+}
+
+type PeerCountPerProtocol struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Protocol Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=sentry.Protocol" json:"protocol,omitempty"`
+ Count uint64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *PeerCountPerProtocol) Reset() {
+ *x = PeerCountPerProtocol{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerCountPerProtocol) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerCountPerProtocol) ProtoMessage() {}
+
+func (x *PeerCountPerProtocol) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerCountPerProtocol.ProtoReflect.Descriptor instead.
+func (*PeerCountPerProtocol) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *PeerCountPerProtocol) GetProtocol() Protocol {
+ if x != nil {
+ return x.Protocol
+ }
+ return Protocol_ETH65
+}
+
+func (x *PeerCountPerProtocol) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type PeerCountReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ CountsPerProtocol []*PeerCountPerProtocol `protobuf:"bytes,2,rep,name=counts_per_protocol,json=countsPerProtocol,proto3" json:"counts_per_protocol,omitempty"`
+}
+
+func (x *PeerCountReply) Reset() {
+ *x = PeerCountReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerCountReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerCountReply) ProtoMessage() {}
+
+func (x *PeerCountReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerCountReply.ProtoReflect.Descriptor instead.
+func (*PeerCountReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *PeerCountReply) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *PeerCountReply) GetCountsPerProtocol() []*PeerCountPerProtocol {
+ if x != nil {
+ return x.CountsPerProtocol
+ }
+ return nil
+}
+
+type PeerByIdRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PeerId *types.H512 `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+}
+
+func (x *PeerByIdRequest) Reset() {
+ *x = PeerByIdRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerByIdRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerByIdRequest) ProtoMessage() {}
+
+func (x *PeerByIdRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerByIdRequest.ProtoReflect.Descriptor instead.
+func (*PeerByIdRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *PeerByIdRequest) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+type PeerByIdReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Peer *types.PeerInfo `protobuf:"bytes,1,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+}
+
+func (x *PeerByIdReply) Reset() {
+ *x = PeerByIdReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerByIdReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerByIdReply) ProtoMessage() {}
+
+func (x *PeerByIdReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerByIdReply.ProtoReflect.Descriptor instead.
+func (*PeerByIdReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *PeerByIdReply) GetPeer() *types.PeerInfo {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+type PeerEventsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *PeerEventsRequest) Reset() {
+ *x = PeerEventsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerEventsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerEventsRequest) ProtoMessage() {}
+
+func (x *PeerEventsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerEventsRequest.ProtoReflect.Descriptor instead.
+func (*PeerEventsRequest) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{20}
+}
+
+type PeerEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PeerId *types.H512 `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+ EventId PeerEvent_PeerEventId `protobuf:"varint,2,opt,name=event_id,json=eventId,proto3,enum=sentry.PeerEvent_PeerEventId" json:"event_id,omitempty"`
+}
+
+func (x *PeerEvent) Reset() {
+ *x = PeerEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerEvent) ProtoMessage() {}
+
+func (x *PeerEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerEvent.ProtoReflect.Descriptor instead.
+func (*PeerEvent) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *PeerEvent) GetPeerId() *types.H512 {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+func (x *PeerEvent) GetEventId() PeerEvent_PeerEventId {
+ if x != nil {
+ return x.EventId
+ }
+ return PeerEvent_Connect
+}
+
+type AddPeerReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
+}
+
+func (x *AddPeerReply) Reset() {
+ *x = AddPeerReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_p2psentry_sentry_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPeerReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPeerReply) ProtoMessage() {}
+
+func (x *AddPeerReply) ProtoReflect() protoreflect.Message {
+ mi := &file_p2psentry_sentry_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPeerReply.ProtoReflect.Descriptor instead.
+func (*AddPeerReply) Descriptor() ([]byte, []int) {
+ return file_p2psentry_sentry_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *AddPeerReply) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+
+var File_p2psentry_sentry_proto protoreflect.FileDescriptor
+
+var file_p2psentry_sentry_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x70, 0x32, 0x70, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x73, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x4c, 0x0a, 0x13, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x89,
+ 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79,
+ 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2f, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x0a,
+ 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x6f, 0x0a, 0x16, 0x53, 0x65,
+ 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x62,
+ 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x6f, 0x0a, 0x1f, 0x53,
+ 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64,
+ 0x6f, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x2e, 0x0a, 0x09,
+ 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x65, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x22, 0x6a, 0x0a, 0x13,
+ 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31,
+ 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x70, 0x65, 0x6e,
+ 0x61, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x73, 0x65, 0x6e,
+ 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x4b, 0x69, 0x6e, 0x64, 0x52,
+ 0x07, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x58, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72,
+ 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70,
+ 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x22, 0x22, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x6d, 0x0a, 0x0e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e,
+ 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70,
+ 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x70, 0x0a, 0x05, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x25,
+ 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x07, 0x67, 0x65,
+ 0x6e, 0x65, 0x73, 0x69, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f,
+ 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x69,
+ 0x67, 0x68, 0x74, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64,
+ 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0f, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x28, 0x0a,
+ 0x09, 0x62, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x08, 0x62,
+ 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x6b, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x65, 0x6e,
+ 0x74, 0x72, 0x79, 0x2e, 0x46, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x6b, 0x44,
+ 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d,
+ 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a,
+ 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54,
+ 0x69, 0x6d, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x3e, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61,
+ 0x6b, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x36, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x33, 0x0a,
+ 0x0a, 0x50, 0x65, 0x65, 0x72, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x70,
+ 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65,
+ 0x72, 0x73, 0x22, 0x12, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2c,
+ 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x10, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0x74, 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x13, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x11, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x50, 0x65, 0x72,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x37, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72,
+ 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70,
+ 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49,
+ 0x64, 0x22, 0x42, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66,
+ 0x6f, 0x48, 0x00, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05,
+ 0x5f, 0x70, 0x65, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x97, 0x01, 0x0a, 0x09, 0x50,
+ 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x38,
+ 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x1d, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x52,
+ 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x2a, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x10, 0x01, 0x22, 0x28, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x2a, 0x80,
+ 0x06, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x0d, 0x0a, 0x09,
+ 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x47,
+ 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53,
+ 0x5f, 0x36, 0x35, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48,
+ 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x42,
+ 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x03,
+ 0x12, 0x17, 0x0a, 0x13, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f,
+ 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f,
+ 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x05, 0x12, 0x14,
+ 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f,
+ 0x36, 0x35, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54,
+ 0x41, 0x5f, 0x36, 0x35, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45,
+ 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x52,
+ 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x09, 0x12, 0x17, 0x0a, 0x13,
+ 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53,
+ 0x5f, 0x36, 0x35, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f,
+ 0x43, 0x4b, 0x5f, 0x36, 0x35, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x41, 0x4e, 0x53,
+ 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0c, 0x12, 0x24, 0x0a, 0x20,
+ 0x4e, 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53,
+ 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x35,
+ 0x10, 0x0d, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44,
+ 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35,
+ 0x10, 0x0e, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41,
+ 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x35, 0x10, 0x0f, 0x12, 0x0d,
+ 0x0a, 0x09, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x11, 0x12, 0x17, 0x0a,
+ 0x13, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45,
+ 0x53, 0x5f, 0x36, 0x36, 0x10, 0x12, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x45, 0x57, 0x5f, 0x42, 0x4c,
+ 0x4f, 0x43, 0x4b, 0x5f, 0x36, 0x36, 0x10, 0x13, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x52, 0x41, 0x4e,
+ 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x14, 0x12, 0x24, 0x0a,
+ 0x20, 0x4e, 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e,
+ 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36,
+ 0x36, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
+ 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x16, 0x12, 0x17, 0x0a,
+ 0x13, 0x47, 0x45, 0x54, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45,
+ 0x53, 0x5f, 0x36, 0x36, 0x10, 0x17, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, 0x4e, 0x4f,
+ 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x18, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x36, 0x10,
+ 0x19, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f,
+ 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10,
+ 0x1a, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45,
+ 0x52, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1b, 0x12, 0x13, 0x0a, 0x0f, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
+ 0x5f, 0x42, 0x4f, 0x44, 0x49, 0x45, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1c, 0x12, 0x10, 0x0a, 0x0c,
+ 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x36, 0x36, 0x10, 0x1d, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x45, 0x49, 0x50, 0x54, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1e, 0x12,
+ 0x1a, 0x0a, 0x16, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41,
+ 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x36, 0x36, 0x10, 0x1f, 0x12, 0x24, 0x0a, 0x20, 0x4e,
+ 0x45, 0x57, 0x5f, 0x50, 0x4f, 0x4f, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41,
+ 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x45, 0x53, 0x5f, 0x36, 0x38, 0x10,
+ 0x20, 0x2a, 0x17, 0x0a, 0x0b, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x4b, 0x69, 0x6e, 0x64,
+ 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x69, 0x63, 0x6b, 0x10, 0x00, 0x2a, 0x36, 0x0a, 0x08, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x35, 0x10,
+ 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x36, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05,
+ 0x45, 0x54, 0x48, 0x36, 0x37, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x54, 0x48, 0x36, 0x38,
+ 0x10, 0x03, 0x32, 0xdc, 0x07, 0x0a, 0x06, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x37, 0x0a,
+ 0x09, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x2e, 0x73, 0x65, 0x6e,
+ 0x74, 0x72, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x43, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69,
+ 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+ 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0c, 0x50,
+ 0x65, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x73, 0x65,
+ 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4d, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x12, 0x3b, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48,
+ 0x61, 0x6e, 0x64, 0x53, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x50, 0x0a,
+ 0x15, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x4d, 0x69,
+ 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x24, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+ 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x4d, 0x69, 0x6e,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12,
+ 0x44, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79,
+ 0x49, 0x64, 0x12, 0x1e, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x64,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74,
+ 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x65, 0x65, 0x72,
+ 0x73, 0x12, 0x27, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x65,
+ 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e,
+ 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x42, 0x0a,
+ 0x10, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x41, 0x6c,
+ 0x6c, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x62, 0x6f,
+ 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x11,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72,
+ 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x17, 0x2e,
+ 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+ 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x30, 0x01,
+ 0x12, 0x33, 0x0a, 0x05, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x1a, 0x12, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64,
+ 0x12, 0x17, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79,
+ 0x49, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x42, 0x79, 0x49, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x3c, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x19,
+ 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, 0x12, 0x37,
+ 0x0a, 0x07, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x73, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x14, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65,
+ 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x3b, 0x73, 0x65,
+ 0x6e, 0x74, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_p2psentry_sentry_proto_rawDescOnce sync.Once
+ file_p2psentry_sentry_proto_rawDescData = file_p2psentry_sentry_proto_rawDesc
+)
+
+func file_p2psentry_sentry_proto_rawDescGZIP() []byte {
+ file_p2psentry_sentry_proto_rawDescOnce.Do(func() {
+ file_p2psentry_sentry_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2psentry_sentry_proto_rawDescData)
+ })
+ return file_p2psentry_sentry_proto_rawDescData
+}
+
+var file_p2psentry_sentry_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_p2psentry_sentry_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_p2psentry_sentry_proto_goTypes = []interface{}{
+ (MessageId)(0), // 0: sentry.MessageId
+ (PenaltyKind)(0), // 1: sentry.PenaltyKind
+ (Protocol)(0), // 2: sentry.Protocol
+ (PeerEvent_PeerEventId)(0), // 3: sentry.PeerEvent.PeerEventId
+ (*OutboundMessageData)(nil), // 4: sentry.OutboundMessageData
+ (*SendMessageByMinBlockRequest)(nil), // 5: sentry.SendMessageByMinBlockRequest
+ (*SendMessageByIdRequest)(nil), // 6: sentry.SendMessageByIdRequest
+ (*SendMessageToRandomPeersRequest)(nil), // 7: sentry.SendMessageToRandomPeersRequest
+ (*SentPeers)(nil), // 8: sentry.SentPeers
+ (*PenalizePeerRequest)(nil), // 9: sentry.PenalizePeerRequest
+ (*PeerMinBlockRequest)(nil), // 10: sentry.PeerMinBlockRequest
+ (*AddPeerRequest)(nil), // 11: sentry.AddPeerRequest
+ (*InboundMessage)(nil), // 12: sentry.InboundMessage
+ (*Forks)(nil), // 13: sentry.Forks
+ (*StatusData)(nil), // 14: sentry.StatusData
+ (*SetStatusReply)(nil), // 15: sentry.SetStatusReply
+ (*HandShakeReply)(nil), // 16: sentry.HandShakeReply
+ (*MessagesRequest)(nil), // 17: sentry.MessagesRequest
+ (*PeersReply)(nil), // 18: sentry.PeersReply
+ (*PeerCountRequest)(nil), // 19: sentry.PeerCountRequest
+ (*PeerCountPerProtocol)(nil), // 20: sentry.PeerCountPerProtocol
+ (*PeerCountReply)(nil), // 21: sentry.PeerCountReply
+ (*PeerByIdRequest)(nil), // 22: sentry.PeerByIdRequest
+ (*PeerByIdReply)(nil), // 23: sentry.PeerByIdReply
+ (*PeerEventsRequest)(nil), // 24: sentry.PeerEventsRequest
+ (*PeerEvent)(nil), // 25: sentry.PeerEvent
+ (*AddPeerReply)(nil), // 26: sentry.AddPeerReply
+ (*types.H512)(nil), // 27: types.H512
+ (*types.H256)(nil), // 28: types.H256
+ (*types.PeerInfo)(nil), // 29: types.PeerInfo
+ (*emptypb.Empty)(nil), // 30: google.protobuf.Empty
+ (*types.NodeInfoReply)(nil), // 31: types.NodeInfoReply
+}
+var file_p2psentry_sentry_proto_depIdxs = []int32{
+ 0, // 0: sentry.OutboundMessageData.id:type_name -> sentry.MessageId
+ 4, // 1: sentry.SendMessageByMinBlockRequest.data:type_name -> sentry.OutboundMessageData
+ 4, // 2: sentry.SendMessageByIdRequest.data:type_name -> sentry.OutboundMessageData
+ 27, // 3: sentry.SendMessageByIdRequest.peer_id:type_name -> types.H512
+ 4, // 4: sentry.SendMessageToRandomPeersRequest.data:type_name -> sentry.OutboundMessageData
+ 27, // 5: sentry.SentPeers.peers:type_name -> types.H512
+ 27, // 6: sentry.PenalizePeerRequest.peer_id:type_name -> types.H512
+ 1, // 7: sentry.PenalizePeerRequest.penalty:type_name -> sentry.PenaltyKind
+ 27, // 8: sentry.PeerMinBlockRequest.peer_id:type_name -> types.H512
+ 0, // 9: sentry.InboundMessage.id:type_name -> sentry.MessageId
+ 27, // 10: sentry.InboundMessage.peer_id:type_name -> types.H512
+ 28, // 11: sentry.Forks.genesis:type_name -> types.H256
+ 28, // 12: sentry.StatusData.total_difficulty:type_name -> types.H256
+ 28, // 13: sentry.StatusData.best_hash:type_name -> types.H256
+ 13, // 14: sentry.StatusData.fork_data:type_name -> sentry.Forks
+ 2, // 15: sentry.HandShakeReply.protocol:type_name -> sentry.Protocol
+ 0, // 16: sentry.MessagesRequest.ids:type_name -> sentry.MessageId
+ 29, // 17: sentry.PeersReply.peers:type_name -> types.PeerInfo
+ 2, // 18: sentry.PeerCountPerProtocol.protocol:type_name -> sentry.Protocol
+ 20, // 19: sentry.PeerCountReply.counts_per_protocol:type_name -> sentry.PeerCountPerProtocol
+ 27, // 20: sentry.PeerByIdRequest.peer_id:type_name -> types.H512
+ 29, // 21: sentry.PeerByIdReply.peer:type_name -> types.PeerInfo
+ 27, // 22: sentry.PeerEvent.peer_id:type_name -> types.H512
+ 3, // 23: sentry.PeerEvent.event_id:type_name -> sentry.PeerEvent.PeerEventId
+ 14, // 24: sentry.Sentry.SetStatus:input_type -> sentry.StatusData
+ 9, // 25: sentry.Sentry.PenalizePeer:input_type -> sentry.PenalizePeerRequest
+ 10, // 26: sentry.Sentry.PeerMinBlock:input_type -> sentry.PeerMinBlockRequest
+ 30, // 27: sentry.Sentry.HandShake:input_type -> google.protobuf.Empty
+ 5, // 28: sentry.Sentry.SendMessageByMinBlock:input_type -> sentry.SendMessageByMinBlockRequest
+ 6, // 29: sentry.Sentry.SendMessageById:input_type -> sentry.SendMessageByIdRequest
+ 7, // 30: sentry.Sentry.SendMessageToRandomPeers:input_type -> sentry.SendMessageToRandomPeersRequest
+ 4, // 31: sentry.Sentry.SendMessageToAll:input_type -> sentry.OutboundMessageData
+ 17, // 32: sentry.Sentry.Messages:input_type -> sentry.MessagesRequest
+ 30, // 33: sentry.Sentry.Peers:input_type -> google.protobuf.Empty
+ 19, // 34: sentry.Sentry.PeerCount:input_type -> sentry.PeerCountRequest
+ 22, // 35: sentry.Sentry.PeerById:input_type -> sentry.PeerByIdRequest
+ 24, // 36: sentry.Sentry.PeerEvents:input_type -> sentry.PeerEventsRequest
+ 11, // 37: sentry.Sentry.AddPeer:input_type -> sentry.AddPeerRequest
+ 30, // 38: sentry.Sentry.NodeInfo:input_type -> google.protobuf.Empty
+ 15, // 39: sentry.Sentry.SetStatus:output_type -> sentry.SetStatusReply
+ 30, // 40: sentry.Sentry.PenalizePeer:output_type -> google.protobuf.Empty
+ 30, // 41: sentry.Sentry.PeerMinBlock:output_type -> google.protobuf.Empty
+ 16, // 42: sentry.Sentry.HandShake:output_type -> sentry.HandShakeReply
+ 8, // 43: sentry.Sentry.SendMessageByMinBlock:output_type -> sentry.SentPeers
+ 8, // 44: sentry.Sentry.SendMessageById:output_type -> sentry.SentPeers
+ 8, // 45: sentry.Sentry.SendMessageToRandomPeers:output_type -> sentry.SentPeers
+ 8, // 46: sentry.Sentry.SendMessageToAll:output_type -> sentry.SentPeers
+ 12, // 47: sentry.Sentry.Messages:output_type -> sentry.InboundMessage
+ 18, // 48: sentry.Sentry.Peers:output_type -> sentry.PeersReply
+ 21, // 49: sentry.Sentry.PeerCount:output_type -> sentry.PeerCountReply
+ 23, // 50: sentry.Sentry.PeerById:output_type -> sentry.PeerByIdReply
+ 25, // 51: sentry.Sentry.PeerEvents:output_type -> sentry.PeerEvent
+ 26, // 52: sentry.Sentry.AddPeer:output_type -> sentry.AddPeerReply
+ 31, // 53: sentry.Sentry.NodeInfo:output_type -> types.NodeInfoReply
+ 39, // [39:54] is the sub-list for method output_type
+ 24, // [24:39] is the sub-list for method input_type
+ 24, // [24:24] is the sub-list for extension type_name
+ 24, // [24:24] is the sub-list for extension extendee
+ 0, // [0:24] is the sub-list for field type_name
+}
+
+func init() { file_p2psentry_sentry_proto_init() }
+func file_p2psentry_sentry_proto_init() {
+ if File_p2psentry_sentry_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_p2psentry_sentry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OutboundMessageData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SendMessageByMinBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SendMessageByIdRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SendMessageToRandomPeersRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SentPeers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PenalizePeerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerMinBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPeerRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InboundMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Forks); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetStatusReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HandShakeReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MessagesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeersReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerCountRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerCountPerProtocol); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerCountReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerByIdRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerByIdReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerEventsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerEvent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPeerReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_p2psentry_sentry_proto_msgTypes[19].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_p2psentry_sentry_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_p2psentry_sentry_proto_goTypes,
+ DependencyIndexes: file_p2psentry_sentry_proto_depIdxs,
+ EnumInfos: file_p2psentry_sentry_proto_enumTypes,
+ MessageInfos: file_p2psentry_sentry_proto_msgTypes,
+ }.Build()
+ File_p2psentry_sentry_proto = out.File
+ file_p2psentry_sentry_proto_rawDesc = nil
+ file_p2psentry_sentry_proto_goTypes = nil
+ file_p2psentry_sentry_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go b/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go
new file mode 100644
index 00000000000..1a9d1959b5c
--- /dev/null
+++ b/erigon-lib/gointerfaces/sentry/sentry_grpc.pb.go
@@ -0,0 +1,700 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: p2psentry/sentry.proto
+
+package sentry
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Sentry_SetStatus_FullMethodName = "/sentry.Sentry/SetStatus"
+ Sentry_PenalizePeer_FullMethodName = "/sentry.Sentry/PenalizePeer"
+ Sentry_PeerMinBlock_FullMethodName = "/sentry.Sentry/PeerMinBlock"
+ Sentry_HandShake_FullMethodName = "/sentry.Sentry/HandShake"
+ Sentry_SendMessageByMinBlock_FullMethodName = "/sentry.Sentry/SendMessageByMinBlock"
+ Sentry_SendMessageById_FullMethodName = "/sentry.Sentry/SendMessageById"
+ Sentry_SendMessageToRandomPeers_FullMethodName = "/sentry.Sentry/SendMessageToRandomPeers"
+ Sentry_SendMessageToAll_FullMethodName = "/sentry.Sentry/SendMessageToAll"
+ Sentry_Messages_FullMethodName = "/sentry.Sentry/Messages"
+ Sentry_Peers_FullMethodName = "/sentry.Sentry/Peers"
+ Sentry_PeerCount_FullMethodName = "/sentry.Sentry/PeerCount"
+ Sentry_PeerById_FullMethodName = "/sentry.Sentry/PeerById"
+ Sentry_PeerEvents_FullMethodName = "/sentry.Sentry/PeerEvents"
+ Sentry_AddPeer_FullMethodName = "/sentry.Sentry/AddPeer"
+ Sentry_NodeInfo_FullMethodName = "/sentry.Sentry/NodeInfo"
+)
+
+// SentryClient is the client API for Sentry service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type SentryClient interface {
+ // SetStatus - force new ETH client state of sentry - network_id, max_block, etc...
+ SetStatus(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error)
+ PenalizePeer(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ PeerMinBlock(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // HandShake - pre-requirement for all Send* methods - returns list of ETH protocol versions,
+ // without knowledge of protocol - impossible encode correct P2P message
+ HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error)
+ SendMessageByMinBlock(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error)
+ SendMessageById(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error)
+ SendMessageToRandomPeers(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error)
+ SendMessageToAll(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error)
+ // Subscribe to receive messages.
+ // Calling multiple times with a different set of ids starts separate streams.
+ // It is possible to subscribe to the same set if ids more than once.
+ Messages(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error)
+ Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error)
+ PeerCount(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error)
+ PeerById(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error)
+ // Subscribe to notifications about connected or lost peers.
+ PeerEvents(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error)
+ AddPeer(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error)
+ // NodeInfo returns a collection of metadata known about the host.
+ NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error)
+}
+
+type sentryClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSentryClient(cc grpc.ClientConnInterface) SentryClient {
+ return &sentryClient{cc}
+}
+
+func (c *sentryClient) SetStatus(ctx context.Context, in *StatusData, opts ...grpc.CallOption) (*SetStatusReply, error) {
+ out := new(SetStatusReply)
+ err := c.cc.Invoke(ctx, Sentry_SetStatus_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) PenalizePeer(ctx context.Context, in *PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Sentry_PenalizePeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) PeerMinBlock(ctx context.Context, in *PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, Sentry_PeerMinBlock_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HandShakeReply, error) {
+ out := new(HandShakeReply)
+ err := c.cc.Invoke(ctx, Sentry_HandShake_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) SendMessageByMinBlock(ctx context.Context, in *SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ out := new(SentPeers)
+ err := c.cc.Invoke(ctx, Sentry_SendMessageByMinBlock_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) SendMessageById(ctx context.Context, in *SendMessageByIdRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ out := new(SentPeers)
+ err := c.cc.Invoke(ctx, Sentry_SendMessageById_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) SendMessageToRandomPeers(ctx context.Context, in *SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*SentPeers, error) {
+ out := new(SentPeers)
+ err := c.cc.Invoke(ctx, Sentry_SendMessageToRandomPeers_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) SendMessageToAll(ctx context.Context, in *OutboundMessageData, opts ...grpc.CallOption) (*SentPeers, error) {
+ out := new(SentPeers)
+ err := c.cc.Invoke(ctx, Sentry_SendMessageToAll_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) Messages(ctx context.Context, in *MessagesRequest, opts ...grpc.CallOption) (Sentry_MessagesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[0], Sentry_Messages_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &sentryMessagesClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Sentry_MessagesClient interface {
+ Recv() (*InboundMessage, error)
+ grpc.ClientStream
+}
+
+type sentryMessagesClient struct {
+ grpc.ClientStream
+}
+
+func (x *sentryMessagesClient) Recv() (*InboundMessage, error) {
+ m := new(InboundMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *sentryClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PeersReply, error) {
+ out := new(PeersReply)
+ err := c.cc.Invoke(ctx, Sentry_Peers_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) PeerCount(ctx context.Context, in *PeerCountRequest, opts ...grpc.CallOption) (*PeerCountReply, error) {
+ out := new(PeerCountReply)
+ err := c.cc.Invoke(ctx, Sentry_PeerCount_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) PeerById(ctx context.Context, in *PeerByIdRequest, opts ...grpc.CallOption) (*PeerByIdReply, error) {
+ out := new(PeerByIdReply)
+ err := c.cc.Invoke(ctx, Sentry_PeerById_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) PeerEvents(ctx context.Context, in *PeerEventsRequest, opts ...grpc.CallOption) (Sentry_PeerEventsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Sentry_ServiceDesc.Streams[1], Sentry_PeerEvents_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &sentryPeerEventsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Sentry_PeerEventsClient interface {
+ Recv() (*PeerEvent, error)
+ grpc.ClientStream
+}
+
+type sentryPeerEventsClient struct {
+ grpc.ClientStream
+}
+
+func (x *sentryPeerEventsClient) Recv() (*PeerEvent, error) {
+ m := new(PeerEvent)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *sentryClient) AddPeer(ctx context.Context, in *AddPeerRequest, opts ...grpc.CallOption) (*AddPeerReply, error) {
+ out := new(AddPeerReply)
+ err := c.cc.Invoke(ctx, Sentry_AddPeer_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *sentryClient) NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.NodeInfoReply, error) {
+ out := new(types.NodeInfoReply)
+ err := c.cc.Invoke(ctx, Sentry_NodeInfo_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SentryServer is the server API for Sentry service.
+// All implementations must embed UnimplementedSentryServer
+// for forward compatibility
+type SentryServer interface {
+ // SetStatus - force new ETH client state of sentry - network_id, max_block, etc...
+ SetStatus(context.Context, *StatusData) (*SetStatusReply, error)
+ PenalizePeer(context.Context, *PenalizePeerRequest) (*emptypb.Empty, error)
+ PeerMinBlock(context.Context, *PeerMinBlockRequest) (*emptypb.Empty, error)
+ // HandShake - pre-requirement for all Send* methods - returns list of ETH protocol versions,
+ // without knowledge of protocol - impossible encode correct P2P message
+ HandShake(context.Context, *emptypb.Empty) (*HandShakeReply, error)
+ SendMessageByMinBlock(context.Context, *SendMessageByMinBlockRequest) (*SentPeers, error)
+ SendMessageById(context.Context, *SendMessageByIdRequest) (*SentPeers, error)
+ SendMessageToRandomPeers(context.Context, *SendMessageToRandomPeersRequest) (*SentPeers, error)
+ SendMessageToAll(context.Context, *OutboundMessageData) (*SentPeers, error)
+ // Subscribe to receive messages.
+ // Calling multiple times with a different set of ids starts separate streams.
+ // It is possible to subscribe to the same set if ids more than once.
+ Messages(*MessagesRequest, Sentry_MessagesServer) error
+ Peers(context.Context, *emptypb.Empty) (*PeersReply, error)
+ PeerCount(context.Context, *PeerCountRequest) (*PeerCountReply, error)
+ PeerById(context.Context, *PeerByIdRequest) (*PeerByIdReply, error)
+ // Subscribe to notifications about connected or lost peers.
+ PeerEvents(*PeerEventsRequest, Sentry_PeerEventsServer) error
+ AddPeer(context.Context, *AddPeerRequest) (*AddPeerReply, error)
+ // NodeInfo returns a collection of metadata known about the host.
+ NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error)
+ mustEmbedUnimplementedSentryServer()
+}
+
+// UnimplementedSentryServer must be embedded to have forward compatible implementations.
+type UnimplementedSentryServer struct {
+}
+
+func (UnimplementedSentryServer) SetStatus(context.Context, *StatusData) (*SetStatusReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetStatus not implemented")
+}
+func (UnimplementedSentryServer) PenalizePeer(context.Context, *PenalizePeerRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PenalizePeer not implemented")
+}
+func (UnimplementedSentryServer) PeerMinBlock(context.Context, *PeerMinBlockRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PeerMinBlock not implemented")
+}
+func (UnimplementedSentryServer) HandShake(context.Context, *emptypb.Empty) (*HandShakeReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HandShake not implemented")
+}
+func (UnimplementedSentryServer) SendMessageByMinBlock(context.Context, *SendMessageByMinBlockRequest) (*SentPeers, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendMessageByMinBlock not implemented")
+}
+func (UnimplementedSentryServer) SendMessageById(context.Context, *SendMessageByIdRequest) (*SentPeers, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendMessageById not implemented")
+}
+func (UnimplementedSentryServer) SendMessageToRandomPeers(context.Context, *SendMessageToRandomPeersRequest) (*SentPeers, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendMessageToRandomPeers not implemented")
+}
+func (UnimplementedSentryServer) SendMessageToAll(context.Context, *OutboundMessageData) (*SentPeers, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendMessageToAll not implemented")
+}
+func (UnimplementedSentryServer) Messages(*MessagesRequest, Sentry_MessagesServer) error {
+ return status.Errorf(codes.Unimplemented, "method Messages not implemented")
+}
+func (UnimplementedSentryServer) Peers(context.Context, *emptypb.Empty) (*PeersReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Peers not implemented")
+}
+func (UnimplementedSentryServer) PeerCount(context.Context, *PeerCountRequest) (*PeerCountReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PeerCount not implemented")
+}
+func (UnimplementedSentryServer) PeerById(context.Context, *PeerByIdRequest) (*PeerByIdReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PeerById not implemented")
+}
+func (UnimplementedSentryServer) PeerEvents(*PeerEventsRequest, Sentry_PeerEventsServer) error {
+ return status.Errorf(codes.Unimplemented, "method PeerEvents not implemented")
+}
+func (UnimplementedSentryServer) AddPeer(context.Context, *AddPeerRequest) (*AddPeerReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddPeer not implemented")
+}
+func (UnimplementedSentryServer) NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented")
+}
+func (UnimplementedSentryServer) mustEmbedUnimplementedSentryServer() {}
+
+// UnsafeSentryServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to SentryServer will
+// result in compilation errors.
+type UnsafeSentryServer interface {
+ mustEmbedUnimplementedSentryServer()
+}
+
+func RegisterSentryServer(s grpc.ServiceRegistrar, srv SentryServer) {
+ s.RegisterService(&Sentry_ServiceDesc, srv)
+}
+
+func _Sentry_SetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusData)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).SetStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_SetStatus_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).SetStatus(ctx, req.(*StatusData))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_PenalizePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PenalizePeerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).PenalizePeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_PenalizePeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).PenalizePeer(ctx, req.(*PenalizePeerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_PeerMinBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PeerMinBlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).PeerMinBlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_PeerMinBlock_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).PeerMinBlock(ctx, req.(*PeerMinBlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_HandShake_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).HandShake(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_HandShake_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).HandShake(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_SendMessageByMinBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendMessageByMinBlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).SendMessageByMinBlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_SendMessageByMinBlock_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).SendMessageByMinBlock(ctx, req.(*SendMessageByMinBlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_SendMessageById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendMessageByIdRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).SendMessageById(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_SendMessageById_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).SendMessageById(ctx, req.(*SendMessageByIdRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_SendMessageToRandomPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendMessageToRandomPeersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).SendMessageToRandomPeers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_SendMessageToRandomPeers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).SendMessageToRandomPeers(ctx, req.(*SendMessageToRandomPeersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_SendMessageToAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(OutboundMessageData)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).SendMessageToAll(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_SendMessageToAll_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).SendMessageToAll(ctx, req.(*OutboundMessageData))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_Messages_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(MessagesRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SentryServer).Messages(m, &sentryMessagesServer{stream})
+}
+
+type Sentry_MessagesServer interface {
+ Send(*InboundMessage) error
+ grpc.ServerStream
+}
+
+type sentryMessagesServer struct {
+ grpc.ServerStream
+}
+
+func (x *sentryMessagesServer) Send(m *InboundMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Sentry_Peers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).Peers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_Peers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).Peers(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_PeerCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PeerCountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).PeerCount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_PeerCount_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).PeerCount(ctx, req.(*PeerCountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_PeerById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PeerByIdRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).PeerById(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_PeerById_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).PeerById(ctx, req.(*PeerByIdRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_PeerEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(PeerEventsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SentryServer).PeerEvents(m, &sentryPeerEventsServer{stream})
+}
+
+type Sentry_PeerEventsServer interface {
+ Send(*PeerEvent) error
+ grpc.ServerStream
+}
+
+type sentryPeerEventsServer struct {
+ grpc.ServerStream
+}
+
+func (x *sentryPeerEventsServer) Send(m *PeerEvent) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Sentry_AddPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddPeerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).AddPeer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_AddPeer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).AddPeer(ctx, req.(*AddPeerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Sentry_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SentryServer).NodeInfo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Sentry_NodeInfo_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SentryServer).NodeInfo(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Sentry_ServiceDesc is the grpc.ServiceDesc for Sentry service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Sentry_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "sentry.Sentry",
+ HandlerType: (*SentryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "SetStatus",
+ Handler: _Sentry_SetStatus_Handler,
+ },
+ {
+ MethodName: "PenalizePeer",
+ Handler: _Sentry_PenalizePeer_Handler,
+ },
+ {
+ MethodName: "PeerMinBlock",
+ Handler: _Sentry_PeerMinBlock_Handler,
+ },
+ {
+ MethodName: "HandShake",
+ Handler: _Sentry_HandShake_Handler,
+ },
+ {
+ MethodName: "SendMessageByMinBlock",
+ Handler: _Sentry_SendMessageByMinBlock_Handler,
+ },
+ {
+ MethodName: "SendMessageById",
+ Handler: _Sentry_SendMessageById_Handler,
+ },
+ {
+ MethodName: "SendMessageToRandomPeers",
+ Handler: _Sentry_SendMessageToRandomPeers_Handler,
+ },
+ {
+ MethodName: "SendMessageToAll",
+ Handler: _Sentry_SendMessageToAll_Handler,
+ },
+ {
+ MethodName: "Peers",
+ Handler: _Sentry_Peers_Handler,
+ },
+ {
+ MethodName: "PeerCount",
+ Handler: _Sentry_PeerCount_Handler,
+ },
+ {
+ MethodName: "PeerById",
+ Handler: _Sentry_PeerById_Handler,
+ },
+ {
+ MethodName: "AddPeer",
+ Handler: _Sentry_AddPeer_Handler,
+ },
+ {
+ MethodName: "NodeInfo",
+ Handler: _Sentry_NodeInfo_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Messages",
+ Handler: _Sentry_Messages_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "PeerEvents",
+ Handler: _Sentry_PeerEvents_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "p2psentry/sentry.proto",
+}
diff --git a/erigon-lib/gointerfaces/test_util.go b/erigon-lib/gointerfaces/test_util.go
new file mode 100644
index 00000000000..25e9b45751d
--- /dev/null
+++ b/erigon-lib/gointerfaces/test_util.go
@@ -0,0 +1,4 @@
+package gointerfaces
+
+//go:generate moq -stub -out ./sentry/mocks.go ./sentry SentryServer SentryClient
+//go:generate moq -stub -out ./remote/mocks.go ./remote KVClient KV_StateChangesClient
diff --git a/erigon-lib/gointerfaces/txpool/mining.pb.go b/erigon-lib/gointerfaces/txpool/mining.pb.go
new file mode 100644
index 00000000000..20b3e0bd7e6
--- /dev/null
+++ b/erigon-lib/gointerfaces/txpool/mining.pb.go
@@ -0,0 +1,1153 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: txpool/mining.proto
+
+package txpool
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type OnPendingBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *OnPendingBlockRequest) Reset() {
+ *x = OnPendingBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnPendingBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnPendingBlockRequest) ProtoMessage() {}
+
+func (x *OnPendingBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnPendingBlockRequest.ProtoReflect.Descriptor instead.
+func (*OnPendingBlockRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{0}
+}
+
+type OnPendingBlockReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RplBlock []byte `protobuf:"bytes,1,opt,name=rpl_block,json=rplBlock,proto3" json:"rpl_block,omitempty"`
+}
+
+func (x *OnPendingBlockReply) Reset() {
+ *x = OnPendingBlockReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnPendingBlockReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnPendingBlockReply) ProtoMessage() {}
+
+func (x *OnPendingBlockReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnPendingBlockReply.ProtoReflect.Descriptor instead.
+func (*OnPendingBlockReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *OnPendingBlockReply) GetRplBlock() []byte {
+ if x != nil {
+ return x.RplBlock
+ }
+ return nil
+}
+
+type OnMinedBlockRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *OnMinedBlockRequest) Reset() {
+ *x = OnMinedBlockRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnMinedBlockRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnMinedBlockRequest) ProtoMessage() {}
+
+func (x *OnMinedBlockRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnMinedBlockRequest.ProtoReflect.Descriptor instead.
+func (*OnMinedBlockRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{2}
+}
+
+type OnMinedBlockReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RplBlock []byte `protobuf:"bytes,1,opt,name=rpl_block,json=rplBlock,proto3" json:"rpl_block,omitempty"`
+}
+
+func (x *OnMinedBlockReply) Reset() {
+ *x = OnMinedBlockReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnMinedBlockReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnMinedBlockReply) ProtoMessage() {}
+
+func (x *OnMinedBlockReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnMinedBlockReply.ProtoReflect.Descriptor instead.
+func (*OnMinedBlockReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *OnMinedBlockReply) GetRplBlock() []byte {
+ if x != nil {
+ return x.RplBlock
+ }
+ return nil
+}
+
+type OnPendingLogsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *OnPendingLogsRequest) Reset() {
+ *x = OnPendingLogsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnPendingLogsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnPendingLogsRequest) ProtoMessage() {}
+
+func (x *OnPendingLogsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnPendingLogsRequest.ProtoReflect.Descriptor instead.
+func (*OnPendingLogsRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{4}
+}
+
+type OnPendingLogsReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RplLogs []byte `protobuf:"bytes,1,opt,name=rpl_logs,json=rplLogs,proto3" json:"rpl_logs,omitempty"`
+}
+
+func (x *OnPendingLogsReply) Reset() {
+ *x = OnPendingLogsReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnPendingLogsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnPendingLogsReply) ProtoMessage() {}
+
+func (x *OnPendingLogsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnPendingLogsReply.ProtoReflect.Descriptor instead.
+func (*OnPendingLogsReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *OnPendingLogsReply) GetRplLogs() []byte {
+ if x != nil {
+ return x.RplLogs
+ }
+ return nil
+}
+
+type GetWorkRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetWorkRequest) Reset() {
+ *x = GetWorkRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetWorkRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetWorkRequest) ProtoMessage() {}
+
+func (x *GetWorkRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetWorkRequest.ProtoReflect.Descriptor instead.
+func (*GetWorkRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{6}
+}
+
+type GetWorkReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ HeaderHash string `protobuf:"bytes,1,opt,name=header_hash,json=headerHash,proto3" json:"header_hash,omitempty"` // 32 bytes hex encoded current block header pow-hash
+ SeedHash string `protobuf:"bytes,2,opt,name=seed_hash,json=seedHash,proto3" json:"seed_hash,omitempty"` // 32 bytes hex encoded seed hash used for DAG
+ Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+ BlockNumber string `protobuf:"bytes,4,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` // hex encoded block number
+}
+
+func (x *GetWorkReply) Reset() {
+ *x = GetWorkReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetWorkReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetWorkReply) ProtoMessage() {}
+
+func (x *GetWorkReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetWorkReply.ProtoReflect.Descriptor instead.
+func (*GetWorkReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetWorkReply) GetHeaderHash() string {
+ if x != nil {
+ return x.HeaderHash
+ }
+ return ""
+}
+
+func (x *GetWorkReply) GetSeedHash() string {
+ if x != nil {
+ return x.SeedHash
+ }
+ return ""
+}
+
+func (x *GetWorkReply) GetTarget() string {
+ if x != nil {
+ return x.Target
+ }
+ return ""
+}
+
+func (x *GetWorkReply) GetBlockNumber() string {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return ""
+}
+
+type SubmitWorkRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlockNonce []byte `protobuf:"bytes,1,opt,name=block_nonce,json=blockNonce,proto3" json:"block_nonce,omitempty"`
+ PowHash []byte `protobuf:"bytes,2,opt,name=pow_hash,json=powHash,proto3" json:"pow_hash,omitempty"`
+ Digest []byte `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *SubmitWorkRequest) Reset() {
+ *x = SubmitWorkRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubmitWorkRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubmitWorkRequest) ProtoMessage() {}
+
+func (x *SubmitWorkRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubmitWorkRequest.ProtoReflect.Descriptor instead.
+func (*SubmitWorkRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *SubmitWorkRequest) GetBlockNonce() []byte {
+ if x != nil {
+ return x.BlockNonce
+ }
+ return nil
+}
+
+func (x *SubmitWorkRequest) GetPowHash() []byte {
+ if x != nil {
+ return x.PowHash
+ }
+ return nil
+}
+
+func (x *SubmitWorkRequest) GetDigest() []byte {
+ if x != nil {
+ return x.Digest
+ }
+ return nil
+}
+
+type SubmitWorkReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+}
+
+func (x *SubmitWorkReply) Reset() {
+ *x = SubmitWorkReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubmitWorkReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubmitWorkReply) ProtoMessage() {}
+
+func (x *SubmitWorkReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubmitWorkReply.ProtoReflect.Descriptor instead.
+func (*SubmitWorkReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *SubmitWorkReply) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+type SubmitHashRateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rate uint64 `protobuf:"varint,1,opt,name=rate,proto3" json:"rate,omitempty"`
+ Id []byte `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *SubmitHashRateRequest) Reset() {
+ *x = SubmitHashRateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubmitHashRateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubmitHashRateRequest) ProtoMessage() {}
+
+func (x *SubmitHashRateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubmitHashRateRequest.ProtoReflect.Descriptor instead.
+func (*SubmitHashRateRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *SubmitHashRateRequest) GetRate() uint64 {
+ if x != nil {
+ return x.Rate
+ }
+ return 0
+}
+
+func (x *SubmitHashRateRequest) GetId() []byte {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+type SubmitHashRateReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
+}
+
+func (x *SubmitHashRateReply) Reset() {
+ *x = SubmitHashRateReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubmitHashRateReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubmitHashRateReply) ProtoMessage() {}
+
+func (x *SubmitHashRateReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubmitHashRateReply.ProtoReflect.Descriptor instead.
+func (*SubmitHashRateReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *SubmitHashRateReply) GetOk() bool {
+ if x != nil {
+ return x.Ok
+ }
+ return false
+}
+
+type HashRateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HashRateRequest) Reset() {
+ *x = HashRateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HashRateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashRateRequest) ProtoMessage() {}
+
+func (x *HashRateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashRateRequest.ProtoReflect.Descriptor instead.
+func (*HashRateRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{12}
+}
+
+type HashRateReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ HashRate uint64 `protobuf:"varint,1,opt,name=hash_rate,json=hashRate,proto3" json:"hash_rate,omitempty"`
+}
+
+func (x *HashRateReply) Reset() {
+ *x = HashRateReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HashRateReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashRateReply) ProtoMessage() {}
+
+func (x *HashRateReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashRateReply.ProtoReflect.Descriptor instead.
+func (*HashRateReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *HashRateReply) GetHashRate() uint64 {
+ if x != nil {
+ return x.HashRate
+ }
+ return 0
+}
+
+type MiningRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *MiningRequest) Reset() {
+ *x = MiningRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MiningRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MiningRequest) ProtoMessage() {}
+
+func (x *MiningRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MiningRequest.ProtoReflect.Descriptor instead.
+func (*MiningRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{14}
+}
+
+type MiningReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ Running bool `protobuf:"varint,2,opt,name=running,proto3" json:"running,omitempty"`
+}
+
+func (x *MiningReply) Reset() {
+ *x = MiningReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_mining_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MiningReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MiningReply) ProtoMessage() {}
+
+func (x *MiningReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_mining_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MiningReply.ProtoReflect.Descriptor instead.
+func (*MiningReply) Descriptor() ([]byte, []int) {
+ return file_txpool_mining_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *MiningReply) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *MiningReply) GetRunning() bool {
+ if x != nil {
+ return x.Running
+ }
+ return false
+}
+
+var File_txpool_mining_proto protoreflect.FileDescriptor
+
+var file_txpool_mining_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2f, 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x1a, 0x1b, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x17, 0x0a,
+ 0x15, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x13, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64,
+ 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a,
+ 0x09, 0x72, 0x70, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x15, 0x0a, 0x13, 0x4f, 0x6e,
+ 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x30, 0x0a, 0x11, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x70, 0x6c, 0x5f, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x70, 0x6c, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x22, 0x16, 0x0a, 0x14, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x12, 0x4f,
+ 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x70, 0x6c, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x70, 0x6c, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x10, 0x0a, 0x0e,
+ 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x87,
+ 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a,
+ 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x67, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x6d,
+ 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x19,
+ 0x0a, 0x08, 0x70, 0x6f, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x07, 0x70, 0x6f, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67,
+ 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73,
+ 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x3b, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61,
+ 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
+ 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x74,
+ 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69,
+ 0x64, 0x22, 0x25, 0x0a, 0x13, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x11, 0x0a, 0x0f, 0x48, 0x61, 0x73, 0x68,
+ 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2c, 0x0a, 0x0d, 0x48,
+ 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09,
+ 0x68, 0x61, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x68, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x4d, 0x69, 0x6e,
+ 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x41, 0x0a, 0x0b, 0x4d, 0x69,
+ 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x32, 0xe2, 0x04,
+ 0x0a, 0x06, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x4e, 0x0a, 0x0e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65,
+ 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e,
+ 0x64, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01,
+ 0x12, 0x48, 0x0a, 0x0c, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65,
+ 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
+ 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x4d, 0x69, 0x6e, 0x65, 0x64, 0x42, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x0d, 0x4f, 0x6e,
+ 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x74, 0x78,
+ 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f,
+ 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x74, 0x78, 0x70, 0x6f,
+ 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x67, 0x73,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x30, 0x01, 0x12, 0x37, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x57, 0x6f,
+ 0x72, 0x6b, 0x12, 0x16, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57,
+ 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x78, 0x70,
+ 0x6f, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x40, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x19,
+ 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f,
+ 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68,
+ 0x52, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75,
+ 0x62, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x75, 0x62,
+ 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x3a, 0x0a, 0x08, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x17, 0x2e, 0x74,
+ 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x48,
+ 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x06,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e,
+ 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70,
+ 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74,
+ 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_txpool_mining_proto_rawDescOnce sync.Once
+ file_txpool_mining_proto_rawDescData = file_txpool_mining_proto_rawDesc
+)
+
+func file_txpool_mining_proto_rawDescGZIP() []byte {
+ file_txpool_mining_proto_rawDescOnce.Do(func() {
+ file_txpool_mining_proto_rawDescData = protoimpl.X.CompressGZIP(file_txpool_mining_proto_rawDescData)
+ })
+ return file_txpool_mining_proto_rawDescData
+}
+
+var file_txpool_mining_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_txpool_mining_proto_goTypes = []interface{}{
+ (*OnPendingBlockRequest)(nil), // 0: txpool.OnPendingBlockRequest
+ (*OnPendingBlockReply)(nil), // 1: txpool.OnPendingBlockReply
+ (*OnMinedBlockRequest)(nil), // 2: txpool.OnMinedBlockRequest
+ (*OnMinedBlockReply)(nil), // 3: txpool.OnMinedBlockReply
+ (*OnPendingLogsRequest)(nil), // 4: txpool.OnPendingLogsRequest
+ (*OnPendingLogsReply)(nil), // 5: txpool.OnPendingLogsReply
+ (*GetWorkRequest)(nil), // 6: txpool.GetWorkRequest
+ (*GetWorkReply)(nil), // 7: txpool.GetWorkReply
+ (*SubmitWorkRequest)(nil), // 8: txpool.SubmitWorkRequest
+ (*SubmitWorkReply)(nil), // 9: txpool.SubmitWorkReply
+ (*SubmitHashRateRequest)(nil), // 10: txpool.SubmitHashRateRequest
+ (*SubmitHashRateReply)(nil), // 11: txpool.SubmitHashRateReply
+ (*HashRateRequest)(nil), // 12: txpool.HashRateRequest
+ (*HashRateReply)(nil), // 13: txpool.HashRateReply
+ (*MiningRequest)(nil), // 14: txpool.MiningRequest
+ (*MiningReply)(nil), // 15: txpool.MiningReply
+ (*emptypb.Empty)(nil), // 16: google.protobuf.Empty
+ (*types.VersionReply)(nil), // 17: types.VersionReply
+}
+var file_txpool_mining_proto_depIdxs = []int32{
+ 16, // 0: txpool.Mining.Version:input_type -> google.protobuf.Empty
+ 0, // 1: txpool.Mining.OnPendingBlock:input_type -> txpool.OnPendingBlockRequest
+ 2, // 2: txpool.Mining.OnMinedBlock:input_type -> txpool.OnMinedBlockRequest
+ 4, // 3: txpool.Mining.OnPendingLogs:input_type -> txpool.OnPendingLogsRequest
+ 6, // 4: txpool.Mining.GetWork:input_type -> txpool.GetWorkRequest
+ 8, // 5: txpool.Mining.SubmitWork:input_type -> txpool.SubmitWorkRequest
+ 10, // 6: txpool.Mining.SubmitHashRate:input_type -> txpool.SubmitHashRateRequest
+ 12, // 7: txpool.Mining.HashRate:input_type -> txpool.HashRateRequest
+ 14, // 8: txpool.Mining.Mining:input_type -> txpool.MiningRequest
+ 17, // 9: txpool.Mining.Version:output_type -> types.VersionReply
+ 1, // 10: txpool.Mining.OnPendingBlock:output_type -> txpool.OnPendingBlockReply
+ 3, // 11: txpool.Mining.OnMinedBlock:output_type -> txpool.OnMinedBlockReply
+ 5, // 12: txpool.Mining.OnPendingLogs:output_type -> txpool.OnPendingLogsReply
+ 7, // 13: txpool.Mining.GetWork:output_type -> txpool.GetWorkReply
+ 9, // 14: txpool.Mining.SubmitWork:output_type -> txpool.SubmitWorkReply
+ 11, // 15: txpool.Mining.SubmitHashRate:output_type -> txpool.SubmitHashRateReply
+ 13, // 16: txpool.Mining.HashRate:output_type -> txpool.HashRateReply
+ 15, // 17: txpool.Mining.Mining:output_type -> txpool.MiningReply
+ 9, // [9:18] is the sub-list for method output_type
+ 0, // [0:9] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_txpool_mining_proto_init() }
+func file_txpool_mining_proto_init() {
+ if File_txpool_mining_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_txpool_mining_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnPendingBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnPendingBlockReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnMinedBlockRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnMinedBlockReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnPendingLogsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnPendingLogsReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetWorkRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetWorkReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubmitWorkRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubmitWorkReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubmitHashRateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubmitHashRateReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HashRateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HashRateReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MiningRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_mining_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MiningReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_txpool_mining_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_txpool_mining_proto_goTypes,
+ DependencyIndexes: file_txpool_mining_proto_depIdxs,
+ MessageInfos: file_txpool_mining_proto_msgTypes,
+ }.Build()
+ File_txpool_mining_proto = out.File
+ file_txpool_mining_proto_rawDesc = nil
+ file_txpool_mining_proto_goTypes = nil
+ file_txpool_mining_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go b/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go
new file mode 100644
index 00000000000..c8855bfb6e3
--- /dev/null
+++ b/erigon-lib/gointerfaces/txpool/mining_grpc.pb.go
@@ -0,0 +1,535 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: txpool/mining.proto
+
+package txpool
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Mining_Version_FullMethodName = "/txpool.Mining/Version"
+ Mining_OnPendingBlock_FullMethodName = "/txpool.Mining/OnPendingBlock"
+ Mining_OnMinedBlock_FullMethodName = "/txpool.Mining/OnMinedBlock"
+ Mining_OnPendingLogs_FullMethodName = "/txpool.Mining/OnPendingLogs"
+ Mining_GetWork_FullMethodName = "/txpool.Mining/GetWork"
+ Mining_SubmitWork_FullMethodName = "/txpool.Mining/SubmitWork"
+ Mining_SubmitHashRate_FullMethodName = "/txpool.Mining/SubmitHashRate"
+ Mining_HashRate_FullMethodName = "/txpool.Mining/HashRate"
+ Mining_Mining_FullMethodName = "/txpool.Mining/Mining"
+)
+
+// MiningClient is the client API for Mining service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type MiningClient interface {
+ // Version returns the service version number
+ Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error)
+ // subscribe to pending blocks event
+ OnPendingBlock(ctx context.Context, in *OnPendingBlockRequest, opts ...grpc.CallOption) (Mining_OnPendingBlockClient, error)
+ // subscribe to mined blocks event
+ OnMinedBlock(ctx context.Context, in *OnMinedBlockRequest, opts ...grpc.CallOption) (Mining_OnMinedBlockClient, error)
+ // subscribe to pending blocks event
+ OnPendingLogs(ctx context.Context, in *OnPendingLogsRequest, opts ...grpc.CallOption) (Mining_OnPendingLogsClient, error)
+ // GetWork returns a work package for external miner.
+ //
+ // The work package consists of 3 strings:
+ //
+ // result[0] - 32 bytes hex encoded current block header pow-hash
+ // result[1] - 32 bytes hex encoded seed hash used for DAG
+ // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+ // result[3] - hex encoded block number
+ GetWork(ctx context.Context, in *GetWorkRequest, opts ...grpc.CallOption) (*GetWorkReply, error)
+ // SubmitWork can be used by external miner to submit their POW solution.
+ // It returns an indication if the work was accepted.
+ // Note either an invalid solution, a stale work a non-existent work will return false.
+ SubmitWork(ctx context.Context, in *SubmitWorkRequest, opts ...grpc.CallOption) (*SubmitWorkReply, error)
+ // SubmitHashRate can be used for remote miners to submit their hash rate.
+ // This enables the node to report the combined hash rate of all miners
+ // which submit work through this node.
+ //
+ // It accepts the miner hash rate and an identifier which must be unique
+ // between nodes.
+ SubmitHashRate(ctx context.Context, in *SubmitHashRateRequest, opts ...grpc.CallOption) (*SubmitHashRateReply, error)
+ // HashRate returns the current hashrate for local CPU miner and remote miner.
+ HashRate(ctx context.Context, in *HashRateRequest, opts ...grpc.CallOption) (*HashRateReply, error)
+ // Mining returns an indication if this node is currently mining and its mining configuration
+ Mining(ctx context.Context, in *MiningRequest, opts ...grpc.CallOption) (*MiningReply, error)
+}
+
+type miningClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewMiningClient(cc grpc.ClientConnInterface) MiningClient {
+ return &miningClient{cc}
+}
+
+func (c *miningClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ out := new(types.VersionReply)
+ err := c.cc.Invoke(ctx, Mining_Version_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *miningClient) OnPendingBlock(ctx context.Context, in *OnPendingBlockRequest, opts ...grpc.CallOption) (Mining_OnPendingBlockClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[0], Mining_OnPendingBlock_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &miningOnPendingBlockClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Mining_OnPendingBlockClient interface {
+ Recv() (*OnPendingBlockReply, error)
+ grpc.ClientStream
+}
+
+type miningOnPendingBlockClient struct {
+ grpc.ClientStream
+}
+
+func (x *miningOnPendingBlockClient) Recv() (*OnPendingBlockReply, error) {
+ m := new(OnPendingBlockReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *miningClient) OnMinedBlock(ctx context.Context, in *OnMinedBlockRequest, opts ...grpc.CallOption) (Mining_OnMinedBlockClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[1], Mining_OnMinedBlock_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &miningOnMinedBlockClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Mining_OnMinedBlockClient interface {
+ Recv() (*OnMinedBlockReply, error)
+ grpc.ClientStream
+}
+
+type miningOnMinedBlockClient struct {
+ grpc.ClientStream
+}
+
+func (x *miningOnMinedBlockClient) Recv() (*OnMinedBlockReply, error) {
+ m := new(OnMinedBlockReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *miningClient) OnPendingLogs(ctx context.Context, in *OnPendingLogsRequest, opts ...grpc.CallOption) (Mining_OnPendingLogsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Mining_ServiceDesc.Streams[2], Mining_OnPendingLogs_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &miningOnPendingLogsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Mining_OnPendingLogsClient interface {
+ Recv() (*OnPendingLogsReply, error)
+ grpc.ClientStream
+}
+
+type miningOnPendingLogsClient struct {
+ grpc.ClientStream
+}
+
+func (x *miningOnPendingLogsClient) Recv() (*OnPendingLogsReply, error) {
+ m := new(OnPendingLogsReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *miningClient) GetWork(ctx context.Context, in *GetWorkRequest, opts ...grpc.CallOption) (*GetWorkReply, error) {
+ out := new(GetWorkReply)
+ err := c.cc.Invoke(ctx, Mining_GetWork_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *miningClient) SubmitWork(ctx context.Context, in *SubmitWorkRequest, opts ...grpc.CallOption) (*SubmitWorkReply, error) {
+ out := new(SubmitWorkReply)
+ err := c.cc.Invoke(ctx, Mining_SubmitWork_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *miningClient) SubmitHashRate(ctx context.Context, in *SubmitHashRateRequest, opts ...grpc.CallOption) (*SubmitHashRateReply, error) {
+ out := new(SubmitHashRateReply)
+ err := c.cc.Invoke(ctx, Mining_SubmitHashRate_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *miningClient) HashRate(ctx context.Context, in *HashRateRequest, opts ...grpc.CallOption) (*HashRateReply, error) {
+ out := new(HashRateReply)
+ err := c.cc.Invoke(ctx, Mining_HashRate_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *miningClient) Mining(ctx context.Context, in *MiningRequest, opts ...grpc.CallOption) (*MiningReply, error) {
+ out := new(MiningReply)
+ err := c.cc.Invoke(ctx, Mining_Mining_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MiningServer is the server API for Mining service.
+// All implementations must embed UnimplementedMiningServer
+// for forward compatibility
+type MiningServer interface {
+ // Version returns the service version number
+ Version(context.Context, *emptypb.Empty) (*types.VersionReply, error)
+ // subscribe to pending blocks event
+ OnPendingBlock(*OnPendingBlockRequest, Mining_OnPendingBlockServer) error
+ // subscribe to mined blocks event
+ OnMinedBlock(*OnMinedBlockRequest, Mining_OnMinedBlockServer) error
+ // subscribe to pending blocks event
+ OnPendingLogs(*OnPendingLogsRequest, Mining_OnPendingLogsServer) error
+ // GetWork returns a work package for external miner.
+ //
+ // The work package consists of 3 strings:
+ //
+ // result[0] - 32 bytes hex encoded current block header pow-hash
+ // result[1] - 32 bytes hex encoded seed hash used for DAG
+ // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+ // result[3] - hex encoded block number
+ GetWork(context.Context, *GetWorkRequest) (*GetWorkReply, error)
+ // SubmitWork can be used by external miner to submit their POW solution.
+ // It returns an indication if the work was accepted.
+ // Note either an invalid solution, a stale work a non-existent work will return false.
+ SubmitWork(context.Context, *SubmitWorkRequest) (*SubmitWorkReply, error)
+ // SubmitHashRate can be used for remote miners to submit their hash rate.
+ // This enables the node to report the combined hash rate of all miners
+ // which submit work through this node.
+ //
+ // It accepts the miner hash rate and an identifier which must be unique
+ // between nodes.
+ SubmitHashRate(context.Context, *SubmitHashRateRequest) (*SubmitHashRateReply, error)
+ // HashRate returns the current hashrate for local CPU miner and remote miner.
+ HashRate(context.Context, *HashRateRequest) (*HashRateReply, error)
+ // Mining returns an indication if this node is currently mining and its mining configuration
+ Mining(context.Context, *MiningRequest) (*MiningReply, error)
+ mustEmbedUnimplementedMiningServer()
+}
+
+// UnimplementedMiningServer must be embedded to have forward compatible implementations.
+type UnimplementedMiningServer struct {
+}
+
+func (UnimplementedMiningServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
+}
+func (UnimplementedMiningServer) OnPendingBlock(*OnPendingBlockRequest, Mining_OnPendingBlockServer) error {
+ return status.Errorf(codes.Unimplemented, "method OnPendingBlock not implemented")
+}
+func (UnimplementedMiningServer) OnMinedBlock(*OnMinedBlockRequest, Mining_OnMinedBlockServer) error {
+ return status.Errorf(codes.Unimplemented, "method OnMinedBlock not implemented")
+}
+func (UnimplementedMiningServer) OnPendingLogs(*OnPendingLogsRequest, Mining_OnPendingLogsServer) error {
+ return status.Errorf(codes.Unimplemented, "method OnPendingLogs not implemented")
+}
+func (UnimplementedMiningServer) GetWork(context.Context, *GetWorkRequest) (*GetWorkReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetWork not implemented")
+}
+func (UnimplementedMiningServer) SubmitWork(context.Context, *SubmitWorkRequest) (*SubmitWorkReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SubmitWork not implemented")
+}
+func (UnimplementedMiningServer) SubmitHashRate(context.Context, *SubmitHashRateRequest) (*SubmitHashRateReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SubmitHashRate not implemented")
+}
+func (UnimplementedMiningServer) HashRate(context.Context, *HashRateRequest) (*HashRateReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HashRate not implemented")
+}
+func (UnimplementedMiningServer) Mining(context.Context, *MiningRequest) (*MiningReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Mining not implemented")
+}
+func (UnimplementedMiningServer) mustEmbedUnimplementedMiningServer() {}
+
+// UnsafeMiningServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to MiningServer will
+// result in compilation errors.
+type UnsafeMiningServer interface {
+ mustEmbedUnimplementedMiningServer()
+}
+
+func RegisterMiningServer(s grpc.ServiceRegistrar, srv MiningServer) {
+ s.RegisterService(&Mining_ServiceDesc, srv)
+}
+
+func _Mining_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).Version(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_Version_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).Version(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Mining_OnPendingBlock_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OnPendingBlockRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MiningServer).OnPendingBlock(m, &miningOnPendingBlockServer{stream})
+}
+
+type Mining_OnPendingBlockServer interface {
+ Send(*OnPendingBlockReply) error
+ grpc.ServerStream
+}
+
+type miningOnPendingBlockServer struct {
+ grpc.ServerStream
+}
+
+func (x *miningOnPendingBlockServer) Send(m *OnPendingBlockReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Mining_OnMinedBlock_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OnMinedBlockRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MiningServer).OnMinedBlock(m, &miningOnMinedBlockServer{stream})
+}
+
+type Mining_OnMinedBlockServer interface {
+ Send(*OnMinedBlockReply) error
+ grpc.ServerStream
+}
+
+type miningOnMinedBlockServer struct {
+ grpc.ServerStream
+}
+
+func (x *miningOnMinedBlockServer) Send(m *OnMinedBlockReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Mining_OnPendingLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OnPendingLogsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MiningServer).OnPendingLogs(m, &miningOnPendingLogsServer{stream})
+}
+
+type Mining_OnPendingLogsServer interface {
+ Send(*OnPendingLogsReply) error
+ grpc.ServerStream
+}
+
+type miningOnPendingLogsServer struct {
+ grpc.ServerStream
+}
+
+func (x *miningOnPendingLogsServer) Send(m *OnPendingLogsReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Mining_GetWork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetWorkRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).GetWork(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_GetWork_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).GetWork(ctx, req.(*GetWorkRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Mining_SubmitWork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SubmitWorkRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).SubmitWork(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_SubmitWork_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).SubmitWork(ctx, req.(*SubmitWorkRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Mining_SubmitHashRate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SubmitHashRateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).SubmitHashRate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_SubmitHashRate_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).SubmitHashRate(ctx, req.(*SubmitHashRateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Mining_HashRate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashRateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).HashRate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_HashRate_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).HashRate(ctx, req.(*HashRateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Mining_Mining_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MiningRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MiningServer).Mining(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Mining_Mining_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MiningServer).Mining(ctx, req.(*MiningRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Mining_ServiceDesc is the grpc.ServiceDesc for Mining service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Mining_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "txpool.Mining",
+ HandlerType: (*MiningServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Version",
+ Handler: _Mining_Version_Handler,
+ },
+ {
+ MethodName: "GetWork",
+ Handler: _Mining_GetWork_Handler,
+ },
+ {
+ MethodName: "SubmitWork",
+ Handler: _Mining_SubmitWork_Handler,
+ },
+ {
+ MethodName: "SubmitHashRate",
+ Handler: _Mining_SubmitHashRate_Handler,
+ },
+ {
+ MethodName: "HashRate",
+ Handler: _Mining_HashRate_Handler,
+ },
+ {
+ MethodName: "Mining",
+ Handler: _Mining_Mining_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "OnPendingBlock",
+ Handler: _Mining_OnPendingBlock_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "OnMinedBlock",
+ Handler: _Mining_OnMinedBlock_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "OnPendingLogs",
+ Handler: _Mining_OnPendingLogs_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "txpool/mining.proto",
+}
diff --git a/erigon-lib/gointerfaces/txpool/txpool.pb.go b/erigon-lib/gointerfaces/txpool/txpool.pb.go
new file mode 100644
index 00000000000..52b9b02def1
--- /dev/null
+++ b/erigon-lib/gointerfaces/txpool/txpool.pb.go
@@ -0,0 +1,1323 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: txpool/txpool.proto
+
+package txpool
+
+import (
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ImportResult int32
+
+const (
+ ImportResult_SUCCESS ImportResult = 0
+ ImportResult_ALREADY_EXISTS ImportResult = 1
+ ImportResult_FEE_TOO_LOW ImportResult = 2
+ ImportResult_STALE ImportResult = 3
+ ImportResult_INVALID ImportResult = 4
+ ImportResult_INTERNAL_ERROR ImportResult = 5
+)
+
+// Enum value maps for ImportResult.
+var (
+ ImportResult_name = map[int32]string{
+ 0: "SUCCESS",
+ 1: "ALREADY_EXISTS",
+ 2: "FEE_TOO_LOW",
+ 3: "STALE",
+ 4: "INVALID",
+ 5: "INTERNAL_ERROR",
+ }
+ ImportResult_value = map[string]int32{
+ "SUCCESS": 0,
+ "ALREADY_EXISTS": 1,
+ "FEE_TOO_LOW": 2,
+ "STALE": 3,
+ "INVALID": 4,
+ "INTERNAL_ERROR": 5,
+ }
+)
+
+func (x ImportResult) Enum() *ImportResult {
+ p := new(ImportResult)
+ *p = x
+ return p
+}
+
+func (x ImportResult) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ImportResult) Descriptor() protoreflect.EnumDescriptor {
+ return file_txpool_txpool_proto_enumTypes[0].Descriptor()
+}
+
+func (ImportResult) Type() protoreflect.EnumType {
+ return &file_txpool_txpool_proto_enumTypes[0]
+}
+
+func (x ImportResult) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ImportResult.Descriptor instead.
+func (ImportResult) EnumDescriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{0}
+}
+
+type AllReply_TxnType int32
+
+const (
+ AllReply_PENDING AllReply_TxnType = 0 // All currently processable transactions
+ AllReply_QUEUED AllReply_TxnType = 1 // Queued but non-processable transactions
+ AllReply_BASE_FEE AllReply_TxnType = 2 // BaseFee not enough baseFee non-processable transactions
+)
+
+// Enum value maps for AllReply_TxnType.
+var (
+ AllReply_TxnType_name = map[int32]string{
+ 0: "PENDING",
+ 1: "QUEUED",
+ 2: "BASE_FEE",
+ }
+ AllReply_TxnType_value = map[string]int32{
+ "PENDING": 0,
+ "QUEUED": 1,
+ "BASE_FEE": 2,
+ }
+)
+
+func (x AllReply_TxnType) Enum() *AllReply_TxnType {
+ p := new(AllReply_TxnType)
+ *p = x
+ return p
+}
+
+func (x AllReply_TxnType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AllReply_TxnType) Descriptor() protoreflect.EnumDescriptor {
+ return file_txpool_txpool_proto_enumTypes[1].Descriptor()
+}
+
+func (AllReply_TxnType) Type() protoreflect.EnumType {
+ return &file_txpool_txpool_proto_enumTypes[1]
+}
+
+func (x AllReply_TxnType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AllReply_TxnType.Descriptor instead.
+func (AllReply_TxnType) EnumDescriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{8, 0}
+}
+
+type TxHashes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hashes []*types.H256 `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+}
+
+func (x *TxHashes) Reset() {
+ *x = TxHashes{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TxHashes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TxHashes) ProtoMessage() {}
+
+func (x *TxHashes) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TxHashes.ProtoReflect.Descriptor instead.
+func (*TxHashes) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TxHashes) GetHashes() []*types.H256 {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+type AddRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlp_txs,json=rlpTxs,proto3" json:"rlp_txs,omitempty"`
+}
+
+func (x *AddRequest) Reset() {
+ *x = AddRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddRequest) ProtoMessage() {}
+
+func (x *AddRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead.
+func (*AddRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *AddRequest) GetRlpTxs() [][]byte {
+ if x != nil {
+ return x.RlpTxs
+ }
+ return nil
+}
+
+type AddReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Imported []ImportResult `protobuf:"varint,1,rep,packed,name=imported,proto3,enum=txpool.ImportResult" json:"imported,omitempty"`
+ Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *AddReply) Reset() {
+ *x = AddReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddReply) ProtoMessage() {}
+
+func (x *AddReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddReply.ProtoReflect.Descriptor instead.
+func (*AddReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AddReply) GetImported() []ImportResult {
+ if x != nil {
+ return x.Imported
+ }
+ return nil
+}
+
+func (x *AddReply) GetErrors() []string {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type TransactionsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hashes []*types.H256 `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+}
+
+func (x *TransactionsRequest) Reset() {
+ *x = TransactionsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransactionsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransactionsRequest) ProtoMessage() {}
+
+func (x *TransactionsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransactionsRequest.ProtoReflect.Descriptor instead.
+func (*TransactionsRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TransactionsRequest) GetHashes() []*types.H256 {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+type TransactionsReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RlpTxs [][]byte `protobuf:"bytes,1,rep,name=rlp_txs,json=rlpTxs,proto3" json:"rlp_txs,omitempty"`
+}
+
+func (x *TransactionsReply) Reset() {
+ *x = TransactionsReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransactionsReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransactionsReply) ProtoMessage() {}
+
+func (x *TransactionsReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransactionsReply.ProtoReflect.Descriptor instead.
+func (*TransactionsReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *TransactionsReply) GetRlpTxs() [][]byte {
+ if x != nil {
+ return x.RlpTxs
+ }
+ return nil
+}
+
+type OnAddRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *OnAddRequest) Reset() {
+ *x = OnAddRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnAddRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnAddRequest) ProtoMessage() {}
+
+func (x *OnAddRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnAddRequest.ProtoReflect.Descriptor instead.
+func (*OnAddRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{5}
+}
+
+type OnAddReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RplTxs [][]byte `protobuf:"bytes,1,rep,name=rpl_txs,json=rplTxs,proto3" json:"rpl_txs,omitempty"`
+}
+
+func (x *OnAddReply) Reset() {
+ *x = OnAddReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OnAddReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OnAddReply) ProtoMessage() {}
+
+func (x *OnAddReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OnAddReply.ProtoReflect.Descriptor instead.
+func (*OnAddReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *OnAddReply) GetRplTxs() [][]byte {
+ if x != nil {
+ return x.RplTxs
+ }
+ return nil
+}
+
+type AllRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AllRequest) Reset() {
+ *x = AllRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AllRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AllRequest) ProtoMessage() {}
+
+func (x *AllRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AllRequest.ProtoReflect.Descriptor instead.
+func (*AllRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{7}
+}
+
+type AllReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Txs []*AllReply_Tx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"`
+}
+
+func (x *AllReply) Reset() {
+ *x = AllReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AllReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AllReply) ProtoMessage() {}
+
+func (x *AllReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AllReply.ProtoReflect.Descriptor instead.
+func (*AllReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *AllReply) GetTxs() []*AllReply_Tx {
+ if x != nil {
+ return x.Txs
+ }
+ return nil
+}
+
+type PendingReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Txs []*PendingReply_Tx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"`
+}
+
+func (x *PendingReply) Reset() {
+ *x = PendingReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PendingReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PendingReply) ProtoMessage() {}
+
+func (x *PendingReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PendingReply.ProtoReflect.Descriptor instead.
+func (*PendingReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *PendingReply) GetTxs() []*PendingReply_Tx {
+ if x != nil {
+ return x.Txs
+ }
+ return nil
+}
+
+type StatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StatusRequest) Reset() {
+ *x = StatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusRequest) ProtoMessage() {}
+
+func (x *StatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead.
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{10}
+}
+
+type StatusReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PendingCount uint32 `protobuf:"varint,1,opt,name=pending_count,json=pendingCount,proto3" json:"pending_count,omitempty"`
+ QueuedCount uint32 `protobuf:"varint,2,opt,name=queued_count,json=queuedCount,proto3" json:"queued_count,omitempty"`
+ BaseFeeCount uint32 `protobuf:"varint,3,opt,name=base_fee_count,json=baseFeeCount,proto3" json:"base_fee_count,omitempty"`
+}
+
+func (x *StatusReply) Reset() {
+ *x = StatusReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusReply) ProtoMessage() {}
+
+func (x *StatusReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusReply.ProtoReflect.Descriptor instead.
+func (*StatusReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *StatusReply) GetPendingCount() uint32 {
+ if x != nil {
+ return x.PendingCount
+ }
+ return 0
+}
+
+func (x *StatusReply) GetQueuedCount() uint32 {
+ if x != nil {
+ return x.QueuedCount
+ }
+ return 0
+}
+
+func (x *StatusReply) GetBaseFeeCount() uint32 {
+ if x != nil {
+ return x.BaseFeeCount
+ }
+ return 0
+}
+
+type NonceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Address *types.H160 `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+}
+
+func (x *NonceRequest) Reset() {
+ *x = NonceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NonceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NonceRequest) ProtoMessage() {}
+
+func (x *NonceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NonceRequest.ProtoReflect.Descriptor instead.
+func (*NonceRequest) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *NonceRequest) GetAddress() *types.H160 {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+type NonceReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"`
+ Nonce uint64 `protobuf:"varint,2,opt,name=nonce,proto3" json:"nonce,omitempty"`
+}
+
+func (x *NonceReply) Reset() {
+ *x = NonceReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NonceReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NonceReply) ProtoMessage() {}
+
+func (x *NonceReply) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NonceReply.ProtoReflect.Descriptor instead.
+func (*NonceReply) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *NonceReply) GetFound() bool {
+ if x != nil {
+ return x.Found
+ }
+ return false
+}
+
+func (x *NonceReply) GetNonce() uint64 {
+ if x != nil {
+ return x.Nonce
+ }
+ return 0
+}
+
+type AllReply_Tx struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TxnType AllReply_TxnType `protobuf:"varint,1,opt,name=txn_type,json=txnType,proto3,enum=txpool.AllReply_TxnType" json:"txn_type,omitempty"`
+ Sender *types.H160 `protobuf:"bytes,2,opt,name=sender,proto3" json:"sender,omitempty"`
+ RlpTx []byte `protobuf:"bytes,3,opt,name=rlp_tx,json=rlpTx,proto3" json:"rlp_tx,omitempty"`
+}
+
+func (x *AllReply_Tx) Reset() {
+ *x = AllReply_Tx{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AllReply_Tx) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AllReply_Tx) ProtoMessage() {}
+
+func (x *AllReply_Tx) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AllReply_Tx.ProtoReflect.Descriptor instead.
+func (*AllReply_Tx) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *AllReply_Tx) GetTxnType() AllReply_TxnType {
+ if x != nil {
+ return x.TxnType
+ }
+ return AllReply_PENDING
+}
+
+func (x *AllReply_Tx) GetSender() *types.H160 {
+ if x != nil {
+ return x.Sender
+ }
+ return nil
+}
+
+func (x *AllReply_Tx) GetRlpTx() []byte {
+ if x != nil {
+ return x.RlpTx
+ }
+ return nil
+}
+
+type PendingReply_Tx struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Sender *types.H160 `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"`
+ RlpTx []byte `protobuf:"bytes,2,opt,name=rlp_tx,json=rlpTx,proto3" json:"rlp_tx,omitempty"`
+ IsLocal bool `protobuf:"varint,3,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"`
+}
+
+func (x *PendingReply_Tx) Reset() {
+ *x = PendingReply_Tx{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_txpool_txpool_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PendingReply_Tx) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PendingReply_Tx) ProtoMessage() {}
+
+func (x *PendingReply_Tx) ProtoReflect() protoreflect.Message {
+ mi := &file_txpool_txpool_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PendingReply_Tx.ProtoReflect.Descriptor instead.
+func (*PendingReply_Tx) Descriptor() ([]byte, []int) {
+ return file_txpool_txpool_proto_rawDescGZIP(), []int{9, 0}
+}
+
+func (x *PendingReply_Tx) GetSender() *types.H160 {
+ if x != nil {
+ return x.Sender
+ }
+ return nil
+}
+
+func (x *PendingReply_Tx) GetRlpTx() []byte {
+ if x != nil {
+ return x.RlpTx
+ }
+ return nil
+}
+
+func (x *PendingReply_Tx) GetIsLocal() bool {
+ if x != nil {
+ return x.IsLocal
+ }
+ return false
+}
+
+var File_txpool_txpool_proto protoreflect.FileDescriptor
+
+var file_txpool_txpool_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x1a, 0x1b, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2f, 0x0a,
+ 0x08, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73,
+ 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x25,
+ 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07,
+ 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72,
+ 0x6c, 0x70, 0x54, 0x78, 0x73, 0x22, 0x54, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x12, 0x30, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x49, 0x6d, 0x70,
+ 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72,
+ 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x3a, 0x0a, 0x13, 0x54,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x23, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52,
+ 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x2c, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x17, 0x0a, 0x07,
+ 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72,
+ 0x6c, 0x70, 0x54, 0x78, 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x25, 0x0a, 0x0a, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x70, 0x6c, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x70, 0x6c, 0x54, 0x78, 0x73, 0x22, 0x0c, 0x0a, 0x0a,
+ 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xda, 0x01, 0x0a, 0x08, 0x41,
+ 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c,
+ 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03, 0x74, 0x78, 0x73, 0x1a, 0x75,
+ 0x0a, 0x02, 0x54, 0x78, 0x12, 0x33, 0x0a, 0x08, 0x74, 0x78, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e,
+ 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x07, 0x74, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x15,
+ 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05,
+ 0x72, 0x6c, 0x70, 0x54, 0x78, 0x22, 0x30, 0x0a, 0x07, 0x54, 0x78, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0a, 0x0a,
+ 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x53,
+ 0x45, 0x5f, 0x46, 0x45, 0x45, 0x10, 0x02, 0x22, 0x96, 0x01, 0x0a, 0x0c, 0x50, 0x65, 0x6e, 0x64,
+ 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x03, 0x74, 0x78, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x54, 0x78, 0x52, 0x03,
+ 0x74, 0x78, 0x73, 0x1a, 0x5b, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x15,
+ 0x0a, 0x06, 0x72, 0x6c, 0x70, 0x5f, 0x74, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05,
+ 0x72, 0x6c, 0x70, 0x54, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x7b, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x71, 0x75, 0x65,
+ 0x75, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65,
+ 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x35,
+ 0x0a, 0x0c, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25,
+ 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x38, 0x0a, 0x0a, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65,
+ 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2a,
+ 0x6c, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
+ 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e,
+ 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x01,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x46, 0x45, 0x45, 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x4c, 0x4f, 0x57, 0x10,
+ 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07,
+ 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54,
+ 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32, 0xec, 0x03,
+ 0x0a, 0x06, 0x54, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x31, 0x0a, 0x0b, 0x46, 0x69, 0x6e, 0x64, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68, 0x65,
+ 0x73, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x78, 0x48, 0x61, 0x73,
+ 0x68, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x12, 0x2e, 0x74, 0x78, 0x70,
+ 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10,
+ 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79,
+ 0x12, 0x46, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x1b, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
+ 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x03, 0x41, 0x6c, 0x6c, 0x12,
+ 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x41, 0x6c, 0x6c,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f,
+ 0x6c, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x33,
+ 0x0a, 0x05, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c,
+ 0x2e, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e,
+ 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x79, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x2e,
+ 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x4e, 0x6f, 0x6e,
+ 0x63, 0x65, 0x12, 0x14, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x78, 0x70, 0x6f, 0x6f,
+ 0x6c, 0x2e, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x11, 0x5a, 0x0f,
+ 0x2e, 0x2f, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x3b, 0x74, 0x78, 0x70, 0x6f, 0x6f, 0x6c, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_txpool_txpool_proto_rawDescOnce sync.Once
+ file_txpool_txpool_proto_rawDescData = file_txpool_txpool_proto_rawDesc
+)
+
+func file_txpool_txpool_proto_rawDescGZIP() []byte {
+ file_txpool_txpool_proto_rawDescOnce.Do(func() {
+ file_txpool_txpool_proto_rawDescData = protoimpl.X.CompressGZIP(file_txpool_txpool_proto_rawDescData)
+ })
+ return file_txpool_txpool_proto_rawDescData
+}
+
+var file_txpool_txpool_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_txpool_txpool_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_txpool_txpool_proto_goTypes = []interface{}{
+ (ImportResult)(0), // 0: txpool.ImportResult
+ (AllReply_TxnType)(0), // 1: txpool.AllReply.TxnType
+ (*TxHashes)(nil), // 2: txpool.TxHashes
+ (*AddRequest)(nil), // 3: txpool.AddRequest
+ (*AddReply)(nil), // 4: txpool.AddReply
+ (*TransactionsRequest)(nil), // 5: txpool.TransactionsRequest
+ (*TransactionsReply)(nil), // 6: txpool.TransactionsReply
+ (*OnAddRequest)(nil), // 7: txpool.OnAddRequest
+ (*OnAddReply)(nil), // 8: txpool.OnAddReply
+ (*AllRequest)(nil), // 9: txpool.AllRequest
+ (*AllReply)(nil), // 10: txpool.AllReply
+ (*PendingReply)(nil), // 11: txpool.PendingReply
+ (*StatusRequest)(nil), // 12: txpool.StatusRequest
+ (*StatusReply)(nil), // 13: txpool.StatusReply
+ (*NonceRequest)(nil), // 14: txpool.NonceRequest
+ (*NonceReply)(nil), // 15: txpool.NonceReply
+ (*AllReply_Tx)(nil), // 16: txpool.AllReply.Tx
+ (*PendingReply_Tx)(nil), // 17: txpool.PendingReply.Tx
+ (*types.H256)(nil), // 18: types.H256
+ (*types.H160)(nil), // 19: types.H160
+ (*emptypb.Empty)(nil), // 20: google.protobuf.Empty
+ (*types.VersionReply)(nil), // 21: types.VersionReply
+}
+var file_txpool_txpool_proto_depIdxs = []int32{
+ 18, // 0: txpool.TxHashes.hashes:type_name -> types.H256
+ 0, // 1: txpool.AddReply.imported:type_name -> txpool.ImportResult
+ 18, // 2: txpool.TransactionsRequest.hashes:type_name -> types.H256
+ 16, // 3: txpool.AllReply.txs:type_name -> txpool.AllReply.Tx
+ 17, // 4: txpool.PendingReply.txs:type_name -> txpool.PendingReply.Tx
+ 19, // 5: txpool.NonceRequest.address:type_name -> types.H160
+ 1, // 6: txpool.AllReply.Tx.txn_type:type_name -> txpool.AllReply.TxnType
+ 19, // 7: txpool.AllReply.Tx.sender:type_name -> types.H160
+ 19, // 8: txpool.PendingReply.Tx.sender:type_name -> types.H160
+ 20, // 9: txpool.Txpool.Version:input_type -> google.protobuf.Empty
+ 2, // 10: txpool.Txpool.FindUnknown:input_type -> txpool.TxHashes
+ 3, // 11: txpool.Txpool.Add:input_type -> txpool.AddRequest
+ 5, // 12: txpool.Txpool.Transactions:input_type -> txpool.TransactionsRequest
+ 9, // 13: txpool.Txpool.All:input_type -> txpool.AllRequest
+ 20, // 14: txpool.Txpool.Pending:input_type -> google.protobuf.Empty
+ 7, // 15: txpool.Txpool.OnAdd:input_type -> txpool.OnAddRequest
+ 12, // 16: txpool.Txpool.Status:input_type -> txpool.StatusRequest
+ 14, // 17: txpool.Txpool.Nonce:input_type -> txpool.NonceRequest
+ 21, // 18: txpool.Txpool.Version:output_type -> types.VersionReply
+ 2, // 19: txpool.Txpool.FindUnknown:output_type -> txpool.TxHashes
+ 4, // 20: txpool.Txpool.Add:output_type -> txpool.AddReply
+ 6, // 21: txpool.Txpool.Transactions:output_type -> txpool.TransactionsReply
+ 10, // 22: txpool.Txpool.All:output_type -> txpool.AllReply
+ 11, // 23: txpool.Txpool.Pending:output_type -> txpool.PendingReply
+ 8, // 24: txpool.Txpool.OnAdd:output_type -> txpool.OnAddReply
+ 13, // 25: txpool.Txpool.Status:output_type -> txpool.StatusReply
+ 15, // 26: txpool.Txpool.Nonce:output_type -> txpool.NonceReply
+ 18, // [18:27] is the sub-list for method output_type
+ 9, // [9:18] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_txpool_txpool_proto_init() }
+func file_txpool_txpool_proto_init() {
+ if File_txpool_txpool_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_txpool_txpool_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TxHashes); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransactionsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransactionsReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnAddRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OnAddReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PendingReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NonceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NonceReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AllReply_Tx); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_txpool_txpool_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PendingReply_Tx); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_txpool_txpool_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_txpool_txpool_proto_goTypes,
+ DependencyIndexes: file_txpool_txpool_proto_depIdxs,
+ EnumInfos: file_txpool_txpool_proto_enumTypes,
+ MessageInfos: file_txpool_txpool_proto_msgTypes,
+ }.Build()
+ File_txpool_txpool_proto = out.File
+ file_txpool_txpool_proto_rawDesc = nil
+ file_txpool_txpool_proto_goTypes = nil
+ file_txpool_txpool_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go b/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go
new file mode 100644
index 00000000000..d8c6da0d0a6
--- /dev/null
+++ b/erigon-lib/gointerfaces/txpool/txpool_grpc.pb.go
@@ -0,0 +1,455 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.24.2
+// source: txpool/txpool.proto
+
+package txpool
+
+import (
+ context "context"
+ types "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Txpool_Version_FullMethodName = "/txpool.Txpool/Version"
+ Txpool_FindUnknown_FullMethodName = "/txpool.Txpool/FindUnknown"
+ Txpool_Add_FullMethodName = "/txpool.Txpool/Add"
+ Txpool_Transactions_FullMethodName = "/txpool.Txpool/Transactions"
+ Txpool_All_FullMethodName = "/txpool.Txpool/All"
+ Txpool_Pending_FullMethodName = "/txpool.Txpool/Pending"
+ Txpool_OnAdd_FullMethodName = "/txpool.Txpool/OnAdd"
+ Txpool_Status_FullMethodName = "/txpool.Txpool/Status"
+ Txpool_Nonce_FullMethodName = "/txpool.Txpool/Nonce"
+)
+
+// TxpoolClient is the client API for Txpool service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type TxpoolClient interface {
+ // Version returns the service version number
+ Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error)
+ // preserves incoming order, changes amount, unknown hashes will be omitted
+ FindUnknown(ctx context.Context, in *TxHashes, opts ...grpc.CallOption) (*TxHashes, error)
+ // Expecting signed transactions. Preserves incoming order and amount
+ // Adding txs as local (use P2P to add remote txs)
+ Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddReply, error)
+ // preserves incoming order and amount, if some transaction doesn't exists in pool - returns nil in this slot
+ Transactions(ctx context.Context, in *TransactionsRequest, opts ...grpc.CallOption) (*TransactionsReply, error)
+ // returns all transactions from tx pool
+ All(ctx context.Context, in *AllRequest, opts ...grpc.CallOption) (*AllReply, error)
+ // Returns all pending (processable) transactions, in ready-for-mining order
+ Pending(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingReply, error)
+ // subscribe to new transactions add event
+ OnAdd(ctx context.Context, in *OnAddRequest, opts ...grpc.CallOption) (Txpool_OnAddClient, error)
+ // returns high level status
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusReply, error)
+ // returns nonce for given account
+ Nonce(ctx context.Context, in *NonceRequest, opts ...grpc.CallOption) (*NonceReply, error)
+}
+
+type txpoolClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewTxpoolClient(cc grpc.ClientConnInterface) TxpoolClient {
+ return &txpoolClient{cc}
+}
+
+func (c *txpoolClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*types.VersionReply, error) {
+ out := new(types.VersionReply)
+ err := c.cc.Invoke(ctx, Txpool_Version_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) FindUnknown(ctx context.Context, in *TxHashes, opts ...grpc.CallOption) (*TxHashes, error) {
+ out := new(TxHashes)
+ err := c.cc.Invoke(ctx, Txpool_FindUnknown_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddReply, error) {
+ out := new(AddReply)
+ err := c.cc.Invoke(ctx, Txpool_Add_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) Transactions(ctx context.Context, in *TransactionsRequest, opts ...grpc.CallOption) (*TransactionsReply, error) {
+ out := new(TransactionsReply)
+ err := c.cc.Invoke(ctx, Txpool_Transactions_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) All(ctx context.Context, in *AllRequest, opts ...grpc.CallOption) (*AllReply, error) {
+ out := new(AllReply)
+ err := c.cc.Invoke(ctx, Txpool_All_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) Pending(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PendingReply, error) {
+ out := new(PendingReply)
+ err := c.cc.Invoke(ctx, Txpool_Pending_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) OnAdd(ctx context.Context, in *OnAddRequest, opts ...grpc.CallOption) (Txpool_OnAddClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Txpool_ServiceDesc.Streams[0], Txpool_OnAdd_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &txpoolOnAddClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Txpool_OnAddClient interface {
+ Recv() (*OnAddReply, error)
+ grpc.ClientStream
+}
+
+type txpoolOnAddClient struct {
+ grpc.ClientStream
+}
+
+func (x *txpoolOnAddClient) Recv() (*OnAddReply, error) {
+ m := new(OnAddReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *txpoolClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusReply, error) {
+ out := new(StatusReply)
+ err := c.cc.Invoke(ctx, Txpool_Status_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *txpoolClient) Nonce(ctx context.Context, in *NonceRequest, opts ...grpc.CallOption) (*NonceReply, error) {
+ out := new(NonceReply)
+ err := c.cc.Invoke(ctx, Txpool_Nonce_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// TxpoolServer is the server API for Txpool service.
+// All implementations must embed UnimplementedTxpoolServer
+// for forward compatibility
+type TxpoolServer interface {
+ // Version returns the service version number
+ Version(context.Context, *emptypb.Empty) (*types.VersionReply, error)
+ // preserves incoming order, changes amount, unknown hashes will be omitted
+ FindUnknown(context.Context, *TxHashes) (*TxHashes, error)
+ // Expecting signed transactions. Preserves incoming order and amount
+ // Adding txs as local (use P2P to add remote txs)
+ Add(context.Context, *AddRequest) (*AddReply, error)
+ // preserves incoming order and amount, if some transaction doesn't exists in pool - returns nil in this slot
+ Transactions(context.Context, *TransactionsRequest) (*TransactionsReply, error)
+ // returns all transactions from tx pool
+ All(context.Context, *AllRequest) (*AllReply, error)
+ // Returns all pending (processable) transactions, in ready-for-mining order
+ Pending(context.Context, *emptypb.Empty) (*PendingReply, error)
+ // subscribe to new transactions add event
+ OnAdd(*OnAddRequest, Txpool_OnAddServer) error
+ // returns high level status
+ Status(context.Context, *StatusRequest) (*StatusReply, error)
+ // returns nonce for given account
+ Nonce(context.Context, *NonceRequest) (*NonceReply, error)
+ mustEmbedUnimplementedTxpoolServer()
+}
+
+// UnimplementedTxpoolServer must be embedded to have forward compatible implementations.
+type UnimplementedTxpoolServer struct {
+}
+
+func (UnimplementedTxpoolServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
+}
+func (UnimplementedTxpoolServer) FindUnknown(context.Context, *TxHashes) (*TxHashes, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FindUnknown not implemented")
+}
+func (UnimplementedTxpoolServer) Add(context.Context, *AddRequest) (*AddReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Add not implemented")
+}
+func (UnimplementedTxpoolServer) Transactions(context.Context, *TransactionsRequest) (*TransactionsReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Transactions not implemented")
+}
+func (UnimplementedTxpoolServer) All(context.Context, *AllRequest) (*AllReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method All not implemented")
+}
+func (UnimplementedTxpoolServer) Pending(context.Context, *emptypb.Empty) (*PendingReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Pending not implemented")
+}
+func (UnimplementedTxpoolServer) OnAdd(*OnAddRequest, Txpool_OnAddServer) error {
+ return status.Errorf(codes.Unimplemented, "method OnAdd not implemented")
+}
+func (UnimplementedTxpoolServer) Status(context.Context, *StatusRequest) (*StatusReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (UnimplementedTxpoolServer) Nonce(context.Context, *NonceRequest) (*NonceReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Nonce not implemented")
+}
+func (UnimplementedTxpoolServer) mustEmbedUnimplementedTxpoolServer() {}
+
+// UnsafeTxpoolServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to TxpoolServer will
+// result in compilation errors.
+type UnsafeTxpoolServer interface {
+ mustEmbedUnimplementedTxpoolServer()
+}
+
+func RegisterTxpoolServer(s grpc.ServiceRegistrar, srv TxpoolServer) {
+ s.RegisterService(&Txpool_ServiceDesc, srv)
+}
+
+func _Txpool_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Version(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Version_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Version(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_FindUnknown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TxHashes)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).FindUnknown(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_FindUnknown_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).FindUnknown(ctx, req.(*TxHashes))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Add(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Add_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Add(ctx, req.(*AddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_Transactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TransactionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Transactions(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Transactions_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Transactions(ctx, req.(*TransactionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_All_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AllRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).All(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_All_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).All(ctx, req.(*AllRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_Pending_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Pending(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Pending_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Pending(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_OnAdd_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OnAddRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(TxpoolServer).OnAdd(m, &txpoolOnAddServer{stream})
+}
+
+type Txpool_OnAddServer interface {
+ Send(*OnAddReply) error
+ grpc.ServerStream
+}
+
+type txpoolOnAddServer struct {
+ grpc.ServerStream
+}
+
+func (x *txpoolOnAddServer) Send(m *OnAddReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Txpool_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Status(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Status_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Status(ctx, req.(*StatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Txpool_Nonce_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NonceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TxpoolServer).Nonce(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Txpool_Nonce_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TxpoolServer).Nonce(ctx, req.(*NonceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Txpool_ServiceDesc is the grpc.ServiceDesc for Txpool service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Txpool_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "txpool.Txpool",
+ HandlerType: (*TxpoolServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Version",
+ Handler: _Txpool_Version_Handler,
+ },
+ {
+ MethodName: "FindUnknown",
+ Handler: _Txpool_FindUnknown_Handler,
+ },
+ {
+ MethodName: "Add",
+ Handler: _Txpool_Add_Handler,
+ },
+ {
+ MethodName: "Transactions",
+ Handler: _Txpool_Transactions_Handler,
+ },
+ {
+ MethodName: "All",
+ Handler: _Txpool_All_Handler,
+ },
+ {
+ MethodName: "Pending",
+ Handler: _Txpool_Pending_Handler,
+ },
+ {
+ MethodName: "Status",
+ Handler: _Txpool_Status_Handler,
+ },
+ {
+ MethodName: "Nonce",
+ Handler: _Txpool_Nonce_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "OnAdd",
+ Handler: _Txpool_OnAdd_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "txpool/txpool.proto",
+}
diff --git a/erigon-lib/gointerfaces/type_utils.go b/erigon-lib/gointerfaces/type_utils.go
new file mode 100644
index 00000000000..e2dd156a551
--- /dev/null
+++ b/erigon-lib/gointerfaces/type_utils.go
@@ -0,0 +1,140 @@
+/*
+ Copyright 2021 Erigon contributors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package gointerfaces
+
+import (
+ "encoding/binary"
+
+ "github.com/holiman/uint256"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+)
+
+func ConvertH2048ToBloom(h2048 *types.H2048) [256]byte {
+ var bloom [256]byte
+ copy(bloom[:], ConvertH512ToBytes(h2048.Hi.Hi))
+ copy(bloom[64:], ConvertH512ToBytes(h2048.Hi.Lo))
+ copy(bloom[128:], ConvertH512ToBytes(h2048.Lo.Hi))
+ copy(bloom[192:], ConvertH512ToBytes(h2048.Lo.Lo))
+ return bloom
+}
+
+func ConvertBytesToH2048(data []byte) *types.H2048 {
+ return &types.H2048{
+ Hi: &types.H1024{
+ Hi: ConvertBytesToH512(data),
+ Lo: ConvertBytesToH512(data[64:]),
+ },
+ Lo: &types.H1024{
+ Hi: ConvertBytesToH512(data[128:]),
+ Lo: ConvertBytesToH512(data[192:]),
+ },
+ }
+}
+
+func ConvertH256ToHash(h256 *types.H256) [32]byte {
+ var hash [32]byte
+ binary.BigEndian.PutUint64(hash[0:], h256.Hi.Hi)
+ binary.BigEndian.PutUint64(hash[8:], h256.Hi.Lo)
+ binary.BigEndian.PutUint64(hash[16:], h256.Lo.Hi)
+ binary.BigEndian.PutUint64(hash[24:], h256.Lo.Lo)
+ return hash
+}
+
+func ConvertH512ToHash(h512 *types.H512) [64]byte {
+ var b [64]byte
+ binary.BigEndian.PutUint64(b[0:], h512.Hi.Hi.Hi)
+ binary.BigEndian.PutUint64(b[8:], h512.Hi.Hi.Lo)
+ binary.BigEndian.PutUint64(b[16:], h512.Hi.Lo.Hi)
+ binary.BigEndian.PutUint64(b[24:], h512.Hi.Lo.Lo)
+ binary.BigEndian.PutUint64(b[32:], h512.Lo.Hi.Hi)
+ binary.BigEndian.PutUint64(b[40:], h512.Lo.Hi.Lo)
+ binary.BigEndian.PutUint64(b[48:], h512.Lo.Lo.Hi)
+ binary.BigEndian.PutUint64(b[56:], h512.Lo.Lo.Lo)
+ return b
+}
+
+func ConvertHashesToH256(hashes [][32]byte) []*types.H256 {
+ res := make([]*types.H256, len(hashes))
+ for i := range hashes {
+ res[i] = ConvertHashToH256(hashes[i])
+ }
+ return res
+}
+
+func ConvertHashToH256(hash [32]byte) *types.H256 {
+ return &types.H256{
+ Lo: &types.H128{Lo: binary.BigEndian.Uint64(hash[24:]), Hi: binary.BigEndian.Uint64(hash[16:])},
+ Hi: &types.H128{Lo: binary.BigEndian.Uint64(hash[8:]), Hi: binary.BigEndian.Uint64(hash[0:])},
+ }
+}
+
+func ConvertHashToH512(hash [64]byte) *types.H512 {
+ return ConvertBytesToH512(hash[:])
+}
+
+func ConvertH160toAddress(h160 *types.H160) [20]byte {
+ var addr [20]byte
+ binary.BigEndian.PutUint64(addr[0:], h160.Hi.Hi)
+ binary.BigEndian.PutUint64(addr[8:], h160.Hi.Lo)
+ binary.BigEndian.PutUint32(addr[16:], h160.Lo)
+ return addr
+}
+
+func ConvertAddressToH160(addr [20]byte) *types.H160 {
+ return &types.H160{
+ Lo: binary.BigEndian.Uint32(addr[16:]),
+ Hi: &types.H128{Lo: binary.BigEndian.Uint64(addr[8:]), Hi: binary.BigEndian.Uint64(addr[0:])},
+ }
+}
+
+func ConvertH256ToUint256Int(h256 *types.H256) *uint256.Int {
+ // Note: uint256.Int is an array of 4 uint64 in little-endian order, i.e. most significant word is [3]
+ var i uint256.Int
+ i[3] = h256.Hi.Hi
+ i[2] = h256.Hi.Lo
+ i[1] = h256.Lo.Hi
+ i[0] = h256.Lo.Lo
+ return &i
+}
+
+func ConvertUint256IntToH256(i *uint256.Int) *types.H256 {
+ // Note: uint256.Int is an array of 4 uint64 in little-endian order, i.e. most significant word is [3]
+ return &types.H256{
+ Lo: &types.H128{Lo: i[0], Hi: i[1]},
+ Hi: &types.H128{Lo: i[2], Hi: i[3]},
+ }
+}
+
+func ConvertH512ToBytes(h512 *types.H512) []byte {
+ b := ConvertH512ToHash(h512)
+ return b[:]
+}
+
+func ConvertBytesToH512(b []byte) *types.H512 {
+ if len(b) < 64 {
+ var b1 [64]byte
+ copy(b1[:], b)
+ b = b1[:]
+ }
+ return &types.H512{
+ Lo: &types.H256{
+ Lo: &types.H128{Lo: binary.BigEndian.Uint64(b[56:]), Hi: binary.BigEndian.Uint64(b[48:])},
+ Hi: &types.H128{Lo: binary.BigEndian.Uint64(b[40:]), Hi: binary.BigEndian.Uint64(b[32:])},
+ },
+ Hi: &types.H256{
+ Lo: &types.H128{Lo: binary.BigEndian.Uint64(b[24:]), Hi: binary.BigEndian.Uint64(b[16:])},
+ Hi: &types.H128{Lo: binary.BigEndian.Uint64(b[8:]), Hi: binary.BigEndian.Uint64(b[0:])},
+ },
+ }
+}
diff --git a/erigon-lib/gointerfaces/types/types.pb.go b/erigon-lib/gointerfaces/types/types.pb.go
new file mode 100644
index 00000000000..adae72de7ec
--- /dev/null
+++ b/erigon-lib/gointerfaces/types/types.pb.go
@@ -0,0 +1,1520 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.31.0
+// protoc v4.24.2
+// source: types/types.proto
+
+package types
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type H128 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi uint64 `protobuf:"varint,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo uint64 `protobuf:"varint,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H128) Reset() {
+ *x = H128{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H128) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H128) ProtoMessage() {}
+
+func (x *H128) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H128.ProtoReflect.Descriptor instead.
+func (*H128) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *H128) GetHi() uint64 {
+ if x != nil {
+ return x.Hi
+ }
+ return 0
+}
+
+func (x *H128) GetLo() uint64 {
+ if x != nil {
+ return x.Lo
+ }
+ return 0
+}
+
+type H160 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi *H128 `protobuf:"bytes,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo uint32 `protobuf:"varint,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H160) Reset() {
+ *x = H160{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H160) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H160) ProtoMessage() {}
+
+func (x *H160) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H160.ProtoReflect.Descriptor instead.
+func (*H160) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *H160) GetHi() *H128 {
+ if x != nil {
+ return x.Hi
+ }
+ return nil
+}
+
+func (x *H160) GetLo() uint32 {
+ if x != nil {
+ return x.Lo
+ }
+ return 0
+}
+
+type H256 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi *H128 `protobuf:"bytes,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo *H128 `protobuf:"bytes,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H256) Reset() {
+ *x = H256{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H256) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H256) ProtoMessage() {}
+
+func (x *H256) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H256.ProtoReflect.Descriptor instead.
+func (*H256) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *H256) GetHi() *H128 {
+ if x != nil {
+ return x.Hi
+ }
+ return nil
+}
+
+func (x *H256) GetLo() *H128 {
+ if x != nil {
+ return x.Lo
+ }
+ return nil
+}
+
+type H512 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi *H256 `protobuf:"bytes,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo *H256 `protobuf:"bytes,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H512) Reset() {
+ *x = H512{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H512) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H512) ProtoMessage() {}
+
+func (x *H512) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H512.ProtoReflect.Descriptor instead.
+func (*H512) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *H512) GetHi() *H256 {
+ if x != nil {
+ return x.Hi
+ }
+ return nil
+}
+
+func (x *H512) GetLo() *H256 {
+ if x != nil {
+ return x.Lo
+ }
+ return nil
+}
+
+type H1024 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi *H512 `protobuf:"bytes,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo *H512 `protobuf:"bytes,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H1024) Reset() {
+ *x = H1024{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H1024) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H1024) ProtoMessage() {}
+
+func (x *H1024) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H1024.ProtoReflect.Descriptor instead.
+func (*H1024) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *H1024) GetHi() *H512 {
+ if x != nil {
+ return x.Hi
+ }
+ return nil
+}
+
+func (x *H1024) GetLo() *H512 {
+ if x != nil {
+ return x.Lo
+ }
+ return nil
+}
+
+type H2048 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hi *H1024 `protobuf:"bytes,1,opt,name=hi,proto3" json:"hi,omitempty"`
+ Lo *H1024 `protobuf:"bytes,2,opt,name=lo,proto3" json:"lo,omitempty"`
+}
+
+func (x *H2048) Reset() {
+ *x = H2048{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *H2048) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*H2048) ProtoMessage() {}
+
+func (x *H2048) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use H2048.ProtoReflect.Descriptor instead.
+func (*H2048) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *H2048) GetHi() *H1024 {
+ if x != nil {
+ return x.Hi
+ }
+ return nil
+}
+
+func (x *H2048) GetLo() *H1024 {
+ if x != nil {
+ return x.Lo
+ }
+ return nil
+}
+
+// Reply message containing the current service version on the service side
+type VersionReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+ Patch uint32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
+}
+
+func (x *VersionReply) Reset() {
+ *x = VersionReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VersionReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VersionReply) ProtoMessage() {}
+
+func (x *VersionReply) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead.
+func (*VersionReply) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *VersionReply) GetMajor() uint32 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *VersionReply) GetMinor() uint32 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+func (x *VersionReply) GetPatch() uint32 {
+ if x != nil {
+ return x.Patch
+ }
+ return 0
+}
+
+// ------------------------------------------------------------------------
+// Engine API types
+// See https://github.com/ethereum/execution-apis/blob/main/src/engine
+type ExecutionPayload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // v1 - no withdrawals, v2 - with withdrawals, v3 - with blob gas
+ ParentHash *H256 `protobuf:"bytes,2,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"`
+ Coinbase *H160 `protobuf:"bytes,3,opt,name=coinbase,proto3" json:"coinbase,omitempty"`
+ StateRoot *H256 `protobuf:"bytes,4,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"`
+ ReceiptRoot *H256 `protobuf:"bytes,5,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"`
+ LogsBloom *H2048 `protobuf:"bytes,6,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"`
+ PrevRandao *H256 `protobuf:"bytes,7,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"`
+ BlockNumber uint64 `protobuf:"varint,8,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
+ GasLimit uint64 `protobuf:"varint,9,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"`
+ GasUsed uint64 `protobuf:"varint,10,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"`
+ Timestamp uint64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"`
+ BaseFeePerGas *H256 `protobuf:"bytes,13,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3" json:"base_fee_per_gas,omitempty"`
+ BlockHash *H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"`
+ Transactions [][]byte `protobuf:"bytes,15,rep,name=transactions,proto3" json:"transactions,omitempty"`
+ Withdrawals []*Withdrawal `protobuf:"bytes,16,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"`
+ BlobGasUsed *uint64 `protobuf:"varint,17,opt,name=blob_gas_used,json=blobGasUsed,proto3,oneof" json:"blob_gas_used,omitempty"`
+ ExcessBlobGas *uint64 `protobuf:"varint,18,opt,name=excess_blob_gas,json=excessBlobGas,proto3,oneof" json:"excess_blob_gas,omitempty"`
+}
+
+func (x *ExecutionPayload) Reset() {
+ *x = ExecutionPayload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExecutionPayload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExecutionPayload) ProtoMessage() {}
+
+func (x *ExecutionPayload) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExecutionPayload.ProtoReflect.Descriptor instead.
+func (*ExecutionPayload) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ExecutionPayload) GetVersion() uint32 {
+ if x != nil {
+ return x.Version
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetParentHash() *H256 {
+ if x != nil {
+ return x.ParentHash
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetCoinbase() *H160 {
+ if x != nil {
+ return x.Coinbase
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetStateRoot() *H256 {
+ if x != nil {
+ return x.StateRoot
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetReceiptRoot() *H256 {
+ if x != nil {
+ return x.ReceiptRoot
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetLogsBloom() *H2048 {
+ if x != nil {
+ return x.LogsBloom
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetPrevRandao() *H256 {
+ if x != nil {
+ return x.PrevRandao
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetBlockNumber() uint64 {
+ if x != nil {
+ return x.BlockNumber
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetGasLimit() uint64 {
+ if x != nil {
+ return x.GasLimit
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetGasUsed() uint64 {
+ if x != nil {
+ return x.GasUsed
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetExtraData() []byte {
+ if x != nil {
+ return x.ExtraData
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetBaseFeePerGas() *H256 {
+ if x != nil {
+ return x.BaseFeePerGas
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetBlockHash() *H256 {
+ if x != nil {
+ return x.BlockHash
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetTransactions() [][]byte {
+ if x != nil {
+ return x.Transactions
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetWithdrawals() []*Withdrawal {
+ if x != nil {
+ return x.Withdrawals
+ }
+ return nil
+}
+
+func (x *ExecutionPayload) GetBlobGasUsed() uint64 {
+ if x != nil && x.BlobGasUsed != nil {
+ return *x.BlobGasUsed
+ }
+ return 0
+}
+
+func (x *ExecutionPayload) GetExcessBlobGas() uint64 {
+ if x != nil && x.ExcessBlobGas != nil {
+ return *x.ExcessBlobGas
+ }
+ return 0
+}
+
+type Withdrawal struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+ ValidatorIndex uint64 `protobuf:"varint,2,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"`
+ Address *H160 `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
+ Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"`
+}
+
+func (x *Withdrawal) Reset() {
+ *x = Withdrawal{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Withdrawal) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Withdrawal) ProtoMessage() {}
+
+func (x *Withdrawal) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Withdrawal.ProtoReflect.Descriptor instead.
+func (*Withdrawal) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Withdrawal) GetIndex() uint64 {
+ if x != nil {
+ return x.Index
+ }
+ return 0
+}
+
+func (x *Withdrawal) GetValidatorIndex() uint64 {
+ if x != nil {
+ return x.ValidatorIndex
+ }
+ return 0
+}
+
+func (x *Withdrawal) GetAddress() *H160 {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *Withdrawal) GetAmount() uint64 {
+ if x != nil {
+ return x.Amount
+ }
+ return 0
+}
+
+type BlobsBundleV1 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // TODO(eip-4844): define a protobuf message for type KZGCommitment
+ Commitments [][]byte `protobuf:"bytes,1,rep,name=commitments,proto3" json:"commitments,omitempty"`
+ // TODO(eip-4844): define a protobuf message for type Blob
+ Blobs [][]byte `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"`
+ Proofs [][]byte `protobuf:"bytes,3,rep,name=proofs,proto3" json:"proofs,omitempty"`
+}
+
+func (x *BlobsBundleV1) Reset() {
+ *x = BlobsBundleV1{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlobsBundleV1) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlobsBundleV1) ProtoMessage() {}
+
+func (x *BlobsBundleV1) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlobsBundleV1.ProtoReflect.Descriptor instead.
+func (*BlobsBundleV1) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *BlobsBundleV1) GetCommitments() [][]byte {
+ if x != nil {
+ return x.Commitments
+ }
+ return nil
+}
+
+func (x *BlobsBundleV1) GetBlobs() [][]byte {
+ if x != nil {
+ return x.Blobs
+ }
+ return nil
+}
+
+func (x *BlobsBundleV1) GetProofs() [][]byte {
+ if x != nil {
+ return x.Proofs
+ }
+ return nil
+}
+
+type NodeInfoPorts struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Discovery uint32 `protobuf:"varint,1,opt,name=discovery,proto3" json:"discovery,omitempty"`
+ Listener uint32 `protobuf:"varint,2,opt,name=listener,proto3" json:"listener,omitempty"`
+}
+
+func (x *NodeInfoPorts) Reset() {
+ *x = NodeInfoPorts{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeInfoPorts) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeInfoPorts) ProtoMessage() {}
+
+func (x *NodeInfoPorts) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeInfoPorts.ProtoReflect.Descriptor instead.
+func (*NodeInfoPorts) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *NodeInfoPorts) GetDiscovery() uint32 {
+ if x != nil {
+ return x.Discovery
+ }
+ return 0
+}
+
+func (x *NodeInfoPorts) GetListener() uint32 {
+ if x != nil {
+ return x.Listener
+ }
+ return 0
+}
+
+type NodeInfoReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ Enode string `protobuf:"bytes,3,opt,name=enode,proto3" json:"enode,omitempty"`
+ Enr string `protobuf:"bytes,4,opt,name=enr,proto3" json:"enr,omitempty"`
+ Ports *NodeInfoPorts `protobuf:"bytes,5,opt,name=ports,proto3" json:"ports,omitempty"`
+ ListenerAddr string `protobuf:"bytes,6,opt,name=listener_addr,json=listenerAddr,proto3" json:"listener_addr,omitempty"`
+ Protocols []byte `protobuf:"bytes,7,opt,name=protocols,proto3" json:"protocols,omitempty"`
+}
+
+func (x *NodeInfoReply) Reset() {
+ *x = NodeInfoReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeInfoReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeInfoReply) ProtoMessage() {}
+
+func (x *NodeInfoReply) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeInfoReply.ProtoReflect.Descriptor instead.
+func (*NodeInfoReply) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *NodeInfoReply) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *NodeInfoReply) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NodeInfoReply) GetEnode() string {
+ if x != nil {
+ return x.Enode
+ }
+ return ""
+}
+
+func (x *NodeInfoReply) GetEnr() string {
+ if x != nil {
+ return x.Enr
+ }
+ return ""
+}
+
+func (x *NodeInfoReply) GetPorts() *NodeInfoPorts {
+ if x != nil {
+ return x.Ports
+ }
+ return nil
+}
+
+func (x *NodeInfoReply) GetListenerAddr() string {
+ if x != nil {
+ return x.ListenerAddr
+ }
+ return ""
+}
+
+func (x *NodeInfoReply) GetProtocols() []byte {
+ if x != nil {
+ return x.Protocols
+ }
+ return nil
+}
+
+type PeerInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ Enode string `protobuf:"bytes,3,opt,name=enode,proto3" json:"enode,omitempty"`
+ Enr string `protobuf:"bytes,4,opt,name=enr,proto3" json:"enr,omitempty"`
+ Caps []string `protobuf:"bytes,5,rep,name=caps,proto3" json:"caps,omitempty"`
+ ConnLocalAddr string `protobuf:"bytes,6,opt,name=conn_local_addr,json=connLocalAddr,proto3" json:"conn_local_addr,omitempty"`
+ ConnRemoteAddr string `protobuf:"bytes,7,opt,name=conn_remote_addr,json=connRemoteAddr,proto3" json:"conn_remote_addr,omitempty"`
+ ConnIsInbound bool `protobuf:"varint,8,opt,name=conn_is_inbound,json=connIsInbound,proto3" json:"conn_is_inbound,omitempty"`
+ ConnIsTrusted bool `protobuf:"varint,9,opt,name=conn_is_trusted,json=connIsTrusted,proto3" json:"conn_is_trusted,omitempty"`
+ ConnIsStatic bool `protobuf:"varint,10,opt,name=conn_is_static,json=connIsStatic,proto3" json:"conn_is_static,omitempty"`
+}
+
+func (x *PeerInfo) Reset() {
+ *x = PeerInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PeerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerInfo) ProtoMessage() {}
+
+func (x *PeerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerInfo.ProtoReflect.Descriptor instead.
+func (*PeerInfo) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *PeerInfo) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetEnode() string {
+ if x != nil {
+ return x.Enode
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetEnr() string {
+ if x != nil {
+ return x.Enr
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetCaps() []string {
+ if x != nil {
+ return x.Caps
+ }
+ return nil
+}
+
+func (x *PeerInfo) GetConnLocalAddr() string {
+ if x != nil {
+ return x.ConnLocalAddr
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetConnRemoteAddr() string {
+ if x != nil {
+ return x.ConnRemoteAddr
+ }
+ return ""
+}
+
+func (x *PeerInfo) GetConnIsInbound() bool {
+ if x != nil {
+ return x.ConnIsInbound
+ }
+ return false
+}
+
+func (x *PeerInfo) GetConnIsTrusted() bool {
+ if x != nil {
+ return x.ConnIsTrusted
+ }
+ return false
+}
+
+func (x *PeerInfo) GetConnIsStatic() bool {
+ if x != nil {
+ return x.ConnIsStatic
+ }
+ return false
+}
+
+type ExecutionPayloadBodyV1 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Transactions [][]byte `protobuf:"bytes,1,rep,name=transactions,proto3" json:"transactions,omitempty"`
+ Withdrawals []*Withdrawal `protobuf:"bytes,2,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"`
+}
+
+func (x *ExecutionPayloadBodyV1) Reset() {
+ *x = ExecutionPayloadBodyV1{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_types_types_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExecutionPayloadBodyV1) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExecutionPayloadBodyV1) ProtoMessage() {}
+
+func (x *ExecutionPayloadBodyV1) ProtoReflect() protoreflect.Message {
+ mi := &file_types_types_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExecutionPayloadBodyV1.ProtoReflect.Descriptor instead.
+func (*ExecutionPayloadBodyV1) Descriptor() ([]byte, []int) {
+ return file_types_types_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *ExecutionPayloadBodyV1) GetTransactions() [][]byte {
+ if x != nil {
+ return x.Transactions
+ }
+ return nil
+}
+
+func (x *ExecutionPayloadBodyV1) GetWithdrawals() []*Withdrawal {
+ if x != nil {
+ return x.Withdrawals
+ }
+ return nil
+}
+
+var file_types_types_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 50001,
+ Name: "types.service_major_version",
+ Tag: "varint,50001,opt,name=service_major_version",
+ Filename: "types/types.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 50002,
+ Name: "types.service_minor_version",
+ Tag: "varint,50002,opt,name=service_minor_version",
+ Filename: "types/types.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 50003,
+ Name: "types.service_patch_version",
+ Tag: "varint,50003,opt,name=service_patch_version",
+ Filename: "types/types.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional uint32 service_major_version = 50001;
+ E_ServiceMajorVersion = &file_types_types_proto_extTypes[0]
+ // optional uint32 service_minor_version = 50002;
+ E_ServiceMinorVersion = &file_types_types_proto_extTypes[1]
+ // optional uint32 service_patch_version = 50003;
+ E_ServicePatchVersion = &file_types_types_proto_extTypes[2]
+)
+
+var File_types_types_proto protoreflect.FileDescriptor
+
+var file_types_types_proto_rawDesc = []byte{
+ 0x0a, 0x11, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x04,
+ 0x48, 0x31, 0x32, 0x38, 0x12, 0x0e, 0x0a, 0x02, 0x68, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x02, 0x68, 0x69, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x02, 0x6c, 0x6f, 0x22, 0x33, 0x0a, 0x04, 0x48, 0x31, 0x36, 0x30, 0x12, 0x1b, 0x0a, 0x02,
+ 0x68, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x48, 0x31, 0x32, 0x38, 0x52, 0x02, 0x68, 0x69, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x6f, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x6c, 0x6f, 0x22, 0x40, 0x0a, 0x04, 0x48, 0x32, 0x35,
+ 0x36, 0x12, 0x1b, 0x0a, 0x02, 0x68, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x32, 0x38, 0x52, 0x02, 0x68, 0x69, 0x12, 0x1b,
+ 0x0a, 0x02, 0x6c, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x2e, 0x48, 0x31, 0x32, 0x38, 0x52, 0x02, 0x6c, 0x6f, 0x22, 0x40, 0x0a, 0x04, 0x48,
+ 0x35, 0x31, 0x32, 0x12, 0x1b, 0x0a, 0x02, 0x68, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x02, 0x68, 0x69,
+ 0x12, 0x1b, 0x0a, 0x02, 0x6c, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x02, 0x6c, 0x6f, 0x22, 0x41, 0x0a,
+ 0x05, 0x48, 0x31, 0x30, 0x32, 0x34, 0x12, 0x1b, 0x0a, 0x02, 0x68, 0x69, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52,
+ 0x02, 0x68, 0x69, 0x12, 0x1b, 0x0a, 0x02, 0x6c, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x35, 0x31, 0x32, 0x52, 0x02, 0x6c, 0x6f,
+ 0x22, 0x43, 0x0a, 0x05, 0x48, 0x32, 0x30, 0x34, 0x38, 0x12, 0x1c, 0x0a, 0x02, 0x68, 0x69, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31,
+ 0x30, 0x32, 0x34, 0x52, 0x02, 0x68, 0x69, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x6f, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x30, 0x32,
+ 0x34, 0x52, 0x02, 0x6c, 0x6f, 0x22, 0x50, 0x0a, 0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d,
+ 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f,
+ 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x89, 0x06, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x31, 0x36, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x0a,
+ 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x09,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x0a, 0x0c, 0x72, 0x65, 0x63,
+ 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0b, 0x72, 0x65,
+ 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x6c, 0x6f, 0x67,
+ 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x30, 0x34, 0x38, 0x52, 0x09, 0x6c, 0x6f, 0x67,
+ 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72,
+ 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x52, 0x61,
+ 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c,
+ 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12,
+ 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x10,
+ 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47,
+ 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68,
+ 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48,
+ 0x32, 0x35, 0x36, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22,
+ 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0f,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c,
+ 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68,
+ 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x62, 0x5f,
+ 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00,
+ 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x88, 0x01, 0x01,
+ 0x12, 0x2b, 0x0a, 0x0f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f,
+ 0x67, 0x61, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x78, 0x63,
+ 0x65, 0x73, 0x73, 0x42, 0x6c, 0x6f, 0x62, 0x47, 0x61, 0x73, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a,
+ 0x0e, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x42,
+ 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f,
+ 0x67, 0x61, 0x73, 0x22, 0x8a, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77,
+ 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65,
+ 0x78, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52,
+ 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74,
+ 0x22, 0x5f, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56,
+ 0x31, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x6f,
+ 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66,
+ 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72,
+ 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x22, 0xca, 0x01, 0x0a,
+ 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x6f,
+ 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x52,
+ 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c,
+ 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x50, 0x65,
+ 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e,
+ 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65,
+ 0x6e, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x28,
+ 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64,
+ 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x52, 0x65,
+ 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x5f, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64,
+ 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x73,
+ 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49,
+ 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x5f, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x71,
+ 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b,
+ 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72,
+ 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c,
+ 0x73, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x61, 0x6a,
+ 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd1, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6a, 0x6f, 0x72, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd2, 0x86, 0x03,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x69, 0x6e,
+ 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0xd3, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x5a,
+ 0x0d, 0x2e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_types_types_proto_rawDescOnce sync.Once
+ file_types_types_proto_rawDescData = file_types_types_proto_rawDesc
+)
+
+func file_types_types_proto_rawDescGZIP() []byte {
+ file_types_types_proto_rawDescOnce.Do(func() {
+ file_types_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_types_types_proto_rawDescData)
+ })
+ return file_types_types_proto_rawDescData
+}
+
+var file_types_types_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
+var file_types_types_proto_goTypes = []interface{}{
+ (*H128)(nil), // 0: types.H128
+ (*H160)(nil), // 1: types.H160
+ (*H256)(nil), // 2: types.H256
+ (*H512)(nil), // 3: types.H512
+ (*H1024)(nil), // 4: types.H1024
+ (*H2048)(nil), // 5: types.H2048
+ (*VersionReply)(nil), // 6: types.VersionReply
+ (*ExecutionPayload)(nil), // 7: types.ExecutionPayload
+ (*Withdrawal)(nil), // 8: types.Withdrawal
+ (*BlobsBundleV1)(nil), // 9: types.BlobsBundleV1
+ (*NodeInfoPorts)(nil), // 10: types.NodeInfoPorts
+ (*NodeInfoReply)(nil), // 11: types.NodeInfoReply
+ (*PeerInfo)(nil), // 12: types.PeerInfo
+ (*ExecutionPayloadBodyV1)(nil), // 13: types.ExecutionPayloadBodyV1
+ (*descriptorpb.FileOptions)(nil), // 14: google.protobuf.FileOptions
+}
+var file_types_types_proto_depIdxs = []int32{
+ 0, // 0: types.H160.hi:type_name -> types.H128
+ 0, // 1: types.H256.hi:type_name -> types.H128
+ 0, // 2: types.H256.lo:type_name -> types.H128
+ 2, // 3: types.H512.hi:type_name -> types.H256
+ 2, // 4: types.H512.lo:type_name -> types.H256
+ 3, // 5: types.H1024.hi:type_name -> types.H512
+ 3, // 6: types.H1024.lo:type_name -> types.H512
+ 4, // 7: types.H2048.hi:type_name -> types.H1024
+ 4, // 8: types.H2048.lo:type_name -> types.H1024
+ 2, // 9: types.ExecutionPayload.parent_hash:type_name -> types.H256
+ 1, // 10: types.ExecutionPayload.coinbase:type_name -> types.H160
+ 2, // 11: types.ExecutionPayload.state_root:type_name -> types.H256
+ 2, // 12: types.ExecutionPayload.receipt_root:type_name -> types.H256
+ 5, // 13: types.ExecutionPayload.logs_bloom:type_name -> types.H2048
+ 2, // 14: types.ExecutionPayload.prev_randao:type_name -> types.H256
+ 2, // 15: types.ExecutionPayload.base_fee_per_gas:type_name -> types.H256
+ 2, // 16: types.ExecutionPayload.block_hash:type_name -> types.H256
+ 8, // 17: types.ExecutionPayload.withdrawals:type_name -> types.Withdrawal
+ 1, // 18: types.Withdrawal.address:type_name -> types.H160
+ 10, // 19: types.NodeInfoReply.ports:type_name -> types.NodeInfoPorts
+ 8, // 20: types.ExecutionPayloadBodyV1.withdrawals:type_name -> types.Withdrawal
+ 14, // 21: types.service_major_version:extendee -> google.protobuf.FileOptions
+ 14, // 22: types.service_minor_version:extendee -> google.protobuf.FileOptions
+ 14, // 23: types.service_patch_version:extendee -> google.protobuf.FileOptions
+ 24, // [24:24] is the sub-list for method output_type
+ 24, // [24:24] is the sub-list for method input_type
+ 24, // [24:24] is the sub-list for extension type_name
+ 21, // [21:24] is the sub-list for extension extendee
+ 0, // [0:21] is the sub-list for field type_name
+}
+
+func init() { file_types_types_proto_init() }
+func file_types_types_proto_init() {
+ if File_types_types_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_types_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H128); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H160); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H256); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H512); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H1024); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*H2048); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VersionReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExecutionPayload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Withdrawal); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlobsBundleV1); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeInfoPorts); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeInfoReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_types_types_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExecutionPayloadBodyV1); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_types_types_proto_msgTypes[7].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_types_types_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 14,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_types_types_proto_goTypes,
+ DependencyIndexes: file_types_types_proto_depIdxs,
+ MessageInfos: file_types_types_proto_msgTypes,
+ ExtensionInfos: file_types_types_proto_extTypes,
+ }.Build()
+ File_types_types_proto = out.File
+ file_types_types_proto_rawDesc = nil
+ file_types_types_proto_goTypes = nil
+ file_types_types_proto_depIdxs = nil
+}
diff --git a/erigon-lib/gointerfaces/version.go b/erigon-lib/gointerfaces/version.go
new file mode 100644
index 00000000000..3adc946022d
--- /dev/null
+++ b/erigon-lib/gointerfaces/version.go
@@ -0,0 +1,46 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package gointerfaces
+
+import (
+ "fmt"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+)
+
+type Version struct {
+ Major, Minor, Patch uint32 // interface Version of the client - to perform compatibility check when opening
+}
+
+func VersionFromProto(r *types.VersionReply) Version {
+ return Version{Major: r.Major, Minor: r.Minor, Patch: r.Patch}
+}
+
+// EnsureVersion - Default policy: allow only patch difference
+func EnsureVersion(local Version, remote *types.VersionReply) bool {
+ if remote.Major != local.Major {
+ return false
+ }
+ if remote.Minor != local.Minor {
+ return false
+ }
+ return true
+}
+
+func (v Version) String() string {
+ return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
+}
diff --git a/erigon-lib/kv/Readme.md b/erigon-lib/kv/Readme.md
new file mode 100644
index 00000000000..4075fce0151
--- /dev/null
+++ b/erigon-lib/kv/Readme.md
@@ -0,0 +1,147 @@
+#### `Ethdb` package hold's bouquet of objects to access DB
+
+Words "KV" and "DB" have special meaning here:
+
+- KV - key-value-style API to access data: let developer manage transactions, stateful cursors.
+- DB - object-oriented-style API to access data: Get/Put/Delete/WalkOverTable/MultiPut, managing transactions
+ internally.
+
+So, DB abstraction fits 95% times and leads to more maintainable code - because it looks stateless.
+
+About "key-value-style": Modern key-value databases don't provide Get/Put/Delete methods,
+because it's very hard-drive-unfriendly - it pushes developers do random-disk-access which
+is [order of magnitude slower than sequential read](https://www.seagate.com/sg/en/tech-insights/lies-damn-lies-and-ssd-benchmark-master-ti/).
+To enforce sequential-reads - introduced stateful cursors/iterators - they intentionally look as file-api:
+open_cursor/seek/write_data_from_current_position/move_to_end/step_back/step_forward/delete_key_on_current_position/append.
+
+## Class diagram:
+
+```asciiflow.com
+// This is not call graph, just show classes from low-level to high-level.
+// And show which classes satisfy which interfaces.
+
++-----------------------------------+ +-----------------------------------+
+| github.com/erigonteh/mdbx-go | | google.golang.org/grpc.ClientConn |
+| (app-agnostic MDBX go bindings) | | (app-agnostic RPC and streaming) |
++-----------------------------------+ +-----------------------------------+
+ | |
+ | |
+ v v
++-----------------------------------+ +-----------------------------------+
+| ethdb/kv_mdbx.go | | ethdb/kv_remote.go |
+| (tg-specific MDBX implementaion) | | (tg-specific remote DB access) |
++-----------------------------------+ +-----------------------------------+
+ | |
+ | |
+ v v
++----------------------------------------------------------------------------------------------+
+| eth/kv_interface.go |
+| (Common KV interface. DB-friendly, disk-friendly, cpu-cache-friendly. |
+| Same app code can work with local or remote database. |
+| Allows experiment with another database implementations. |
+| Supports context.Context for cancelation. Any operation can return error) |
++----------------------------------------------------------------------------------------------+
+
+Then:
+turbo/snapshotsync/block_reader.go.go
+erigon-lib/state/aggregator_v3.go
+
+Then:
+kv_temporal.go
+
+```
+
+## ethdb.AbstractKV design:
+
+- InMemory, ReadOnly: `NewMDBX().Flags(mdbx.ReadOnly).InMem().Open()`
+- MultipleDatabases, Customization: `NewMDBX().Path(path).WithBucketsConfig(config).Open()`
+
+
+- 1 Transaction object can be used only within 1 goroutine.
+- Only 1 write transaction can be active at a time (other will wait).
+- Unlimited read transactions can be active concurrently (not blocked by write transaction).
+
+
+- Methods db.Update, db.View - can be used to open and close short transaction.
+- Methods Begin/Commit/Rollback - for long transaction.
+- it's safe to call .Rollback() after .Commit(), multiple rollbacks are also safe. Common transaction patter:
+
+```
+tx, err := db.Begin(true, ethdb.RW)
+if err != nil {
+ return err
+}
+defer tx.Rollback() // important to avoid transactions leak at panic or early return
+
+// ... code which uses database in transaction
+
+err := tx.Commit()
+if err != nil {
+ return err
+}
+```
+
+- No internal copies/allocations. It means: 1. app must copy keys/values before put to database. 2. Data after read from
+ db - valid only during current transaction - copy it if plan use data after transaction Commit/Rollback.
+- Methods .Bucket() and .Cursor(), can’t return nil, can't return error.
+- Bucket and Cursor - are interfaces - means different classes can satisfy it: for example `MdbxCursor`
+ and `MdbxDupSortCursor` classes satisfy it.
+ If your are not familiar with "DupSort" concept, please read [dupsort.md](https://github.com/ledgerwatch/erigon/blob/devel/docs/programmers_guide/dupsort.md)
+
+
+
+- If Cursor returns err!=nil then key SHOULD be != nil (can be []byte{} for example).
+ Then traversal code look as:
+
+```go
+for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
+if err != nil {
+return err
+}
+// logic
+}
+```
+
+- Move cursor: `cursor.Seek(key)`
+
+## ethdb.Database design:
+
+- Allows pass multiple implementations
+- Allows traversal tables by `db.Walk`
+
+## ethdb.TxDb design:
+
+- holds inside 1 long-running transaction and 1 cursor per table
+- method Begin DOESN'T create new TxDb object, it means this object can be passed into other objects by pointer,
+ and high-level app code can start/commit transactions when it needs without re-creating all objects which holds
+ TxDb pointer.
+- This is reason why txDb.CommitAndBegin() method works: inside it creating new transaction object, pointer to TxDb stays
+ valid.
+
+## How to dump/load table
+
+Install all database tools: `make db-tools`
+
+```
+./build/bin/mdbx_dump -a /erigon/chaindata | lz4 > dump.lz4
+lz4 -d < dump.lz4 | ./build/bin/mdbx_load -an /erigon/chaindata
+```
+
+## How to get table checksum
+
+```
+./build/bin/mdbx_dump -s table_name /erigon/chaindata | tail -n +4 | sha256sum # tail here is for excluding header
+
+Header example:
+VERSION=3
+geometry=l268435456,c268435456,u25769803776,s268435456,g268435456
+mapsize=756375552
+maxreaders=120
+format=bytevalue
+database=TBL0001
+type=btree
+db_pagesize=4096
+duplicates=1
+dupsort=1
+HEADER=END
+```
diff --git a/erigon-lib/kv/bitmapdb/bitmapdb.go b/erigon-lib/kv/bitmapdb/bitmapdb.go
new file mode 100644
index 00000000000..0311eb60712
--- /dev/null
+++ b/erigon-lib/kv/bitmapdb/bitmapdb.go
@@ -0,0 +1,430 @@
+/*
+ Copyright 2022 The Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bitmapdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math"
+ "sort"
+ "sync"
+
+ "github.com/RoaringBitmap/roaring"
+ "github.com/RoaringBitmap/roaring/roaring64"
+ "github.com/c2h5oh/datasize"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+const MaxUint32 = 1<<32 - 1
+
+type ToBitamp interface {
+ ToBitmap() (*roaring64.Bitmap, error)
+}
+
+var roaringPool = sync.Pool{
+ New: func() any {
+ return roaring.New()
+ },
+}
+
+func NewBitmap() *roaring.Bitmap {
+ a := roaringPool.Get().(*roaring.Bitmap)
+ a.Clear()
+ return a
+}
+func ReturnToPool(a *roaring.Bitmap) {
+ if a == nil {
+ return
+ }
+ roaringPool.Put(a)
+}
+
+var roaring64Pool = sync.Pool{
+ New: func() any {
+ return roaring64.New()
+ },
+}
+
+func NewBitmap64() *roaring64.Bitmap {
+ a := roaring64Pool.Get().(*roaring64.Bitmap)
+ a.Clear()
+ return a
+}
+func ReturnToPool64(a *roaring64.Bitmap) {
+ if a == nil {
+ return
+ }
+ roaring64Pool.Put(a)
+}
+
+const ChunkLimit = uint64(1950 * datasize.B) // threshold beyond which MDBX overflow pages appear: 4096 / 2 - (keySize + 8)
+
+// CutLeft - cut from bitmap `targetSize` bytes from left
+// removing lft part from `bm`
+// returns nil on zero cardinality
+func CutLeft(bm *roaring.Bitmap, sizeLimit uint64) *roaring.Bitmap {
+ if bm.GetCardinality() == 0 {
+ return nil
+ }
+
+ sz := bm.GetSerializedSizeInBytes()
+ if sz <= sizeLimit {
+ lft := roaring.New()
+ lft.AddRange(uint64(bm.Minimum()), uint64(bm.Maximum())+1)
+ lft.And(bm)
+ lft.RunOptimize()
+ bm.Clear()
+ return lft
+ }
+
+ from := uint64(bm.Minimum())
+ minMax := bm.Maximum() - bm.Minimum()
+ to := sort.Search(int(minMax), func(i int) bool { // can be optimized to avoid "too small steps", but let's leave it for readability
+ lft := roaring.New() // bitmap.Clear() method intentionally not used here, because then serialized size of bitmap getting bigger
+ lft.AddRange(from, from+uint64(i)+1)
+ lft.And(bm)
+ lft.RunOptimize()
+ return lft.GetSerializedSizeInBytes() > sizeLimit
+ })
+
+ lft := roaring.New()
+ lft.AddRange(from, from+uint64(to)) // no +1 because sort.Search returns element which is just higher threshold - but we need lower
+ lft.And(bm)
+ bm.RemoveRange(from, from+uint64(to))
+ lft.RunOptimize()
+ return lft
+}
+
+func WalkChunks(bm *roaring.Bitmap, sizeLimit uint64, f func(chunk *roaring.Bitmap, isLast bool) error) error {
+ for bm.GetCardinality() > 0 {
+ if err := f(CutLeft(bm, sizeLimit), bm.GetCardinality() == 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func WalkChunkWithKeys(k []byte, m *roaring.Bitmap, sizeLimit uint64, f func(chunkKey []byte, chunk *roaring.Bitmap) error) error {
+ return WalkChunks(m, sizeLimit, func(chunk *roaring.Bitmap, isLast bool) error {
+ chunkKey := make([]byte, len(k)+4)
+ copy(chunkKey, k)
+ if isLast {
+ binary.BigEndian.PutUint32(chunkKey[len(k):], ^uint32(0))
+ } else {
+ binary.BigEndian.PutUint32(chunkKey[len(k):], chunk.Maximum())
+ }
+ return f(chunkKey, chunk)
+ })
+}
+
+// TruncateRange - gets existing bitmap in db and call RemoveRange operator on it.
+// starts from hot shard, stops when shard not overlap with [from-to)
+// !Important: [from, to)
+func TruncateRange(db kv.RwTx, bucket string, key []byte, to uint32) error {
+ chunkKey := make([]byte, len(key)+4)
+ copy(chunkKey, key)
+ binary.BigEndian.PutUint32(chunkKey[len(chunkKey)-4:], to)
+ bm, err := Get(db, bucket, key, to, MaxUint32)
+ if err != nil {
+ return err
+ }
+
+ if bm.GetCardinality() > 0 && to <= bm.Maximum() {
+ bm.RemoveRange(uint64(to), uint64(bm.Maximum())+1)
+ }
+
+ c, err := db.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ if err := Walk(c, chunkKey, 0, func(k, v []byte) (bool, error) {
+ if !bytes.HasPrefix(k, key) {
+ return false, nil
+ }
+ if err := db.Delete(bucket, k); err != nil {
+ return false, err
+ }
+ return true, nil
+ }); err != nil {
+ return err
+ }
+
+ buf := bytes.NewBuffer(nil)
+ return WalkChunkWithKeys(key, bm, ChunkLimit, func(chunkKey []byte, chunk *roaring.Bitmap) error {
+ buf.Reset()
+ if _, err := chunk.WriteTo(buf); err != nil {
+ return err
+ }
+ return db.Put(bucket, chunkKey, common.Copy(buf.Bytes()))
+ })
+}
+
+// Get - reading as much chunks as needed to satisfy [from, to] condition
+// join all chunks to 1 bitmap by Or operator
+func Get(db kv.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, error) {
+ var chunks []*roaring.Bitmap
+
+ fromKey := make([]byte, len(key)+4)
+ copy(fromKey, key)
+ binary.BigEndian.PutUint32(fromKey[len(fromKey)-4:], from)
+ c, err := db.Cursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ for k, v, err := c.Seek(fromKey); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.HasPrefix(k, key) {
+ break
+ }
+ bm := NewBitmap()
+ defer ReturnToPool(bm)
+ if _, err := bm.ReadFrom(bytes.NewReader(v)); err != nil {
+ return nil, err
+ }
+ chunks = append(chunks, bm)
+ if binary.BigEndian.Uint32(k[len(k)-4:]) >= to {
+ break
+ }
+ }
+ if len(chunks) == 0 {
+ return roaring.New(), nil
+ }
+ return roaring.FastOr(chunks...), nil
+}
+
+// SeekInBitmap - returns value in bitmap which is >= n
+//
+//nolint:deadcode
+func SeekInBitmap(m *roaring.Bitmap, n uint32) (found uint32, ok bool) {
+ i := m.Iterator()
+ i.AdvanceIfNeeded(n)
+ ok = i.HasNext()
+ if ok {
+ found = i.Next()
+ }
+ return found, ok
+}
+
+// CutLeft - cut from bitmap `targetSize` bytes from left
+// removing lft part from `bm`
+// returns nil on zero cardinality
+func CutLeft64(bm *roaring64.Bitmap, sizeLimit uint64) *roaring64.Bitmap {
+ if bm.GetCardinality() == 0 {
+ return nil
+ }
+
+ sz := bm.GetSerializedSizeInBytes()
+ if sz <= sizeLimit {
+ lft := roaring64.New()
+ lft.AddRange(bm.Minimum(), bm.Maximum()+1)
+ lft.And(bm)
+ lft.RunOptimize()
+ bm.Clear()
+ return lft
+ }
+
+ from := bm.Minimum()
+ minMax := bm.Maximum() - bm.Minimum()
+ to := sort.Search(int(minMax), func(i int) bool { // can be optimized to avoid "too small steps", but let's leave it for readability
+ lft := roaring64.New() // bitmap.Clear() method intentionally not used here, because then serialized size of bitmap getting bigger
+ lft.AddRange(from, from+uint64(i)+1)
+ lft.And(bm)
+ lft.RunOptimize()
+ return lft.GetSerializedSizeInBytes() > sizeLimit
+ })
+
+ lft := roaring64.New()
+ lft.AddRange(from, from+uint64(to)) // no +1 because sort.Search returns element which is just higher threshold - but we need lower
+ lft.And(bm)
+ bm.RemoveRange(from, from+uint64(to))
+ lft.RunOptimize()
+ return lft
+}
+
+func WalkChunks64(bm *roaring64.Bitmap, sizeLimit uint64, f func(chunk *roaring64.Bitmap, isLast bool) error) error {
+ for bm.GetCardinality() > 0 {
+ if err := f(CutLeft64(bm, sizeLimit), bm.GetCardinality() == 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func WalkChunkWithKeys64(k []byte, m *roaring64.Bitmap, sizeLimit uint64, f func(chunkKey []byte, chunk *roaring64.Bitmap) error) error {
+ return WalkChunks64(m, sizeLimit, func(chunk *roaring64.Bitmap, isLast bool) error {
+ chunkKey := make([]byte, len(k)+8)
+ copy(chunkKey, k)
+ if isLast {
+ binary.BigEndian.PutUint64(chunkKey[len(k):], ^uint64(0))
+ } else {
+ binary.BigEndian.PutUint64(chunkKey[len(k):], chunk.Maximum())
+ }
+ return f(chunkKey, chunk)
+ })
+}
+
+// TruncateRange - gets existing bitmap in db and call RemoveRange operator on it.
+// starts from hot shard, stops when shard not overlap with [from-to)
+// !Important: [from, to)
+func TruncateRange64(db kv.RwTx, bucket string, key []byte, to uint64) error {
+ chunkKey := make([]byte, len(key)+8)
+ copy(chunkKey, key)
+ binary.BigEndian.PutUint64(chunkKey[len(chunkKey)-8:], to)
+ bm, err := Get64(db, bucket, key, to, math.MaxUint64)
+ if err != nil {
+ return err
+ }
+
+ if bm.GetCardinality() > 0 && to <= bm.Maximum() {
+ bm.RemoveRange(to, bm.Maximum()+1)
+ }
+
+ c, err := db.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ cDel, err := db.RwCursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer cDel.Close()
+ if err := Walk(c, chunkKey, 0, func(k, v []byte) (bool, error) {
+ if !bytes.HasPrefix(k, key) {
+ return false, nil
+ }
+ if err := cDel.Delete(k); err != nil {
+ return false, err
+ }
+ return true, nil
+ }); err != nil {
+ return err
+ }
+
+ buf := bytes.NewBuffer(nil)
+ return WalkChunkWithKeys64(key, bm, ChunkLimit, func(chunkKey []byte, chunk *roaring64.Bitmap) error {
+ buf.Reset()
+ if _, err := chunk.WriteTo(buf); err != nil {
+ return err
+ }
+ return db.Put(bucket, chunkKey, common.Copy(buf.Bytes()))
+ })
+}
+
+// Get - reading as much chunks as needed to satisfy [from, to] condition
+// join all chunks to 1 bitmap by Or operator
+func Get64(db kv.Tx, bucket string, key []byte, from, to uint64) (*roaring64.Bitmap, error) {
+ var chunks []*roaring64.Bitmap
+
+ fromKey := make([]byte, len(key)+8)
+ copy(fromKey, key)
+ binary.BigEndian.PutUint64(fromKey[len(fromKey)-8:], from)
+
+ c, err := db.Cursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ for k, v, err := c.Seek(fromKey); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.HasPrefix(k, key) {
+ break
+ }
+ bm := NewBitmap64()
+ defer ReturnToPool64(bm)
+ _, err := bm.ReadFrom(bytes.NewReader(v))
+ if err != nil {
+ return nil, err
+ }
+ chunks = append(chunks, bm)
+ if binary.BigEndian.Uint64(k[len(k)-8:]) >= to {
+ break
+ }
+ }
+
+ if len(chunks) == 0 {
+ return roaring64.New(), nil
+ }
+ return roaring64.FastOr(chunks...), nil
+}
+
+// SeekInBitmap - returns value in bitmap which is >= n
+func SeekInBitmap64(m *roaring64.Bitmap, n uint64) (found uint64, ok bool) {
+ if m.IsEmpty() {
+ return 0, false
+ }
+ if n == 0 {
+ return m.Minimum(), true
+ }
+ searchRank := m.Rank(n - 1)
+ if searchRank >= m.GetCardinality() {
+ return 0, false
+ }
+ found, _ = m.Select(searchRank)
+ return found, true
+}
+
+func Walk(c kv.Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error {
+ fixedbytes, mask := Bytesmask(fixedbits)
+ k, v, err := c.Seek(startkey)
+ if err != nil {
+ return err
+ }
+ for k != nil && len(k) >= fixedbytes && (fixedbits == 0 || bytes.Equal(k[:fixedbytes-1], startkey[:fixedbytes-1]) && (k[fixedbytes-1]&mask) == (startkey[fixedbytes-1]&mask)) {
+ goOn, err := walker(k, v)
+ if err != nil {
+ return err
+ }
+ if !goOn {
+ break
+ }
+ k, v, err = c.Next()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func Bytesmask(fixedbits int) (fixedbytes int, mask byte) {
+ fixedbytes = common.BitLenToByteLen(fixedbits)
+ shiftbits := fixedbits & 7
+ mask = byte(0xff)
+ if shiftbits != 0 {
+ mask = 0xff << (8 - shiftbits)
+ }
+ return fixedbytes, mask
+}
+
+type ToBitmap interface {
+ ToBitmap() (*roaring64.Bitmap, error)
+}
+
+func ToIter(it roaring64.IntIterable64) *ToIterInterface { return &ToIterInterface{it: it} }
+
+type ToIterInterface struct{ it roaring64.IntIterable64 }
+
+func (i *ToIterInterface) HasNext() bool { return i.it.HasNext() }
+func (i *ToIterInterface) Next() (uint64, error) { return i.it.Next(), nil }
diff --git a/erigon-lib/kv/bitmapdb/bitmapdb_test.go b/erigon-lib/kv/bitmapdb/bitmapdb_test.go
new file mode 100644
index 00000000000..b3a4e9bfc02
--- /dev/null
+++ b/erigon-lib/kv/bitmapdb/bitmapdb_test.go
@@ -0,0 +1,71 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bitmapdb_test
+
+import (
+ "testing"
+
+ "github.com/RoaringBitmap/roaring"
+ "github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCutLeft(t *testing.T) {
+ bm := roaring.New()
+ for j := 0; j < 10_000; j += 20 {
+ bm.AddRange(uint64(j), uint64(j+10))
+ }
+ N := uint64(1024)
+ for bm.GetCardinality() > 0 {
+ lft := bitmapdb.CutLeft(bm, N)
+ lftSz := lft.GetSerializedSizeInBytes()
+ if bm.GetCardinality() > 0 {
+ require.True(t, lftSz > N-256 && lftSz < N+256)
+ } else {
+ require.True(t, lft.GetSerializedSizeInBytes() > 0)
+ require.True(t, lftSz < N+256)
+ }
+ }
+
+ bm = roaring.New()
+ for j := 0; j < 10_000; j += 20 {
+ bm.AddRange(uint64(j), uint64(j+10))
+ }
+ N = uint64(2048)
+ for bm.GetCardinality() > 0 {
+ lft := bitmapdb.CutLeft(bm, N)
+ lftSz := lft.GetSerializedSizeInBytes()
+ if bm.GetCardinality() > 0 {
+ require.True(t, lftSz > N-256 && lftSz < N+256)
+ } else {
+ require.True(t, lft.GetSerializedSizeInBytes() > 0)
+ require.True(t, lftSz < N+256)
+ }
+ }
+
+ bm = roaring.New()
+ bm.Add(1)
+ lft := bitmapdb.CutLeft(bm, N)
+ require.True(t, lft.GetSerializedSizeInBytes() > 0)
+ require.True(t, lft.GetCardinality() == 1)
+ require.True(t, bm.GetCardinality() == 0)
+
+ bm = roaring.New()
+ lft = bitmapdb.CutLeft(bm, N)
+ require.True(t, lft == nil)
+ require.True(t, bm.GetCardinality() == 0)
+}
diff --git a/erigon-lib/kv/bitmapdb/fixed_size.go b/erigon-lib/kv/bitmapdb/fixed_size.go
new file mode 100644
index 00000000000..97bc501b7e7
--- /dev/null
+++ b/erigon-lib/kv/bitmapdb/fixed_size.go
@@ -0,0 +1,320 @@
+/*
+Copyright 2022 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bitmapdb
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "time"
+ "unsafe"
+
+ "github.com/c2h5oh/datasize"
+ mmap2 "github.com/edsrzf/mmap-go"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type FixedSizeBitmaps struct {
+ f *os.File
+ filePath, fileName string
+
+ data []uint64
+ metaData []byte
+ amount uint64
+ version uint8
+
+ m mmap2.MMap
+ bitsPerBitmap int
+ size int
+ modTime time.Time
+}
+
+func OpenFixedSizeBitmaps(filePath string, bitsPerBitmap int) (*FixedSizeBitmaps, error) {
+ _, fName := filepath.Split(filePath)
+ idx := &FixedSizeBitmaps{
+ filePath: filePath,
+ fileName: fName,
+ bitsPerBitmap: bitsPerBitmap,
+ }
+
+ var err error
+ idx.f, err = os.Open(filePath)
+ if err != nil {
+ return nil, fmt.Errorf("OpenFile: %w", err)
+ }
+ var stat os.FileInfo
+ if stat, err = idx.f.Stat(); err != nil {
+ return nil, err
+ }
+ idx.size = int(stat.Size())
+ idx.modTime = stat.ModTime()
+ idx.m, err = mmap2.MapRegion(idx.f, idx.size, mmap2.RDONLY, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+ idx.metaData = idx.m[:MetaHeaderSize]
+ idx.data = castToArrU64(idx.m[MetaHeaderSize:])
+
+ idx.version = idx.metaData[0]
+ idx.amount = binary.BigEndian.Uint64(idx.metaData[1 : 8+1])
+
+ return idx, nil
+}
+
+func (bm *FixedSizeBitmaps) FileName() string { return bm.fileName }
+func (bm *FixedSizeBitmaps) FilePath() string { return bm.filePath }
+func (bm *FixedSizeBitmaps) Close() {
+ if bm.m != nil {
+ if err := bm.m.Unmap(); err != nil {
+ log.Trace("unmap", "err", err, "file", bm.FileName())
+ }
+ bm.m = nil
+ }
+ if bm.f != nil {
+ if err := bm.f.Close(); err != nil {
+ log.Trace("close", "err", err, "file", bm.FileName())
+ }
+ bm.f = nil
+ }
+}
+
+func (bm *FixedSizeBitmaps) At(item uint64) (res []uint64, err error) {
+ if item > bm.amount {
+ return nil, fmt.Errorf("too big item number: %d > %d", item, bm.amount)
+ }
+
+ n := bm.bitsPerBitmap * int(item)
+ blkFrom, bitFrom := n/64, n%64
+ blkTo := (n+bm.bitsPerBitmap)/64 + 1
+ bitTo := 64
+
+ var j uint64
+ for i := blkFrom; i < blkTo; i++ {
+ if i == blkTo-1 {
+ bitTo = (n + bm.bitsPerBitmap) % 64
+ }
+ for bit := bitFrom; bit < bitTo; bit++ {
+ if bm.data[i]&(1< bm.amount {
+ return 0, 0, false, false, fmt.Errorf("too big item number: %d > %d", item, bm.amount)
+ }
+ n := bm.bitsPerBitmap * int(item)
+ blkFrom, bitFrom := n/64, n%64
+ blkTo := (n+bm.bitsPerBitmap)/64 + 1
+ bitTo := 64
+
+ var j uint64
+ for i := blkFrom; i < blkTo; i++ {
+ if i == blkTo-1 {
+ bitTo = (n + bm.bitsPerBitmap) % 64
+ }
+ for bit := bitFrom; bit < bitTo; bit++ {
+ if bm.data[i]&(1<= after {
+ if !ok {
+ ok = true
+ fst = j
+ } else {
+ ok2 = true
+ snd = j
+ return
+ }
+ }
+ }
+ j++
+ }
+ bitFrom = 0
+ }
+
+ return
+}
+
+type FixedSizeBitmapsWriter struct {
+ f *os.File
+
+ indexFile, tmpIdxFilePath string
+ data []uint64 // slice of correct size for the index to work with
+ metaData []byte
+ m mmap2.MMap
+
+ version uint8
+ amount uint64
+ size int
+ bitsPerBitmap uint64
+
+ logger log.Logger
+ noFsync bool // fsync is enabled by default, but tests can manually disable
+}
+
+const MetaHeaderSize = 64
+
+func NewFixedSizeBitmapsWriter(indexFile string, bitsPerBitmap int, amount uint64, logger log.Logger) (*FixedSizeBitmapsWriter, error) {
+ pageSize := os.Getpagesize()
+ //TODO: use math.SafeMul()
+ bytesAmount := MetaHeaderSize + (bitsPerBitmap*int(amount))/8
+ size := (bytesAmount/pageSize + 1) * pageSize // must be page-size-aligned
+ idx := &FixedSizeBitmapsWriter{
+ indexFile: indexFile,
+ tmpIdxFilePath: indexFile + ".tmp",
+ bitsPerBitmap: uint64(bitsPerBitmap),
+ size: size,
+ amount: amount,
+ version: 1,
+ logger: logger,
+ }
+
+ _ = os.Remove(idx.tmpIdxFilePath)
+
+ var err error
+ idx.f, err = os.Create(idx.tmpIdxFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := growFileToSize(idx.f, idx.size); err != nil {
+ return nil, err
+ }
+
+ idx.m, err = mmap2.MapRegion(idx.f, idx.size, mmap2.RDWR, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ idx.metaData = idx.m[:MetaHeaderSize]
+ idx.data = castToArrU64(idx.m[MetaHeaderSize:])
+ //if err := mmap.MadviseNormal(idx.m); err != nil {
+ // return nil, err
+ //}
+ idx.metaData[0] = idx.version
+ binary.BigEndian.PutUint64(idx.metaData[1:], idx.amount)
+ idx.amount = binary.BigEndian.Uint64(idx.metaData[1 : 8+1])
+
+ return idx, nil
+}
+func (w *FixedSizeBitmapsWriter) Close() {
+ if w.m != nil {
+ if err := w.m.Unmap(); err != nil {
+ log.Trace("unmap", "err", err, "file", w.f.Name())
+ }
+ w.m = nil
+ }
+ if w.f != nil {
+ if err := w.f.Close(); err != nil {
+ log.Trace("close", "err", err, "file", w.f.Name())
+ }
+ w.f = nil
+ }
+}
+func growFileToSize(f *os.File, size int) error {
+ pageSize := os.Getpagesize()
+ pages := size / pageSize
+ wr := bufio.NewWriterSize(f, int(4*datasize.MB))
+ page := make([]byte, pageSize)
+ for i := 0; i < pages; i++ {
+ if _, err := wr.Write(page); err != nil {
+ return err
+ }
+ }
+ if err := wr.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Create a []uint64 view of the file
+func castToArrU64(in []byte) []uint64 {
+ var view []uint64
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
+ header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&in)).Data
+ header.Len = len(in) / 8
+ header.Cap = header.Len
+ return view
+}
+
+func (w *FixedSizeBitmapsWriter) AddArray(item uint64, listOfValues []uint64) error {
+ if item > w.amount {
+ return fmt.Errorf("too big item number: %d > %d", item, w.amount)
+ }
+ offset := item * w.bitsPerBitmap
+ for _, v := range listOfValues {
+ if v > w.bitsPerBitmap {
+ return fmt.Errorf("too big value: %d > %d", v, w.bitsPerBitmap)
+ }
+ n := offset + v
+ blkAt, bitAt := int(n/64), int(n%64)
+ if blkAt > len(w.data) {
+ return fmt.Errorf("too big value: %d, %d, max: %d", item, listOfValues, len(w.data))
+ }
+ w.data[blkAt] |= (1 << bitAt)
+ }
+ return nil
+}
+
+func (w *FixedSizeBitmapsWriter) Build() error {
+ if err := w.m.Flush(); err != nil {
+ return err
+ }
+ if err := w.fsync(); err != nil {
+ return err
+ }
+
+ if err := w.m.Unmap(); err != nil {
+ return err
+ }
+ w.m = nil
+
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ w.f = nil
+
+ _ = os.Remove(w.indexFile)
+ if err := os.Rename(w.tmpIdxFilePath, w.indexFile); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *FixedSizeBitmapsWriter) DisableFsync() { w.noFsync = true }
+
+// fsync - other processes/goroutines must see only "fully-complete" (valid) files. No partial-writes.
+// To achieve it: write to .tmp file then `rename` when file is ready.
+// Machine may power-off right after `rename` - it means `fsync` must be before `rename`
+func (w *FixedSizeBitmapsWriter) fsync() error {
+ if w.noFsync {
+ return nil
+ }
+ if err := w.f.Sync(); err != nil {
+ w.logger.Warn("couldn't fsync", "err", err, "file", w.tmpIdxFilePath)
+ return err
+ }
+ return nil
+}
diff --git a/erigon-lib/kv/bitmapdb/fixed_size_test.go b/erigon-lib/kv/bitmapdb/fixed_size_test.go
new file mode 100644
index 00000000000..9f513c5833b
--- /dev/null
+++ b/erigon-lib/kv/bitmapdb/fixed_size_test.go
@@ -0,0 +1,108 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bitmapdb
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFixedSizeBitmaps(t *testing.T) {
+
+ tmpDir, require := t.TempDir(), require.New(t)
+ must := require.NoError
+ idxPath := filepath.Join(tmpDir, "idx.tmp")
+ wr, err := NewFixedSizeBitmapsWriter(idxPath, 14, 7, log.New())
+ require.NoError(err)
+ defer wr.Close()
+
+ must(wr.AddArray(0, []uint64{3, 9, 11}))
+ must(wr.AddArray(1, []uint64{1, 2, 3}))
+ must(wr.AddArray(2, []uint64{4, 8, 13}))
+ must(wr.AddArray(3, []uint64{1, 13}))
+ must(wr.AddArray(4, []uint64{1, 13}))
+ must(wr.AddArray(5, []uint64{1, 13}))
+ must(wr.AddArray(6, []uint64{0, 9, 13}))
+ must(wr.AddArray(7, []uint64{7}))
+
+ require.Error(wr.AddArray(8, []uint64{8}))
+ err = wr.Build()
+ require.NoError(err)
+
+ bm, err := OpenFixedSizeBitmaps(idxPath, 14)
+ require.NoError(err)
+ defer bm.Close()
+
+ at := func(item uint64) []uint64 {
+ n, err := bm.At(item)
+ require.NoError(err)
+ return n
+ }
+
+ require.Equal([]uint64{3, 9, 11}, at(0))
+ require.Equal([]uint64{1, 2, 3}, at(1))
+ require.Equal([]uint64{4, 8, 13}, at(2))
+ require.Equal([]uint64{1, 13}, at(3))
+ require.Equal([]uint64{1, 13}, at(4))
+ require.Equal([]uint64{1, 13}, at(5))
+ require.Equal([]uint64{0, 9, 13}, at(6))
+ require.Equal([]uint64{7}, at(7))
+
+ fst, snd, ok, ok2, err := bm.First2At(7, 0)
+ require.NoError(err)
+ require.Equal(uint64(7), fst)
+ require.Equal(uint64(0), snd)
+ require.Equal(true, ok)
+ require.Equal(false, ok2)
+
+ fst, snd, ok, ok2, err = bm.First2At(2, 8)
+ require.NoError(err)
+ require.Equal(uint64(8), fst)
+ require.Equal(uint64(13), snd)
+ require.Equal(true, ok)
+ require.Equal(true, ok2)
+
+ fst, snd, ok, ok2, err = bm.First2At(2, 9)
+ require.NoError(err)
+ require.Equal(uint64(13), fst)
+ require.Equal(uint64(0), snd)
+ require.Equal(true, ok)
+ require.Equal(false, ok2)
+
+ _, err = bm.At(8)
+ require.Error(err)
+}
+
+func TestPageAlined(t *testing.T) {
+ tmpDir, require := t.TempDir(), require.New(t)
+ idxPath := filepath.Join(tmpDir, "idx.tmp")
+
+ bm2, err := NewFixedSizeBitmapsWriter(idxPath, 128, 100, log.New())
+ require.NoError(err)
+ require.Equal((128/8*100/os.Getpagesize()+1)*os.Getpagesize(), bm2.size)
+ defer bm2.Close()
+ bm2.Close()
+
+ bm3, err := NewFixedSizeBitmapsWriter(idxPath, 128, 1000, log.New())
+ require.NoError(err)
+ require.Equal((128/8*1000/os.Getpagesize()+1)*os.Getpagesize(), bm3.size)
+ defer bm3.Close()
+}
diff --git a/erigon-lib/kv/bitmapdb/stream.go b/erigon-lib/kv/bitmapdb/stream.go
new file mode 100644
index 00000000000..59ffe5f0bba
--- /dev/null
+++ b/erigon-lib/kv/bitmapdb/stream.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bitmapdb
+
+import (
+ "github.com/RoaringBitmap/roaring/roaring64"
+)
+
+type BitmapStream struct {
+ bm *roaring64.Bitmap
+ it roaring64.IntPeekable64
+}
+
+func NewBitmapStream(bm *roaring64.Bitmap) *BitmapStream {
+ return &BitmapStream{bm: bm, it: bm.Iterator()}
+}
+func (it *BitmapStream) HasNext() bool { return it.it.HasNext() }
+func (it *BitmapStream) Close() { ReturnToPool64(it.bm) }
+func (it *BitmapStream) Next() (uint64, error) { return it.it.Next(), nil }
+func (it *BitmapStream) ToBitmap() (*roaring64.Bitmap, error) { return it.bm, nil }
diff --git a/common/dbutils/composite_keys.go b/erigon-lib/kv/dbutils/composite_keys.go
similarity index 95%
rename from common/dbutils/composite_keys.go
rename to erigon-lib/kv/dbutils/composite_keys.go
index d2274f022e3..2be332fbfee 100644
--- a/common/dbutils/composite_keys.go
+++ b/erigon-lib/kv/dbutils/composite_keys.go
@@ -174,12 +174,3 @@ func encodeTimestamp(timestamp uint64) []byte {
}
return suffix
}
-
-func decodeTimestamp(suffix []byte) (uint64, []byte) {
- bytecount := int(suffix[0] >> 5)
- timestamp := uint64(suffix[0] & 0x1f)
- for i := 1; i < bytecount; i++ {
- timestamp = (timestamp << 8) | uint64(suffix[i])
- }
- return timestamp, suffix[bytecount:]
-}
diff --git a/common/dbutils/composite_keys_test.go b/erigon-lib/kv/dbutils/composite_keys_test.go
similarity index 93%
rename from common/dbutils/composite_keys_test.go
rename to erigon-lib/kv/dbutils/composite_keys_test.go
index 7eac33bcfc1..619225a1b47 100644
--- a/common/dbutils/composite_keys_test.go
+++ b/erigon-lib/kv/dbutils/composite_keys_test.go
@@ -3,10 +3,9 @@ package dbutils
import (
"testing"
+ "github.com/ledgerwatch/erigon-lib/common"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/stretchr/testify/assert"
-
- "github.com/ledgerwatch/erigon/common"
)
func TestPlainParseStoragePrefix(t *testing.T) {
@@ -36,7 +35,7 @@ func TestPlainParseCompositeStorageKey(t *testing.T) {
}
func TestParseStoragePrefix(t *testing.T) {
- expectedAddrHash, _ := common.HashData(libcommon.HexToAddress("0x5A0b54D5dc17e0AadC383d2db43B0a0D3E029c4c").Bytes())
+ expectedAddrHash, _ := libcommon.HashData(libcommon.HexToAddress("0x5A0b54D5dc17e0AadC383d2db43B0a0D3E029c4c").Bytes())
expectedIncarnation := uint64(999000999)
prefix := GenerateStoragePrefix(expectedAddrHash[:], expectedIncarnation)
diff --git a/common/dbutils/helper.go b/erigon-lib/kv/dbutils/helper.go
similarity index 100%
rename from common/dbutils/helper.go
rename to erigon-lib/kv/dbutils/helper.go
diff --git a/common/dbutils/history_index.go b/erigon-lib/kv/dbutils/history_index.go
similarity index 100%
rename from common/dbutils/history_index.go
rename to erigon-lib/kv/dbutils/history_index.go
diff --git a/common/dbutils/suffix_type.go b/erigon-lib/kv/dbutils/suffix_type.go
similarity index 100%
rename from common/dbutils/suffix_type.go
rename to erigon-lib/kv/dbutils/suffix_type.go
diff --git a/erigon-lib/kv/helpers.go b/erigon-lib/kv/helpers.go
new file mode 100644
index 00000000000..727a140a124
--- /dev/null
+++ b/erigon-lib/kv/helpers.go
@@ -0,0 +1,222 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package kv
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/erigontech/mdbx-go/mdbx"
+ "github.com/ledgerwatch/erigon-lib/common"
+)
+
+func DefaultPageSize() uint64 {
+ osPageSize := os.Getpagesize()
+ if osPageSize < 4096 { // reduce further may lead to errors (because some data is just big)
+ osPageSize = 4096
+ } else if osPageSize > mdbx.MaxPageSize {
+ osPageSize = mdbx.MaxPageSize
+ }
+ osPageSize = osPageSize / 4096 * 4096 // ensure it's rounded
+ return uint64(osPageSize)
+}
+
+// BigChunks - read `table` by big chunks - restart read transaction after each 1 minutes
+func BigChunks(db RoDB, table string, from []byte, walker func(tx Tx, k, v []byte) (bool, error)) error {
+ rollbackEvery := time.NewTicker(1 * time.Minute)
+
+ var stop bool
+ for !stop {
+ if err := db.View(context.Background(), func(tx Tx) error {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ k, v, err := c.Seek(from)
+ Loop:
+ for ; k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+
+ // break loop before walker() call, to make sure all keys are received by walker() exactly once
+ select {
+ case <-rollbackEvery.C:
+
+ break Loop
+ default:
+ }
+
+ ok, err := walker(tx, k, v)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ stop = true
+ break
+ }
+ }
+
+ if k == nil {
+ stop = true
+ }
+
+ from = common.Copy(k) // next transaction will start from this key
+
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var (
+ bytesTrue = []byte{1}
+ bytesFalse = []byte{0}
+)
+
+func bytes2bool(in []byte) bool {
+ if len(in) < 1 {
+ return false
+ }
+ return in[0] == 1
+}
+
+var ErrChanged = fmt.Errorf("key must not change")
+
+// EnsureNotChangedBool - used to store immutable config flags in db. protects from human mistakes
+func EnsureNotChangedBool(tx GetPut, bucket string, k []byte, value bool) (ok, enabled bool, err error) {
+ vBytes, err := tx.GetOne(bucket, k)
+ if err != nil {
+ return false, enabled, err
+ }
+ if vBytes == nil {
+ if value {
+ vBytes = bytesTrue
+ } else {
+ vBytes = bytesFalse
+ }
+ if err := tx.Put(bucket, k, vBytes); err != nil {
+ return false, enabled, err
+ }
+ }
+
+ enabled = bytes2bool(vBytes)
+ return value == enabled, enabled, nil
+}
+
+func GetBool(tx Getter, bucket string, k []byte) (enabled bool, err error) {
+ vBytes, err := tx.GetOne(bucket, k)
+ if err != nil {
+ return false, err
+ }
+ return bytes2bool(vBytes), nil
+}
+
+func ReadAhead(ctx context.Context, db RoDB, progress *atomic.Bool, table string, from []byte, amount uint32) (clean func()) {
+ if db == nil {
+ return func() {}
+ }
+ if ok := progress.CompareAndSwap(false, true); !ok {
+ return func() {}
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ wg := sync.WaitGroup{}
+ clean = func() {
+ cancel()
+ wg.Wait()
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer progress.Store(false)
+ _ = db.View(ctx, func(tx Tx) error {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(from); k != nil && amount > 0; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if len(v) > 0 {
+ _, _ = v[0], v[len(v)-1]
+ }
+ amount--
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ }
+ return nil
+ })
+ }()
+ return clean
+}
+
+// FirstKey - candidate on move to kv.Tx interface
+func FirstKey(tx Tx, table string) ([]byte, error) {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ k, _, err := c.First()
+ if err != nil {
+ return nil, err
+ }
+ return k, nil
+}
+
+// LastKey - candidate on move to kv.Tx interface
+func LastKey(tx Tx, table string) ([]byte, error) {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ k, _, err := c.Last()
+ if err != nil {
+ return nil, err
+ }
+ return k, nil
+}
+
+// NextSubtree does []byte++. Returns false if overflow.
+func NextSubtree(in []byte) ([]byte, bool) {
+ r := make([]byte, len(in))
+ copy(r, in)
+ for i := len(r) - 1; i >= 0; i-- {
+ if r[i] != 255 {
+ r[i]++
+ return r, true
+ }
+
+ r = r[:i] // make it shorter, because in tries after 11ff goes 12, but not 1200
+ }
+ return nil, false
+}
diff --git a/erigon-lib/kv/iter/helpers.go b/erigon-lib/kv/iter/helpers.go
new file mode 100644
index 00000000000..05dc18a1015
--- /dev/null
+++ b/erigon-lib/kv/iter/helpers.go
@@ -0,0 +1,114 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package iter
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func ToArr[T any](s Unary[T]) (res []T, err error) {
+ for s.HasNext() {
+ k, err := s.Next()
+ if err != nil {
+ return res, err
+ }
+ res = append(res, k)
+ }
+ return res, nil
+}
+
+func ToDualArray[K, V any](s Dual[K, V]) (keys []K, values []V, err error) {
+ for s.HasNext() {
+ k, v, err := s.Next()
+ if err != nil {
+ return keys, values, err
+ }
+ keys = append(keys, k)
+ values = append(values, v)
+ }
+ return keys, values, nil
+}
+
+func ExpectEqualU64(tb testing.TB, s1, s2 Unary[uint64]) {
+ tb.Helper()
+ ExpectEqual[uint64](tb, s1, s2)
+}
+func ExpectEqual[V comparable](tb testing.TB, s1, s2 Unary[V]) {
+ tb.Helper()
+ for s1.HasNext() && s2.HasNext() {
+ k1, e1 := s1.Next()
+ k2, e2 := s2.Next()
+ require.Equal(tb, e1 == nil, e2 == nil)
+ require.Equal(tb, k1, k2)
+ }
+
+ has1 := s1.HasNext()
+ has2 := s2.HasNext()
+ var label string
+ if has1 {
+ v1, _ := s1.Next()
+ label = fmt.Sprintf("v1: %v", v1)
+ }
+ if has2 {
+ v2, _ := s2.Next()
+ label += fmt.Sprintf(" v2: %v", v2)
+ }
+ require.False(tb, has1, label)
+ require.False(tb, has2, label)
+}
+
+// PairsWithErrorIter - return N, keys and then error
+type PairsWithErrorIter struct {
+ errorAt, i int
+}
+
+func PairsWithError(errorAt int) *PairsWithErrorIter {
+ return &PairsWithErrorIter{errorAt: errorAt}
+}
+func (m *PairsWithErrorIter) HasNext() bool { return true }
+func (m *PairsWithErrorIter) Next() ([]byte, []byte, error) {
+ if m.i >= m.errorAt {
+ return nil, nil, fmt.Errorf("expected error at iteration: %d", m.errorAt)
+ }
+ m.i++
+ return []byte(fmt.Sprintf("%x", m.i)), []byte(fmt.Sprintf("%x", m.i)), nil
+}
+
+func Count[T any](s Unary[T]) (cnt int, err error) {
+ for s.HasNext() {
+ _, err := s.Next()
+ if err != nil {
+ return cnt, err
+ }
+ cnt++
+ }
+ return cnt, err
+}
+
+func CountDual[K, V any](s Dual[K, V]) (cnt int, err error) {
+ for s.HasNext() {
+ _, _, err := s.Next()
+ if err != nil {
+ return cnt, err
+ }
+ cnt++
+ }
+ return cnt, err
+}
diff --git a/erigon-lib/kv/iter/iter.go b/erigon-lib/kv/iter/iter.go
new file mode 100644
index 00000000000..d722caed15e
--- /dev/null
+++ b/erigon-lib/kv/iter/iter.go
@@ -0,0 +1,571 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package iter
+
+import (
+ "bytes"
+
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "golang.org/x/exp/constraints"
+ "golang.org/x/exp/slices"
+)
+
+type Closer interface {
+ Close()
+}
+
+var (
+ EmptyU64 = &EmptyUnary[uint64]{}
+ EmptyKV = &EmptyDual[[]byte, []byte]{}
+)
+
+type (
+ EmptyUnary[T any] struct{}
+ EmptyDual[K, V any] struct{}
+)
+
+func (EmptyUnary[T]) HasNext() bool { return false }
+func (EmptyUnary[T]) Next() (v T, err error) { return v, err }
+func (EmptyDual[K, V]) HasNext() bool { return false }
+func (EmptyDual[K, V]) Next() (k K, v V, err error) { return k, v, err }
+
+type ArrStream[V any] struct {
+ arr []V
+ i int
+}
+
+func ReverseArray[V any](arr []V) *ArrStream[V] {
+ arr = slices.Clone(arr)
+ for i, j := 0, len(arr)-1; i < j; i, j = i+1, j-1 {
+ arr[i], arr[j] = arr[j], arr[i]
+ }
+ return Array(arr)
+}
+func Array[V any](arr []V) *ArrStream[V] { return &ArrStream[V]{arr: arr} }
+func (it *ArrStream[V]) HasNext() bool { return it.i < len(it.arr) }
+func (it *ArrStream[V]) Close() {}
+func (it *ArrStream[V]) Next() (V, error) {
+ v := it.arr[it.i]
+ it.i++
+ return v, nil
+}
+func (it *ArrStream[V]) NextBatch() ([]V, error) {
+ v := it.arr[it.i:]
+ it.i = len(it.arr)
+ return v, nil
+}
+
+func Range[T constraints.Integer](from, to T) *RangeIter[T] {
+ if from == to {
+ to++
+ }
+ return &RangeIter[T]{i: from, to: to}
+}
+
+type RangeIter[T constraints.Integer] struct {
+ i, to T
+}
+
+func (it *RangeIter[T]) HasNext() bool { return it.i < it.to }
+func (it *RangeIter[T]) Close() {}
+func (it *RangeIter[T]) Next() (T, error) {
+ v := it.i
+ it.i++
+ return v, nil
+}
+
+// UnionKVIter - merge 2 kv.Pairs streams to 1 in lexicographically order
+// 1-st stream has higher priority - when 2 streams return same key
+type UnionKVIter struct {
+ x, y KV
+ xHasNext, yHasNext bool
+ xNextK, xNextV []byte
+ yNextK, yNextV []byte
+ limit int
+ err error
+}
+
+func UnionKV(x, y KV, limit int) KV {
+ if x == nil && y == nil {
+ return EmptyKV
+ }
+ if x == nil {
+ return y
+ }
+ if y == nil {
+ return x
+ }
+ m := &UnionKVIter{x: x, y: y, limit: limit}
+ m.advanceX()
+ m.advanceY()
+ return m
+}
+func (m *UnionKVIter) HasNext() bool {
+ return m.err != nil || (m.limit != 0 && m.xHasNext) || (m.limit != 0 && m.yHasNext)
+}
+func (m *UnionKVIter) advanceX() {
+ if m.err != nil {
+ return
+ }
+ m.xHasNext = m.x.HasNext()
+ if m.xHasNext {
+ m.xNextK, m.xNextV, m.err = m.x.Next()
+ }
+}
+func (m *UnionKVIter) advanceY() {
+ if m.err != nil {
+ return
+ }
+ m.yHasNext = m.y.HasNext()
+ if m.yHasNext {
+ m.yNextK, m.yNextV, m.err = m.y.Next()
+ }
+}
+func (m *UnionKVIter) Next() ([]byte, []byte, error) {
+ if m.err != nil {
+ return nil, nil, m.err
+ }
+ m.limit--
+ if m.xHasNext && m.yHasNext {
+ cmp := bytes.Compare(m.xNextK, m.yNextK)
+ if cmp < 0 {
+ k, v, err := m.xNextK, m.xNextV, m.err
+ m.advanceX()
+ return k, v, err
+ } else if cmp == 0 {
+ k, v, err := m.xNextK, m.xNextV, m.err
+ m.advanceX()
+ m.advanceY()
+ return k, v, err
+ }
+ k, v, err := m.yNextK, m.yNextV, m.err
+ m.advanceY()
+ return k, v, err
+ }
+ if m.xHasNext {
+ k, v, err := m.xNextK, m.xNextV, m.err
+ m.advanceX()
+ return k, v, err
+ }
+ k, v, err := m.yNextK, m.yNextV, m.err
+ m.advanceY()
+ return k, v, err
+}
+
+// func (m *UnionKVIter) ToArray() (keys, values [][]byte, err error) { return ToKVArray(m) }
+func (m *UnionKVIter) Close() {
+ if x, ok := m.x.(Closer); ok {
+ x.Close()
+ }
+ if y, ok := m.y.(Closer); ok {
+ y.Close()
+ }
+}
+
+// UnionUnary
+type UnionUnary[T constraints.Ordered] struct {
+ x, y Unary[T]
+ asc bool
+ xHas, yHas bool
+ xNextK, yNextK T
+ err error
+ limit int
+}
+
+func Union[T constraints.Ordered](x, y Unary[T], asc order.By, limit int) Unary[T] {
+ if x == nil && y == nil {
+ return &EmptyUnary[T]{}
+ }
+ if x == nil {
+ return y
+ }
+ if y == nil {
+ return x
+ }
+ if !x.HasNext() {
+ return y
+ }
+ if !y.HasNext() {
+ return x
+ }
+ m := &UnionUnary[T]{x: x, y: y, asc: bool(asc), limit: limit}
+ m.advanceX()
+ m.advanceY()
+ return m
+}
+
+func (m *UnionUnary[T]) HasNext() bool {
+ return m.err != nil || (m.limit != 0 && m.xHas) || (m.limit != 0 && m.yHas)
+}
+func (m *UnionUnary[T]) advanceX() {
+ if m.err != nil {
+ return
+ }
+ m.xHas = m.x.HasNext()
+ if m.xHas {
+ m.xNextK, m.err = m.x.Next()
+ }
+}
+func (m *UnionUnary[T]) advanceY() {
+ if m.err != nil {
+ return
+ }
+ m.yHas = m.y.HasNext()
+ if m.yHas {
+ m.yNextK, m.err = m.y.Next()
+ }
+}
+
+func (m *UnionUnary[T]) less() bool {
+ return (m.asc && m.xNextK < m.yNextK) || (!m.asc && m.xNextK > m.yNextK)
+}
+
+func (m *UnionUnary[T]) Next() (res T, err error) {
+ if m.err != nil {
+ return res, m.err
+ }
+ m.limit--
+ if m.xHas && m.yHas {
+ if m.less() {
+ k, err := m.xNextK, m.err
+ m.advanceX()
+ return k, err
+ } else if m.xNextK == m.yNextK {
+ k, err := m.xNextK, m.err
+ m.advanceX()
+ m.advanceY()
+ return k, err
+ }
+ k, err := m.yNextK, m.err
+ m.advanceY()
+ return k, err
+ }
+ if m.xHas {
+ k, err := m.xNextK, m.err
+ m.advanceX()
+ return k, err
+ }
+ k, err := m.yNextK, m.err
+ m.advanceY()
+ return k, err
+}
+func (m *UnionUnary[T]) Close() {
+ if x, ok := m.x.(Closer); ok {
+ x.Close()
+ }
+ if y, ok := m.y.(Closer); ok {
+ y.Close()
+ }
+}
+
+// IntersectIter
+type IntersectIter[T constraints.Ordered] struct {
+ x, y Unary[T]
+ xHasNext, yHasNext bool
+ xNextK, yNextK T
+ limit int
+ err error
+}
+
+func Intersect[T constraints.Ordered](x, y Unary[T], limit int) Unary[T] {
+ if x == nil || y == nil || !x.HasNext() || !y.HasNext() {
+ return &EmptyUnary[T]{}
+ }
+ m := &IntersectIter[T]{x: x, y: y, limit: limit}
+ m.advance()
+ return m
+}
+func (m *IntersectIter[T]) HasNext() bool {
+ return m.err != nil || (m.limit != 0 && m.xHasNext && m.yHasNext)
+}
+func (m *IntersectIter[T]) advance() {
+ m.advanceX()
+ m.advanceY()
+ for m.xHasNext && m.yHasNext {
+ if m.err != nil {
+ break
+ }
+ if m.xNextK < m.yNextK {
+ m.advanceX()
+ continue
+ } else if m.xNextK == m.yNextK {
+ return
+ } else {
+ m.advanceY()
+ continue
+ }
+ }
+ m.xHasNext = false
+}
+
+func (m *IntersectIter[T]) advanceX() {
+ if m.err != nil {
+ return
+ }
+ m.xHasNext = m.x.HasNext()
+ if m.xHasNext {
+ m.xNextK, m.err = m.x.Next()
+ }
+}
+func (m *IntersectIter[T]) advanceY() {
+ if m.err != nil {
+ return
+ }
+ m.yHasNext = m.y.HasNext()
+ if m.yHasNext {
+ m.yNextK, m.err = m.y.Next()
+ }
+}
+func (m *IntersectIter[T]) Next() (T, error) {
+ if m.err != nil {
+ return m.xNextK, m.err
+ }
+ m.limit--
+ k, err := m.xNextK, m.err
+ m.advance()
+ return k, err
+}
+func (m *IntersectIter[T]) Close() {
+ if x, ok := m.x.(Closer); ok {
+ x.Close()
+ }
+ if y, ok := m.y.(Closer); ok {
+ y.Close()
+ }
+}
+
+// TransformDualIter - analog `map` (in terms of map-filter-reduce pattern)
+type TransformDualIter[K, V any] struct {
+ it Dual[K, V]
+ transform func(K, V) (K, V, error)
+}
+
+func TransformDual[K, V any](it Dual[K, V], transform func(K, V) (K, V, error)) *TransformDualIter[K, V] {
+ return &TransformDualIter[K, V]{it: it, transform: transform}
+}
+func (m *TransformDualIter[K, V]) HasNext() bool { return m.it.HasNext() }
+func (m *TransformDualIter[K, V]) Next() (K, V, error) {
+ k, v, err := m.it.Next()
+ if err != nil {
+ return k, v, err
+ }
+ return m.transform(k, v)
+}
+func (m *TransformDualIter[K, v]) Close() {
+ if x, ok := m.it.(Closer); ok {
+ x.Close()
+ }
+}
+
+type TransformKV2U64Iter[K, V []byte] struct {
+ it KV
+ transform func(K, V) (uint64, error)
+}
+
+func TransformKV2U64[K, V []byte](it KV, transform func(K, V) (uint64, error)) *TransformKV2U64Iter[K, V] {
+ return &TransformKV2U64Iter[K, V]{it: it, transform: transform}
+}
+func (m *TransformKV2U64Iter[K, V]) HasNext() bool { return m.it.HasNext() }
+func (m *TransformKV2U64Iter[K, V]) Next() (uint64, error) {
+ k, v, err := m.it.Next()
+ if err != nil {
+ return 0, err
+ }
+ return m.transform(k, v)
+}
+func (m *TransformKV2U64Iter[K, v]) Close() {
+ if x, ok := m.it.(Closer); ok {
+ x.Close()
+ }
+}
+
+// FilterDualIter - analog `map` (in terms of map-filter-reduce pattern)
+// please avoid reading from Disk/DB more elements and then filter them. Better
+// push-down filter conditions to lower-level iterator to reduce disk reads amount.
+type FilterDualIter[K, V any] struct {
+ it Dual[K, V]
+ filter func(K, V) bool
+ hasNext bool
+ err error
+ nextK K
+ nextV V
+}
+
+func FilterKV(it KV, filter func(k, v []byte) bool) *FilterDualIter[[]byte, []byte] {
+ return FilterDual[[]byte, []byte](it, filter)
+}
+func FilterDual[K, V any](it Dual[K, V], filter func(K, V) bool) *FilterDualIter[K, V] {
+ i := &FilterDualIter[K, V]{it: it, filter: filter}
+ i.advance()
+ return i
+}
+func (m *FilterDualIter[K, V]) advance() {
+ if m.err != nil {
+ return
+ }
+ m.hasNext = false
+ for m.it.HasNext() {
+ // create new variables, to avoid leaking outside of loop
+ key, val, err := m.it.Next()
+ if err != nil {
+ m.err = err
+ return
+ }
+ if m.filter(key, val) {
+ m.hasNext = true
+ m.nextK, m.nextV = key, val
+ break
+ }
+ }
+}
+func (m *FilterDualIter[K, V]) HasNext() bool { return m.err != nil || m.hasNext }
+func (m *FilterDualIter[K, V]) Next() (k K, v V, err error) {
+ k, v, err = m.nextK, m.nextV, m.err
+ m.advance()
+ return k, v, err
+}
+func (m *FilterDualIter[K, v]) Close() {
+ if x, ok := m.it.(Closer); ok {
+ x.Close()
+ }
+}
+
+// FilterUnaryIter - analog `map` (in terms of map-filter-reduce pattern)
+// please avoid reading from Disk/DB more elements and then filter them. Better
+// push-down filter conditions to lower-level iterator to reduce disk reads amount.
+type FilterUnaryIter[T any] struct {
+ it Unary[T]
+ filter func(T) bool
+ hasNext bool
+ err error
+ nextK T
+}
+
+func FilterU64(it U64, filter func(k uint64) bool) *FilterUnaryIter[uint64] {
+ return FilterUnary[uint64](it, filter)
+}
+func FilterUnary[T any](it Unary[T], filter func(T) bool) *FilterUnaryIter[T] {
+ i := &FilterUnaryIter[T]{it: it, filter: filter}
+ i.advance()
+ return i
+}
+func (m *FilterUnaryIter[T]) advance() {
+ if m.err != nil {
+ return
+ }
+ m.hasNext = false
+ for m.it.HasNext() {
+ // create new variables, to avoid leaking outside of loop
+ key, err := m.it.Next()
+ if err != nil {
+ m.err = err
+ return
+ }
+ if m.filter(key) {
+ m.hasNext, m.nextK = true, key
+ break
+ }
+ }
+}
+func (m *FilterUnaryIter[T]) HasNext() bool { return m.err != nil || m.hasNext }
+func (m *FilterUnaryIter[T]) Next() (k T, err error) {
+ k, err = m.nextK, m.err
+ m.advance()
+ return k, err
+}
+func (m *FilterUnaryIter[T]) Close() {
+ if x, ok := m.it.(Closer); ok {
+ x.Close()
+ }
+}
+
+// PaginatedIter - for remote-list pagination
+//
+// Rationale: If an API does not support pagination from the start, supporting it later is troublesome because adding pagination breaks the API's behavior. Clients that are unaware that the API now uses pagination could incorrectly assume that they received a complete result, when in fact they only received the first page.
+//
+// To support pagination (returning list results in pages) in a List method, the API shall:
+// - The client uses this field to request a specific page of the list results.
+// - define an int32 field page_size in the List method's request message. Clients use this field to specify the maximum number of results to be returned by the server. The server may further constrain the maximum number of results returned in a single page. If the page_size is 0, the server will decide the number of results to be returned.
+// - define a string field next_page_token in the List method's response message. This field represents the pagination token to retrieve the next page of results. If the value is "", it means no further results for the request.
+//
+// see: https://cloud.google.com/apis/design/design_patterns
+type Paginated[T any] struct {
+ arr []T
+ i int
+ err error
+ nextPage NextPageUnary[T]
+ nextPageToken string
+ initialized bool
+}
+
+func Paginate[T any](f NextPageUnary[T]) *Paginated[T] { return &Paginated[T]{nextPage: f} }
+func (it *Paginated[T]) HasNext() bool {
+ if it.err != nil || it.i < len(it.arr) {
+ return true
+ }
+ if it.initialized && it.nextPageToken == "" {
+ return false
+ }
+ it.initialized = true
+ it.i = 0
+ it.arr, it.nextPageToken, it.err = it.nextPage(it.nextPageToken)
+ return it.err != nil || it.i < len(it.arr)
+}
+func (it *Paginated[T]) Close() {}
+func (it *Paginated[T]) Next() (v T, err error) {
+ if it.err != nil {
+ return v, it.err
+ }
+ v = it.arr[it.i]
+ it.i++
+ return v, nil
+}
+
+type PaginatedDual[K, V any] struct {
+ keys []K
+ values []V
+ i int
+ err error
+ nextPage NextPageDual[K, V]
+ nextPageToken string
+ initialized bool
+}
+
+func PaginateDual[K, V any](f NextPageDual[K, V]) *PaginatedDual[K, V] {
+ return &PaginatedDual[K, V]{nextPage: f}
+}
+func (it *PaginatedDual[K, V]) HasNext() bool {
+ if it.err != nil || it.i < len(it.keys) {
+ return true
+ }
+ if it.initialized && it.nextPageToken == "" {
+ return false
+ }
+ it.initialized = true
+ it.i = 0
+ it.keys, it.values, it.nextPageToken, it.err = it.nextPage(it.nextPageToken)
+ return it.err != nil || it.i < len(it.keys)
+}
+func (it *PaginatedDual[K, V]) Close() {}
+func (it *PaginatedDual[K, V]) Next() (k K, v V, err error) {
+ if it.err != nil {
+ return k, v, it.err
+ }
+ k, v = it.keys[it.i], it.values[it.i]
+ it.i++
+ return k, v, nil
+}
diff --git a/erigon-lib/kv/iter/iter_interface.go b/erigon-lib/kv/iter/iter_interface.go
new file mode 100644
index 00000000000..dbe0e6ba4f1
--- /dev/null
+++ b/erigon-lib/kv/iter/iter_interface.go
@@ -0,0 +1,115 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package iter
+
+// Iterators - composable high-level abstraction to iterate over. It's more high-level than kv.Cursor and provides less controll, less features, but enough to build an app.
+//
+// for s.HasNext() {
+// k, v, err := s.Next()
+// if err != nil {
+// return err
+// }
+// }
+// Invariants:
+// 1. HasNext() is Idempotent
+// 2. K, V are valid at-least 2 .Next() calls! It allows zero-copy composition of iterators. Example: iter.Union
+// - 1 value used by User and 1 value used internally by iter.Union
+// 3. No `Close` method: all streams produced by TemporalTx will be closed inside `tx.Rollback()` (by casting to `kv.Closer`)
+// 4. automatically checks cancelation of `ctx` passed to `db.Begin(ctx)`, can skip this
+// check in loops on stream. Dual has very limited API - user has no way to
+// terminate it - but user can specify more strict conditions when creating stream (then server knows better when to stop)
+
+// Dual - return 2 items - usually called Key and Value (or `k` and `v`)
+// Example:
+//
+// for s.HasNext() {
+// k, v, err := s.Next()
+// if err != nil {
+// return err
+// }
+// }
+type Dual[K, V any] interface {
+ Next() (K, V, error)
+ HasNext() bool
+}
+
+// Unary - return 1 item. Example:
+//
+// for s.HasNext() {
+// v, err := s.Next()
+// if err != nil {
+// return err
+// }
+// }
+type Unary[V any] interface {
+ Next() (V, error)
+ //NextBatch() ([]V, error)
+ HasNext() bool
+}
+
+// KV - return 2 items of type []byte - usually called Key and Value (or `k` and `v`). Example:
+//
+// for s.HasNext() {
+// k, v, err := s.Next()
+// if err != nil {
+// return err
+// }
+// }
+
+// often used shortcuts
+type (
+ U64 Unary[uint64]
+ KV Dual[[]byte, []byte]
+)
+
+func ToU64Arr(s U64) ([]uint64, error) { return ToArr[uint64](s) }
+func ToKVArray(s KV) ([][]byte, [][]byte, error) { return ToDualArray[[]byte, []byte](s) }
+
+func ToArrU64Must(s U64) []uint64 {
+ arr, err := ToArr[uint64](s)
+ if err != nil {
+ panic(err)
+ }
+ return arr
+}
+func ToArrKVMust(s KV) ([][]byte, [][]byte) {
+ keys, values, err := ToDualArray[[]byte, []byte](s)
+ if err != nil {
+ panic(err)
+ }
+ return keys, values
+}
+
+func CountU64(s U64) (int, error) { return Count[uint64](s) }
+func CountKV(s KV) (int, error) { return CountDual[[]byte, []byte](s) }
+
+func TransformKV(it KV, transform func(k, v []byte) ([]byte, []byte, error)) *TransformDualIter[[]byte, []byte] {
+ return TransformDual[[]byte, []byte](it, transform)
+}
+
+// internal types
+type (
+ NextPageUnary[T any] func(pageToken string) (arr []T, nextPageToken string, err error)
+ NextPageDual[K, V any] func(pageToken string) (keys []K, values []V, nextPageToken string, err error)
+)
+
+func PaginateKV(f NextPageDual[[]byte, []byte]) *PaginatedDual[[]byte, []byte] {
+ return PaginateDual[[]byte, []byte](f)
+}
+func PaginateU64(f NextPageUnary[uint64]) *Paginated[uint64] {
+ return Paginate[uint64](f)
+}
diff --git a/erigon-lib/kv/iter/iter_test.go b/erigon-lib/kv/iter/iter_test.go
new file mode 100644
index 00000000000..2c0a02a1ba3
--- /dev/null
+++ b/erigon-lib/kv/iter/iter_test.go
@@ -0,0 +1,416 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package iter_test
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/iter"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUnion(t *testing.T) {
+ t.Run("arrays", func(t *testing.T) {
+ s1 := iter.Array[uint64]([]uint64{1, 3, 6, 7})
+ s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8})
+ s3 := iter.Union[uint64](s1, s2, order.Asc, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1, 2, 3, 6, 7, 8}, res)
+
+ s1 = iter.ReverseArray[uint64]([]uint64{1, 3, 6, 7})
+ s2 = iter.ReverseArray[uint64]([]uint64{2, 3, 7, 8})
+ s3 = iter.Union[uint64](s1, s2, order.Desc, -1)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{8, 7, 6, 3, 2, 1}, res)
+
+ s1 = iter.ReverseArray[uint64]([]uint64{1, 3, 6, 7})
+ s2 = iter.ReverseArray[uint64]([]uint64{2, 3, 7, 8})
+ s3 = iter.Union[uint64](s1, s2, order.Desc, 2)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{8, 7}, res)
+
+ })
+ t.Run("empty left", func(t *testing.T) {
+ s1 := iter.EmptyU64
+ s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8})
+ s3 := iter.Union[uint64](s1, s2, order.Asc, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{2, 3, 7, 8}, res)
+ })
+ t.Run("empty right", func(t *testing.T) {
+ s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7})
+ s2 := iter.EmptyU64
+ s3 := iter.Union[uint64](s1, s2, order.Asc, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1, 3, 4, 5, 6, 7}, res)
+ })
+ t.Run("empty", func(t *testing.T) {
+ s1 := iter.EmptyU64
+ s2 := iter.EmptyU64
+ s3 := iter.Union[uint64](s1, s2, order.Asc, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+ })
+}
+func TestUnionPairs(t *testing.T) {
+ db := memdb.NewTestDB(t)
+ ctx := context.Background()
+ t.Run("simple", func(t *testing.T) {
+ require := require.New(t)
+ tx, _ := db.BeginRw(ctx)
+ defer tx.Rollback()
+ _ = tx.Put(kv.E2AccountsHistory, []byte{1}, []byte{1})
+ _ = tx.Put(kv.E2AccountsHistory, []byte{3}, []byte{1})
+ _ = tx.Put(kv.E2AccountsHistory, []byte{4}, []byte{1})
+ _ = tx.Put(kv.PlainState, []byte{2}, []byte{9})
+ _ = tx.Put(kv.PlainState, []byte{3}, []byte{9})
+ it, _ := tx.Range(kv.E2AccountsHistory, nil, nil)
+ it2, _ := tx.Range(kv.PlainState, nil, nil)
+ keys, values, err := iter.ToKVArray(iter.UnionKV(it, it2, -1))
+ require.NoError(err)
+ require.Equal([][]byte{{1}, {2}, {3}, {4}}, keys)
+ require.Equal([][]byte{{1}, {9}, {1}, {1}}, values)
+ })
+ t.Run("empty 1st", func(t *testing.T) {
+ require := require.New(t)
+ tx, _ := db.BeginRw(ctx)
+ defer tx.Rollback()
+ _ = tx.Put(kv.PlainState, []byte{2}, []byte{9})
+ _ = tx.Put(kv.PlainState, []byte{3}, []byte{9})
+ it, _ := tx.Range(kv.E2AccountsHistory, nil, nil)
+ it2, _ := tx.Range(kv.PlainState, nil, nil)
+ keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1))
+ require.NoError(err)
+ require.Equal([][]byte{{2}, {3}}, keys)
+ })
+ t.Run("empty 2nd", func(t *testing.T) {
+ require := require.New(t)
+ tx, _ := db.BeginRw(ctx)
+ defer tx.Rollback()
+ _ = tx.Put(kv.E2AccountsHistory, []byte{1}, []byte{1})
+ _ = tx.Put(kv.E2AccountsHistory, []byte{3}, []byte{1})
+ _ = tx.Put(kv.E2AccountsHistory, []byte{4}, []byte{1})
+ it, _ := tx.Range(kv.E2AccountsHistory, nil, nil)
+ it2, _ := tx.Range(kv.PlainState, nil, nil)
+ keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1))
+ require.NoError(err)
+ require.Equal([][]byte{{1}, {3}, {4}}, keys)
+ })
+ t.Run("empty both", func(t *testing.T) {
+ require := require.New(t)
+ tx, _ := db.BeginRw(ctx)
+ defer tx.Rollback()
+ it, _ := tx.Range(kv.E2AccountsHistory, nil, nil)
+ it2, _ := tx.Range(kv.PlainState, nil, nil)
+ m := iter.UnionKV(it, it2, -1)
+ require.False(m.HasNext())
+ })
+ t.Run("error handling", func(t *testing.T) {
+ require := require.New(t)
+ tx, _ := db.BeginRw(ctx)
+ defer tx.Rollback()
+ it := iter.PairsWithError(10)
+ it2 := iter.PairsWithError(12)
+ keys, _, err := iter.ToKVArray(iter.UnionKV(it, it2, -1))
+ require.Equal("expected error at iteration: 10", err.Error())
+ require.Equal(10, len(keys))
+ })
+}
+
+func TestIntersect(t *testing.T) {
+ t.Run("intersect", func(t *testing.T) {
+ s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7})
+ s2 := iter.Array[uint64]([]uint64{2, 3, 7})
+ s3 := iter.Intersect[uint64](s1, s2, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{3, 7}, res)
+
+ s1 = iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7})
+ s2 = iter.Array[uint64]([]uint64{2, 3, 7})
+ s3 = iter.Intersect[uint64](s1, s2, 1)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{3}, res)
+ })
+ t.Run("empty left", func(t *testing.T) {
+ s1 := iter.EmptyU64
+ s2 := iter.Array[uint64]([]uint64{2, 3, 7, 8})
+ s3 := iter.Intersect[uint64](s1, s2, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+
+ s2 = iter.Array[uint64]([]uint64{2, 3, 7, 8})
+ s3 = iter.Intersect[uint64](nil, s2, -1)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+ })
+ t.Run("empty right", func(t *testing.T) {
+ s1 := iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7})
+ s2 := iter.EmptyU64
+ s3 := iter.Intersect[uint64](s1, s2, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, nil, res)
+
+ s1 = iter.Array[uint64]([]uint64{1, 3, 4, 5, 6, 7})
+ s3 = iter.Intersect[uint64](s1, nil, -1)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+ })
+ t.Run("empty", func(t *testing.T) {
+ s1 := iter.EmptyU64
+ s2 := iter.EmptyU64
+ s3 := iter.Intersect[uint64](s1, s2, -1)
+ res, err := iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+
+ s3 = iter.Intersect[uint64](nil, nil, -1)
+ res, err = iter.ToArr[uint64](s3)
+ require.NoError(t, err)
+ require.Nil(t, res)
+ })
+}
+
+func TestRange(t *testing.T) {
+ t.Run("range", func(t *testing.T) {
+ s1 := iter.Range[uint64](1, 4)
+ res, err := iter.ToArr[uint64](s1)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1, 2, 3}, res)
+ })
+ t.Run("empty", func(t *testing.T) {
+ s1 := iter.Range[uint64](1, 1)
+ res, err := iter.ToArr[uint64](s1)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1}, res)
+ })
+}
+
+func TestPaginated(t *testing.T) {
+ t.Run("paginated", func(t *testing.T) {
+ i := 0
+ s1 := iter.Paginate[uint64](func(pageToken string) (arr []uint64, nextPageToken string, err error) {
+ i++
+ switch i {
+ case 1:
+ return []uint64{1, 2, 3}, "test", nil
+ case 2:
+ return []uint64{4, 5, 6}, "test", nil
+ case 3:
+ return []uint64{7}, "", nil
+ case 4:
+ panic("must not happen")
+ }
+ return
+ })
+ res, err := iter.ToArr[uint64](s1)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7}, res)
+
+ //idempotency
+ require.False(t, s1.HasNext())
+ require.False(t, s1.HasNext())
+ })
+ t.Run("error", func(t *testing.T) {
+ i := 0
+ testErr := fmt.Errorf("test")
+ s1 := iter.Paginate[uint64](func(pageToken string) (arr []uint64, nextPageToken string, err error) {
+ i++
+ switch i {
+ case 1:
+ return []uint64{1, 2, 3}, "test", nil
+ case 2:
+ return nil, "test", testErr
+ case 3:
+ panic("must not happen")
+ }
+ return
+ })
+ res, err := iter.ToArr[uint64](s1)
+ require.ErrorIs(t, err, testErr)
+ require.Equal(t, []uint64{1, 2, 3}, res)
+
+ //idempotency
+ require.True(t, s1.HasNext())
+ require.True(t, s1.HasNext())
+ _, err = s1.Next()
+ require.ErrorIs(t, err, testErr)
+ })
+ t.Run("empty", func(t *testing.T) {
+ s1 := iter.Paginate[uint64](func(pageToken string) (arr []uint64, nextPageToken string, err error) {
+ return []uint64{}, "", nil
+ })
+ res, err := iter.ToArr[uint64](s1)
+ require.NoError(t, err)
+ require.Nil(t, res)
+
+ //idempotency
+ require.False(t, s1.HasNext())
+ require.False(t, s1.HasNext())
+ })
+}
+
+func TestPaginatedDual(t *testing.T) {
+ t.Run("paginated", func(t *testing.T) {
+ i := 0
+ s1 := iter.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) {
+ i++
+ switch i {
+ case 1:
+ return [][]byte{{1}, {2}, {3}}, [][]byte{{1}, {2}, {3}}, "test", nil
+ case 2:
+ return [][]byte{{4}, {5}, {6}}, [][]byte{{4}, {5}, {6}}, "test", nil
+ case 3:
+ return [][]byte{{7}}, [][]byte{{7}}, "", nil
+ case 4:
+ panic("must not happen")
+ }
+ return
+ })
+
+ keys, values, err := iter.ToKVArray(s1)
+ require.NoError(t, err)
+ require.Equal(t, [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}}, keys)
+ require.Equal(t, [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}}, values)
+
+ //idempotency
+ require.False(t, s1.HasNext())
+ require.False(t, s1.HasNext())
+ })
+ t.Run("error", func(t *testing.T) {
+ i := 0
+ testErr := fmt.Errorf("test")
+ s1 := iter.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) {
+ i++
+ switch i {
+ case 1:
+ return [][]byte{{1}, {2}, {3}}, [][]byte{{1}, {2}, {3}}, "test", nil
+ case 2:
+ return nil, nil, "test", testErr
+ case 3:
+ panic("must not happen")
+ }
+ return
+ })
+ keys, values, err := iter.ToKVArray(s1)
+ require.ErrorIs(t, err, testErr)
+ require.Equal(t, [][]byte{{1}, {2}, {3}}, keys)
+ require.Equal(t, [][]byte{{1}, {2}, {3}}, values)
+
+ //idempotency
+ require.True(t, s1.HasNext())
+ require.True(t, s1.HasNext())
+ _, _, err = s1.Next()
+ require.ErrorIs(t, err, testErr)
+ })
+ t.Run("empty", func(t *testing.T) {
+ s1 := iter.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) {
+ return [][]byte{}, [][]byte{}, "", nil
+ })
+ keys, values, err := iter.ToKVArray(s1)
+ require.NoError(t, err)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ //idempotency
+ require.False(t, s1.HasNext())
+ require.False(t, s1.HasNext())
+ })
+}
+
+func TestFiler(t *testing.T) {
+ createKVIter := func() iter.KV {
+ i := 0
+ return iter.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) {
+ i++
+ switch i {
+ case 1:
+ return [][]byte{{1}, {2}, {3}}, [][]byte{{1}, {2}, {3}}, "test", nil
+ case 2:
+ return nil, nil, "", nil
+ }
+ return
+ })
+
+ }
+ t.Run("dual", func(t *testing.T) {
+ s2 := iter.FilterKV(createKVIter(), func(k, v []byte) bool { return bytes.Equal(k, []byte{1}) })
+ keys, values, err := iter.ToKVArray(s2)
+ require.NoError(t, err)
+ require.Equal(t, [][]byte{{1}}, keys)
+ require.Equal(t, [][]byte{{1}}, values)
+
+ s2 = iter.FilterKV(createKVIter(), func(k, v []byte) bool { return bytes.Equal(k, []byte{3}) })
+ keys, values, err = iter.ToKVArray(s2)
+ require.NoError(t, err)
+ require.Equal(t, [][]byte{{3}}, keys)
+ require.Equal(t, [][]byte{{3}}, values)
+
+ s2 = iter.FilterKV(createKVIter(), func(k, v []byte) bool { return bytes.Equal(k, []byte{4}) })
+ keys, values, err = iter.ToKVArray(s2)
+ require.NoError(t, err)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ s2 = iter.FilterKV(iter.EmptyKV, func(k, v []byte) bool { return bytes.Equal(k, []byte{4}) })
+ keys, values, err = iter.ToKVArray(s2)
+ require.NoError(t, err)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+ })
+ t.Run("unary", func(t *testing.T) {
+ s1 := iter.Array[uint64]([]uint64{1, 2, 3})
+ s2 := iter.FilterU64(s1, func(k uint64) bool { return k == 1 })
+ res, err := iter.ToU64Arr(s2)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{1}, res)
+
+ s1 = iter.Array[uint64]([]uint64{1, 2, 3})
+ s2 = iter.FilterU64(s1, func(k uint64) bool { return k == 3 })
+ res, err = iter.ToU64Arr(s2)
+ require.NoError(t, err)
+ require.Equal(t, []uint64{3}, res)
+
+ s1 = iter.Array[uint64]([]uint64{1, 2, 3})
+ s2 = iter.FilterU64(s1, func(k uint64) bool { return k == 4 })
+ res, err = iter.ToU64Arr(s2)
+ require.NoError(t, err)
+ require.Nil(t, res)
+
+ s2 = iter.FilterU64(iter.EmptyU64, func(k uint64) bool { return k == 4 })
+ res, err = iter.ToU64Arr(s2)
+ require.NoError(t, err)
+ require.Nil(t, res)
+ })
+}
diff --git a/erigon-lib/kv/kv_interface.go b/erigon-lib/kv/kv_interface.go
new file mode 100644
index 00000000000..852e08601f6
--- /dev/null
+++ b/erigon-lib/kv/kv_interface.go
@@ -0,0 +1,554 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package kv
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "github.com/ledgerwatch/erigon-lib/kv/iter"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+)
+
+//Variables Naming:
+// tx - Database Transaction
+// txn - Ethereum Transaction (and TxNum - is also number of Etherum Transaction)
+// blockNum - Ethereum block number - same across all nodes. blockID - auto-increment ID - which can be differrent across all nodes
+// txNum/txID - same
+// RoTx - Read-Only Database Transaction. RwTx - read-write
+// k, v - key, value
+// ts - TimeStamp. Usually it's Etherum's TransactionNumber (auto-increment ID). Or BlockNumber.
+// Cursor - low-level mdbx-tide api to navigate over Table
+// Iter - high-level iterator-like api over Table/InvertedIndex/History/Domain. Has less features than Cursor. See package `iter`.
+
+//Methods Naming:
+// Prune: delete old data
+// Unwind: delete recent data
+// Get: exact match of criterias
+// Range: [from, to). from=nil means StartOfTable, to=nil means EndOfTable, rangeLimit=-1 means Unlimited
+// Range is analog of SQL's: SELECT * FROM Table WHERE k>=from AND k stream of server-side pushes ---
+
+ // Range [from, to)
+ // Range(from, nil) means [from, EndOfTable)
+ // Range(nil, to) means [StartOfTable, to)
+ Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error)
+ // Stream is like Range, but for requesting huge data (Example: full table scan). Client can't stop it.
+ //Stream(table string, fromPrefix, toPrefix []byte) (iter.KV, error)
+ // RangeAscend - like Range [from, to) but also allow pass Limit parameters
+ // Limit -1 means Unlimited
+ RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error)
+ //StreamAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error)
+ // RangeDescend - is like Range [from, to), but expecing `from`<`to`
+ // example: RangeDescend("Table", "B", "A", -1)
+ RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error)
+ //StreamDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error)
+ // Prefix - is exactly Range(Table, prefix, kv.NextSubtree(prefix))
+ Prefix(table string, prefix []byte) (iter.KV, error)
+
+ // RangeDupSort - like Range but for fixed single key and iterating over range of values
+ RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error)
+
+ // --- High-Level methods: 1request -> 1page of values in response -> send next page request ---
+ // Paginate(table string, fromPrefix, toPrefix []byte) (PairsStream, error)
+
+ // --- High-Level deprecated methods ---
+
+ ForEach(table string, fromPrefix []byte, walker func(k, v []byte) error) error
+ ForPrefix(table string, prefix []byte, walker func(k, v []byte) error) error
+ ForAmount(table string, prefix []byte, amount uint32, walker func(k, v []byte) error) error
+
+ // Pointer to the underlying C transaction handle (e.g. *C.MDBX_txn)
+ CHandle() unsafe.Pointer
+ BucketSize(table string) (uint64, error)
+}
+
+// RwTx
+//
+// WARNING:
+// - RwTx is not threadsafe and may only be used in the goroutine that created it.
+// - ReadOnly transactions do not lock goroutine to thread, RwTx does
+// - User Can't call runtime.LockOSThread/runtime.UnlockOSThread in same goroutine until RwTx Commit/Rollback
+type RwTx interface {
+ Tx
+ StatelessWriteTx
+ BucketMigrator
+
+ RwCursor(table string) (RwCursor, error)
+ RwCursorDupSort(table string) (RwCursorDupSort, error)
+
+ // CollectMetrics - does collect all DB-related and Tx-related metrics
+ // this method exists only in RwTx to avoid concurrency
+ CollectMetrics()
+}
+
+type BucketMigratorRO interface {
+ ListBuckets() ([]string, error)
+}
+
+// BucketMigrator used for buckets migration, don't use it in usual app code
+type BucketMigrator interface {
+ BucketMigratorRO
+ DropBucket(string) error
+ CreateBucket(string) error
+ ExistsBucket(string) (bool, error)
+ ClearBucket(string) error
+}
+
+// Cursor - class for navigating through a database
+// CursorDupSort are inherit this class
+//
+// If methods (like First/Next/Seek) return error, then returned key SHOULD not be nil (can be []byte{} for example).
+// Then looping code will look as:
+// c := kv.Cursor(bucketName)
+//
+// for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
+// if err != nil {
+// return err
+// }
+// ... logic
+// }
+type Cursor interface {
+ First() ([]byte, []byte, error) // First - position at first key/data item
+ Seek(seek []byte) ([]byte, []byte, error) // Seek - position at first key greater than or equal to specified key
+ SeekExact(key []byte) ([]byte, []byte, error) // SeekExact - position at exact matching key if exists
+ Next() ([]byte, []byte, error) // Next - position at next key/value (can iterate over DupSort key/values automatically)
+ Prev() ([]byte, []byte, error) // Prev - position at previous key
+ Last() ([]byte, []byte, error) // Last - position at last key and last possible value
+ Current() ([]byte, []byte, error) // Current - return key/data at current cursor position
+
+ Count() (uint64, error) // Count - fast way to calculate amount of keys in bucket. It counts all keys even if Prefix was set.
+
+ Close()
+}
+
+type RwCursor interface {
+ Cursor
+
+ Put(k, v []byte) error // Put - based on order
+ Append(k []byte, v []byte) error // Append - append the given key/data pair to the end of the database. This option allows fast bulk loading when keys are already known to be in the correct order.
+ Delete(k []byte) error // Delete - short version of SeekExact+DeleteCurrent or SeekBothExact+DeleteCurrent
+
+ // DeleteCurrent This function deletes the key/data pair to which the cursor refers.
+ // This does not invalidate the cursor, so operations such as MDB_NEXT
+ // can still be used on it.
+ // Both MDB_NEXT and MDB_GET_CURRENT will return the same record after
+ // this operation.
+ DeleteCurrent() error
+}
+
+// CursorDupSort
+//
+// Example:
+//
+// for k, v, err = cursor.First(); k != nil; k, v, err = cursor.NextNoDup() {
+// if err != nil {
+// return err
+// }
+// for ; v != nil; _, v, err = cursor.NextDup() {
+// if err != nil {
+// return err
+// }
+//
+// }
+// }
+type CursorDupSort interface {
+ Cursor
+
+ // SeekBothExact -
+ // second parameter can be nil only if searched key has no duplicates, or return error
+ SeekBothExact(key, value []byte) ([]byte, []byte, error)
+ SeekBothRange(key, value []byte) ([]byte, error) // SeekBothRange - exact match of the key, but range match of the value
+ FirstDup() ([]byte, error) // FirstDup - position at first data item of current key
+ NextDup() ([]byte, []byte, error) // NextDup - position at next data item of current key
+ NextNoDup() ([]byte, []byte, error) // NextNoDup - position at first data item of next key
+ PrevDup() ([]byte, []byte, error)
+ PrevNoDup() ([]byte, []byte, error)
+ LastDup() ([]byte, error) // LastDup - position at last data item of current key
+
+ CountDuplicates() (uint64, error) // CountDuplicates - number of duplicates for the current key
+}
+
+type RwCursorDupSort interface {
+ CursorDupSort
+ RwCursor
+
+ PutNoDupData(key, value []byte) error // PutNoDupData - inserts key without dupsort
+ DeleteCurrentDuplicates() error // DeleteCurrentDuplicates - deletes all of the data items for the current key
+ DeleteExact(k1, k2 []byte) error // DeleteExact - delete 1 value from given key
+ AppendDup(key, value []byte) error // AppendDup - same as Append, but for sorted dup data
+}
+
+// ---- Temporal part
+
+type (
+ Domain string
+ History string
+ InvertedIdx string
+)
+
+type TemporalTx interface {
+ Tx
+ DomainGet(name Domain, k, k2 []byte) (v []byte, ok bool, err error)
+ DomainGetAsOf(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error)
+ HistoryGet(name History, k []byte, ts uint64) (v []byte, ok bool, err error)
+
+ // IndexRange - return iterator over range of inverted index for given key `k`
+ // Asc semantic: [from, to) AND from > to
+ // Desc semantic: [from, to) AND from < to
+ // Limit -1 means Unlimited
+ // from -1, to -1 means unbounded (StartOfTable, EndOfTable)
+ // Example: IndexRange("IndexName", 10, 5, order.Desc, -1)
+ // Example: IndexRange("IndexName", -1, -1, order.Asc, 10)
+ IndexRange(name InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error)
+ HistoryRange(name History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error)
+ DomainRange(name Domain, fromKey, toKey []byte, ts uint64, asc order.By, limit int) (it iter.KV, err error)
+}
diff --git a/erigon-lib/kv/kvcache/cache.go b/erigon-lib/kv/kvcache/cache.go
new file mode 100644
index 00000000000..700029a4fa1
--- /dev/null
+++ b/erigon-lib/kv/kvcache/cache.go
@@ -0,0 +1,971 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kvcache
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ btree2 "github.com/tidwall/btree"
+ "golang.org/x/crypto/sha3"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/metrics"
+)
+
+type CacheValidationResult struct {
+ RequestCancelled bool
+ Enabled bool
+ LatestStateBehind bool
+ CacheCleared bool
+ LatestStateID uint64
+ StateKeysOutOfSync [][]byte
+ CodeKeysOutOfSync [][]byte
+}
+
+type Cache interface {
+ // View - returns CacheView consistent with givent kv.Tx
+ View(ctx context.Context, tx kv.Tx) (CacheView, error)
+ OnNewBlock(sc *remote.StateChangeBatch)
+ Len() int
+ ValidateCurrentRoot(ctx context.Context, tx kv.Tx) (*CacheValidationResult, error)
+}
+type CacheView interface {
+ Get(k []byte) ([]byte, error)
+ GetCode(k []byte) ([]byte, error)
+}
+
+// Coherent works on top of Database Transaction and pair Coherent+ReadTransaction must
+// provide "Serializable Isolation Level" semantic: all data form consistent db view at moment
+// when read transaction started, read data are immutable until end of read transaction, reader can't see newer updates
+//
+// Every time a new state change comes, we do the following:
+// - Check that prevBlockHeight and prevBlockHash match what is the top values we have, and if they don't we
+// invalidate the cache, because we missed some messages and cannot consider the cache coherent anymore.
+// - Clone the cache pointer (such that the previous pointer is still accessible, but new one shared the content with it),
+// apply state updates to the cloned cache pointer and save under the new identified made from blockHeight and blockHash.
+// - If there is a conditional variable corresponding to the identifier, remove it from the map and notify conditional
+// variable, waking up the read-only transaction waiting on it.
+//
+// On the other hand, whenever we have a cache miss (by looking at the top cache), we do the following:
+// - Once read the current block height and block hash (canonical) from underlying db transaction
+// - Construct the identifier from the current block height and block hash
+// - Look for the constructed identifier in the cache. If the identifier is found, use the corresponding
+// cache in conjunction with this read-only transaction (it will be consistent with it). If the identifier is
+// not found, it means that the transaction has been committed in Erigon, but the state update has not
+// arrived yet (as shown in the picture on the right). Insert conditional variable for this identifier and wait on
+// it until either cache with the given identifier appears, or timeout (indicating that the cache update
+// mechanism is broken and cache is likely invalidated).
+//
+
+// Pair.Value == nil - is a marker of absense key in db
+
+// Coherent
+// High-level guaranties:
+// - Keys/Values returned by cache are valid/immutable until end of db transaction
+// - CacheView is always coherent with given db transaction -
+//
+// Rules of set view.isCanonical value:
+// - method View can't parent.Clone() - because parent view is not coherent with current kv.Tx
+// - only OnNewBlock method may do parent.Clone() and apply StateChanges to create coherent view of kv.Tx
+// - parent.Clone() can't be caled if parent.isCanonical=false
+// - only OnNewBlock method can set view.isCanonical=true
+//
+// Rules of filling cache.stateEvict:
+// - changes in Canonical View SHOULD reflect in stateEvict
+// - changes in Non-Canonical View SHOULD NOT reflect in stateEvict
+type Coherent struct {
+ hasher hash.Hash
+ codeEvictLen metrics.Gauge
+ codeKeys metrics.Gauge
+ keys metrics.Gauge
+ evict metrics.Gauge
+ latestStateView *CoherentRoot
+ codeMiss metrics.Counter
+ timeout metrics.Counter
+ hits metrics.Counter
+ codeHits metrics.Counter
+ roots map[uint64]*CoherentRoot
+ stateEvict *ThreadSafeEvictionList
+ codeEvict *ThreadSafeEvictionList
+ miss metrics.Counter
+ cfg CoherentConfig
+ latestStateVersionID uint64
+ lock sync.Mutex
+ waitExceededCount atomic.Int32 // used as a circuit breaker to stop the cache waiting for new blocks
+}
+
+type CoherentRoot struct {
+ cache *btree2.BTreeG[*Element]
+ codeCache *btree2.BTreeG[*Element]
+ ready chan struct{} // close when ready
+ readyChanClosed atomic.Bool // protecting `ready` field from double-close (on unwind). Consumers don't need check this field.
+
+ // Views marked as `Canonical` if it received onNewBlock message
+ // we may drop `Non-Canonical` views even if they had fresh keys
+ // keys added to `Non-Canonical` views SHOULD NOT be added to stateEvict
+ // cache.latestStateView is always `Canonical`
+ isCanonical bool
+}
+
+// CoherentView - dumb object, which proxy all requests to Coherent object.
+// It's thread-safe, because immutable
+type CoherentView struct {
+ tx kv.Tx
+ cache *Coherent
+ stateVersionID uint64
+}
+
+func (c *CoherentView) Get(k []byte) ([]byte, error) { return c.cache.Get(k, c.tx, c.stateVersionID) }
+func (c *CoherentView) GetCode(k []byte) ([]byte, error) {
+ return c.cache.GetCode(k, c.tx, c.stateVersionID)
+}
+
+var _ Cache = (*Coherent)(nil) // compile-time interface check
+var _ CacheView = (*CoherentView)(nil) // compile-time interface check
+
+const (
+ DEGREE = 32
+ MAX_WAITS = 100
+)
+
+type CoherentConfig struct {
+ CacheSize datasize.ByteSize
+ CodeCacheSize datasize.ByteSize
+ WaitForNewBlock bool // should we wait 10ms for a new block message to arrive when calling View?
+ WithStorage bool
+ MetricsLabel string
+ NewBlockWait time.Duration // how long wait
+ KeepViews uint64 // keep in memory up to this amount of views, evict older
+}
+
+var DefaultCoherentConfig = CoherentConfig{
+ KeepViews: 5,
+ NewBlockWait: 5 * time.Millisecond,
+ CacheSize: 2 * datasize.GB,
+ CodeCacheSize: 2 * datasize.GB,
+ MetricsLabel: "default",
+ WithStorage: true,
+ WaitForNewBlock: true,
+}
+
+func New(cfg CoherentConfig) *Coherent {
+ if cfg.KeepViews == 0 {
+ panic("empty config passed")
+ }
+
+ return &Coherent{
+ roots: map[uint64]*CoherentRoot{},
+ stateEvict: &ThreadSafeEvictionList{l: NewList()},
+ codeEvict: &ThreadSafeEvictionList{l: NewList()},
+ hasher: sha3.NewLegacyKeccak256(),
+ cfg: cfg,
+ miss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
+ hits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
+ timeout: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_timeout_total{name="%s"}`, cfg.MetricsLabel)),
+ keys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_keys_total{name="%s"}`, cfg.MetricsLabel)),
+ evict: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_list_total{name="%s"}`, cfg.MetricsLabel)),
+ codeMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="miss",name="%s"}`, cfg.MetricsLabel)),
+ codeHits: metrics.GetOrCreateCounter(fmt.Sprintf(`cache_code_total{result="hit",name="%s"}`, cfg.MetricsLabel)),
+ codeKeys: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_keys_total{name="%s"}`, cfg.MetricsLabel)),
+ codeEvictLen: metrics.GetOrCreateGauge(fmt.Sprintf(`cache_code_list_total{name="%s"}`, cfg.MetricsLabel)),
+ }
+}
+
+// selectOrCreateRoot - used for usual getting root
+func (c *Coherent) selectOrCreateRoot(versionID uint64) *CoherentRoot {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ r, ok := c.roots[versionID]
+ if ok {
+ return r
+ }
+
+ r = &CoherentRoot{
+ ready: make(chan struct{}),
+ cache: btree2.NewBTreeG[*Element](Less),
+ codeCache: btree2.NewBTreeG[*Element](Less),
+ }
+ c.roots[versionID] = r
+ return r
+}
+
+// advanceRoot - used for advancing root onNewBlock
+func (c *Coherent) advanceRoot(stateVersionID uint64) (r *CoherentRoot) {
+ r, rootExists := c.roots[stateVersionID]
+
+ // if nothing has progressed just return the existing root
+ if c.latestStateVersionID == stateVersionID && rootExists {
+ return r
+ }
+
+ if !rootExists {
+ r = &CoherentRoot{ready: make(chan struct{})}
+ c.roots[stateVersionID] = r
+ }
+
+ if prevView, ok := c.roots[stateVersionID-1]; ok && prevView.isCanonical {
+ //log.Info("advance: clone", "from", viewID-1, "to", viewID)
+ r.cache = prevView.cache.Copy()
+ r.codeCache = prevView.codeCache.Copy()
+ } else {
+ c.stateEvict.Init()
+ c.codeEvict.Init()
+ if r.cache == nil {
+ //log.Info("advance: new", "to", viewID)
+ r.cache = btree2.NewBTreeG[*Element](Less)
+ r.codeCache = btree2.NewBTreeG[*Element](Less)
+ } else {
+ r.cache.Walk(func(items []*Element) bool {
+ for _, i := range items {
+ c.stateEvict.PushFront(i)
+ }
+ return true
+ })
+ r.codeCache.Walk(func(items []*Element) bool {
+ for _, i := range items {
+ c.codeEvict.PushFront(i)
+ }
+ return true
+ })
+ }
+ }
+ r.isCanonical = true
+
+ c.evictRoots()
+ c.latestStateVersionID = stateVersionID
+ c.latestStateView = r
+
+ c.keys.SetInt(c.latestStateView.cache.Len())
+ c.codeKeys.SetInt(c.latestStateView.codeCache.Len())
+ c.evict.SetInt(c.stateEvict.Len())
+ c.codeEvictLen.SetInt(c.codeEvict.Len())
+ return r
+}
+
+func (c *Coherent) OnNewBlock(stateChanges *remote.StateChangeBatch) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.waitExceededCount.Store(0) // reset the circuit breaker
+ id := stateChanges.StateVersionId
+ r := c.advanceRoot(id)
+ for _, sc := range stateChanges.ChangeBatch {
+ for i := range sc.Changes {
+ switch sc.Changes[i].Action {
+ case remote.Action_UPSERT:
+ addr := gointerfaces.ConvertH160toAddress(sc.Changes[i].Address)
+ v := sc.Changes[i].Data
+ //fmt.Printf("set: %x,%x\n", addr, v)
+ c.add(addr[:], v, r, id)
+ case remote.Action_UPSERT_CODE:
+ addr := gointerfaces.ConvertH160toAddress(sc.Changes[i].Address)
+ v := sc.Changes[i].Data
+ c.add(addr[:], v, r, id)
+ c.hasher.Reset()
+ c.hasher.Write(sc.Changes[i].Code)
+ k := make([]byte, 32)
+ c.hasher.Sum(k)
+ c.addCode(k, sc.Changes[i].Code, r, id)
+ case remote.Action_REMOVE:
+ addr := gointerfaces.ConvertH160toAddress(sc.Changes[i].Address)
+ c.add(addr[:], nil, r, id)
+ case remote.Action_STORAGE:
+ //skip, will check later
+ case remote.Action_CODE:
+ c.hasher.Reset()
+ c.hasher.Write(sc.Changes[i].Code)
+ k := make([]byte, 32)
+ c.hasher.Sum(k)
+ c.addCode(k, sc.Changes[i].Code, r, id)
+ default:
+ panic("not implemented yet")
+ }
+ if c.cfg.WithStorage && len(sc.Changes[i].StorageChanges) > 0 {
+ addr := gointerfaces.ConvertH160toAddress(sc.Changes[i].Address)
+ for _, change := range sc.Changes[i].StorageChanges {
+ loc := gointerfaces.ConvertH256ToHash(change.Location)
+ k := make([]byte, 20+8+32)
+ copy(k, addr[:])
+ binary.BigEndian.PutUint64(k[20:], sc.Changes[i].Incarnation)
+ copy(k[20+8:], loc[:])
+ c.add(k, change.Data, r, id)
+ }
+ }
+ }
+ }
+
+ switched := r.readyChanClosed.CompareAndSwap(false, true)
+ if switched {
+ close(r.ready) //broadcast
+ }
+ //log.Info("on new block handled", "viewID", stateChanges.StateVersionID)
+}
+
+func (c *Coherent) View(ctx context.Context, tx kv.Tx) (CacheView, error) {
+ idBytes, err := tx.GetOne(kv.Sequence, kv.PlainStateVersion)
+ if err != nil {
+ return nil, err
+ }
+ var id uint64
+ if len(idBytes) == 0 {
+ id = 0
+ } else {
+ id = binary.BigEndian.Uint64(idBytes)
+ }
+ r := c.selectOrCreateRoot(id)
+
+ if !c.cfg.WaitForNewBlock || c.waitExceededCount.Load() >= MAX_WAITS {
+ return &CoherentView{stateVersionID: id, tx: tx, cache: c}, nil
+ }
+
+ select { // fast non-blocking path
+ case <-r.ready:
+ //fmt.Printf("recv broadcast: %d\n", id)
+ return &CoherentView{stateVersionID: id, tx: tx, cache: c}, nil
+ default:
+ }
+
+ select { // slow blocking path
+ case <-r.ready:
+ //fmt.Printf("recv broadcast2: %d\n", tx.ViewID())
+ case <-ctx.Done():
+ return nil, fmt.Errorf("kvcache rootNum=%x, %w", tx.ViewID(), ctx.Err())
+ case <-time.After(c.cfg.NewBlockWait): //TODO: switch to timer to save resources
+ c.timeout.Inc()
+ c.waitExceededCount.Add(1)
+ //log.Info("timeout", "db_id", id, "has_btree", r.cache != nil)
+ }
+ return &CoherentView{stateVersionID: id, tx: tx, cache: c}, nil
+}
+
+func (c *Coherent) getFromCache(k []byte, id uint64, code bool) (*Element, *CoherentRoot, error) {
+ // using the full lock here rather than RLock as RLock causes a lot of calls to runtime.usleep degrading
+ // performance under load
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ r, ok := c.roots[id]
+ if !ok {
+ return nil, r, fmt.Errorf("too old ViewID: %d, latestStateVersionID=%d", id, c.latestStateVersionID)
+ }
+ isLatest := c.latestStateVersionID == id
+
+ var it *Element
+ if code {
+ it, _ = r.codeCache.Get(&Element{K: k})
+ } else {
+ it, _ = r.cache.Get(&Element{K: k})
+ }
+ if it != nil && isLatest {
+ c.stateEvict.MoveToFront(it)
+ }
+
+ return it, r, nil
+}
+func (c *Coherent) Get(k []byte, tx kv.Tx, id uint64) ([]byte, error) {
+ it, r, err := c.getFromCache(k, id, false)
+ if err != nil {
+ return nil, err
+ }
+
+ if it != nil {
+ //fmt.Printf("from cache: %#x,%x\n", k, it.(*Element).V)
+ c.hits.Inc()
+ return it.V, nil
+ }
+ c.miss.Inc()
+
+ v, err := tx.GetOne(kv.PlainState, k)
+ if err != nil {
+ return nil, err
+ }
+ //fmt.Printf("from db: %#x,%x\n", k, v)
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ v = c.add(common.Copy(k), common.Copy(v), r, id).V
+ return v, nil
+}
+
+func (c *Coherent) GetCode(k []byte, tx kv.Tx, id uint64) ([]byte, error) {
+ it, r, err := c.getFromCache(k, id, true)
+ if err != nil {
+ return nil, err
+ }
+
+ if it != nil {
+ //fmt.Printf("from cache: %#x,%x\n", k, it.(*Element).V)
+ c.codeHits.Inc()
+ return it.V, nil
+ }
+ c.codeMiss.Inc()
+
+ v, err := tx.GetOne(kv.Code, k)
+ if err != nil {
+ return nil, err
+ }
+ //fmt.Printf("from db: %#x,%x\n", k, v)
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ v = c.addCode(common.Copy(k), common.Copy(v), r, id).V
+ return v, nil
+}
+func (c *Coherent) removeOldest(r *CoherentRoot) {
+ e := c.stateEvict.Oldest()
+ if e != nil {
+ c.stateEvict.Remove(e)
+ r.cache.Delete(e)
+ }
+}
+func (c *Coherent) removeOldestCode(r *CoherentRoot) {
+ e := c.codeEvict.Oldest()
+ if e != nil {
+ c.codeEvict.Remove(e)
+ r.codeCache.Delete(e)
+ }
+}
+func (c *Coherent) add(k, v []byte, r *CoherentRoot, id uint64) *Element {
+ it := &Element{K: k, V: v}
+ replaced, _ := r.cache.Set(it)
+ if c.latestStateVersionID != id {
+ //fmt.Printf("add to non-last viewID: %d<%d\n", c.latestViewID, id)
+ return it
+ }
+ if replaced != nil {
+ c.stateEvict.Remove(replaced)
+ }
+ c.stateEvict.PushFront(it)
+
+ // clear down cache until size below the configured limit
+ for c.stateEvict.Size() > int(c.cfg.CacheSize.Bytes()) {
+ c.removeOldest(r)
+ }
+
+ return it
+}
+func (c *Coherent) addCode(k, v []byte, r *CoherentRoot, id uint64) *Element {
+ it := &Element{K: k, V: v}
+ replaced, _ := r.codeCache.Set(it)
+ if c.latestStateVersionID != id {
+ //fmt.Printf("add to non-last viewID: %d<%d\n", c.latestViewID, id)
+ return it
+ }
+ if replaced != nil {
+ c.codeEvict.Remove(replaced)
+ }
+ c.codeEvict.PushFront(it)
+
+ for c.codeEvict.Size() > int(c.cfg.CodeCacheSize.Bytes()) {
+ c.removeOldestCode(r)
+ }
+
+ return it
+}
+
+func (c *Coherent) ValidateCurrentRoot(ctx context.Context, tx kv.Tx) (*CacheValidationResult, error) {
+
+ result := &CacheValidationResult{
+ Enabled: true,
+ RequestCancelled: false,
+ }
+
+ select {
+ case <-ctx.Done():
+ result.RequestCancelled = true
+ return result, nil
+ default:
+ }
+
+ idBytes, err := tx.GetOne(kv.Sequence, kv.PlainStateVersion)
+ if err != nil {
+ return nil, err
+ }
+ stateID := binary.BigEndian.Uint64(idBytes)
+ result.LatestStateID = stateID
+
+ // if the latest view id in the cache is not the same as the tx or one below it
+ // then the cache will be a new one for the next call so return early
+ if stateID > c.latestStateVersionID {
+ result.LatestStateBehind = true
+ return result, nil
+ }
+
+ root := c.selectOrCreateRoot(c.latestStateVersionID)
+
+ // ensure the root is ready or wait and press on
+ select {
+ case <-root.ready:
+ case <-time.After(c.cfg.NewBlockWait):
+ }
+
+ // check context again after potentially waiting for root to be ready
+ select {
+ case <-ctx.Done():
+ result.RequestCancelled = true
+ return result, nil
+ default:
+ }
+
+ clearCache := false
+
+ compare := func(cache *btree2.BTreeG[*Element], bucket string) (bool, [][]byte, error) {
+ keys := make([][]byte, 0)
+
+ for {
+ val, ok := cache.PopMax()
+ if !ok {
+ break
+ }
+
+ // check the db
+ inDb, err := tx.GetOne(bucket, val.K)
+ if err != nil {
+ return false, keys, err
+ }
+
+ if !bytes.Equal(inDb, val.V) {
+ keys = append(keys, val.K)
+ clearCache = true
+ }
+
+ select {
+ case <-ctx.Done():
+ return true, keys, nil
+ default:
+ }
+ }
+
+ return false, keys, nil
+ }
+
+ cache, codeCache := c.cloneCaches(root)
+
+ cancelled, keys, err := compare(cache, kv.PlainState)
+ if err != nil {
+ return nil, err
+ }
+ result.StateKeysOutOfSync = keys
+ if cancelled {
+ result.RequestCancelled = true
+ return result, nil
+ }
+
+ cancelled, keys, err = compare(codeCache, kv.Code)
+ if err != nil {
+ return nil, err
+ }
+ result.CodeKeysOutOfSync = keys
+ if cancelled {
+ result.RequestCancelled = true
+ return result, nil
+ }
+
+ if clearCache {
+ c.clearCaches(root)
+ }
+ result.CacheCleared = clearCache
+
+ return result, nil
+}
+
+func (c *Coherent) cloneCaches(r *CoherentRoot) (cache *btree2.BTreeG[*Element], codeCache *btree2.BTreeG[*Element]) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ cache = r.cache.Copy()
+ codeCache = r.codeCache.Copy()
+ return cache, codeCache
+}
+
+func (c *Coherent) clearCaches(r *CoherentRoot) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ r.cache.Clear()
+ r.codeCache.Clear()
+}
+
+type Stat struct {
+ BlockNum uint64
+ BlockHash [32]byte
+ Lenght int
+}
+
+func DebugStats(cache Cache) []Stat {
+ res := []Stat{}
+ casted, ok := cache.(*Coherent)
+ if !ok {
+ return res
+ }
+ casted.lock.Lock()
+ for root, r := range casted.roots {
+ res = append(res, Stat{
+ BlockNum: root,
+ Lenght: r.cache.Len(),
+ })
+ }
+ casted.lock.Unlock()
+ sort.Slice(res, func(i, j int) bool { return res[i].BlockNum < res[j].BlockNum })
+ return res
+}
+func AssertCheckValues(ctx context.Context, tx kv.Tx, cache Cache) (int, error) {
+ defer func(t time.Time) { fmt.Printf("AssertCheckValues:327: %s\n", time.Since(t)) }(time.Now())
+ view, err := cache.View(ctx, tx)
+ if err != nil {
+ return 0, err
+ }
+ castedView, ok := view.(*CoherentView)
+ if !ok {
+ return 0, nil
+ }
+ casted, ok := cache.(*Coherent)
+ if !ok {
+ return 0, nil
+ }
+ checked := 0
+ casted.lock.Lock()
+ defer casted.lock.Unlock()
+ //log.Info("AssertCheckValues start", "db_id", tx.ViewID(), "mem_id", casted.id.Load(), "len", casted.cache.Len())
+ root, ok := casted.roots[castedView.stateVersionID]
+ if !ok {
+ return 0, nil
+ }
+ root.cache.Walk(func(items []*Element) bool {
+ for _, i := range items {
+ k, v := i.K, i.V
+ var dbV []byte
+ dbV, err = tx.GetOne(kv.PlainState, k)
+ if err != nil {
+ return false
+ }
+ if !bytes.Equal(dbV, v) {
+ err = fmt.Errorf("key: %x, has different values: %x != %x", k, v, dbV)
+ return false
+ }
+ checked++
+ }
+ return true
+ })
+ return checked, err
+}
+func (c *Coherent) evictRoots() {
+ if c.latestStateVersionID <= c.cfg.KeepViews {
+ return
+ }
+ if len(c.roots) < int(c.cfg.KeepViews) {
+ return
+ }
+ to := c.latestStateVersionID - c.cfg.KeepViews
+ toDel := make([]uint64, 0, len(c.roots))
+ for txID := range c.roots {
+ if txID > to {
+ continue
+ }
+ toDel = append(toDel, txID)
+ }
+ //log.Info("forget old roots", "list", fmt.Sprintf("%d", toDel))
+ for _, txID := range toDel {
+ delete(c.roots, txID)
+ }
+}
+func (c *Coherent) Len() int {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.latestStateView == nil {
+ return 0
+ }
+ return c.latestStateView.cache.Len() //todo: is it same with cache.len()?
+}
+
+// Element is an element of a linked list.
+type Element struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Element
+
+ // The list to which this element belongs.
+ list *List
+
+ // The value stored with this element.
+ K, V []byte
+}
+
+func (e *Element) Size() int { return len(e.K) + len(e.V) }
+
+func Less(a, b *Element) bool { return bytes.Compare(a.K, b.K) < 0 }
+
+type ThreadSafeEvictionList struct {
+ l *List
+ lock sync.Mutex
+}
+
+func (l *ThreadSafeEvictionList) Init() {
+ l.lock.Lock()
+ l.l.Init()
+ l.lock.Unlock()
+}
+func (l *ThreadSafeEvictionList) PushFront(e *Element) {
+ l.lock.Lock()
+ l.l.PushFront(e)
+ l.lock.Unlock()
+}
+
+func (l *ThreadSafeEvictionList) MoveToFront(e *Element) {
+ l.lock.Lock()
+ l.l.MoveToFront(e)
+ l.lock.Unlock()
+}
+
+func (l *ThreadSafeEvictionList) Remove(e *Element) {
+ l.lock.Lock()
+ l.l.Remove(e)
+ l.lock.Unlock()
+}
+
+func (l *ThreadSafeEvictionList) Oldest() *Element {
+ l.lock.Lock()
+ e := l.l.Back()
+ l.lock.Unlock()
+ return e
+}
+
+func (l *ThreadSafeEvictionList) Len() int {
+ l.lock.Lock()
+ length := l.l.Len()
+ l.lock.Unlock()
+ return length
+}
+
+func (l *ThreadSafeEvictionList) Size() int {
+ l.lock.Lock()
+ size := l.l.Size()
+ l.lock.Unlock()
+ return size
+}
+
+// ========= copypaste of List implementation from stdlib ========
+
+// Next returns the next list element or nil.
+func (e *Element) Next() *Element {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// Prev returns the previous list element or nil.
+func (e *Element) Prev() *Element {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List struct {
+ root Element // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+ size int // size of items in list in bytes
+}
+
+// Init initializes or clears list l.
+func (l *List) Init() *List {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ l.size = 0
+ return l
+}
+
+// New returns an initialized list.
+func NewList() *List { return new(List).Init() }
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List) Len() int { return l.len }
+
+// Size returns the size of the elements in the list by bytes
+func (l *List) Size() int { return l.size }
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List) Front() *Element {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.next
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List) Back() *Element {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List value.
+func (l *List) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *List) insert(e, at *Element) *Element {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ l.size += e.Size()
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
+func (l *List) insertValue(e, at *Element) *Element {
+ return l.insert(e, at)
+}
+
+// remove removes e from its list, decrements l.len, and returns e.
+func (l *List) remove(e *Element) *Element {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+ l.size -= e.Size()
+ return e
+}
+
+// move moves e to next to at and returns e.
+func (l *List) move(e, at *Element) *Element {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+
+ return e
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List) Remove(e *Element) ([]byte, []byte) {
+ if e.list == l {
+ // if e.list == l, l must have been initialized when e was inserted
+ // in l or l == nil (e is a zero Element) and l.remove will crash
+ l.remove(e)
+ }
+ return e.K, e.V
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *List) PushFront(e *Element) *Element {
+ l.lazyInit()
+ return l.insertValue(e, &l.root)
+}
+
+// PushBack inserts a new element e with value v at the back of list l and returns e.
+func (l *List) PushBack(e *Element) *Element {
+ l.lazyInit()
+ return l.insertValue(e, l.root.prev)
+}
+
+// InsertBefore inserts a new element e with value v immediately before mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List) InsertBefore(e *Element, mark *Element) *Element {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(e, mark.prev)
+}
+
+// InsertAfter inserts a new element e with value v immediately after mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List) InsertAfter(e *Element, mark *Element) *Element {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(e, mark)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List) MoveToFront(e *Element) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List) MoveToBack(e *Element) {
+ if e.list != l || l.root.prev == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, l.root.prev)
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List) MoveBefore(e, mark *Element) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark.prev)
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List) MoveAfter(e, mark *Element) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark)
+}
diff --git a/erigon-lib/kv/kvcache/cache_test.go b/erigon-lib/kv/kvcache/cache_test.go
new file mode 100644
index 00000000000..0f119831afe
--- /dev/null
+++ b/erigon-lib/kv/kvcache/cache_test.go
@@ -0,0 +1,374 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kvcache
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEvictionInUnexpectedOrder(t *testing.T) {
+ // Order: View - 2, OnNewBlock - 2, View - 5, View - 6, OnNewBlock - 3, OnNewBlock - 4, View - 5, OnNewBlock - 5, OnNewBlock - 100
+ require := require.New(t)
+ cfg := DefaultCoherentConfig
+ cfg.CacheSize = 3
+ cfg.NewBlockWait = 0
+ c := New(cfg)
+ c.selectOrCreateRoot(2)
+ require.Equal(1, len(c.roots))
+ require.Equal(0, int(c.latestStateVersionID))
+ require.False(c.roots[2].isCanonical)
+
+ c.add([]byte{1}, nil, c.roots[2], 2)
+ require.Equal(0, c.stateEvict.Len())
+
+ c.advanceRoot(2)
+ require.Equal(1, len(c.roots))
+ require.Equal(2, int(c.latestStateVersionID))
+ require.True(c.roots[2].isCanonical)
+
+ c.add([]byte{1}, nil, c.roots[2], 2)
+ require.Equal(1, c.stateEvict.Len())
+
+ c.selectOrCreateRoot(5)
+ require.Equal(2, len(c.roots))
+ require.Equal(2, int(c.latestStateVersionID))
+ require.False(c.roots[5].isCanonical)
+
+ c.add([]byte{2}, nil, c.roots[5], 5) // not added to evict list
+ require.Equal(1, c.stateEvict.Len())
+ c.add([]byte{2}, nil, c.roots[2], 2) // added to evict list, because it's latest view
+ require.Equal(2, c.stateEvict.Len())
+
+ c.selectOrCreateRoot(6)
+ require.Equal(3, len(c.roots))
+ require.Equal(2, int(c.latestStateVersionID))
+ require.False(c.roots[6].isCanonical) // parrent exists, but parent has isCanonical=false
+
+ c.advanceRoot(3)
+ require.Equal(4, len(c.roots))
+ require.Equal(3, int(c.latestStateVersionID))
+ require.True(c.roots[3].isCanonical)
+
+ c.advanceRoot(4)
+ require.Equal(5, len(c.roots))
+ require.Equal(4, int(c.latestStateVersionID))
+ require.True(c.roots[4].isCanonical)
+
+ c.selectOrCreateRoot(5)
+ require.Equal(5, len(c.roots))
+ require.Equal(4, int(c.latestStateVersionID))
+ require.False(c.roots[5].isCanonical)
+
+ c.advanceRoot(5)
+ require.Equal(5, len(c.roots))
+ require.Equal(5, int(c.latestStateVersionID))
+ require.True(c.roots[5].isCanonical)
+
+ c.advanceRoot(100)
+ require.Equal(6, len(c.roots))
+ require.Equal(100, int(c.latestStateVersionID))
+ require.True(c.roots[100].isCanonical)
+
+ //c.add([]byte{1}, nil, c.roots[2], 2)
+ require.Equal(0, c.latestStateView.cache.Len())
+ require.Equal(0, c.stateEvict.Len())
+}
+
+func TestEviction(t *testing.T) {
+ require, ctx := require.New(t), context.Background()
+ cfg := DefaultCoherentConfig
+ cfg.CacheSize = 21
+ cfg.NewBlockWait = 0
+ c := New(cfg)
+ db := memdb.NewTestDB(t)
+ k1, k2 := [20]byte{1}, [20]byte{2}
+
+ var id uint64
+ _ = db.Update(ctx, func(tx kv.RwTx) error {
+ _ = tx.Put(kv.PlainState, k1[:], []byte{1})
+ id = tx.ViewID()
+ var versionID [8]byte
+ binary.BigEndian.PutUint64(versionID[:], id)
+ _ = tx.Put(kv.Sequence, kv.PlainStateVersion, versionID[:])
+ cacheView, _ := c.View(ctx, tx)
+ view := cacheView.(*CoherentView)
+ _, _ = c.Get(k1[:], tx, view.stateVersionID)
+ _, _ = c.Get([]byte{1}, tx, view.stateVersionID)
+ _, _ = c.Get([]byte{2}, tx, view.stateVersionID)
+ _, _ = c.Get([]byte{3}, tx, view.stateVersionID)
+ //require.Equal(c.roots[c.latestViewID].cache.Len(), c.stateEvict.Len())
+ return nil
+ })
+ require.Equal(0, c.stateEvict.Len())
+ //require.Equal(c.roots[c.latestViewID].cache.Len(), c.stateEvict.Len())
+ c.OnNewBlock(&remote.StateChangeBatch{
+ StateVersionId: id + 1,
+ ChangeBatch: []*remote.StateChange{
+ {
+ Direction: remote.Direction_FORWARD,
+ Changes: []*remote.AccountChange{{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(k1),
+ Data: []byte{2},
+ }},
+ },
+ },
+ })
+ require.Equal(21, c.stateEvict.Size())
+ require.Equal(1, c.stateEvict.Len())
+ require.Equal(c.roots[c.latestStateVersionID].cache.Len(), c.stateEvict.Len())
+ _ = db.Update(ctx, func(tx kv.RwTx) error {
+ _ = tx.Put(kv.PlainState, k1[:], []byte{1})
+ id = tx.ViewID()
+ cacheView, _ := c.View(ctx, tx)
+ var versionID [8]byte
+ binary.BigEndian.PutUint64(versionID[:], id)
+ _ = tx.Put(kv.Sequence, kv.PlainStateVersion, versionID[:])
+ view := cacheView.(*CoherentView)
+ _, _ = c.Get(k1[:], tx, view.stateVersionID)
+ _, _ = c.Get(k2[:], tx, view.stateVersionID)
+ _, _ = c.Get([]byte{5}, tx, view.stateVersionID)
+ _, _ = c.Get([]byte{6}, tx, view.stateVersionID)
+ return nil
+ })
+ require.Equal(c.roots[c.latestStateVersionID].cache.Len(), c.stateEvict.Len())
+ require.Equal(int(cfg.CacheSize.Bytes()), c.stateEvict.Size())
+}
+
+func TestAPI(t *testing.T) {
+ require := require.New(t)
+ c := New(DefaultCoherentConfig)
+ k1, k2 := [20]byte{1}, [20]byte{2}
+ db := memdb.NewTestDB(t)
+ get := func(key [20]byte, expectTxnID uint64) (res [1]chan []byte) {
+ wg := sync.WaitGroup{}
+ for i := 0; i < len(res); i++ {
+ wg.Add(1)
+ res[i] = make(chan []byte)
+ go func(out chan []byte) {
+ require.NoError(db.View(context.Background(), func(tx kv.Tx) error {
+ if expectTxnID != tx.ViewID() {
+ panic(fmt.Sprintf("epxected: %d, got: %d", expectTxnID, tx.ViewID()))
+ }
+ wg.Done()
+ cacheView, err := c.View(context.Background(), tx)
+ view := cacheView.(*CoherentView)
+ if err != nil {
+ panic(err)
+ }
+ v, err := c.Get(key[:], tx, view.stateVersionID)
+ if err != nil {
+ panic(err)
+ }
+ out <- common.Copy(v)
+ return nil
+ }))
+ }(res[i])
+ }
+ wg.Wait() // ensure that all goroutines started their transactions
+ return res
+ }
+ put := func(k, v []byte) uint64 {
+ var txID uint64
+ require.NoError(db.Update(context.Background(), func(tx kv.RwTx) error {
+ _ = tx.Put(kv.PlainState, k, v)
+ txID = tx.ViewID()
+ var versionID [8]byte
+ binary.BigEndian.PutUint64(versionID[:], txID)
+ _ = tx.Put(kv.Sequence, kv.PlainStateVersion, versionID[:])
+ return nil
+ }))
+ return txID
+ }
+ // block 1 - represents existing state (no notifications about this data will come to client)
+ txID1 := put(k2[:], []byte{42})
+
+ wg := sync.WaitGroup{}
+
+ res1, res2 := get(k1, txID1), get(k2, txID1) // will return immediately
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := range res1 {
+ require.Nil(<-res1[i])
+ }
+ for i := range res2 {
+ require.Equal([]byte{42}, <-res2[i])
+ }
+ fmt.Printf("done1: \n")
+ }()
+
+ txID2 := put(k1[:], []byte{2})
+ fmt.Printf("-----1 %d, %d\n", txID1, txID2)
+ res3, res4 := get(k1, txID2), get(k2, txID2) // will see View of transaction 2
+ txID3 := put(k1[:], []byte{3}) // even if core already on block 3
+
+ c.OnNewBlock(&remote.StateChangeBatch{
+ StateVersionId: txID2,
+ PendingBlockBaseFee: 1,
+ ChangeBatch: []*remote.StateChange{
+ {
+ Direction: remote.Direction_FORWARD,
+ BlockHeight: 2,
+ BlockHash: gointerfaces.ConvertHashToH256([32]byte{}),
+ Changes: []*remote.AccountChange{{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(k1),
+ Data: []byte{2},
+ }},
+ },
+ },
+ })
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := range res3 {
+ require.Equal([]byte{2}, <-res3[i])
+ }
+ for i := range res4 {
+ require.Equal([]byte{42}, <-res4[i])
+ }
+ fmt.Printf("done2: \n")
+ }()
+ fmt.Printf("-----2\n")
+
+ res5, res6 := get(k1, txID3), get(k2, txID3) // will see View of transaction 3, even if notification has not enough changes
+ c.OnNewBlock(&remote.StateChangeBatch{
+ StateVersionId: txID3,
+ PendingBlockBaseFee: 1,
+ ChangeBatch: []*remote.StateChange{
+ {
+ Direction: remote.Direction_FORWARD,
+ BlockHeight: 3,
+ BlockHash: gointerfaces.ConvertHashToH256([32]byte{}),
+ Changes: []*remote.AccountChange{{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(k1),
+ Data: []byte{3},
+ }},
+ },
+ },
+ })
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := range res5 {
+ require.Equal([]byte{3}, <-res5[i])
+ }
+ fmt.Printf("-----21\n")
+ for i := range res6 {
+ require.Equal([]byte{42}, <-res6[i])
+ }
+ fmt.Printf("done3: \n")
+ }()
+ fmt.Printf("-----3\n")
+ txID4 := put(k1[:], []byte{2})
+ _ = txID4
+ c.OnNewBlock(&remote.StateChangeBatch{
+ StateVersionId: txID4,
+ PendingBlockBaseFee: 1,
+ ChangeBatch: []*remote.StateChange{
+ {
+ Direction: remote.Direction_UNWIND,
+ BlockHeight: 2,
+ BlockHash: gointerfaces.ConvertHashToH256([32]byte{}),
+ Changes: []*remote.AccountChange{{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(k1),
+ Data: []byte{2},
+ }},
+ },
+ },
+ })
+ fmt.Printf("-----4\n")
+ txID5 := put(k1[:], []byte{4}) // reorg to new chain
+ c.OnNewBlock(&remote.StateChangeBatch{
+ StateVersionId: txID4,
+ PendingBlockBaseFee: 1,
+ ChangeBatch: []*remote.StateChange{
+ {
+ Direction: remote.Direction_FORWARD,
+ BlockHeight: 3,
+ BlockHash: gointerfaces.ConvertHashToH256([32]byte{2}),
+ Changes: []*remote.AccountChange{{
+ Action: remote.Action_UPSERT,
+ Address: gointerfaces.ConvertAddressToH160(k1),
+ Data: []byte{4},
+ }},
+ },
+ },
+ })
+ fmt.Printf("-----5\n")
+
+ res7, res8 := get(k1, txID5), get(k2, txID5) // will see View of transaction 3, even if notification has not enough changes
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := range res7 {
+ require.Equal([]byte{4}, <-res7[i])
+ }
+ for i := range res8 {
+ require.Equal([]byte{42}, <-res8[i])
+ }
+ fmt.Printf("done4: \n")
+ }()
+ err := db.View(context.Background(), func(tx kv.Tx) error {
+ _, err := AssertCheckValues(context.Background(), tx, c)
+ require.NoError(err)
+ return nil
+ })
+ require.NoError(err)
+
+ wg.Wait()
+}
+
+func TestCode(t *testing.T) {
+ require, ctx := require.New(t), context.Background()
+ c := New(DefaultCoherentConfig)
+ db := memdb.NewTestDB(t)
+ k1, k2 := [20]byte{1}, [20]byte{2}
+
+ _ = db.Update(ctx, func(tx kv.RwTx) error {
+ _ = tx.Put(kv.Code, k1[:], k2[:])
+ cacheView, _ := c.View(ctx, tx)
+ view := cacheView.(*CoherentView)
+
+ v, err := c.GetCode(k1[:], tx, view.stateVersionID)
+ require.NoError(err)
+ require.Equal(k2[:], v)
+
+ v, err = c.GetCode(k1[:], tx, view.stateVersionID)
+ require.NoError(err)
+ require.Equal(k2[:], v)
+
+ //require.Equal(c.roots[c.latestViewID].cache.Len(), c.stateEvict.Len())
+ return nil
+ })
+}
diff --git a/erigon-lib/kv/kvcache/dummy.go b/erigon-lib/kv/kvcache/dummy.go
new file mode 100644
index 00000000000..68697d0039e
--- /dev/null
+++ b/erigon-lib/kv/kvcache/dummy.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kvcache
+
+import (
+ "context"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+// DummyCache - doesn't remember anything - can be used when service is not remote
+type DummyCache struct{}
+
+var _ Cache = (*DummyCache)(nil) // compile-time interface check
+var _ CacheView = (*DummyView)(nil) // compile-time interface check
+
+func NewDummy() *DummyCache { return &DummyCache{} }
+func (c *DummyCache) View(_ context.Context, tx kv.Tx) (CacheView, error) {
+ return &DummyView{cache: c, tx: tx}, nil
+}
+func (c *DummyCache) OnNewBlock(sc *remote.StateChangeBatch) {}
+func (c *DummyCache) Evict() int { return 0 }
+func (c *DummyCache) Len() int { return 0 }
+func (c *DummyCache) Get(k []byte, tx kv.Tx, id uint64) ([]byte, error) {
+ return tx.GetOne(kv.PlainState, k)
+}
+func (c *DummyCache) GetCode(k []byte, tx kv.Tx, id uint64) ([]byte, error) {
+ return tx.GetOne(kv.Code, k)
+}
+func (c *DummyCache) ValidateCurrentRoot(_ context.Context, _ kv.Tx) (*CacheValidationResult, error) {
+ return &CacheValidationResult{Enabled: false}, nil
+}
+
+type DummyView struct {
+ cache *DummyCache
+ tx kv.Tx
+}
+
+func (c *DummyView) Get(k []byte) ([]byte, error) { return c.cache.Get(k, c.tx, 0) }
+func (c *DummyView) GetCode(k []byte) ([]byte, error) { return c.cache.GetCode(k, c.tx, 0) }
diff --git a/erigon-lib/kv/kvcfg/accessors_config.go b/erigon-lib/kv/kvcfg/accessors_config.go
new file mode 100644
index 00000000000..5c68771e45d
--- /dev/null
+++ b/erigon-lib/kv/kvcfg/accessors_config.go
@@ -0,0 +1,63 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package kvcfg
+
+import (
+ "context"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type ConfigKey []byte
+
+var (
+ HistoryV3 = ConfigKey("history.v3")
+)
+
+func (k ConfigKey) Enabled(tx kv.Tx) (bool, error) { return kv.GetBool(tx, kv.DatabaseInfo, k) }
+func (k ConfigKey) FromDB(db kv.RoDB) (enabled bool) {
+ if err := db.View(context.Background(), func(tx kv.Tx) error {
+ var err error
+ enabled, err = k.Enabled(tx)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ panic(err)
+ }
+ return enabled
+}
+func (k ConfigKey) WriteOnce(tx kv.RwTx, v bool) (bool, error) {
+ _, enabled, err := kv.EnsureNotChangedBool(tx, kv.DatabaseInfo, k, v)
+ return enabled, err
+}
+func (k ConfigKey) EnsureNotChanged(tx kv.RwTx, value bool) (ok, enabled bool, err error) {
+ return kv.EnsureNotChangedBool(tx, kv.DatabaseInfo, k, value)
+}
+func (k ConfigKey) ForceWrite(tx kv.RwTx, enabled bool) error {
+ if enabled {
+ if err := tx.Put(kv.DatabaseInfo, k, []byte{1}); err != nil {
+ return err
+ }
+ } else {
+ if err := tx.Put(kv.DatabaseInfo, k, []byte{0}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/erigon-lib/kv/mdbx/kv_abstract_test.go b/erigon-lib/kv/mdbx/kv_abstract_test.go
new file mode 100644
index 00000000000..a504eac6407
--- /dev/null
+++ b/erigon-lib/kv/mdbx/kv_abstract_test.go
@@ -0,0 +1,635 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mdbx_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "runtime"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/erigon-lib/kv/remotedb"
+ "github.com/ledgerwatch/erigon-lib/kv/remotedbserver"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/test/bufconn"
+)
+
+func TestSequence(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("fix me on win please")
+ }
+
+ writeDBs, _ := setupDatabases(t, log.New(), func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return defaultBuckets
+ })
+ ctx := context.Background()
+
+ for _, db := range writeDBs {
+ db := db
+ tx, err := db.BeginRw(ctx)
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ i, err := tx.ReadSequence(kv.ChaindataTables[0])
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[0], 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[0], 6)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[0], 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(7), i)
+
+ i, err = tx.ReadSequence(kv.ChaindataTables[1])
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[1], 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[1], 6)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), i)
+ i, err = tx.IncrementSequence(kv.ChaindataTables[1], 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(7), i)
+ tx.Rollback()
+ }
+}
+
+func TestManagedTx(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("fix me on win please")
+ }
+
+ logger := log.New()
+ defaultConfig := kv.ChaindataTablesCfg
+ defer func() {
+ kv.ChaindataTablesCfg = defaultConfig
+ }()
+
+ bucketID := 0
+ bucket1 := kv.ChaindataTables[bucketID]
+ bucket2 := kv.ChaindataTables[bucketID+1]
+ writeDBs, readDBs := setupDatabases(t, logger, func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return map[string]kv.TableCfgItem{
+ bucket1: {
+ Flags: kv.DupSort,
+ AutoDupSortKeysConversion: true,
+ DupToLen: 4,
+ DupFromLen: 6,
+ },
+ bucket2: {
+ Flags: 0,
+ },
+ }
+ })
+
+ ctx := context.Background()
+
+ for _, db := range writeDBs {
+ db := db
+ tx, err := db.BeginRw(ctx)
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ c, err := tx.RwCursor(bucket1)
+ require.NoError(t, err)
+ c1, err := tx.RwCursor(bucket2)
+ require.NoError(t, err)
+ require.NoError(t, c.Append([]byte{0}, []byte{1}))
+ require.NoError(t, c1.Append([]byte{0}, []byte{1}))
+ require.NoError(t, c.Append([]byte{0, 0, 0, 0, 0, 1}, []byte{1})) // prefixes of len=FromLen for DupSort test (other keys must be 0 {
+ opts = opts.DirtySpace(dbg.DirtySpace()) //nolint
+ }
+ if dbg.NoSync() {
+ opts = opts.Flags(func(u uint) uint { return u | mdbx.SafeNoSync }) //nolint
+ }
+ if dbg.MergeTr() > 0 {
+ opts = opts.WriteMergeThreshold(uint64(dbg.MergeTr() * 8192)) //nolint
+ }
+ if dbg.MdbxReadAhead() {
+ opts = opts.Flags(func(u uint) uint { return u &^ mdbx.NoReadahead }) //nolint
+ }
+ if opts.flags&mdbx.Accede != 0 || opts.flags&mdbx.Readonly != 0 {
+ for retry := 0; ; retry++ {
+ exists := dir.FileExist(filepath.Join(opts.path, "mdbx.dat"))
+ if exists {
+ break
+ }
+ if retry >= 5 {
+ return nil, fmt.Errorf("%w, label: %s, path: %s", ErrDBDoesNotExists, opts.label.String(), opts.path)
+ }
+ select {
+ case <-time.After(500 * time.Millisecond):
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+
+ }
+
+ env, err := mdbx.NewEnv()
+ if err != nil {
+ return nil, err
+ }
+ if opts.verbosity != -1 {
+ err = env.SetDebug(mdbx.LogLvl(opts.verbosity), mdbx.DbgDoNotChange, mdbx.LoggerDoNotChange) // temporary disable error, because it works if call it 1 time, but returns error if call it twice in same process (what often happening in tests)
+ if err != nil {
+ return nil, fmt.Errorf("db verbosity set: %w", err)
+ }
+ }
+ if err = env.SetOption(mdbx.OptMaxDB, 200); err != nil {
+ return nil, err
+ }
+ if err = env.SetOption(mdbx.OptMaxReaders, kv.ReadersLimit); err != nil {
+ return nil, err
+ }
+
+ if opts.flags&mdbx.Accede == 0 {
+ if err = env.SetGeometry(-1, -1, int(opts.mapSize), int(opts.growthStep), opts.shrinkThreshold, int(opts.pageSize)); err != nil {
+ return nil, err
+ }
+ if err = os.MkdirAll(opts.path, 0744); err != nil {
+ return nil, fmt.Errorf("could not create dir: %s, %w", opts.path, err)
+ }
+ }
+
+ err = env.Open(opts.path, opts.flags, 0664)
+ if err != nil {
+ if err != nil {
+ return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String())
+ }
+ }
+
+ // mdbx will not change pageSize if db already exists. means need read real value after env.open()
+ in, err := env.Info(nil)
+ if err != nil {
+ if err != nil {
+ return nil, fmt.Errorf("%w, label: %s, trace: %s", err, opts.label.String(), stack2.Trace().String())
+ }
+ }
+
+ opts.pageSize = uint64(in.PageSize)
+ opts.mapSize = datasize.ByteSize(in.MapSize)
+ if opts.label == kv.ChainDB {
+ opts.log.Info("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize)
+ } else {
+ opts.log.Debug("[db] open", "lable", opts.label, "sizeLimit", opts.mapSize, "pageSize", opts.pageSize)
+ }
+
+ // erigon using big transactions
+ // increase "page measured" options. need do it after env.Open() because default are depend on pageSize known only after env.Open()
+ if !opts.HasFlag(mdbx.Accede) && !opts.HasFlag(mdbx.Readonly) {
+ // 1/8 is good for transactions with a lot of modifications - to reduce invalidation size.
+ // But Erigon app now using Batch and etl.Collectors to avoid writing to DB frequently changing data.
+ // It means most of our writes are: APPEND or "single UPSERT per key during transaction"
+ //if err = env.SetOption(mdbx.OptSpillMinDenominator, 8); err != nil {
+ // return nil, err
+ //}
+
+ txnDpInitial, err := env.GetOption(mdbx.OptTxnDpInitial)
+ if err != nil {
+ return nil, err
+ }
+ if err = env.SetOption(mdbx.OptTxnDpInitial, txnDpInitial*2); err != nil {
+ return nil, err
+ }
+ dpReserveLimit, err := env.GetOption(mdbx.OptDpReverseLimit)
+ if err != nil {
+ return nil, err
+ }
+ if err = env.SetOption(mdbx.OptDpReverseLimit, dpReserveLimit*2); err != nil {
+ return nil, err
+ }
+
+ if err = env.SetOption(mdbx.OptTxnDpLimit, opts.dirtySpace/opts.pageSize); err != nil {
+ return nil, err
+ }
+ // must be in the range from 12.5% (almost empty) to 50% (half empty)
+ // which corresponds to the range from 8192 and to 32768 in units respectively
+ if err = env.SetOption(mdbx.OptMergeThreshold16dot16Percent, opts.mergeThreshold); err != nil {
+ return nil, err
+ }
+ }
+
+ dirtyPagesLimit, err := env.GetOption(mdbx.OptTxnDpLimit)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.syncPeriod != 0 {
+ if err = env.SetSyncPeriod(opts.syncPeriod); err != nil {
+ env.Close()
+ return nil, err
+ }
+ }
+ //if err := env.SetOption(mdbx.OptSyncBytes, uint64(math2.MaxUint64)); err != nil {
+ // return nil, err
+ //}
+
+ if opts.roTxsLimiter == nil {
+ targetSemCount := int64(runtime.GOMAXPROCS(-1) * 16)
+ opts.roTxsLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen
+ }
+ db := &MdbxKV{
+ opts: opts,
+ env: env,
+ log: opts.log,
+ wg: &sync.WaitGroup{},
+ buckets: kv.TableCfg{},
+ txSize: dirtyPagesLimit * opts.pageSize,
+ roTxsLimiter: opts.roTxsLimiter,
+
+ leakDetector: dbg.NewLeakDetector("db."+opts.label.String(), dbg.SlowTx()),
+ }
+
+ customBuckets := opts.bucketsCfg(kv.ChaindataTablesCfg)
+ for name, cfg := range customBuckets { // copy map to avoid changing global variable
+ db.buckets[name] = cfg
+ }
+
+ buckets := bucketSlice(db.buckets)
+ if err := db.openDBIs(buckets); err != nil {
+ return nil, err
+ }
+
+ // Configure buckets and open deprecated buckets
+ if err := env.View(func(tx *mdbx.Txn) error {
+ for _, name := range buckets {
+ // Open deprecated buckets if they exist, don't create
+ if !db.buckets[name].IsDeprecated {
+ continue
+ }
+ cnfCopy := db.buckets[name]
+ dbi, createErr := tx.OpenDBI(name, mdbx.DBAccede, nil, nil)
+ if createErr != nil {
+ if mdbx.IsNotFound(createErr) {
+ cnfCopy.DBI = NonExistingDBI
+ db.buckets[name] = cnfCopy
+ continue // if deprecated bucket couldn't be open - then it's deleted and it's fine
+ } else {
+ return fmt.Errorf("bucket: %s, %w", name, createErr)
+ }
+ }
+ cnfCopy.DBI = kv.DBI(dbi)
+ db.buckets[name] = cnfCopy
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ if !opts.inMem {
+ if staleReaders, err := db.env.ReaderCheck(); err != nil {
+ db.log.Error("failed ReaderCheck", "err", err)
+ } else if staleReaders > 0 {
+ db.log.Info("cleared reader slots from dead processes", "amount", staleReaders)
+ }
+
+ }
+ db.path = opts.path
+ addToPathDbMap(opts.path, db)
+ return db, nil
+}
+
+func (opts MdbxOpts) MustOpen() kv.RwDB {
+ db, err := opts.Open(context.Background())
+ if err != nil {
+ panic(fmt.Errorf("fail to open mdbx: %w", err))
+ }
+ return db
+}
+
+type MdbxKV struct {
+ log log.Logger
+ env *mdbx.Env
+ wg *sync.WaitGroup
+ buckets kv.TableCfg
+ roTxsLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can be shared with other components (like Decompressor)
+ opts MdbxOpts
+ txSize uint64
+ closed atomic.Bool
+ path string
+
+ leakDetector *dbg.LeakDetector
+}
+
+func (db *MdbxKV) PageSize() uint64 { return db.opts.pageSize }
+func (db *MdbxKV) ReadOnly() bool { return db.opts.HasFlag(mdbx.Readonly) }
+func (db *MdbxKV) Accede() bool { return db.opts.HasFlag(mdbx.Accede) }
+
+func (db *MdbxKV) CHandle() unsafe.Pointer {
+ return db.env.CHandle()
+}
+
+// openDBIs - first trying to open existing DBI's in RO transaction
+// otherwise re-try by RW transaction
+// it allow open DB from another process - even if main process holding long RW transaction
+func (db *MdbxKV) openDBIs(buckets []string) error {
+ if db.ReadOnly() || db.Accede() {
+ return db.View(context.Background(), func(tx kv.Tx) error {
+ for _, name := range buckets {
+ if db.buckets[name].IsDeprecated {
+ continue
+ }
+ if err := tx.(kv.BucketMigrator).CreateBucket(name); err != nil {
+ return err
+ }
+ }
+ return tx.Commit() // when open db as read-only, commit of this RO transaction is required
+ })
+ }
+
+ return db.Update(context.Background(), func(tx kv.RwTx) error {
+ for _, name := range buckets {
+ if db.buckets[name].IsDeprecated {
+ continue
+ }
+ if err := tx.(kv.BucketMigrator).CreateBucket(name); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+// Close closes db
+// All transactions must be closed before closing the database.
+func (db *MdbxKV) Close() {
+ if ok := db.closed.CompareAndSwap(false, true); !ok {
+ return
+ }
+ db.wg.Wait()
+ db.env.Close()
+ db.env = nil
+
+ if db.opts.inMem {
+ if err := os.RemoveAll(db.opts.path); err != nil {
+ db.log.Warn("failed to remove in-mem db file", "err", err)
+ }
+ }
+ removeFromPathDbMap(db.path)
+}
+
+func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) {
+ if db.closed.Load() {
+ return nil, fmt.Errorf("db closed")
+ }
+
+ // don't try to acquire if the context is already done
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ // otherwise carry on
+ }
+
+ // will return nil err if context is cancelled (may appear to acquire the semaphore)
+ if semErr := db.roTxsLimiter.Acquire(ctx, 1); semErr != nil {
+ return nil, semErr
+ }
+
+ defer func() {
+ if txn == nil {
+ // on error, or if there is whatever reason that we don't return a tx,
+ // we need to free up the limiter slot, otherwise it could lead to deadlocks
+ db.roTxsLimiter.Release(1)
+ }
+ }()
+
+ tx, err := db.env.BeginTxn(nil, mdbx.Readonly)
+ if err != nil {
+ return nil, fmt.Errorf("%w, label: %s, trace: %s", err, db.opts.label.String(), stack2.Trace().String())
+ }
+ db.wg.Add(1)
+ return &MdbxTx{
+ ctx: ctx,
+ db: db,
+ tx: tx,
+ readOnly: true,
+ id: db.leakDetector.Add(),
+ }, nil
+}
+
+func (db *MdbxKV) BeginRw(ctx context.Context) (kv.RwTx, error) {
+ return db.beginRw(ctx, 0)
+}
+func (db *MdbxKV) BeginRwNosync(ctx context.Context) (kv.RwTx, error) {
+ return db.beginRw(ctx, mdbx.TxNoSync)
+}
+
+func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ if db.closed.Load() {
+ return nil, fmt.Errorf("db closed")
+ }
+ runtime.LockOSThread()
+ tx, err := db.env.BeginTxn(nil, flags)
+ if err != nil {
+ runtime.UnlockOSThread() // unlock only in case of error. normal flow is "defer .Rollback()"
+ return nil, fmt.Errorf("%w, lable: %s, trace: %s", err, db.opts.label.String(), stack2.Trace().String())
+ }
+ db.wg.Add(1)
+ return &MdbxTx{
+ db: db,
+ tx: tx,
+ ctx: ctx,
+ id: db.leakDetector.Add(),
+ }, nil
+}
+
+type MdbxTx struct {
+ tx *mdbx.Txn
+ db *MdbxKV
+ cursors map[uint64]*mdbx.Cursor
+ streams []kv.Closer
+ statelessCursors map[string]kv.RwCursor
+ readOnly bool
+ cursorID uint64
+ ctx context.Context
+ id uint64 // set only if TRACE_TX=true
+}
+
+type MdbxCursor struct {
+ tx *MdbxTx
+ c *mdbx.Cursor
+ bucketName string
+ bucketCfg kv.TableCfgItem
+ dbi mdbx.DBI
+ id uint64
+}
+
+func (db *MdbxKV) Env() *mdbx.Env {
+ return db.env
+}
+
+func (db *MdbxKV) AllDBI() map[string]kv.DBI {
+ res := map[string]kv.DBI{}
+ for name, cfg := range db.buckets {
+ res[name] = cfg.DBI
+ }
+ return res
+}
+
+func (db *MdbxKV) AllTables() kv.TableCfg {
+ return db.buckets
+}
+
+func (tx *MdbxTx) IsRo() bool { return tx.readOnly }
+func (tx *MdbxTx) ViewID() uint64 { return tx.tx.ID() }
+
+func (tx *MdbxTx) CollectMetrics() {
+ if tx.db.opts.label != kv.ChainDB {
+ return
+ }
+
+ info, err := tx.db.env.Info(tx.tx)
+ if err != nil {
+ return
+ }
+ if info.SinceReaderCheck.Hours() > 1 {
+ if staleReaders, err := tx.db.env.ReaderCheck(); err != nil {
+ tx.db.log.Error("failed ReaderCheck", "err", err)
+ } else if staleReaders > 0 {
+ tx.db.log.Info("cleared reader slots from dead processes", "amount", staleReaders)
+ }
+ }
+
+ kv.DbSize.SetUint64(info.Geo.Current)
+ kv.DbPgopsNewly.SetUint64(info.PageOps.Newly)
+ kv.DbPgopsCow.SetUint64(info.PageOps.Cow)
+ kv.DbPgopsClone.SetUint64(info.PageOps.Clone)
+ kv.DbPgopsSplit.SetUint64(info.PageOps.Split)
+ kv.DbPgopsMerge.SetUint64(info.PageOps.Merge)
+ kv.DbPgopsSpill.SetUint64(info.PageOps.Spill)
+ kv.DbPgopsUnspill.SetUint64(info.PageOps.Unspill)
+ kv.DbPgopsWops.SetUint64(info.PageOps.Wops)
+
+ txInfo, err := tx.tx.Info(true)
+ if err != nil {
+ return
+ }
+
+ kv.TxDirty.SetUint64(txInfo.SpaceDirty)
+ kv.TxLimit.SetUint64(tx.db.txSize)
+ kv.TxSpill.SetUint64(txInfo.Spill)
+ kv.TxUnspill.SetUint64(txInfo.Unspill)
+
+ gc, err := tx.BucketStat("gc")
+ if err != nil {
+ return
+ }
+ kv.GcLeafMetric.SetUint64(gc.LeafPages)
+ kv.GcOverflowMetric.SetUint64(gc.OverflowPages)
+ kv.GcPagesMetric.SetUint64((gc.LeafPages + gc.OverflowPages) * tx.db.opts.pageSize / 8)
+}
+
+// ListBuckets - all buckets stored as keys of un-named bucket
+func (tx *MdbxTx) ListBuckets() ([]string, error) { return tx.tx.ListDBI() }
+
+func (db *MdbxKV) View(ctx context.Context, f func(tx kv.Tx) error) (err error) {
+ // can't use db.env.View method - because it calls commit for read transactions - it conflicts with write transactions.
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ return f(tx)
+}
+
+func (db *MdbxKV) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) {
+ tx, err := db.BeginRwNosync(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ err = f(tx)
+ if err != nil {
+ return err
+ }
+ err = tx.Commit()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (db *MdbxKV) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) {
+ tx, err := db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ err = f(tx)
+ if err != nil {
+ return err
+ }
+ err = tx.Commit()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *MdbxTx) CreateBucket(name string) error {
+ cnfCopy := tx.db.buckets[name]
+ dbi, err := tx.tx.OpenDBI(name, mdbx.DBAccede, nil, nil)
+ if err != nil && !mdbx.IsNotFound(err) {
+ return fmt.Errorf("create table: %s, %w", name, err)
+ }
+ if err == nil {
+ cnfCopy.DBI = kv.DBI(dbi)
+ var flags uint
+ flags, err = tx.tx.Flags(dbi)
+ if err != nil {
+ return err
+ }
+ cnfCopy.Flags = kv.TableFlags(flags)
+
+ tx.db.buckets[name] = cnfCopy
+ return nil
+ }
+
+ // if bucket doesn't exists - create it
+
+ var flags = tx.db.buckets[name].Flags
+ var nativeFlags uint
+ if !(tx.db.ReadOnly() || tx.db.Accede()) {
+ nativeFlags |= mdbx.Create
+ }
+
+ if flags&kv.DupSort != 0 {
+ nativeFlags |= mdbx.DupSort
+ flags ^= kv.DupSort
+ }
+ if flags != 0 {
+ return fmt.Errorf("some not supported flag provided for bucket")
+ }
+
+ dbi, err = tx.tx.OpenDBI(name, nativeFlags, nil, nil)
+
+ if err != nil {
+ return fmt.Errorf("create table: %s, %w", name, err)
+ }
+ cnfCopy.DBI = kv.DBI(dbi)
+
+ tx.db.buckets[name] = cnfCopy
+ return nil
+}
+
+func (tx *MdbxTx) dropEvenIfBucketIsNotDeprecated(name string) error {
+ dbi := tx.db.buckets[name].DBI
+ // if bucket was not open on db start, then it's may be deprecated
+ // try to open it now without `Create` flag, and if fail then nothing to drop
+ if dbi == NonExistingDBI {
+ nativeDBI, err := tx.tx.OpenDBI(name, 0, nil, nil)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil // DBI doesn't exists means no drop needed
+ }
+ return fmt.Errorf("bucket: %s, %w", name, err)
+ }
+ dbi = kv.DBI(nativeDBI)
+ }
+
+ if err := tx.tx.Drop(mdbx.DBI(dbi), true); err != nil {
+ return err
+ }
+ cnfCopy := tx.db.buckets[name]
+ cnfCopy.DBI = NonExistingDBI
+ tx.db.buckets[name] = cnfCopy
+ return nil
+}
+
+func (tx *MdbxTx) ClearBucket(bucket string) error {
+ dbi := tx.db.buckets[bucket].DBI
+ if dbi == NonExistingDBI {
+ return nil
+ }
+ return tx.tx.Drop(mdbx.DBI(dbi), false)
+}
+
+func (tx *MdbxTx) DropBucket(bucket string) error {
+ if cfg, ok := tx.db.buckets[bucket]; !(ok && cfg.IsDeprecated) {
+ return fmt.Errorf("%w, bucket: %s", kv.ErrAttemptToDeleteNonDeprecatedBucket, bucket)
+ }
+
+ return tx.dropEvenIfBucketIsNotDeprecated(bucket)
+}
+
+func (tx *MdbxTx) ExistsBucket(bucket string) (bool, error) {
+ if cfg, ok := tx.db.buckets[bucket]; ok {
+ return cfg.DBI != NonExistingDBI, nil
+ }
+ return false, nil
+}
+
+func (tx *MdbxTx) Commit() error {
+ if tx.tx == nil {
+ return nil
+ }
+ defer func() {
+ tx.tx = nil
+ tx.db.wg.Done()
+ if tx.readOnly {
+ tx.db.roTxsLimiter.Release(1)
+ } else {
+ runtime.UnlockOSThread()
+ }
+ tx.db.leakDetector.Del(tx.id)
+ }()
+ tx.closeCursors()
+
+ //slowTx := 10 * time.Second
+ //if debug.SlowCommit() > 0 {
+ // slowTx = debug.SlowCommit()
+ //}
+ //
+ //if debug.BigRoTxKb() > 0 || debug.BigRwTxKb() > 0 {
+ // tx.PrintDebugInfo()
+ //}
+ tx.CollectMetrics()
+
+ latency, err := tx.tx.Commit()
+ if err != nil {
+ return fmt.Errorf("lable: %s, %w", tx.db.opts.label, err)
+ }
+
+ if tx.db.opts.label == kv.ChainDB {
+ kv.DbCommitPreparation.Observe(latency.Preparation.Seconds())
+ //kv.DbCommitAudit.Update(latency.Audit.Seconds())
+ kv.DbCommitWrite.Observe(latency.Write.Seconds())
+ kv.DbCommitSync.Observe(latency.Sync.Seconds())
+ kv.DbCommitEnding.Observe(latency.Ending.Seconds())
+ kv.DbCommitTotal.Observe(latency.Whole.Seconds())
+
+ //kv.DbGcWorkPnlMergeTime.Update(latency.GCDetails.WorkPnlMergeTime.Seconds())
+ //kv.DbGcWorkPnlMergeVolume.Set(uint64(latency.GCDetails.WorkPnlMergeVolume))
+ //kv.DbGcWorkPnlMergeCalls.Set(uint64(latency.GCDetails.WorkPnlMergeCalls))
+ //
+ //kv.DbGcSelfPnlMergeTime.Update(latency.GCDetails.SelfPnlMergeTime.Seconds())
+ //kv.DbGcSelfPnlMergeVolume.Set(uint64(latency.GCDetails.SelfPnlMergeVolume))
+ //kv.DbGcSelfPnlMergeCalls.Set(uint64(latency.GCDetails.SelfPnlMergeCalls))
+ }
+
+ return nil
+}
+
+func (tx *MdbxTx) Rollback() {
+ if tx.tx == nil {
+ return
+ }
+ defer func() {
+ tx.tx = nil
+ tx.db.wg.Done()
+ if tx.readOnly {
+ tx.db.roTxsLimiter.Release(1)
+ } else {
+ runtime.UnlockOSThread()
+ }
+ tx.db.leakDetector.Del(tx.id)
+ }()
+ tx.closeCursors()
+ //tx.printDebugInfo()
+ tx.tx.Abort()
+}
+
+func (tx *MdbxTx) SpaceDirty() (uint64, uint64, error) {
+ txInfo, err := tx.tx.Info(true)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return txInfo.SpaceDirty, tx.db.txSize, nil
+}
+
+func (tx *MdbxTx) PrintDebugInfo() {
+ /*
+ txInfo, err := tx.tx.Info(true)
+ if err != nil {
+ panic(err)
+ }
+
+ txSize := uint(txInfo.SpaceDirty / 1024)
+ doPrint := debug.BigRoTxKb() == 0 && debug.BigRwTxKb() == 0 ||
+ tx.readOnly && debug.BigRoTxKb() > 0 && txSize > debug.BigRoTxKb() ||
+ (!tx.readOnly && debug.BigRwTxKb() > 0 && txSize > debug.BigRwTxKb())
+ if doPrint {
+ tx.db.log.Info("Tx info",
+ "id", txInfo.Id,
+ "read_lag", txInfo.ReadLag,
+ "ro", tx.readOnly,
+ //"space_retired_mb", txInfo.SpaceRetired/1024/1024,
+ "space_dirty_mb", txInfo.SpaceDirty/1024/1024,
+ //"callers", debug.Callers(7),
+ )
+ }
+ */
+}
+
+func (tx *MdbxTx) closeCursors() {
+ for _, c := range tx.cursors {
+ if c != nil {
+ c.Close()
+ }
+ }
+ tx.cursors = nil
+ for _, c := range tx.streams {
+ if c != nil {
+ c.Close()
+ }
+ }
+ tx.statelessCursors = nil
+}
+
+func (tx *MdbxTx) statelessCursor(bucket string) (kv.RwCursor, error) {
+ if tx.statelessCursors == nil {
+ tx.statelessCursors = make(map[string]kv.RwCursor)
+ }
+ c, ok := tx.statelessCursors[bucket]
+ if !ok {
+ var err error
+ c, err = tx.RwCursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ tx.statelessCursors[bucket] = c
+ }
+ return c, nil
+}
+
+func (tx *MdbxTx) Put(table string, k, v []byte) error {
+ c, err := tx.statelessCursor(table)
+ if err != nil {
+ return err
+ }
+ return c.Put(k, v)
+}
+
+func (tx *MdbxTx) Delete(table string, k []byte) error {
+ c, err := tx.statelessCursor(table)
+ if err != nil {
+ return err
+ }
+ return c.Delete(k)
+}
+
+func (tx *MdbxTx) GetOne(bucket string, k []byte) ([]byte, error) {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ _, v, err := c.SeekExact(k)
+ return v, err
+}
+
+func (tx *MdbxTx) Has(bucket string, key []byte) (bool, error) {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return false, err
+ }
+ k, _, err := c.Seek(key)
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(key, k), nil
+}
+
+func (tx *MdbxTx) Append(bucket string, k, v []byte) error {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return err
+ }
+ return c.Append(k, v)
+}
+func (tx *MdbxTx) AppendDup(bucket string, k, v []byte) error {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return err
+ }
+ return c.(*MdbxDupSortCursor).AppendDup(k, v)
+}
+
+func (tx *MdbxTx) IncrementSequence(bucket string, amount uint64) (uint64, error) {
+ c, err := tx.statelessCursor(kv.Sequence)
+ if err != nil {
+ return 0, err
+ }
+ _, v, err := c.SeekExact([]byte(bucket))
+ if err != nil {
+ return 0, err
+ }
+
+ var currentV uint64 = 0
+ if len(v) > 0 {
+ currentV = binary.BigEndian.Uint64(v)
+ }
+
+ newVBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(newVBytes, currentV+amount)
+ err = c.Put([]byte(bucket), newVBytes)
+ if err != nil {
+ return 0, err
+ }
+ return currentV, nil
+}
+
+func (tx *MdbxTx) ReadSequence(bucket string) (uint64, error) {
+ c, err := tx.statelessCursor(kv.Sequence)
+ if err != nil {
+ return 0, err
+ }
+ _, v, err := c.SeekExact([]byte(bucket))
+ if err != nil && !mdbx.IsNotFound(err) {
+ return 0, err
+ }
+
+ var currentV uint64
+ if len(v) > 0 {
+ currentV = binary.BigEndian.Uint64(v)
+ }
+
+ return currentV, nil
+}
+
+func (tx *MdbxTx) BucketSize(name string) (uint64, error) {
+ st, err := tx.BucketStat(name)
+ if err != nil {
+ return 0, err
+ }
+ return (st.LeafPages + st.BranchPages + st.OverflowPages) * tx.db.opts.pageSize, nil
+}
+
+func (tx *MdbxTx) BucketStat(name string) (*mdbx.Stat, error) {
+ if name == "freelist" || name == "gc" || name == "free_list" {
+ return tx.tx.StatDBI(mdbx.DBI(0))
+ }
+ if name == "root" {
+ return tx.tx.StatDBI(mdbx.DBI(1))
+ }
+ st, err := tx.tx.StatDBI(mdbx.DBI(tx.db.buckets[name].DBI))
+ if err != nil {
+ return nil, fmt.Errorf("bucket: %s, %w", name, err)
+ }
+ return st, nil
+}
+
+func (tx *MdbxTx) DBSize() (uint64, error) {
+ info, err := tx.db.env.Info(tx.tx)
+ if err != nil {
+ return 0, err
+ }
+ return info.Geo.Current, err
+}
+
+func (tx *MdbxTx) RwCursor(bucket string) (kv.RwCursor, error) {
+ b := tx.db.buckets[bucket]
+ if b.AutoDupSortKeysConversion {
+ return tx.stdCursor(bucket)
+ }
+
+ if b.Flags&kv.DupSort != 0 {
+ return tx.RwCursorDupSort(bucket)
+ }
+
+ return tx.stdCursor(bucket)
+}
+
+func (tx *MdbxTx) Cursor(bucket string) (kv.Cursor, error) {
+ return tx.RwCursor(bucket)
+}
+
+func (tx *MdbxTx) stdCursor(bucket string) (kv.RwCursor, error) {
+ b := tx.db.buckets[bucket]
+ c := &MdbxCursor{bucketName: bucket, tx: tx, bucketCfg: b, dbi: mdbx.DBI(tx.db.buckets[bucket].DBI), id: tx.cursorID}
+ tx.cursorID++
+
+ var err error
+ c.c, err = tx.tx.OpenCursor(c.dbi)
+ if err != nil {
+ return nil, fmt.Errorf("table: %s, %w, stack: %s", c.bucketName, err, dbg.Stack())
+ }
+
+ // add to auto-cleanup on end of transactions
+ if tx.cursors == nil {
+ tx.cursors = map[uint64]*mdbx.Cursor{}
+ }
+ tx.cursors[c.id] = c.c
+ return c, nil
+}
+
+func (tx *MdbxTx) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) {
+ basicCursor, err := tx.stdCursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ return &MdbxDupSortCursor{MdbxCursor: basicCursor.(*MdbxCursor)}, nil
+}
+
+func (tx *MdbxTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) {
+ return tx.RwCursorDupSort(bucket)
+}
+
+// methods here help to see better pprof picture
+func (c *MdbxCursor) set(k []byte) ([]byte, []byte, error) { return c.c.Get(k, nil, mdbx.Set) }
+func (c *MdbxCursor) getCurrent() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.GetCurrent) }
+func (c *MdbxCursor) first() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.First) }
+func (c *MdbxCursor) next() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.Next) }
+func (c *MdbxCursor) nextDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.NextDup) }
+func (c *MdbxCursor) nextNoDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.NextNoDup) }
+func (c *MdbxCursor) prev() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.Prev) }
+func (c *MdbxCursor) prevDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.PrevDup) }
+func (c *MdbxCursor) prevNoDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.PrevNoDup) }
+func (c *MdbxCursor) last() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.Last) }
+func (c *MdbxCursor) delCurrent() error { return c.c.Del(mdbx.Current) }
+func (c *MdbxCursor) delAllDupData() error { return c.c.Del(mdbx.AllDups) }
+func (c *MdbxCursor) put(k, v []byte) error { return c.c.Put(k, v, 0) }
+func (c *MdbxCursor) putCurrent(k, v []byte) error { return c.c.Put(k, v, mdbx.Current) }
+func (c *MdbxCursor) putNoOverwrite(k, v []byte) error { return c.c.Put(k, v, mdbx.NoOverwrite) }
+func (c *MdbxCursor) getBoth(k, v []byte) ([]byte, error) {
+ _, v, err := c.c.Get(k, v, mdbx.GetBoth)
+ return v, err
+}
+func (c *MdbxCursor) setRange(k []byte) ([]byte, []byte, error) {
+ return c.c.Get(k, nil, mdbx.SetRange)
+}
+func (c *MdbxCursor) getBothRange(k, v []byte) ([]byte, error) {
+ _, v, err := c.c.Get(k, v, mdbx.GetBothRange)
+ return v, err
+}
+func (c *MdbxCursor) firstDup() ([]byte, error) {
+ _, v, err := c.c.Get(nil, nil, mdbx.FirstDup)
+ return v, err
+}
+func (c *MdbxCursor) lastDup() ([]byte, error) {
+ _, v, err := c.c.Get(nil, nil, mdbx.LastDup)
+ return v, err
+}
+
+func (c *MdbxCursor) Count() (uint64, error) {
+ st, err := c.tx.tx.StatDBI(c.dbi)
+ if err != nil {
+ return 0, err
+ }
+ return st.Entries, nil
+}
+
+func (c *MdbxCursor) First() ([]byte, []byte, error) {
+ return c.Seek(nil)
+}
+
+func (c *MdbxCursor) Last() ([]byte, []byte, error) {
+ k, v, err := c.last()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ err = fmt.Errorf("failed MdbxKV cursor.Last(): %w, bucket: %s", err, c.bucketName)
+ return []byte{}, nil, err
+ }
+
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion && len(k) == b.DupToLen {
+ keyPart := b.DupFromLen - b.DupToLen
+ k = append(k, v[:keyPart]...)
+ v = v[keyPart:]
+ }
+
+ return k, v, nil
+}
+
+func (c *MdbxCursor) Seek(seek []byte) (k, v []byte, err error) {
+ if c.bucketCfg.AutoDupSortKeysConversion {
+ return c.seekDupSort(seek)
+ }
+
+ if len(seek) == 0 {
+ k, v, err = c.first()
+ } else {
+ k, v, err = c.setRange(seek)
+ }
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ err = fmt.Errorf("failed MdbxKV cursor.Seek(): %w, bucket: %s, key: %x", err, c.bucketName, seek)
+ return []byte{}, nil, err
+ }
+
+ return k, v, nil
+}
+
+func (c *MdbxCursor) seekDupSort(seek []byte) (k, v []byte, err error) {
+ b := c.bucketCfg
+ from, to := b.DupFromLen, b.DupToLen
+ if len(seek) == 0 {
+ k, v, err = c.first()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, err
+ }
+
+ if len(k) == to {
+ k2 := make([]byte, 0, len(k)+from-to)
+ k2 = append(append(k2, k...), v[:from-to]...)
+ v = v[from-to:]
+ k = k2
+ }
+ return k, v, nil
+ }
+
+ var seek1, seek2 []byte
+ if len(seek) > to {
+ seek1, seek2 = seek[:to], seek[to:]
+ } else {
+ seek1 = seek
+ }
+ k, v, err = c.setRange(seek1)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+
+ return []byte{}, nil, err
+ }
+
+ if seek2 != nil && bytes.Equal(seek1, k) {
+ v, err = c.getBothRange(seek1, seek2)
+ if err != nil && mdbx.IsNotFound(err) {
+ k, v, err = c.next()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, err
+ }
+ } else if err != nil {
+ return []byte{}, nil, err
+ }
+ }
+ if len(k) == to {
+ k2 := make([]byte, 0, len(k)+from-to)
+ k2 = append(append(k2, k...), v[:from-to]...)
+ v = v[from-to:]
+ k = k2
+ }
+
+ return k, v, nil
+}
+
+func (c *MdbxCursor) Next() (k, v []byte, err error) {
+ k, v, err = c.next()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("failed MdbxKV cursor.Next(): %w", err)
+ }
+
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion && len(k) == b.DupToLen {
+ keyPart := b.DupFromLen - b.DupToLen
+ k = append(k, v[:keyPart]...)
+ v = v[keyPart:]
+ }
+
+ return k, v, nil
+}
+
+func (c *MdbxCursor) Prev() (k, v []byte, err error) {
+ k, v, err = c.prev()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("failed MdbxKV cursor.Prev(): %w", err)
+ }
+
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion && len(k) == b.DupToLen {
+ keyPart := b.DupFromLen - b.DupToLen
+ k = append(k, v[:keyPart]...)
+ v = v[keyPart:]
+ }
+
+ return k, v, nil
+}
+
+// Current - return key/data at current cursor position
+func (c *MdbxCursor) Current() ([]byte, []byte, error) {
+ k, v, err := c.getCurrent()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, err
+ }
+
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion && len(k) == b.DupToLen {
+ keyPart := b.DupFromLen - b.DupToLen
+ k = append(k, v[:keyPart]...)
+ v = v[keyPart:]
+ }
+
+ return k, v, nil
+}
+
+func (c *MdbxCursor) Delete(k []byte) error {
+ if c.bucketCfg.AutoDupSortKeysConversion {
+ return c.deleteDupSort(k)
+ }
+
+ _, _, err := c.set(k)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ if c.bucketCfg.Flags&mdbx.DupSort != 0 {
+ return c.delAllDupData()
+ }
+ return c.delCurrent()
+}
+
+// DeleteCurrent This function deletes the key/data pair to which the cursor refers.
+// This does not invalidate the cursor, so operations such as MDB_NEXT
+// can still be used on it.
+// Both MDB_NEXT and MDB_GET_CURRENT will return the same record after
+// this operation.
+func (c *MdbxCursor) DeleteCurrent() error {
+ return c.delCurrent()
+}
+
+func (c *MdbxCursor) deleteDupSort(key []byte) error {
+ b := c.bucketCfg
+ from, to := b.DupFromLen, b.DupToLen
+ if len(key) != from && len(key) >= to {
+ return fmt.Errorf("delete from dupsort bucket: %s, can have keys of len==%d and len<%d. key: %x,%d", c.bucketName, from, to, key, len(key))
+ }
+
+ if len(key) == from {
+ v, err := c.getBothRange(key[:to], key[to:])
+ if err != nil { // if key not found, or found another one - then nothing to delete
+ if mdbx.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+ if !bytes.Equal(v[:from-to], key[to:]) {
+ return nil
+ }
+ return c.delCurrent()
+ }
+
+ _, _, err := c.set(key)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ return c.delCurrent()
+}
+
+func (c *MdbxCursor) PutNoOverwrite(key []byte, value []byte) error {
+ if c.bucketCfg.AutoDupSortKeysConversion {
+ panic("not implemented")
+ }
+
+ return c.putNoOverwrite(key, value)
+}
+
+func (c *MdbxCursor) Put(key []byte, value []byte) error {
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion {
+ if err := c.putDupSort(key, value); err != nil {
+ return fmt.Errorf("label: %s, table: %s, err: %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+ }
+ if err := c.put(key, value); err != nil {
+ return fmt.Errorf("label: %s, table: %s, err: %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+}
+
+func (c *MdbxCursor) putDupSort(key []byte, value []byte) error {
+ b := c.bucketCfg
+ from, to := b.DupFromLen, b.DupToLen
+ if len(key) != from && len(key) >= to {
+ return fmt.Errorf("label: %s, table: %s, can have keys of len==%d and len<%d. key: %x,%d", c.tx.db.opts.label, c.bucketName, from, to, key, len(key))
+ }
+
+ if len(key) != from {
+ err := c.putNoOverwrite(key, value)
+ if err != nil {
+ if mdbx.IsKeyExists(err) {
+ return c.putCurrent(key, value)
+ }
+ return fmt.Errorf("label: %s, putNoOverwrite, bucket: %s, key: %x, val: %x, err: %w", c.tx.db.opts.label, c.bucketName, key, value, err)
+ }
+ return nil
+ }
+
+ value = append(key[to:], value...)
+ key = key[:to]
+ v, err := c.getBothRange(key, value[:from-to])
+ if err != nil { // if key not found, or found another one - then just insert
+ if mdbx.IsNotFound(err) {
+ return c.put(key, value)
+ }
+ return err
+ }
+
+ if bytes.Equal(v[:from-to], value[:from-to]) {
+ if len(v) == len(value) { // in DupSort case mdbx.Current works only with values of same length
+ return c.putCurrent(key, value)
+ }
+ err = c.delCurrent()
+ if err != nil {
+ return err
+ }
+ }
+
+ return c.put(key, value)
+}
+
+func (c *MdbxCursor) SeekExact(key []byte) ([]byte, []byte, error) {
+ b := c.bucketCfg
+ if b.AutoDupSortKeysConversion && len(key) == b.DupFromLen {
+ from, to := b.DupFromLen, b.DupToLen
+ v, err := c.getBothRange(key[:to], key[to:])
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, err
+ }
+ if !bytes.Equal(key[to:], v[:from-to]) {
+ return nil, nil, nil
+ }
+ return key[:to], v[from-to:], nil
+ }
+
+ k, v, err := c.set(key)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, err
+ }
+ return k, v, nil
+}
+
+// Append - speedy feature of mdbx which is not part of KV interface.
+// Cast your cursor to *MdbxCursor to use this method.
+// Return error - if provided data will not sorted (or bucket have old records which mess with new in sorting manner).
+func (c *MdbxCursor) Append(k []byte, v []byte) error {
+ if c.bucketCfg.AutoDupSortKeysConversion {
+ b := c.bucketCfg
+ from, to := b.DupFromLen, b.DupToLen
+ if len(k) != from && len(k) >= to {
+ return fmt.Errorf("label: %s, append dupsort bucket: %s, can have keys of len==%d and len<%d. key: %x,%d", c.tx.db.opts.label, c.bucketName, from, to, k, len(k))
+ }
+
+ if len(k) == from {
+ v = append(k[to:], v...)
+ k = k[:to]
+ }
+ }
+
+ if c.bucketCfg.Flags&mdbx.DupSort != 0 {
+ if err := c.c.Put(k, v, mdbx.AppendDup); err != nil {
+ return fmt.Errorf("label: %s, bucket: %s, %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+ }
+
+ if err := c.c.Put(k, v, mdbx.Append); err != nil {
+ return fmt.Errorf("label: %s, bucket: %s, %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+}
+
+func (c *MdbxCursor) Close() {
+ if c.c != nil {
+ c.c.Close()
+ delete(c.tx.cursors, c.id)
+ c.c = nil
+ }
+}
+
+type MdbxDupSortCursor struct {
+ *MdbxCursor
+}
+
+func (c *MdbxDupSortCursor) Internal() *mdbx.Cursor {
+ return c.c
+}
+
+// DeleteExact - does delete
+func (c *MdbxDupSortCursor) DeleteExact(k1, k2 []byte) error {
+ _, err := c.getBoth(k1, k2)
+ if err != nil { // if key not found, or found another one - then nothing to delete
+ if mdbx.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+ return c.delCurrent()
+}
+
+func (c *MdbxDupSortCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) {
+ v, err := c.getBoth(key, value)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("in SeekBothExact: %w", err)
+ }
+ return key, v, nil
+}
+
+func (c *MdbxDupSortCursor) SeekBothRange(key, value []byte) ([]byte, error) {
+ v, err := c.getBothRange(key, value)
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("in SeekBothRange, table=%s: %w", c.bucketName, err)
+ }
+ return v, nil
+}
+
+func (c *MdbxDupSortCursor) FirstDup() ([]byte, error) {
+ v, err := c.firstDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("in FirstDup: %w", err)
+ }
+ return v, nil
+}
+
+// NextDup - iterate only over duplicates of current key
+func (c *MdbxDupSortCursor) NextDup() ([]byte, []byte, error) {
+ k, v, err := c.nextDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("in NextDup: %w", err)
+ }
+ return k, v, nil
+}
+
+// NextNoDup - iterate with skipping all duplicates
+func (c *MdbxDupSortCursor) NextNoDup() ([]byte, []byte, error) {
+ k, v, err := c.nextNoDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("in NextNoDup: %w", err)
+ }
+ return k, v, nil
+}
+
+func (c *MdbxDupSortCursor) PrevDup() ([]byte, []byte, error) {
+ k, v, err := c.prevDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("in PrevDup: %w", err)
+ }
+ return k, v, nil
+}
+
+func (c *MdbxDupSortCursor) PrevNoDup() ([]byte, []byte, error) {
+ k, v, err := c.prevNoDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil, nil
+ }
+ return []byte{}, nil, fmt.Errorf("in PrevNoDup: %w", err)
+ }
+ return k, v, nil
+}
+
+func (c *MdbxDupSortCursor) LastDup() ([]byte, error) {
+ v, err := c.lastDup()
+ if err != nil {
+ if mdbx.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("in LastDup: %w", err)
+ }
+ return v, nil
+}
+
+func (c *MdbxDupSortCursor) Append(k []byte, v []byte) error {
+ if err := c.c.Put(k, v, mdbx.Append|mdbx.AppendDup); err != nil {
+ return fmt.Errorf("label: %s, in Append: bucket=%s, %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+}
+
+func (c *MdbxDupSortCursor) AppendDup(k []byte, v []byte) error {
+ if err := c.c.Put(k, v, mdbx.AppendDup); err != nil {
+ return fmt.Errorf("label: %s, in AppendDup: bucket=%s, %w", c.tx.db.opts.label, c.bucketName, err)
+ }
+ return nil
+}
+
+func (c *MdbxDupSortCursor) PutNoDupData(k, v []byte) error {
+ if err := c.c.Put(k, v, mdbx.NoDupData); err != nil {
+ return fmt.Errorf("label: %s, in PutNoDupData: %w", c.tx.db.opts.label, err)
+ }
+
+ return nil
+}
+
+// DeleteCurrentDuplicates - delete all of the data items for the current key.
+func (c *MdbxDupSortCursor) DeleteCurrentDuplicates() error {
+ if err := c.delAllDupData(); err != nil {
+ return fmt.Errorf("label: %s,in DeleteCurrentDuplicates: %w", c.tx.db.opts.label, err)
+ }
+ return nil
+}
+
+// CountDuplicates returns the number of duplicates for the current key. See mdb_cursor_count
+func (c *MdbxDupSortCursor) CountDuplicates() (uint64, error) {
+ res, err := c.c.Count()
+ if err != nil {
+ return 0, fmt.Errorf("in CountDuplicates: %w", err)
+ }
+ return res, nil
+}
+
+func bucketSlice(b kv.TableCfg) []string {
+ buckets := make([]string, 0, len(b))
+ for name := range b {
+ buckets = append(buckets, name)
+ }
+ sort.Slice(buckets, func(i, j int) bool {
+ return strings.Compare(buckets[i], buckets[j]) < 0
+ })
+ return buckets
+}
+
+func (tx *MdbxTx) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error {
+ c, err := tx.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(fromPrefix); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (tx *MdbxTx) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
+ c, err := tx.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(prefix); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if !bytes.HasPrefix(k, prefix) {
+ break
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (tx *MdbxTx) Prefix(table string, prefix []byte) (iter.KV, error) {
+ nextPrefix, ok := kv.NextSubtree(prefix)
+ if !ok {
+ return tx.Range(table, prefix, nil)
+ }
+ return tx.Range(table, prefix, nextPrefix)
+}
+
+func (tx *MdbxTx) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) {
+ return tx.RangeAscend(table, fromPrefix, toPrefix, -1)
+}
+func (tx *MdbxTx) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, limit)
+}
+func (tx *MdbxTx) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Desc, limit)
+}
+
+type cursor2iter struct {
+ c kv.Cursor
+ fromPrefix, toPrefix, nextK, nextV []byte
+ err error
+ orderAscend order.By
+ limit int64
+ ctx context.Context
+}
+
+func (tx *MdbxTx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, orderAscend order.By, limit int) (*cursor2iter, error) {
+ s := &cursor2iter{ctx: tx.ctx, fromPrefix: fromPrefix, toPrefix: toPrefix, orderAscend: orderAscend, limit: int64(limit)}
+ tx.streams = append(tx.streams, s)
+ return s.init(table, tx)
+}
+func (s *cursor2iter) init(table string, tx kv.Tx) (*cursor2iter, error) {
+ if s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) >= 0 {
+ return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.fromPrefix, s.toPrefix)
+ }
+ if !s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) <= 0 {
+ return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.toPrefix, s.fromPrefix)
+ }
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return s, err
+ }
+ s.c = c
+
+ if s.fromPrefix == nil { // no initial position
+ if s.orderAscend {
+ s.nextK, s.nextV, s.err = s.c.First()
+ } else {
+ s.nextK, s.nextV, s.err = s.c.Last()
+ }
+ return s, s.err
+ }
+
+ if s.orderAscend {
+ s.nextK, s.nextV, s.err = s.c.Seek(s.fromPrefix)
+ return s, s.err
+ } else {
+ // seek exactly to given key or previous one
+ s.nextK, s.nextV, s.err = s.c.SeekExact(s.fromPrefix)
+ if s.err != nil {
+ return s, s.err
+ }
+ if s.nextK != nil { // go to last value of this key
+ if casted, ok := s.c.(kv.CursorDupSort); ok {
+ s.nextV, s.err = casted.LastDup()
+ }
+ } else { // key not found, go to prev one
+ s.nextK, s.nextV, s.err = s.c.Prev()
+ }
+ return s, s.err
+ }
+}
+
+func (s *cursor2iter) Close() {
+ if s.c != nil {
+ s.c.Close()
+ }
+}
+func (s *cursor2iter) HasNext() bool {
+ if s.err != nil { // always true, then .Next() call will return this error
+ return true
+ }
+ if s.limit == 0 { // limit reached
+ return false
+ }
+ if s.nextK == nil { // EndOfTable
+ return false
+ }
+ if s.toPrefix == nil { // s.nextK == nil check is above
+ return true
+ }
+
+ //Asc: [from, to) AND from > to
+ //Desc: [from, to) AND from < to
+ cmp := bytes.Compare(s.nextK, s.toPrefix)
+ return (bool(s.orderAscend) && cmp < 0) || (!bool(s.orderAscend) && cmp > 0)
+}
+func (s *cursor2iter) Next() (k, v []byte, err error) {
+ select {
+ case <-s.ctx.Done():
+ return nil, nil, s.ctx.Err()
+ default:
+ }
+ s.limit--
+ k, v, err = s.nextK, s.nextV, s.err
+ if s.orderAscend {
+ s.nextK, s.nextV, s.err = s.c.Next()
+ } else {
+ s.nextK, s.nextV, s.err = s.c.Prev()
+ }
+ return k, v, err
+}
+
+func (tx *MdbxTx) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) {
+ s := &cursorDup2iter{ctx: tx.ctx, key: key, fromPrefix: fromPrefix, toPrefix: toPrefix, orderAscend: bool(asc), limit: int64(limit)}
+ tx.streams = append(tx.streams, s)
+ return s.init(table, tx)
+}
+
+type cursorDup2iter struct {
+ c kv.CursorDupSort
+ key []byte
+ fromPrefix, toPrefix, nextV []byte
+ err error
+ orderAscend bool
+ limit int64
+ ctx context.Context
+}
+
+func (s *cursorDup2iter) init(table string, tx kv.Tx) (*cursorDup2iter, error) {
+ if s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) >= 0 {
+ return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.fromPrefix, s.toPrefix)
+ }
+ if !s.orderAscend && s.fromPrefix != nil && s.toPrefix != nil && bytes.Compare(s.fromPrefix, s.toPrefix) <= 0 {
+ return s, fmt.Errorf("tx.Dual: %x must be lexicographicaly before %x", s.toPrefix, s.fromPrefix)
+ }
+ c, err := tx.CursorDupSort(table)
+ if err != nil {
+ return s, err
+ }
+ s.c = c
+ k, _, err := c.SeekExact(s.key)
+ if err != nil {
+ return s, err
+ }
+ if k == nil {
+ return s, nil
+ }
+
+ if s.fromPrefix == nil { // no initial position
+ if s.orderAscend {
+ s.nextV, s.err = s.c.FirstDup()
+ } else {
+ s.nextV, s.err = s.c.LastDup()
+ }
+ return s, s.err
+ }
+
+ if s.orderAscend {
+ s.nextV, s.err = s.c.SeekBothRange(s.key, s.fromPrefix)
+ return s, s.err
+ } else {
+ // seek exactly to given key or previous one
+ _, s.nextV, s.err = s.c.SeekBothExact(s.key, s.fromPrefix)
+ if s.nextV == nil { // no such key
+ _, s.nextV, s.err = s.c.PrevDup()
+ }
+ return s, s.err
+ }
+}
+
+func (s *cursorDup2iter) Close() {
+ if s.c != nil {
+ s.c.Close()
+ }
+}
+func (s *cursorDup2iter) HasNext() bool {
+ if s.err != nil { // always true, then .Next() call will return this error
+ return true
+ }
+ if s.limit == 0 { // limit reached
+ return false
+ }
+ if s.nextV == nil { // EndOfTable
+ return false
+ }
+ if s.toPrefix == nil { // s.nextK == nil check is above
+ return true
+ }
+
+ //Asc: [from, to) AND from > to
+ //Desc: [from, to) AND from < to
+ cmp := bytes.Compare(s.nextV, s.toPrefix)
+ return (s.orderAscend && cmp < 0) || (!s.orderAscend && cmp > 0)
+}
+func (s *cursorDup2iter) Next() (k, v []byte, err error) {
+ select {
+ case <-s.ctx.Done():
+ return nil, nil, s.ctx.Err()
+ default:
+ }
+ s.limit--
+ v, err = s.nextV, s.err
+ if s.orderAscend {
+ _, s.nextV, s.err = s.c.NextDup()
+ } else {
+ _, s.nextV, s.err = s.c.PrevDup()
+ }
+ return s.key, v, err
+}
+
+func (tx *MdbxTx) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error {
+ if amount == 0 {
+ return nil
+ }
+ c, err := tx.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(fromPrefix); k != nil && amount > 0; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ amount--
+ }
+ return nil
+}
+
+func (tx *MdbxTx) CHandle() unsafe.Pointer {
+ return tx.tx.CHandle()
+}
diff --git a/erigon-lib/kv/mdbx/kv_mdbx_temporary.go b/erigon-lib/kv/mdbx/kv_mdbx_temporary.go
new file mode 100644
index 00000000000..c5e585bffe1
--- /dev/null
+++ b/erigon-lib/kv/mdbx/kv_mdbx_temporary.go
@@ -0,0 +1,89 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mdbx
+
+import (
+ "context"
+ "os"
+ "unsafe"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/log/v3"
+)
+
+type TemporaryMdbx struct {
+ db kv.RwDB
+ path string
+}
+
+func NewTemporaryMdbx(ctx context.Context, tempdir string) (kv.RwDB, error) {
+ path, err := os.MkdirTemp(tempdir, "mdbx-temp")
+ if err != nil {
+ return &TemporaryMdbx{}, err
+ }
+
+ db, err := NewMDBX(log.New()).Label(kv.InMem).Path(path).Open(ctx)
+ if err != nil {
+ return &TemporaryMdbx{}, err
+ }
+
+ return &TemporaryMdbx{
+ db: db,
+ path: path,
+ }, nil
+}
+
+func (t *TemporaryMdbx) ReadOnly() bool { return t.db.ReadOnly() }
+func (t *TemporaryMdbx) Update(ctx context.Context, f func(kv.RwTx) error) error {
+ return t.db.Update(ctx, f)
+}
+
+func (t *TemporaryMdbx) UpdateNosync(ctx context.Context, f func(kv.RwTx) error) error {
+ return t.db.UpdateNosync(ctx, f)
+}
+
+func (t *TemporaryMdbx) BeginRw(ctx context.Context) (kv.RwTx, error) {
+ return t.db.BeginRw(ctx)
+}
+func (t *TemporaryMdbx) BeginRwNosync(ctx context.Context) (kv.RwTx, error) {
+ return t.db.BeginRwNosync(ctx)
+}
+
+func (t *TemporaryMdbx) View(ctx context.Context, f func(kv.Tx) error) error {
+ return t.db.View(ctx, f)
+}
+
+func (t *TemporaryMdbx) BeginRo(ctx context.Context) (kv.Tx, error) {
+ return t.db.BeginRo(ctx)
+}
+
+func (t *TemporaryMdbx) AllTables() kv.TableCfg {
+ return t.db.AllTables()
+}
+
+func (t *TemporaryMdbx) PageSize() uint64 {
+ return t.db.PageSize()
+}
+
+func (t *TemporaryMdbx) Close() {
+ t.db.Close()
+ os.RemoveAll(t.path)
+}
+
+func (t *TemporaryMdbx) CHandle() unsafe.Pointer {
+ panic("CHandle not implemented")
+}
diff --git a/erigon-lib/kv/mdbx/kv_mdbx_test.go b/erigon-lib/kv/mdbx/kv_mdbx_test.go
new file mode 100644
index 00000000000..e79a852dae2
--- /dev/null
+++ b/erigon-lib/kv/mdbx/kv_mdbx_test.go
@@ -0,0 +1,775 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mdbx
+
+import (
+ "context"
+ "testing"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func BaseCase(t *testing.T) (kv.RwDB, kv.RwTx, kv.RwCursorDupSort) {
+ t.Helper()
+ path := t.TempDir()
+ logger := log.New()
+ table := "Table"
+ db := NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return kv.TableCfg{
+ table: kv.TableCfgItem{Flags: kv.DupSort},
+ kv.Sequence: kv.TableCfgItem{},
+ }
+ }).MapSize(128 * datasize.MB).MustOpen()
+ t.Cleanup(db.Close)
+
+ tx, err := db.BeginRw(context.Background())
+ require.NoError(t, err)
+ t.Cleanup(tx.Rollback)
+
+ c, err := tx.RwCursorDupSort(table)
+ require.NoError(t, err)
+ t.Cleanup(c.Close)
+
+ // Insert some dupsorted records
+ require.NoError(t, c.Put([]byte("key1"), []byte("value1.1")))
+ require.NoError(t, c.Put([]byte("key3"), []byte("value3.1")))
+ require.NoError(t, c.Put([]byte("key1"), []byte("value1.3")))
+ require.NoError(t, c.Put([]byte("key3"), []byte("value3.3")))
+
+ return db, tx, c
+}
+
+func iteration(t *testing.T, c kv.RwCursorDupSort, start []byte, val []byte) ([]string, []string) {
+ t.Helper()
+ var keys []string
+ var values []string
+ var err error
+ i := 0
+ for k, v, err := start, val, err; k != nil; k, v, err = c.Next() {
+ require.Nil(t, err)
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ i += 1
+ }
+ for ind := i; ind > 1; ind-- {
+ c.Prev()
+ }
+
+ return keys, values
+}
+
+func TestSeekBothRange(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ v, err := c.SeekBothRange([]byte("key2"), []byte("value1.2"))
+ require.NoError(t, err)
+ // SeekBothRange does exact match of the key, but range match of the value, so we get nil here
+ require.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("key3"), []byte("value3.2"))
+ require.NoError(t, err)
+ require.Equal(t, "value3.3", string(v))
+}
+
+func TestRange(t *testing.T) {
+ t.Run("Asc", func(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ //[from, to)
+ it, err := tx.Range("Table", []byte("key1"), []byte("key3"))
+ require.NoError(t, err)
+ require.True(t, it.HasNext())
+ k, v, err := it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key1", string(k))
+ require.Equal(t, "value1.1", string(v))
+
+ require.True(t, it.HasNext())
+ k, v, err = it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key1", string(k))
+ require.Equal(t, "value1.3", string(v))
+
+ require.False(t, it.HasNext())
+ require.False(t, it.HasNext())
+
+ // [from, nil) means [from, INF)
+ it, err = tx.Range("Table", []byte("key1"), nil)
+ require.NoError(t, err)
+ cnt := 0
+ for it.HasNext() {
+ _, _, err := it.Next()
+ require.NoError(t, err)
+ cnt++
+ }
+ require.Equal(t, 4, cnt)
+ })
+ t.Run("Desc", func(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ //[from, to)
+ it, err := tx.RangeDescend("Table", []byte("key3"), []byte("key1"), kv.Unlim)
+ require.NoError(t, err)
+ require.True(t, it.HasNext())
+ k, v, err := it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key3", string(k))
+ require.Equal(t, "value3.3", string(v))
+
+ require.True(t, it.HasNext())
+ k, v, err = it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key3", string(k))
+ require.Equal(t, "value3.1", string(v))
+
+ require.False(t, it.HasNext())
+
+ it, err = tx.RangeDescend("Table", nil, nil, 2)
+ require.NoError(t, err)
+
+ cnt := 0
+ for it.HasNext() {
+ _, _, err := it.Next()
+ require.NoError(t, err)
+ cnt++
+ }
+ require.Equal(t, 2, cnt)
+ })
+}
+
+func TestRangeDupSort(t *testing.T) {
+ t.Run("Asc", func(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ //[from, to)
+ it, err := tx.RangeDupSort("Table", []byte("key1"), nil, nil, order.Asc, -1)
+ require.NoError(t, err)
+ require.True(t, it.HasNext())
+ k, v, err := it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key1", string(k))
+ require.Equal(t, "value1.1", string(v))
+
+ require.True(t, it.HasNext())
+ k, v, err = it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key1", string(k))
+ require.Equal(t, "value1.3", string(v))
+
+ require.False(t, it.HasNext())
+ require.False(t, it.HasNext())
+
+ // [from, nil) means [from, INF)
+ it, err = tx.Range("Table", []byte("key1"), nil)
+ require.NoError(t, err)
+ cnt := 0
+ for it.HasNext() {
+ _, _, err := it.Next()
+ require.NoError(t, err)
+ cnt++
+ }
+ require.Equal(t, 4, cnt)
+ })
+ t.Run("Desc", func(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ //[from, to)
+ it, err := tx.RangeDupSort("Table", []byte("key3"), nil, nil, order.Desc, -1)
+ require.NoError(t, err)
+ require.True(t, it.HasNext())
+ k, v, err := it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key3", string(k))
+ require.Equal(t, "value3.3", string(v))
+
+ require.True(t, it.HasNext())
+ k, v, err = it.Next()
+ require.NoError(t, err)
+ require.Equal(t, "key3", string(k))
+ require.Equal(t, "value3.1", string(v))
+
+ require.False(t, it.HasNext())
+
+ it, err = tx.RangeDescend("Table", nil, nil, 2)
+ require.NoError(t, err)
+
+ cnt := 0
+ for it.HasNext() {
+ _, _, err := it.Next()
+ require.NoError(t, err)
+ cnt++
+ }
+ require.Equal(t, 2, cnt)
+ })
+}
+
+func TestLastDup(t *testing.T) {
+ db, tx, _ := BaseCase(t)
+
+ err := tx.Commit()
+ require.NoError(t, err)
+ roTx, err := db.BeginRo(context.Background())
+ require.NoError(t, err)
+ defer roTx.Rollback()
+
+ roC, err := roTx.CursorDupSort("Table")
+ require.NoError(t, err)
+ defer roC.Close()
+
+ var keys, vals []string
+ var k, v []byte
+ for k, _, err = roC.First(); err == nil && k != nil; k, _, err = roC.NextNoDup() {
+ v, err = roC.LastDup()
+ require.NoError(t, err)
+ keys = append(keys, string(k))
+ vals = append(vals, string(v))
+ }
+ require.NoError(t, err)
+ require.Equal(t, []string{"key1", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.3"}, vals)
+}
+
+func TestPutGet(t *testing.T) {
+ _, tx, c := BaseCase(t)
+
+ require.NoError(t, c.Put([]byte(""), []byte("value1.1")))
+
+ var v []byte
+ v, err := tx.GetOne("Table", []byte("key1"))
+ require.Nil(t, err)
+ require.Equal(t, v, []byte("value1.1"))
+
+ v, err = tx.GetOne("RANDOM", []byte("key1"))
+ require.Error(t, err) // Error from non-existent bucket returns error
+ require.Nil(t, v)
+}
+
+func TestIncrementRead(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ table := "Table"
+
+ _, err := tx.IncrementSequence(table, uint64(12))
+ require.Nil(t, err)
+ chaV, err := tx.ReadSequence(table)
+ require.Nil(t, err)
+ require.Equal(t, chaV, uint64(12))
+ _, err = tx.IncrementSequence(table, uint64(240))
+ require.Nil(t, err)
+ chaV, err = tx.ReadSequence(table)
+ require.Nil(t, err)
+ require.Equal(t, chaV, uint64(252))
+}
+
+func TestHasDelete(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ table := "Table"
+
+ require.NoError(t, tx.Put(table, []byte("key2"), []byte("value2.1")))
+ require.NoError(t, tx.Put(table, []byte("key4"), []byte("value4.1")))
+ require.NoError(t, tx.Put(table, []byte("key5"), []byte("value5.1")))
+
+ c, err := tx.RwCursorDupSort(table)
+ require.NoError(t, err)
+ defer c.Close()
+ require.NoError(t, c.DeleteExact([]byte("key1"), []byte("value1.1")))
+ require.NoError(t, c.DeleteExact([]byte("key1"), []byte("value1.3")))
+ require.NoError(t, c.DeleteExact([]byte("key1"), []byte("value1.1"))) //valid but already deleted
+ require.NoError(t, c.DeleteExact([]byte("key2"), []byte("value1.1"))) //valid key but wrong value
+
+ res, err := tx.Has(table, []byte("key1"))
+ require.Nil(t, err)
+ require.False(t, res)
+
+ res, err = tx.Has(table, []byte("key2"))
+ require.Nil(t, err)
+ require.True(t, res)
+
+ res, err = tx.Has(table, []byte("key3"))
+ require.Nil(t, err)
+ require.True(t, res) //There is another key3 left
+
+ res, err = tx.Has(table, []byte("k"))
+ require.Nil(t, err)
+ require.False(t, res)
+}
+
+func TestForAmount(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ table := "Table"
+
+ require.NoError(t, tx.Put(table, []byte("key2"), []byte("value2.1")))
+ require.NoError(t, tx.Put(table, []byte("key4"), []byte("value4.1")))
+ require.NoError(t, tx.Put(table, []byte("key5"), []byte("value5.1")))
+
+ var keys []string
+
+ err := tx.ForAmount(table, []byte("key3"), uint32(2), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"key3", "key3"}, keys)
+
+ var keys1 []string
+
+ err1 := tx.ForAmount(table, []byte("key1"), 100, func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ return nil
+ })
+ require.Nil(t, err1)
+ require.Equal(t, []string{"key1", "key1", "key2", "key3", "key3", "key4", "key5"}, keys1)
+
+ var keys2 []string
+
+ err2 := tx.ForAmount(table, []byte("value"), 100, func(k, v []byte) error {
+ keys2 = append(keys2, string(k))
+ return nil
+ })
+ require.Nil(t, err2)
+ require.Nil(t, keys2)
+
+ var keys3 []string
+
+ err3 := tx.ForAmount(table, []byte("key1"), 0, func(k, v []byte) error {
+ keys3 = append(keys3, string(k))
+ return nil
+ })
+ require.Nil(t, err3)
+ require.Nil(t, keys3)
+}
+
+func TestForPrefix(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ table := "Table"
+
+ var keys []string
+
+ err := tx.ForPrefix(table, []byte("key"), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"key1", "key1", "key3", "key3"}, keys)
+
+ var keys1 []string
+
+ err = tx.ForPrefix(table, []byte("key1"), func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"key1", "key1"}, keys1)
+
+ var keys2 []string
+
+ err = tx.ForPrefix(table, []byte("e"), func(k, v []byte) error {
+ keys2 = append(keys2, string(k))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Nil(t, keys2)
+}
+
+func TestAppendFirstLast(t *testing.T) {
+ _, tx, c := BaseCase(t)
+
+ table := "Table"
+
+ require.Error(t, tx.Append(table, []byte("key2"), []byte("value2.1")))
+ require.NoError(t, tx.Append(table, []byte("key6"), []byte("value6.1")))
+ require.Error(t, tx.Append(table, []byte("key4"), []byte("value4.1")))
+ require.NoError(t, tx.AppendDup(table, []byte("key2"), []byte("value1.11")))
+
+ k, v, err := c.First()
+ require.Nil(t, err)
+ require.Equal(t, k, []byte("key1"))
+ require.Equal(t, v, []byte("value1.1"))
+
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1", "key2", "key3", "key3", "key6"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3", "value1.11", "value3.1", "value3.3", "value6.1"}, values)
+
+ k, v, err = c.Last()
+ require.Nil(t, err)
+ require.Equal(t, k, []byte("key6"))
+ require.Equal(t, v, []byte("value6.1"))
+
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key6"}, keys)
+ require.Equal(t, []string{"value6.1"}, values)
+}
+
+func TestNextPrevCurrent(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ k, v, err := c.First()
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.Next()
+ require.Equal(t, []byte("key1"), k)
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.Current()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.1", "value3.3"}, values)
+ require.Equal(t, k, []byte("key1"))
+ require.Equal(t, v, []byte("value1.3"))
+
+ k, v, err = c.Next()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key3", "key3"}, keys)
+ require.Equal(t, []string{"value3.1", "value3.3"}, values)
+
+ k, v, err = c.Prev()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.Current()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.Prev()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3", "value3.1", "value3.3"}, values)
+
+ err = c.DeleteCurrent()
+ require.Nil(t, err)
+ k, v, err = c.Current()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.3", "value3.1", "value3.3"}, values)
+
+}
+
+func TestSeek(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ k, v, err := c.Seek([]byte("k"))
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.Seek([]byte("key3"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key3", "key3"}, keys)
+ require.Equal(t, []string{"value3.1", "value3.3"}, values)
+
+ k, v, err = c.Seek([]byte("xyz"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+}
+
+func TestSeekExact(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ k, v, err := c.SeekExact([]byte("key3"))
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key3", "key3"}, keys)
+ require.Equal(t, []string{"value3.1", "value3.3"}, values)
+
+ k, v, err = c.SeekExact([]byte("key"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+}
+
+func TestSeekBothExact(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ k, v, err := c.SeekBothExact([]byte("key1"), []byte("value1.2"))
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ k, v, err = c.SeekBothExact([]byte("key2"), []byte("value1.1"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ k, v, err = c.SeekBothExact([]byte("key1"), []byte("value1.1"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1", "key3", "key3"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3", "value3.1", "value3.3"}, values)
+
+ k, v, err = c.SeekBothExact([]byte("key3"), []byte("value3.3"))
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key3"}, keys)
+ require.Equal(t, []string{"value3.3"}, values)
+}
+
+func TestNextDups(t *testing.T) {
+ _, tx, _ := BaseCase(t)
+
+ table := "Table"
+
+ c, err := tx.RwCursorDupSort(table)
+ require.NoError(t, err)
+ defer c.Close()
+ require.NoError(t, c.DeleteExact([]byte("key1"), []byte("value1.1")))
+ require.NoError(t, c.DeleteExact([]byte("key1"), []byte("value1.3")))
+ require.NoError(t, c.DeleteExact([]byte("key3"), []byte("value3.1"))) //valid but already deleted
+ require.NoError(t, c.DeleteExact([]byte("key3"), []byte("value3.3"))) //valid key but wrong value
+
+ require.NoError(t, tx.Put(table, []byte("key2"), []byte("value1.1")))
+ require.NoError(t, c.Put([]byte("key2"), []byte("value1.2")))
+ require.NoError(t, c.Put([]byte("key3"), []byte("value1.6")))
+ require.NoError(t, c.Put([]byte("key"), []byte("value1.7")))
+
+ k, v, err := c.Current()
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key", "key2", "key2", "key3"}, keys)
+ require.Equal(t, []string{"value1.7", "value1.1", "value1.2", "value1.6"}, values)
+
+ v, err = c.FirstDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key", "key2", "key2", "key3"}, keys)
+ require.Equal(t, []string{"value1.7", "value1.1", "value1.2", "value1.6"}, values)
+
+ k, v, err = c.NextNoDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key2", "key2", "key3"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.2", "value1.6"}, values)
+
+ k, v, err = c.NextDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key2", "key3"}, keys)
+ require.Equal(t, []string{"value1.2", "value1.6"}, values)
+
+ v, err = c.LastDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key2", "key3"}, keys)
+ require.Equal(t, []string{"value1.2", "value1.6"}, values)
+
+ k, v, err = c.NextDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ k, v, err = c.NextNoDup()
+ require.Nil(t, err)
+ keys, values = iteration(t, c, k, v)
+ require.Equal(t, []string{"key3"}, keys)
+ require.Equal(t, []string{"value1.6"}, values)
+}
+
+func TestCurrentDup(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ count, err := c.CountDuplicates()
+ require.Nil(t, err)
+ require.Equal(t, count, uint64(2))
+
+ require.Error(t, c.PutNoDupData([]byte("key3"), []byte("value3.3")))
+ require.NoError(t, c.DeleteCurrentDuplicates())
+
+ k, v, err := c.SeekExact([]byte("key1"))
+ require.Nil(t, err)
+ keys, values := iteration(t, c, k, v)
+ require.Equal(t, []string{"key1", "key1"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3"}, values)
+
+ require.Equal(t, []string{"key1", "key1"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3"}, values)
+}
+
+func TestDupDelete(t *testing.T) {
+ _, _, c := BaseCase(t)
+
+ k, _, err := c.Current()
+ require.Nil(t, err)
+ require.Equal(t, []byte("key3"), k)
+
+ err = c.DeleteCurrentDuplicates()
+ require.Nil(t, err)
+
+ err = c.Delete([]byte("key1"))
+ require.Nil(t, err)
+
+ count, err := c.Count()
+ require.Nil(t, err)
+ assert.Zero(t, count)
+}
+
+func baseAutoConversion(t *testing.T) (kv.RwDB, kv.RwTx, kv.RwCursor) {
+ t.Helper()
+ path := t.TempDir()
+ logger := log.New()
+ db := NewMDBX(logger).InMem(path).MustOpen()
+
+ tx, err := db.BeginRw(context.Background())
+ require.NoError(t, err)
+
+ c, err := tx.RwCursor(kv.PlainState)
+ require.NoError(t, err)
+
+ // Insert some records
+ require.NoError(t, c.Put([]byte("A"), []byte("0")))
+ require.NoError(t, c.Put([]byte("A..........................._______________________________A"), []byte("1")))
+ require.NoError(t, c.Put([]byte("A..........................._______________________________C"), []byte("2")))
+ require.NoError(t, c.Put([]byte("B"), []byte("8")))
+ require.NoError(t, c.Put([]byte("C"), []byte("9")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________A"), []byte("3")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________C"), []byte("4")))
+
+ return db, tx, c
+}
+
+func TestAutoConversion(t *testing.T) {
+ db, tx, c := baseAutoConversion(t)
+ defer db.Close()
+ defer tx.Rollback()
+ defer c.Close()
+
+ // key length conflict
+ require.Error(t, c.Put([]byte("A..........................."), []byte("?")))
+
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________A")))
+ require.NoError(t, c.Put([]byte("B"), []byte("7")))
+ require.NoError(t, c.Delete([]byte("C")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________C"), []byte("6")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________E"), []byte("5")))
+
+ k, v, err := c.First()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("A"), k)
+ assert.Equal(t, []byte("0"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("A..........................._______________________________C"), k)
+ assert.Equal(t, []byte("2"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("B"), k)
+ assert.Equal(t, []byte("7"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________A"), k)
+ assert.Equal(t, []byte("3"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________C"), k)
+ assert.Equal(t, []byte("6"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________E"), k)
+ assert.Equal(t, []byte("5"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+}
+
+func TestAutoConversionSeekBothRange(t *testing.T) {
+ db, tx, nonDupC := baseAutoConversion(t)
+ nonDupC.Close()
+ defer db.Close()
+ defer tx.Rollback()
+
+ c, err := tx.RwCursorDupSort(kv.PlainState)
+ require.NoError(t, err)
+
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________A")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________C"), []byte("6")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________E"), []byte("5")))
+
+ v, err := c.SeekBothRange([]byte("A..........................."), []byte("_______________________________A"))
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________C2"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("A..........................."), []byte("_______________________________X"))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("B..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("C..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("D..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________A3"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________C6"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________E5"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("X..........................."), []byte("_______________________________Y"))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+}
diff --git a/erigon-lib/kv/mdbx/kv_migrator_test.go b/erigon-lib/kv/mdbx/kv_migrator_test.go
new file mode 100644
index 00000000000..05eef49fd91
--- /dev/null
+++ b/erigon-lib/kv/mdbx/kv_migrator_test.go
@@ -0,0 +1,121 @@
+//go:build !windows
+
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package mdbx_test
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBucketCRUD(t *testing.T) {
+ require := require.New(t)
+ db, tx := memdb.NewTestTx(t)
+
+ normalBucket := kv.ChaindataTables[15]
+ deprecatedBucket := kv.ChaindataDeprecatedTables[0]
+ migrator := tx
+
+ // check thad buckets have unique DBI's
+ uniquness := map[kv.DBI]bool{}
+ castedKv, ok := db.(*mdbx.MdbxKV)
+ if !ok {
+ t.Skip()
+ }
+ for _, dbi := range castedKv.AllDBI() {
+ if dbi == mdbx.NonExistingDBI {
+ continue
+ }
+ _, ok := uniquness[dbi]
+ require.False(ok)
+ uniquness[dbi] = true
+ }
+
+ require.True(migrator.ExistsBucket(normalBucket))
+ require.True(errors.Is(migrator.DropBucket(normalBucket), kv.ErrAttemptToDeleteNonDeprecatedBucket))
+
+ require.False(migrator.ExistsBucket(deprecatedBucket))
+ require.NoError(migrator.CreateBucket(deprecatedBucket))
+ require.True(migrator.ExistsBucket(deprecatedBucket))
+
+ require.NoError(migrator.DropBucket(deprecatedBucket))
+ require.False(migrator.ExistsBucket(deprecatedBucket))
+
+ require.NoError(migrator.CreateBucket(deprecatedBucket))
+ require.True(migrator.ExistsBucket(deprecatedBucket))
+
+ c, err := tx.RwCursor(deprecatedBucket)
+ require.NoError(err)
+ err = c.Put([]byte{1}, []byte{1})
+ require.NoError(err)
+ v, err := tx.GetOne(deprecatedBucket, []byte{1})
+ require.NoError(err)
+ require.Equal([]byte{1}, v)
+
+ buckets, err := migrator.ListBuckets()
+ require.NoError(err)
+ require.True(len(buckets) > 10)
+
+ // check thad buckets have unique DBI's
+ uniquness = map[kv.DBI]bool{}
+ for _, dbi := range castedKv.AllDBI() {
+ if dbi == mdbx.NonExistingDBI {
+ continue
+ }
+ _, ok := uniquness[dbi]
+ require.False(ok)
+ uniquness[dbi] = true
+ }
+}
+
+func TestReadOnlyMode(t *testing.T) {
+ path := t.TempDir()
+ logger := log.New()
+ db1 := mdbx.NewMDBX(logger).Path(path).MapSize(16 * datasize.MB).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return kv.TableCfg{
+ kv.Headers: kv.TableCfgItem{},
+ }
+ }).MustOpen()
+ db1.Close()
+ time.Sleep(10 * time.Millisecond) // win sometime need time to close file
+
+ db2 := mdbx.NewMDBX(logger).Readonly().Path(path).MapSize(16 * datasize.MB).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return kv.TableCfg{
+ kv.Headers: kv.TableCfgItem{},
+ }
+ }).MustOpen()
+ defer db2.Close()
+
+ tx, err := db2.BeginRo(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ c, err := tx.Cursor(kv.Headers)
+ require.NoError(t, err)
+ defer c.Close()
+ _, _, err = c.Seek([]byte("some prefix"))
+ require.NoError(t, err)
+}
diff --git a/erigon-lib/kv/mdbx/util.go b/erigon-lib/kv/mdbx/util.go
new file mode 100644
index 00000000000..f0a53d60a1f
--- /dev/null
+++ b/erigon-lib/kv/mdbx/util.go
@@ -0,0 +1,48 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mdbx
+
+import (
+ "context"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func MustOpen(path string) kv.RwDB {
+ db, err := Open(context.Background(), path, log.New(), false)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// Open - main method to open database.
+func Open(ctx context.Context, path string, logger log.Logger, accede bool) (kv.RwDB, error) {
+ var db kv.RwDB
+ var err error
+ opts := NewMDBX(logger).Path(path)
+ if accede {
+ opts = opts.Accede()
+ }
+ db, err = opts.Open(ctx)
+
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/ethdb/olddb/database_test.go b/erigon-lib/kv/membatch/database_test.go
similarity index 84%
rename from ethdb/olddb/database_test.go
rename to erigon-lib/kv/membatch/database_test.go
index 7767f3005af..c2b789162cf 100644
--- a/ethdb/olddb/database_test.go
+++ b/erigon-lib/kv/membatch/database_test.go
@@ -16,7 +16,7 @@
//go:build !js
-package olddb
+package membatch
import (
"bytes"
@@ -29,9 +29,6 @@ import (
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
- "github.com/ledgerwatch/erigon/common"
- "github.com/ledgerwatch/erigon/ethdb"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -207,42 +204,3 @@ func TestParallelPutGet(t *testing.T) {
}
pending.Wait()
}
-
-var hexEntries = map[string]string{
- "6b": "89c6",
- "91": "c476",
- "a8": "0a514e",
- "bb": "7a",
- "bd": "fe76",
- "c0": "12",
-}
-
-var startKey = common.FromHex("a0")
-var fixedBits = 3
-
-var keysInRange = [][]byte{common.FromHex("a8"), common.FromHex("bb"), common.FromHex("bd")}
-
-func TestWalk(t *testing.T) {
- _, tx := memdb.NewTestTx(t)
-
- for k, v := range hexEntries {
- err := tx.Put(testBucket, common.FromHex(k), common.FromHex(v))
- if err != nil {
- t.Fatalf("put failed: %v", err)
- }
- }
-
- var gotKeys [][]byte
- c, err := tx.Cursor(testBucket)
- if err != nil {
- panic(err)
- }
- defer c.Close()
- err = ethdb.Walk(c, startKey, fixedBits, func(key, val []byte) (bool, error) {
- gotKeys = append(gotKeys, common.CopyBytes(key))
- return true, nil
- })
- assert.NoError(t, err)
-
- assert.Equal(t, keysInRange, gotKeys)
-}
diff --git a/ethdb/olddb/mapmutation.go b/erigon-lib/kv/membatch/mapmutation.go
similarity index 65%
rename from ethdb/olddb/mapmutation.go
rename to erigon-lib/kv/membatch/mapmutation.go
index 385d201ddd0..994a042f67f 100644
--- a/ethdb/olddb/mapmutation.go
+++ b/erigon-lib/kv/membatch/mapmutation.go
@@ -1,8 +1,9 @@
-package olddb
+package membatch
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"sync"
"time"
@@ -11,13 +12,11 @@ import (
"github.com/ledgerwatch/erigon-lib/etl"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
-
- "github.com/ledgerwatch/erigon/ethdb"
)
-type mapmutation struct {
+type Mapmutation struct {
puts map[string]map[string][]byte // table -> key -> value ie. blocks -> hash -> blockBod
- db kv.RwTx
+ db kv.Tx
quit <-chan struct{}
clean func()
mu sync.RWMutex
@@ -32,10 +31,10 @@ type mapmutation struct {
// Common pattern:
//
// batch := db.NewBatch()
-// defer batch.Rollback()
+// defer batch.Close()
// ... some calculations on `batch`
// batch.Commit()
-func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string, logger log.Logger) *mapmutation {
+func NewHashBatch(tx kv.Tx, quit <-chan struct{}, tmpdir string, logger log.Logger) *Mapmutation {
clean := func() {}
if quit == nil {
ch := make(chan struct{})
@@ -43,7 +42,7 @@ func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string, logger log.Lo
quit = ch
}
- return &mapmutation{
+ return &Mapmutation{
db: tx,
puts: make(map[string]map[string][]byte),
quit: quit,
@@ -53,14 +52,7 @@ func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string, logger log.Lo
}
}
-func (m *mapmutation) RwKV() kv.RwDB {
- if casted, ok := m.db.(ethdb.HasRwKV); ok {
- return casted.RwKV()
- }
- return nil
-}
-
-func (m *mapmutation) getMem(table string, key []byte) ([]byte, bool) {
+func (m *Mapmutation) getMem(table string, key []byte) ([]byte, bool) {
m.mu.RLock()
defer m.mu.RUnlock()
if _, ok := m.puts[table]; !ok {
@@ -73,7 +65,7 @@ func (m *mapmutation) getMem(table string, key []byte) ([]byte, bool) {
return nil, false
}
-func (m *mapmutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) {
+func (m *Mapmutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) {
v, ok := m.getMem(kv.Sequence, []byte(bucket))
if !ok && m.db != nil {
v, err = m.db.GetOne(kv.Sequence, []byte(bucket))
@@ -95,7 +87,7 @@ func (m *mapmutation) IncrementSequence(bucket string, amount uint64) (res uint6
return currentV, nil
}
-func (m *mapmutation) ReadSequence(bucket string) (res uint64, err error) {
+func (m *Mapmutation) ReadSequence(bucket string) (res uint64, err error) {
v, ok := m.getMem(kv.Sequence, []byte(bucket))
if !ok && m.db != nil {
v, err = m.db.GetOne(kv.Sequence, []byte(bucket))
@@ -112,7 +104,7 @@ func (m *mapmutation) ReadSequence(bucket string) (res uint64, err error) {
}
// Can only be called from the worker thread
-func (m *mapmutation) GetOne(table string, key []byte) ([]byte, error) {
+func (m *Mapmutation) GetOne(table string, key []byte) ([]byte, error) {
if value, ok := m.getMem(table, key); ok {
return value, nil
}
@@ -127,21 +119,7 @@ func (m *mapmutation) GetOne(table string, key []byte) ([]byte, error) {
return nil, nil
}
-// Can only be called from the worker thread
-func (m *mapmutation) Get(table string, key []byte) ([]byte, error) {
- value, err := m.GetOne(table, key)
- if err != nil {
- return nil, err
- }
-
- if value == nil {
- return nil, ethdb.ErrKeyNotFound
- }
-
- return value, nil
-}
-
-func (m *mapmutation) Last(table string) ([]byte, []byte, error) {
+func (m *Mapmutation) Last(table string) ([]byte, []byte, error) {
c, err := m.db.Cursor(table)
if err != nil {
return nil, nil, err
@@ -150,7 +128,7 @@ func (m *mapmutation) Last(table string) ([]byte, []byte, error) {
return c.Last()
}
-func (m *mapmutation) Has(table string, key []byte) (bool, error) {
+func (m *Mapmutation) Has(table string, key []byte) (bool, error) {
if _, ok := m.getMem(table, key); ok {
return ok, nil
}
@@ -161,7 +139,7 @@ func (m *mapmutation) Has(table string, key []byte) (bool, error) {
}
// puts a table key with a value and if the table is not found then it appends a table
-func (m *mapmutation) Put(table string, k, v []byte) error {
+func (m *Mapmutation) Put(table string, k, v []byte) error {
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.puts[table]; !ok {
@@ -183,40 +161,40 @@ func (m *mapmutation) Put(table string, k, v []byte) error {
return nil
}
-func (m *mapmutation) Append(table string, key []byte, value []byte) error {
+func (m *Mapmutation) Append(table string, key []byte, value []byte) error {
return m.Put(table, key, value)
}
-func (m *mapmutation) AppendDup(table string, key []byte, value []byte) error {
+func (m *Mapmutation) AppendDup(table string, key []byte, value []byte) error {
return m.Put(table, key, value)
}
-func (m *mapmutation) BatchSize() int {
+func (m *Mapmutation) BatchSize() int {
m.mu.RLock()
defer m.mu.RUnlock()
return m.size
}
-func (m *mapmutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error {
+func (m *Mapmutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error {
m.panicOnEmptyDB()
return m.db.ForEach(bucket, fromPrefix, walker)
}
-func (m *mapmutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
+func (m *Mapmutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
m.panicOnEmptyDB()
return m.db.ForPrefix(bucket, prefix, walker)
}
-func (m *mapmutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error {
+func (m *Mapmutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error {
m.panicOnEmptyDB()
return m.db.ForAmount(bucket, prefix, amount, walker)
}
-func (m *mapmutation) Delete(table string, k []byte) error {
+func (m *Mapmutation) Delete(table string, k []byte) error {
return m.Put(table, k, nil)
}
-func (m *mapmutation) doCommit(tx kv.RwTx) error {
+func (m *Mapmutation) doCommit(tx kv.RwTx) error {
logEvery := time.NewTicker(30 * time.Second)
defer logEvery.Stop()
count := 0
@@ -235,7 +213,7 @@ func (m *mapmutation) doCommit(tx kv.RwTx) error {
tx.CollectMetrics()
}
}
- if err := collector.Load(m.db, table, etl.IdentityLoadFunc, etl.TransformArgs{Quit: m.quit}); err != nil {
+ if err := collector.Load(tx, table, etl.IdentityLoadFunc, etl.TransformArgs{Quit: m.quit}); err != nil {
return err
}
}
@@ -244,51 +222,43 @@ func (m *mapmutation) doCommit(tx kv.RwTx) error {
return nil
}
-func (m *mapmutation) Commit() error {
- if m.db == nil {
- return nil
+func (m *Mapmutation) Flush(ctx context.Context, tx kv.RwTx) error {
+ if tx == nil {
+ return errors.New("rwTx needed")
}
m.mu.Lock()
defer m.mu.Unlock()
- if err := m.doCommit(m.db); err != nil {
+ if err := m.doCommit(tx); err != nil {
return err
}
m.puts = map[string]map[string][]byte{}
m.size = 0
m.count = 0
- m.clean()
return nil
}
-func (m *mapmutation) Rollback() {
+func (m *Mapmutation) Close() {
+ if m.clean == nil {
+ return
+ }
+
m.mu.Lock()
defer m.mu.Unlock()
m.puts = map[string]map[string][]byte{}
m.size = 0
m.count = 0
m.size = 0
- m.clean()
-}
-func (m *mapmutation) Close() {
- m.Rollback()
-}
+ m.clean()
+ m.clean = nil
-func (m *mapmutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) {
- panic("mutation can't start transaction, because doesn't own it")
}
+func (m *Mapmutation) Commit() error { panic("not db txn, use .Flush method") }
+func (m *Mapmutation) Rollback() { panic("not db txn, use .Close method") }
-func (m *mapmutation) panicOnEmptyDB() {
+func (m *Mapmutation) panicOnEmptyDB() {
if m.db == nil {
panic("Not implemented")
}
}
-
-func (m *mapmutation) SetRwKV(kv kv.RwDB) {
- hasRwKV, ok := m.db.(ethdb.HasRwKV)
- if !ok {
- log.Warn("Failed to convert mapmutation type to HasRwKV interface")
- }
- hasRwKV.SetRwKV(kv)
-}
diff --git a/erigon-lib/kv/membatch/mapmutation_test.go b/erigon-lib/kv/membatch/mapmutation_test.go
new file mode 100644
index 00000000000..a658c834fa7
--- /dev/null
+++ b/erigon-lib/kv/membatch/mapmutation_test.go
@@ -0,0 +1,33 @@
+package membatch
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMapmutation_Flush_Close(t *testing.T) {
+ db := memdb.NewTestDB(t)
+
+ tx, err := db.BeginRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ batch := NewHashBatch(tx, nil, os.TempDir(), log.New())
+ defer func() {
+ batch.Close()
+ }()
+ err = batch.Put(kv.ChaindataTables[0], []byte{1}, []byte{1})
+ require.NoError(t, err)
+ err = batch.Put(kv.ChaindataTables[0], []byte{2}, []byte{2})
+ require.NoError(t, err)
+ err = batch.Flush(context.Background(), tx)
+ require.NoError(t, err)
+ batch.Close()
+ batch.Close()
+}
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go
new file mode 100644
index 00000000000..c00f93cb2c6
--- /dev/null
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go
@@ -0,0 +1,513 @@
+/*
+ Copyright 2022 Erigon contributors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package membatchwithdb
+
+import (
+ "bytes"
+ "context"
+ "unsafe"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv/iter"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/ledgerwatch/log/v3"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+)
+
+type MemoryMutation struct {
+ memTx kv.RwTx
+ memDb kv.RwDB
+ deletedEntries map[string]map[string]struct{}
+ clearedTables map[string]struct{}
+ db kv.Tx
+ statelessCursors map[string]kv.RwCursor
+}
+
+// NewMemoryBatch - starts in-mem batch
+//
+// Common pattern:
+//
+// batch := NewMemoryBatch(db, tmpDir)
+// defer batch.Close()
+// ... some calculations on `batch`
+// batch.Commit()
+func NewMemoryBatch(tx kv.Tx, tmpDir string) *MemoryMutation {
+ tmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen()
+ memTx, err := tmpDB.BeginRw(context.Background())
+ if err != nil {
+ panic(err)
+ }
+ if err := initSequences(tx, memTx); err != nil {
+ return nil
+ }
+
+ return &MemoryMutation{
+ db: tx,
+ memDb: tmpDB,
+ memTx: memTx,
+ deletedEntries: make(map[string]map[string]struct{}),
+ clearedTables: make(map[string]struct{}),
+ }
+}
+
+func NewMemoryBatchWithCustomDB(tx kv.Tx, db kv.RwDB, uTx kv.RwTx, tmpDir string) *MemoryMutation {
+ return &MemoryMutation{
+ db: tx,
+ memDb: db,
+ memTx: uTx,
+ deletedEntries: make(map[string]map[string]struct{}),
+ clearedTables: make(map[string]struct{}),
+ }
+}
+
+func (m *MemoryMutation) UpdateTxn(tx kv.Tx) {
+ m.db = tx
+ m.statelessCursors = nil
+}
+
+func (m *MemoryMutation) isTableCleared(table string) bool {
+ _, ok := m.clearedTables[table]
+ return ok
+}
+
+func (m *MemoryMutation) isEntryDeleted(table string, key []byte) bool {
+ _, ok := m.deletedEntries[table]
+ if !ok {
+ return ok
+ }
+ _, ok = m.deletedEntries[table][string(key)]
+ return ok
+}
+
+func (m *MemoryMutation) DBSize() (uint64, error) {
+ panic("not implemented")
+}
+
+func initSequences(db kv.Tx, memTx kv.RwTx) error {
+ cursor, err := db.Cursor(kv.Sequence)
+ if err != nil {
+ return err
+ }
+ for k, v, err := cursor.First(); k != nil; k, v, err = cursor.Next() {
+ if err != nil {
+ return err
+ }
+ if err := memTx.Put(kv.Sequence, k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *MemoryMutation) IncrementSequence(bucket string, amount uint64) (uint64, error) {
+ return m.memTx.IncrementSequence(bucket, amount)
+}
+
+func (m *MemoryMutation) ReadSequence(bucket string) (uint64, error) {
+ return m.memTx.ReadSequence(bucket)
+}
+
+func (m *MemoryMutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error {
+ if amount == 0 {
+ return nil
+ }
+ c, err := m.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(prefix); k != nil && amount > 0; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ amount--
+ }
+ return nil
+}
+
+func (m *MemoryMutation) statelessCursor(table string) (kv.RwCursor, error) {
+ if m.statelessCursors == nil {
+ m.statelessCursors = make(map[string]kv.RwCursor)
+ }
+ c, ok := m.statelessCursors[table]
+ if !ok {
+ var err error
+ c, err = m.RwCursor(table)
+ if err != nil {
+ return nil, err
+ }
+ m.statelessCursors[table] = c
+ }
+ return c, nil
+}
+
+// Can only be called from the worker thread
+func (m *MemoryMutation) GetOne(table string, key []byte) ([]byte, error) {
+ c, err := m.statelessCursor(table)
+ if err != nil {
+ return nil, err
+ }
+ _, v, err := c.SeekExact(key)
+ return v, err
+}
+
+func (m *MemoryMutation) Last(table string) ([]byte, []byte, error) {
+ panic("not implemented. (MemoryMutation.Last)")
+}
+
+// Has return whether a key is present in a certain table.
+func (m *MemoryMutation) Has(table string, key []byte) (bool, error) {
+ c, err := m.statelessCursor(table)
+ if err != nil {
+ return false, err
+ }
+ k, _, err := c.Seek(key)
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(key, k), nil
+}
+
+func (m *MemoryMutation) Put(table string, k, v []byte) error {
+ return m.memTx.Put(table, k, v)
+}
+
+func (m *MemoryMutation) Append(table string, key []byte, value []byte) error {
+ return m.memTx.Append(table, key, value)
+}
+
+func (m *MemoryMutation) AppendDup(table string, key []byte, value []byte) error {
+ c, err := m.statelessCursor(table)
+ if err != nil {
+ return err
+ }
+ return c.(*memoryMutationCursor).AppendDup(key, value)
+}
+
+func (m *MemoryMutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error {
+ c, err := m.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(fromPrefix); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *MemoryMutation) Prefix(table string, prefix []byte) (iter.KV, error) {
+ nextPrefix, ok := kv.NextSubtree(prefix)
+ if !ok {
+ return m.Stream(table, prefix, nil)
+ }
+ return m.Stream(table, prefix, nextPrefix)
+}
+func (m *MemoryMutation) Stream(table string, fromPrefix, toPrefix []byte) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) StreamAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) StreamDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ panic("please implement me")
+}
+func (m *MemoryMutation) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) {
+ panic("please implement me")
+}
+
+func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
+ c, err := m.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(prefix); k != nil; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if !bytes.HasPrefix(k, prefix) {
+ break
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *MemoryMutation) Delete(table string, k []byte) error {
+ if _, ok := m.deletedEntries[table]; !ok {
+ m.deletedEntries[table] = make(map[string]struct{})
+ }
+ m.deletedEntries[table][string(k)] = struct{}{}
+ return m.memTx.Delete(table, k)
+}
+
+func (m *MemoryMutation) Commit() error {
+ m.statelessCursors = nil
+ return nil
+}
+
+func (m *MemoryMutation) Rollback() {
+ m.memTx.Rollback()
+ m.memDb.Close()
+ m.statelessCursors = nil
+}
+
+func (m *MemoryMutation) Close() {
+ m.Rollback()
+}
+
+func (m *MemoryMutation) BucketSize(bucket string) (uint64, error) {
+ return m.memTx.BucketSize(bucket)
+}
+
+func (m *MemoryMutation) DropBucket(bucket string) error {
+ panic("Not implemented")
+}
+
+func (m *MemoryMutation) ExistsBucket(bucket string) (bool, error) {
+ panic("Not implemented")
+}
+
+func (m *MemoryMutation) ListBuckets() ([]string, error) {
+ panic("Not implemented")
+}
+
+func (m *MemoryMutation) ClearBucket(bucket string) error {
+ m.clearedTables[bucket] = struct{}{}
+ return m.memTx.ClearBucket(bucket)
+}
+
+func (m *MemoryMutation) CollectMetrics() {
+}
+
+func (m *MemoryMutation) CreateBucket(bucket string) error {
+ return m.memTx.CreateBucket(bucket)
+}
+
+func (m *MemoryMutation) Flush(tx kv.RwTx) error {
+ // Obtain buckets touched.
+ buckets, err := m.memTx.ListBuckets()
+ if err != nil {
+ return err
+ }
+ // Obliterate buckets who are to be deleted
+ for bucket := range m.clearedTables {
+ if err := tx.ClearBucket(bucket); err != nil {
+ return err
+ }
+ }
+ // Obliterate entries who are to be deleted
+ for bucket, keys := range m.deletedEntries {
+ for key := range keys {
+ if err := tx.Delete(bucket, []byte(key)); err != nil {
+ return err
+ }
+ }
+ }
+ // Iterate over each bucket and apply changes accordingly.
+ for _, bucket := range buckets {
+ if isTablePurelyDupsort(bucket) {
+ cbucket, err := m.memTx.CursorDupSort(bucket)
+ if err != nil {
+ return err
+ }
+ defer cbucket.Close()
+ dbCursor, err := tx.RwCursorDupSort(bucket)
+ if err != nil {
+ return err
+ }
+ defer dbCursor.Close()
+ for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() {
+ if err != nil {
+ return err
+ }
+ if err := dbCursor.Put(k, v); err != nil {
+ return err
+ }
+ }
+ } else {
+ cbucket, err := m.memTx.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer cbucket.Close()
+ for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() {
+ if err != nil {
+ return err
+ }
+ if err := tx.Put(bucket, k, v); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (m *MemoryMutation) Diff() (*MemoryDiff, error) {
+ memDiff := &MemoryDiff{
+ diff: make(map[table][]entry),
+ deletedEntries: make(map[string][]string),
+ }
+ // Obtain buckets touched.
+ buckets, err := m.memTx.ListBuckets()
+ if err != nil {
+ return nil, err
+ }
+ // Obliterate buckets who are to be deleted
+ for bucket := range m.clearedTables {
+ memDiff.clearedTableNames = append(memDiff.clearedTableNames, bucket)
+ }
+ // Obliterate entries who are to be deleted
+ for bucket, keys := range m.deletedEntries {
+ for key := range keys {
+ memDiff.deletedEntries[bucket] = append(memDiff.deletedEntries[bucket], key)
+ }
+ }
+ // Iterate over each bucket and apply changes accordingly.
+ for _, bucket := range buckets {
+ if isTablePurelyDupsort(bucket) {
+ cbucket, err := m.memTx.CursorDupSort(bucket)
+ if err != nil {
+ return nil, err
+ }
+ defer cbucket.Close()
+
+ t := table{
+ name: bucket,
+ dupsort: true,
+ }
+ for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() {
+ if err != nil {
+ return nil, err
+ }
+ memDiff.diff[t] = append(memDiff.diff[t], entry{
+ k: common.Copy(k),
+ v: common.Copy(v),
+ })
+ }
+ } else {
+ cbucket, err := m.memTx.Cursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ defer cbucket.Close()
+ t := table{
+ name: bucket,
+ dupsort: false,
+ }
+ for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() {
+ if err != nil {
+ return nil, err
+ }
+ memDiff.diff[t] = append(memDiff.diff[t], entry{
+ k: common.Copy(k),
+ v: common.Copy(v),
+ })
+ }
+ }
+ }
+ return memDiff, nil
+}
+
+// Check if a bucket is dupsorted and has dupsort conversion off
+func isTablePurelyDupsort(bucket string) bool {
+ config, ok := kv.ChaindataTablesCfg[bucket]
+ // If we do not have the configuration we assume it is not dupsorted
+ if !ok {
+ return false
+ }
+ return !config.AutoDupSortKeysConversion && config.Flags == kv.DupSort
+}
+
+func (m *MemoryMutation) MemDB() kv.RwDB {
+ return m.memDb
+}
+
+func (m *MemoryMutation) MemTx() kv.RwTx {
+ return m.memTx
+}
+
+// Cursor creates a new cursor (the real fun begins here)
+func (m *MemoryMutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) {
+ c := &memoryMutationCursor{}
+ // We can filter duplicates in dup sorted table
+ c.table = bucket
+
+ var err error
+ c.cursor, err = m.db.CursorDupSort(bucket)
+ if err != nil {
+ return nil, err
+ }
+ c.memCursor, err = m.memTx.RwCursorDupSort(bucket)
+ if err != nil {
+ return nil, err
+ }
+ c.mutation = m
+ return c, err
+}
+
+// Cursor creates a new cursor (the real fun begins here)
+func (m *MemoryMutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) {
+ return m.makeCursor(bucket)
+}
+
+// Cursor creates a new cursor (the real fun begins here)
+func (m *MemoryMutation) RwCursor(bucket string) (kv.RwCursor, error) {
+ return m.makeCursor(bucket)
+}
+
+// Cursor creates a new cursor (the real fun begins here)
+func (m *MemoryMutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) {
+ return m.makeCursor(bucket)
+}
+
+// Cursor creates a new cursor (the real fun begins here)
+func (m *MemoryMutation) Cursor(bucket string) (kv.Cursor, error) {
+ return m.makeCursor(bucket)
+}
+
+func (m *MemoryMutation) ViewID() uint64 {
+ panic("ViewID Not implemented")
+}
+
+func (m *MemoryMutation) CHandle() unsafe.Pointer {
+ panic("CHandle not implemented")
+}
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go
new file mode 100644
index 00000000000..c21b9e4015b
--- /dev/null
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go
@@ -0,0 +1,506 @@
+/*
+ Copyright 2022 Erigon contributors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package membatchwithdb
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type NextType int
+
+const (
+ Normal NextType = iota
+ Dup
+ NoDup
+)
+
+// entry for the cursor
+type cursorEntry struct {
+ key []byte
+ value []byte
+}
+
+// cursor
+type memoryMutationCursor struct {
+ // entry history
+ cursor kv.CursorDupSort
+ memCursor kv.RwCursorDupSort
+ // we keep the mining mutation so that we can insert new elements in db
+ mutation *MemoryMutation
+ table string
+ currentPair cursorEntry
+ currentDbEntry cursorEntry
+ currentMemEntry cursorEntry
+ isPrevFromDb bool
+}
+
+func (m *memoryMutationCursor) isTableCleared() bool {
+ return m.mutation.isTableCleared(m.table)
+}
+
+func (m *memoryMutationCursor) isEntryDeleted(key []byte, value []byte, t NextType) bool {
+ if t == Normal {
+ return m.mutation.isEntryDeleted(m.table, key)
+ } else {
+ return m.mutation.isEntryDeleted(m.table, m.convertAutoDupsort(key, value))
+ }
+}
+
+// First move cursor to first position and return key and value accordingly.
+func (m *memoryMutationCursor) First() ([]byte, []byte, error) {
+ memKey, memValue, err := m.memCursor.First()
+ if err != nil || m.isTableCleared() {
+ return memKey, memValue, err
+ }
+
+ dbKey, dbValue, err := m.cursor.First()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if dbKey != nil && m.isEntryDeleted(dbKey, dbValue, Normal) {
+ if dbKey, dbValue, err = m.getNextOnDb(Normal); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return m.resolveCursorPriority(memKey, memValue, dbKey, dbValue, Normal)
+}
+
+func (m *memoryMutationCursor) getNextOnDb(t NextType) (key []byte, value []byte, err error) {
+ switch t {
+ case Normal:
+ key, value, err = m.cursor.Next()
+ if err != nil {
+ return
+ }
+ case Dup:
+ key, value, err = m.cursor.NextDup()
+ if err != nil {
+ return
+ }
+ case NoDup:
+ key, value, err = m.cursor.NextNoDup()
+ if err != nil {
+ return
+ }
+ default:
+ err = fmt.Errorf("invalid next type")
+ return
+ }
+
+ for key != nil && value != nil && m.isEntryDeleted(key, value, t) {
+ switch t {
+ case Normal:
+ key, value, err = m.cursor.Next()
+ if err != nil {
+ return
+ }
+ case Dup:
+ key, value, err = m.cursor.NextDup()
+ if err != nil {
+ return
+ }
+ case NoDup:
+ key, value, err = m.cursor.NextNoDup()
+ if err != nil {
+ return
+ }
+ default:
+ err = fmt.Errorf("invalid next type")
+ return
+ }
+ }
+ return
+}
+
+func (m *memoryMutationCursor) convertAutoDupsort(key []byte, value []byte) []byte {
+ config, ok := kv.ChaindataTablesCfg[m.table]
+ // If we do not have the configuration we assume it is not dupsorted
+ if !ok || !config.AutoDupSortKeysConversion {
+ return key
+ }
+ if len(key) != config.DupToLen {
+ return key
+ }
+ return append(key, value[:config.DupFromLen-config.DupToLen]...)
+}
+
+// Current return the current key and values the cursor is on.
+func (m *memoryMutationCursor) Current() ([]byte, []byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.Current()
+ }
+ return common.Copy(m.currentPair.key), common.Copy(m.currentPair.value), nil
+}
+
+func (m *memoryMutationCursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte, t NextType) (newDbKey []byte, newDbValue []byte, err error) {
+ newDbKey = dbKey
+ newDbValue = dbValue
+ config, ok := kv.ChaindataTablesCfg[m.table]
+ dupSortTable := ok && ((config.Flags & kv.DupSort) != 0)
+ autoKeyConversion := ok && config.AutoDupSortKeysConversion
+ dupsortOffset := 0
+ if autoKeyConversion {
+ dupsortOffset = config.DupFromLen - config.DupToLen
+ }
+ // Check for duplicates
+ if bytes.Equal(memKey, dbKey) {
+ var skip bool
+ if t == Normal {
+ skip = !dupSortTable || autoKeyConversion || bytes.Equal(memValue, dbValue)
+ } else {
+ skip = bytes.Equal(memValue, dbValue) ||
+ (dupsortOffset != 0 && len(memValue) >= dupsortOffset && len(dbValue) >= dupsortOffset && bytes.Equal(memValue[:dupsortOffset], dbValue[:dupsortOffset]))
+ }
+ if skip {
+ if newDbKey, newDbValue, err = m.getNextOnDb(t); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func (m *memoryMutationCursor) resolveCursorPriority(memKey, memValue, dbKey, dbValue []byte, t NextType) ([]byte, []byte, error) {
+ if memValue == nil && dbValue == nil {
+ return nil, nil, nil
+ }
+
+ var err error
+ dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, t)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m.currentDbEntry = cursorEntry{dbKey, dbValue}
+ m.currentMemEntry = cursorEntry{memKey, memValue}
+ // compare entries
+ if bytes.Equal(memKey, dbKey) {
+ m.isPrevFromDb = dbValue != nil && (memValue == nil || bytes.Compare(memValue, dbValue) > 0)
+ } else {
+ m.isPrevFromDb = dbValue != nil && (memKey == nil || bytes.Compare(memKey, dbKey) > 0)
+ }
+ if dbValue == nil {
+ m.currentDbEntry = cursorEntry{}
+ }
+ if memValue == nil {
+ m.currentMemEntry = cursorEntry{}
+ }
+ if m.isPrevFromDb {
+ m.currentPair = cursorEntry{dbKey, dbValue}
+ return dbKey, dbValue, nil
+ }
+
+ m.currentPair = cursorEntry{memKey, memValue}
+ return memKey, memValue, nil
+}
+
+// Next returns the next element of the mutation.
+func (m *memoryMutationCursor) Next() ([]byte, []byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.Next()
+ }
+
+ if m.isPrevFromDb {
+ k, v, err := m.getNextOnDb(Normal)
+ if err != nil {
+ return nil, nil, err
+ }
+ return m.resolveCursorPriority(m.currentMemEntry.key, m.currentMemEntry.value, k, v, Normal)
+ }
+
+ memK, memV, err := m.memCursor.Next()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return m.resolveCursorPriority(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, Normal)
+}
+
+// NextDup returns the next element of the mutation.
+func (m *memoryMutationCursor) NextDup() ([]byte, []byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.NextDup()
+ }
+
+ if m.isPrevFromDb {
+ k, v, err := m.getNextOnDb(Dup)
+
+ if err != nil {
+ return nil, nil, err
+ }
+ return m.resolveCursorPriority(m.currentMemEntry.key, m.currentMemEntry.value, k, v, Dup)
+ }
+
+ memK, memV, err := m.memCursor.NextDup()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return m.resolveCursorPriority(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, Dup)
+}
+
+// Seek move pointer to a key at a certain position.
+func (m *memoryMutationCursor) Seek(seek []byte) ([]byte, []byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.Seek(seek)
+ }
+
+ dbKey, dbValue, err := m.cursor.Seek(seek)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // If the entry is marked as deleted find one that is not
+ if dbKey != nil && m.isEntryDeleted(dbKey, dbValue, Normal) {
+ dbKey, dbValue, err = m.getNextOnDb(Normal)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ memKey, memValue, err := m.memCursor.Seek(seek)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return m.resolveCursorPriority(memKey, memValue, dbKey, dbValue, Normal)
+}
+
+// Seek move pointer to a key at a certain position.
+func (m *memoryMutationCursor) SeekExact(seek []byte) ([]byte, []byte, error) {
+ memKey, memValue, err := m.memCursor.SeekExact(seek)
+ if err != nil || m.isTableCleared() {
+ return memKey, memValue, err
+ }
+
+ if memKey != nil {
+ m.currentMemEntry.key = memKey
+ m.currentMemEntry.value = memValue
+ m.currentDbEntry.key, m.currentDbEntry.value, err = m.cursor.Seek(seek)
+ m.isPrevFromDb = false
+ m.currentPair = cursorEntry{memKey, memValue}
+ return memKey, memValue, err
+ }
+
+ dbKey, dbValue, err := m.cursor.SeekExact(seek)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if dbKey != nil && !m.mutation.isEntryDeleted(m.table, seek) {
+ m.currentDbEntry.key = dbKey
+ m.currentDbEntry.value = dbValue
+ m.currentMemEntry.key, m.currentMemEntry.value, err = m.memCursor.Seek(seek)
+ m.isPrevFromDb = true
+ m.currentPair = cursorEntry{dbKey, dbValue}
+ return dbKey, dbValue, err
+ }
+ return nil, nil, nil
+}
+
+func (m *memoryMutationCursor) Put(k, v []byte) error {
+ return m.mutation.Put(m.table, common.Copy(k), common.Copy(v))
+}
+
+func (m *memoryMutationCursor) Append(k []byte, v []byte) error {
+ return m.mutation.Append(m.table, common.Copy(k), common.Copy(v))
+
+}
+
+func (m *memoryMutationCursor) AppendDup(k []byte, v []byte) error {
+ return m.memCursor.AppendDup(common.Copy(k), common.Copy(v))
+}
+
+func (m *memoryMutationCursor) PutNoDupData(key, value []byte) error {
+ panic("Not implemented")
+}
+
+func (m *memoryMutationCursor) Delete(k []byte) error {
+ return m.mutation.Delete(m.table, k)
+}
+
+func (m *memoryMutationCursor) DeleteCurrent() error {
+ panic("DeleteCurrent Not implemented")
+}
+func (m *memoryMutationCursor) DeleteExact(_, _ []byte) error {
+ panic("DeleteExact Not implemented")
+}
+
+func (m *memoryMutationCursor) DeleteCurrentDuplicates() error {
+ config, ok := kv.ChaindataTablesCfg[m.table]
+ autoKeyConversion := ok && config.AutoDupSortKeysConversion
+ if autoKeyConversion {
+ panic("DeleteCurrentDuplicates Not implemented for AutoDupSortKeysConversion tables")
+ }
+
+ k, _, err := m.Current()
+ if err != nil {
+ return err
+ }
+ if k != nil {
+ return m.Delete(k)
+ }
+ return nil
+}
+
+// Seek move pointer to a key at a certain position.
+func (m *memoryMutationCursor) SeekBothRange(key, value []byte) ([]byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.SeekBothRange(key, value)
+ }
+
+ dbValue, err := m.cursor.SeekBothRange(key, value)
+ if err != nil {
+ return nil, err
+ }
+
+ if dbValue != nil && m.isEntryDeleted(key, dbValue, Dup) {
+ _, dbValue, err = m.getNextOnDb(Dup)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ memValue, err := m.memCursor.SeekBothRange(key, value)
+ if err != nil {
+ return nil, err
+ }
+ _, retValue, err := m.resolveCursorPriority(key, memValue, key, dbValue, Dup)
+ return retValue, err
+}
+
+func (m *memoryMutationCursor) Last() ([]byte, []byte, error) {
+ memKey, memValue, err := m.memCursor.Last()
+ if err != nil || m.isTableCleared() {
+ return memKey, memValue, err
+ }
+
+ dbKey, dbValue, err := m.cursor.Last()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, Normal)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m.currentDbEntry = cursorEntry{dbKey, dbValue}
+ m.currentMemEntry = cursorEntry{memKey, memValue}
+
+ // Basic checks
+ if dbKey != nil && m.isEntryDeleted(dbKey, dbValue, Normal) {
+ m.currentDbEntry = cursorEntry{}
+ m.isPrevFromDb = false
+ return memKey, memValue, nil
+ }
+
+ if dbValue == nil {
+ m.isPrevFromDb = false
+ return memKey, memValue, nil
+ }
+
+ if memValue == nil {
+ m.isPrevFromDb = true
+ return dbKey, dbValue, nil
+ }
+ // Check which one is last and return it
+ keyCompare := bytes.Compare(memKey, dbKey)
+ if keyCompare == 0 {
+ if bytes.Compare(memValue, dbValue) > 0 {
+ m.currentDbEntry = cursorEntry{}
+ m.isPrevFromDb = false
+ return memKey, memValue, nil
+ }
+ m.currentMemEntry = cursorEntry{}
+ m.isPrevFromDb = true
+ return dbKey, dbValue, nil
+ }
+
+ if keyCompare > 0 {
+ m.currentDbEntry = cursorEntry{}
+ m.isPrevFromDb = false
+ return memKey, memValue, nil
+ }
+
+ m.currentMemEntry = cursorEntry{}
+ m.isPrevFromDb = true
+ return dbKey, dbValue, nil
+}
+
+func (m *memoryMutationCursor) Prev() ([]byte, []byte, error) {
+ panic("Prev is not implemented!")
+}
+func (m *memoryMutationCursor) PrevDup() ([]byte, []byte, error) {
+ panic("Prev is not implemented!")
+}
+func (m *memoryMutationCursor) PrevNoDup() ([]byte, []byte, error) {
+ panic("Prev is not implemented!")
+}
+
+func (m *memoryMutationCursor) Close() {
+ if m.cursor != nil {
+ m.cursor.Close()
+ }
+ if m.memCursor != nil {
+ m.memCursor.Close()
+ }
+}
+
+func (m *memoryMutationCursor) Count() (uint64, error) {
+ panic("Not implemented")
+}
+
+func (m *memoryMutationCursor) FirstDup() ([]byte, error) {
+ panic("Not implemented")
+}
+
+func (m *memoryMutationCursor) NextNoDup() ([]byte, []byte, error) {
+ if m.isTableCleared() {
+ return m.memCursor.NextNoDup()
+ }
+
+ if m.isPrevFromDb {
+ k, v, err := m.getNextOnDb(NoDup)
+ if err != nil {
+ return nil, nil, err
+ }
+ return m.resolveCursorPriority(m.currentMemEntry.key, m.currentMemEntry.value, k, v, NoDup)
+ }
+
+ memK, memV, err := m.memCursor.NextNoDup()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return m.resolveCursorPriority(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, NoDup)
+}
+
+func (m *memoryMutationCursor) LastDup() ([]byte, error) {
+ panic("Not implemented")
+}
+
+func (m *memoryMutationCursor) CountDuplicates() (uint64, error) {
+ panic("Not implemented")
+}
+
+func (m *memoryMutationCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) {
+ panic("SeekBothExact Not implemented")
+}
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_diff.go b/erigon-lib/kv/membatchwithdb/memory_mutation_diff.go
new file mode 100644
index 00000000000..ed8b12fdb73
--- /dev/null
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation_diff.go
@@ -0,0 +1,58 @@
+package membatchwithdb
+
+import "github.com/ledgerwatch/erigon-lib/kv"
+
+type entry struct {
+ k []byte
+ v []byte
+}
+
+type MemoryDiff struct {
+ diff map[table][]entry // god.
+ deletedEntries map[string][]string
+ clearedTableNames []string
+}
+
+type table struct {
+ name string
+ dupsort bool
+}
+
+func (m *MemoryDiff) Flush(tx kv.RwTx) error {
+ // Obliterate buckets who are to be deleted
+ for _, bucket := range m.clearedTableNames {
+ if err := tx.ClearBucket(bucket); err != nil {
+ return err
+ }
+ }
+ // Obliterate entries who are to be deleted
+ for bucket, keys := range m.deletedEntries {
+ for _, key := range keys {
+ if err := tx.Delete(bucket, []byte(key)); err != nil {
+ return err
+ }
+ }
+ }
+ // Iterate over each bucket and apply changes accordingly.
+ for bucketInfo, bucketDiff := range m.diff {
+ if bucketInfo.dupsort {
+ dbCursor, err := tx.RwCursorDupSort(bucketInfo.name)
+ if err != nil {
+ return err
+ }
+ defer dbCursor.Close()
+ for _, entry := range bucketDiff {
+ if err := dbCursor.Put(entry.k, entry.v); err != nil {
+ return err
+ }
+ }
+ } else {
+ for _, entry := range bucketDiff {
+ if err := tx.Put(bucketInfo.name, entry.k, entry.v); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
new file mode 100644
index 00000000000..6bbc7d00da6
--- /dev/null
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
@@ -0,0 +1,667 @@
+/*
+ Copyright 2022 Erigon contributors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package membatchwithdb
+
+import (
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+func initializeDbNonDupSort(rwTx kv.RwTx) {
+ rwTx.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value"))
+ rwTx.Put(kv.HashedAccounts, []byte("CAAA"), []byte("value1"))
+ rwTx.Put(kv.HashedAccounts, []byte("CBAA"), []byte("value2"))
+ rwTx.Put(kv.HashedAccounts, []byte("CCAA"), []byte("value3"))
+}
+
+func TestPutAppendHas(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.5")))
+ require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
+ require.NoError(t, batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
+ require.NoError(t, batch.Append(kv.HashedAccounts, []byte("CBAA"), []byte("value3.5")))
+ require.Error(t, batch.Append(kv.HashedAccounts, []byte("CBAA"), []byte("value3.1")))
+ require.NoError(t, batch.AppendDup(kv.HashedAccounts, []byte("CBAA"), []byte("value3.1")))
+ require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
+
+ require.Nil(t, batch.Flush(rwTx))
+
+ exist, err := batch.Has(kv.HashedAccounts, []byte("AAAA"))
+ require.Nil(t, err)
+ require.Equal(t, exist, true)
+
+ val, err := batch.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ require.Nil(t, err)
+ require.Equal(t, val, []byte("value1.3"))
+
+ exist, err = batch.Has(kv.HashedAccounts, []byte("KKKK"))
+ require.Nil(t, err)
+ require.Equal(t, exist, false)
+}
+
+func TestLastMiningDB(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5"))
+
+ cursor, err := batch.Cursor(kv.HashedAccounts)
+ require.NoError(t, err)
+
+ key, value, err := cursor.Last()
+ require.NoError(t, err)
+
+ require.Equal(t, key, []byte("CCAA"))
+ require.Equal(t, value, []byte("value3"))
+
+ key, value, err = cursor.Next()
+ require.NoError(t, err)
+ require.Equal(t, key, []byte(nil))
+ require.Equal(t, value, []byte(nil))
+}
+
+func TestLastMiningMem(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
+
+ cursor, err := batch.Cursor(kv.HashedAccounts)
+ require.NoError(t, err)
+
+ key, value, err := cursor.Last()
+ require.NoError(t, err)
+
+ require.Equal(t, key, []byte("DCAA"))
+ require.Equal(t, value, []byte("value5"))
+
+ key, value, err = cursor.Next()
+ require.NoError(t, err)
+ require.Equal(t, key, []byte(nil))
+ require.Equal(t, value, []byte(nil))
+}
+
+func TestDeleteMining(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+ batch := NewMemoryBatch(rwTx, "")
+ batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
+ batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+
+ batch.Delete(kv.HashedAccounts, []byte("BAAA"))
+ batch.Delete(kv.HashedAccounts, []byte("CBAA"))
+
+ cursor, err := batch.Cursor(kv.HashedAccounts)
+ require.NoError(t, err)
+
+ key, value, err := cursor.SeekExact([]byte("BAAA"))
+ require.NoError(t, err)
+ require.Equal(t, key, []byte(nil))
+ require.Equal(t, value, []byte(nil))
+
+ key, value, err = cursor.SeekExact([]byte("CBAA"))
+ require.NoError(t, err)
+ require.Equal(t, key, []byte(nil))
+ require.Equal(t, value, []byte(nil))
+}
+
+func TestFlush(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+ batch := NewMemoryBatch(rwTx, "")
+ batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5"))
+ batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+
+ require.NoError(t, batch.Flush(rwTx))
+
+ value, err := rwTx.GetOne(kv.HashedAccounts, []byte("BAAA"))
+ require.NoError(t, err)
+ require.Equal(t, value, []byte("value4"))
+
+ value, err = rwTx.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ require.NoError(t, err)
+ require.Equal(t, value, []byte("value5"))
+}
+
+func TestForEach(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+ require.NoError(t, batch.Flush(rwTx))
+
+ var keys []string
+ var values []string
+ err := batch.ForEach(kv.HashedAccounts, []byte("XYAZ"), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Nil(t, keys)
+ require.Nil(t, values)
+
+ err = batch.ForEach(kv.HashedAccounts, []byte("CC"), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"CCAA", "FCAA"}, keys)
+ require.Equal(t, []string{"value3", "value5"}, values)
+
+ var keys1 []string
+ var values1 []string
+
+ err = batch.ForEach(kv.HashedAccounts, []byte("A"), func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ values1 = append(values1, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"AAAA", "CAAA", "CBAA", "CCAA", "FCAA"}, keys1)
+ require.Equal(t, []string{"value", "value1", "value2", "value3", "value5"}, values1)
+}
+
+func TestForPrefix(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ var keys1 []string
+ var values1 []string
+
+ err := batch.ForPrefix(kv.HashedAccounts, []byte("AB"), func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ values1 = append(values1, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Nil(t, keys1)
+ require.Nil(t, values1)
+
+ err = batch.ForPrefix(kv.HashedAccounts, []byte("AAAA"), func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ values1 = append(values1, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"AAAA"}, keys1)
+ require.Equal(t, []string{"value"}, values1)
+
+ var keys []string
+ var values []string
+ err = batch.ForPrefix(kv.HashedAccounts, []byte("C"), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ return nil
+ })
+ require.Nil(t, err)
+ require.Equal(t, []string{"CAAA", "CBAA", "CCAA"}, keys)
+ require.Equal(t, []string{"value1", "value2", "value3"}, values)
+}
+
+func TestForAmount(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ var keys []string
+ var values []string
+ err := batch.ForAmount(kv.HashedAccounts, []byte("C"), uint32(3), func(k, v []byte) error {
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ return nil
+ })
+
+ require.Nil(t, err)
+ require.Equal(t, []string{"CAAA", "CBAA", "CCAA"}, keys)
+ require.Equal(t, []string{"value1", "value2", "value3"}, values)
+
+ var keys1 []string
+ var values1 []string
+ err = batch.ForAmount(kv.HashedAccounts, []byte("C"), uint32(10), func(k, v []byte) error {
+ keys1 = append(keys1, string(k))
+ values1 = append(values1, string(v))
+ return nil
+ })
+
+ require.Nil(t, err)
+ require.Equal(t, []string{"CAAA", "CBAA", "CCAA"}, keys1)
+ require.Equal(t, []string{"value1", "value2", "value3"}, values1)
+}
+
+func TestGetOneAfterClearBucket(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ err := batch.ClearBucket(kv.HashedAccounts)
+ require.Nil(t, err)
+
+ cond := batch.isTableCleared(kv.HashedAccounts)
+ require.True(t, cond)
+
+ val, err := batch.GetOne(kv.HashedAccounts, []byte("A"))
+ require.Nil(t, err)
+ require.Nil(t, val)
+
+ val, err = batch.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ require.Nil(t, err)
+ require.Nil(t, val)
+}
+
+func TestSeekExactAfterClearBucket(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ err := batch.ClearBucket(kv.HashedAccounts)
+ require.Nil(t, err)
+
+ cond := batch.isTableCleared(kv.HashedAccounts)
+ require.True(t, cond)
+
+ cursor, err := batch.RwCursor(kv.HashedAccounts)
+ require.NoError(t, err)
+
+ key, val, err := cursor.SeekExact([]byte("AAAA"))
+ require.Nil(t, err)
+ assert.Nil(t, key)
+ assert.Nil(t, val)
+
+ err = cursor.Put([]byte("AAAA"), []byte("valueX"))
+ require.Nil(t, err)
+
+ key, val, err = cursor.SeekExact([]byte("AAAA"))
+ require.Nil(t, err)
+ assert.Equal(t, []byte("AAAA"), key)
+ assert.Equal(t, []byte("valueX"), val)
+
+ key, val, err = cursor.SeekExact([]byte("BBBB"))
+ require.Nil(t, err)
+ assert.Nil(t, key)
+ assert.Nil(t, val)
+}
+
+func TestFirstAfterClearBucket(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ err := batch.ClearBucket(kv.HashedAccounts)
+ require.Nil(t, err)
+
+ err = batch.Put(kv.HashedAccounts, []byte("BBBB"), []byte("value5"))
+ require.Nil(t, err)
+
+ cursor, err := batch.Cursor(kv.HashedAccounts)
+ require.NoError(t, err)
+
+ key, val, err := cursor.First()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("BBBB"), key)
+ assert.Equal(t, []byte("value5"), val)
+
+ key, val, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Nil(t, key)
+ assert.Nil(t, val)
+}
+
+func TestIncReadSequence(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbNonDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ _, err := batch.IncrementSequence(kv.HashedAccounts, uint64(12))
+ require.Nil(t, err)
+
+ val, err := batch.ReadSequence(kv.HashedAccounts)
+ require.Nil(t, err)
+ require.Equal(t, val, uint64(12))
+}
+
+func initializeDbDupSort(rwTx kv.RwTx) {
+ rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1"))
+ rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.1"))
+ rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.3"))
+ rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3"))
+}
+
+func TestNext(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ batch.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.2"))
+
+ cursor, err := batch.CursorDupSort(kv.AccountChangeSet)
+ require.NoError(t, err)
+
+ k, v, err := cursor.First()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key1"), k)
+ assert.Equal(t, []byte("value1.1"), v)
+
+ k, v, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key1"), k)
+ assert.Equal(t, []byte("value1.2"), v)
+
+ k, v, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key1"), k)
+ assert.Equal(t, []byte("value1.3"), v)
+
+ k, v, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key3"), k)
+ assert.Equal(t, []byte("value3.1"), v)
+
+ k, v, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key3"), k)
+ assert.Equal(t, []byte("value3.3"), v)
+
+ k, v, err = cursor.Next()
+ require.Nil(t, err)
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+}
+
+func TestNextNoDup(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.1"))
+ batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.2"))
+
+ cursor, err := batch.CursorDupSort(kv.AccountChangeSet)
+ require.NoError(t, err)
+
+ k, _, err := cursor.First()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key1"), k)
+
+ k, _, err = cursor.NextNoDup()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key2"), k)
+
+ k, _, err = cursor.NextNoDup()
+ require.Nil(t, err)
+ assert.Equal(t, []byte("key3"), k)
+}
+
+func TestDeleteCurrentDuplicates(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbDupSort(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
+ require.NoError(t, err)
+
+ require.NoError(t, cursor.Put([]byte("key3"), []byte("value3.2")))
+
+ key, _, err := cursor.SeekExact([]byte("key3"))
+ require.NoError(t, err)
+ require.Equal(t, []byte("key3"), key)
+
+ require.NoError(t, cursor.DeleteCurrentDuplicates())
+
+ require.NoError(t, batch.Flush(rwTx))
+
+ var keys []string
+ var values []string
+ err = rwTx.ForEach(kv.AccountChangeSet, nil, func(k, v []byte) error {
+ keys = append(keys, string(k))
+ values = append(values, string(v))
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Equal(t, []string{"key1", "key1"}, keys)
+ require.Equal(t, []string{"value1.1", "value1.3"}, values)
+}
+
+func TestSeekBothRange(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1"))
+ rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3"))
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
+ require.NoError(t, err)
+
+ require.NoError(t, cursor.Put([]byte("key3"), []byte("value3.1")))
+ require.NoError(t, cursor.Put([]byte("key1"), []byte("value1.3")))
+
+ v, err := cursor.SeekBothRange([]byte("key2"), []byte("value1.2"))
+ require.NoError(t, err)
+ // SeekBothRange does exact match of the key, but range match of the value, so we get nil here
+ require.Nil(t, v)
+
+ v, err = cursor.SeekBothRange([]byte("key3"), []byte("value3.2"))
+ require.NoError(t, err)
+ require.Equal(t, "value3.3", string(v))
+}
+
+func initializeDbAutoConversion(rwTx kv.RwTx) {
+ rwTx.Put(kv.PlainState, []byte("A"), []byte("0"))
+ rwTx.Put(kv.PlainState, []byte("A..........................._______________________________A"), []byte("1"))
+ rwTx.Put(kv.PlainState, []byte("A..........................._______________________________C"), []byte("2"))
+ rwTx.Put(kv.PlainState, []byte("B"), []byte("8"))
+ rwTx.Put(kv.PlainState, []byte("C"), []byte("9"))
+ rwTx.Put(kv.PlainState, []byte("D..........................._______________________________A"), []byte("3"))
+ rwTx.Put(kv.PlainState, []byte("D..........................._______________________________C"), []byte("4"))
+}
+
+func TestAutoConversion(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbAutoConversion(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ c, err := batch.RwCursor(kv.PlainState)
+ require.NoError(t, err)
+
+ // key length conflict
+ require.Error(t, c.Put([]byte("A..........................."), []byte("?")))
+
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________A")))
+ require.NoError(t, c.Put([]byte("B"), []byte("7")))
+ require.NoError(t, c.Delete([]byte("C")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________C"), []byte("6")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________E"), []byte("5")))
+
+ k, v, err := c.First()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("A"), k)
+ assert.Equal(t, []byte("0"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("A..........................._______________________________C"), k)
+ assert.Equal(t, []byte("2"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("B"), k)
+ assert.Equal(t, []byte("7"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________A"), k)
+ assert.Equal(t, []byte("3"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________C"), k)
+ assert.Equal(t, []byte("6"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________E"), k)
+ assert.Equal(t, []byte("5"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+}
+
+func TestAutoConversionDelete(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbAutoConversion(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ c, err := batch.RwCursor(kv.PlainState)
+ require.NoError(t, err)
+
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________A")))
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________C")))
+ require.NoError(t, c.Delete([]byte("B")))
+ require.NoError(t, c.Delete([]byte("C")))
+
+ k, v, err := c.First()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("A"), k)
+ assert.Equal(t, []byte("0"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________A"), k)
+ assert.Equal(t, []byte("3"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("D..........................._______________________________C"), k)
+ assert.Equal(t, []byte("4"), v)
+
+ k, v, err = c.Next()
+ require.NoError(t, err)
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+}
+
+func TestAutoConversionSeekBothRange(t *testing.T) {
+ _, rwTx := memdb.NewTestTx(t)
+
+ initializeDbAutoConversion(rwTx)
+
+ batch := NewMemoryBatch(rwTx, "")
+ defer batch.Close()
+
+ c, err := batch.RwCursorDupSort(kv.PlainState)
+ require.NoError(t, err)
+
+ require.NoError(t, c.Delete([]byte("A..........................._______________________________A")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________C"), []byte("6")))
+ require.NoError(t, c.Put([]byte("D..........................._______________________________E"), []byte("5")))
+
+ v, err := c.SeekBothRange([]byte("A..........................."), []byte("_______________________________A"))
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________C2"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("A..........................."), []byte("_______________________________X"))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("B..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("C..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("D..........................."), []byte(""))
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________A3"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________C6"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Equal(t, []byte("_______________________________E5"), v)
+
+ _, v, err = c.NextDup()
+ require.NoError(t, err)
+ assert.Nil(t, v)
+
+ v, err = c.SeekBothRange([]byte("X..........................."), []byte("_______________________________Y"))
+ require.NoError(t, err)
+ assert.Nil(t, v)
+}
diff --git a/erigon-lib/kv/memdb/memory_database.go b/erigon-lib/kv/memdb/memory_database.go
new file mode 100644
index 00000000000..57b90680d5f
--- /dev/null
+++ b/erigon-lib/kv/memdb/memory_database.go
@@ -0,0 +1,132 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package memdb
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+ "github.com/ledgerwatch/log/v3"
+)
+
+func New(tmpDir string) kv.RwDB {
+ return mdbx.NewMDBX(log.New()).InMem(tmpDir).MustOpen()
+}
+
+func NewPoolDB(tmpDir string) kv.RwDB {
+ return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.TxPoolDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).MustOpen()
+}
+func NewDownloaderDB(tmpDir string) kv.RwDB {
+ return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.DownloaderDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }).MustOpen()
+}
+func NewSentryDB(tmpDir string) kv.RwDB {
+ return mdbx.NewMDBX(log.New()).InMem(tmpDir).Label(kv.SentryDB).WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.SentryTablesCfg }).MustOpen()
+}
+
+func NewTestDB(tb testing.TB) kv.RwDB {
+ tb.Helper()
+ tmpDir := tb.TempDir()
+ tb.Helper()
+ db := New(tmpDir)
+ tb.Cleanup(db.Close)
+ return db
+}
+
+func BeginRw(tb testing.TB, db kv.RwDB) kv.RwTx {
+ tb.Helper()
+ tx, err := db.BeginRw(context.Background())
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(tx.Rollback)
+ return tx
+}
+
+func BeginRo(tb testing.TB, db kv.RoDB) kv.Tx {
+ tb.Helper()
+ tx, err := db.BeginRo(context.Background())
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(tx.Rollback)
+ return tx
+}
+
+func NewTestPoolDB(tb testing.TB) kv.RwDB {
+ tb.Helper()
+ tmpDir := tb.TempDir()
+ db := NewPoolDB(tmpDir)
+ tb.Cleanup(db.Close)
+ return db
+}
+
+func NewTestDownloaderDB(tb testing.TB) kv.RwDB {
+ tb.Helper()
+ tmpDir := tb.TempDir()
+ db := NewDownloaderDB(tmpDir)
+ tb.Cleanup(db.Close)
+ return db
+}
+
+func NewTestSentrylDB(tb testing.TB) kv.RwDB {
+ tb.Helper()
+ tmpDir := tb.TempDir()
+ db := NewPoolDB(tmpDir)
+ tb.Cleanup(db.Close)
+ return db
+}
+
+func NewTestTx(tb testing.TB) (kv.RwDB, kv.RwTx) {
+ tb.Helper()
+ tmpDir := tb.TempDir()
+ db := New(tmpDir)
+ tb.Cleanup(db.Close)
+ tx, err := db.BeginRw(context.Background())
+ if err != nil {
+ tb.Fatal(err)
+ }
+ tb.Cleanup(tx.Rollback)
+ return db, tx
+}
+
+func NewTestPoolTx(tb testing.TB) (kv.RwDB, kv.RwTx) {
+ tb.Helper()
+ db := NewTestPoolDB(tb)
+ tx, err := db.BeginRw(context.Background()) //nolint
+ if err != nil {
+ tb.Fatal(err)
+ }
+ if tb != nil {
+ tb.Cleanup(tx.Rollback)
+ }
+ return db, tx
+}
+
+func NewTestSentryTx(tb testing.TB) (kv.RwDB, kv.RwTx) {
+ tb.Helper()
+ db := NewTestSentrylDB(tb)
+ tx, err := db.BeginRw(context.Background()) //nolint
+ if err != nil {
+ tb.Fatal(err)
+ }
+ if tb != nil {
+ tb.Cleanup(tx.Rollback)
+ }
+ return db, tx
+}
diff --git a/erigon-lib/kv/order/order.go b/erigon-lib/kv/order/order.go
new file mode 100644
index 00000000000..d8e3d05081b
--- /dev/null
+++ b/erigon-lib/kv/order/order.go
@@ -0,0 +1,24 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package order
+
+type By bool
+
+const (
+ Asc By = true
+ Desc By = false
+)
diff --git a/erigon-lib/kv/rawdbv3/txnum.go b/erigon-lib/kv/rawdbv3/txnum.go
new file mode 100644
index 00000000000..f01bae47650
--- /dev/null
+++ b/erigon-lib/kv/rawdbv3/txnum.go
@@ -0,0 +1,234 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package rawdbv3
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type txNums struct{}
+
+var TxNums txNums
+
+// Min - returns maxTxNum in given block. If block not found - return last available value (`latest`/`pending` state)
+func (txNums) Max(tx kv.Tx, blockNum uint64) (maxTxNum uint64, err error) {
+ var k [8]byte
+ binary.BigEndian.PutUint64(k[:], blockNum)
+ c, err := tx.Cursor(kv.MaxTxNum)
+ if err != nil {
+ return 0, err
+ }
+ defer c.Close()
+ _, v, err := c.SeekExact(k[:])
+ if err != nil {
+ return 0, err
+ }
+ if len(v) == 0 {
+ _, v, err = c.Last()
+ if err != nil {
+ return 0, err
+ }
+ if len(v) == 0 {
+ return 0, nil
+ }
+ }
+ return binary.BigEndian.Uint64(v), nil
+}
+
+// Min = `max(blockNum-1)+1` returns minTxNum in given block. If block not found - return last available value (`latest`/`pending` state)
+func (txNums) Min(tx kv.Tx, blockNum uint64) (maxTxNum uint64, err error) {
+ if blockNum == 0 {
+ return 0, nil
+ }
+ var k [8]byte
+ binary.BigEndian.PutUint64(k[:], blockNum-1)
+ c, err := tx.Cursor(kv.MaxTxNum)
+ if err != nil {
+ return 0, err
+ }
+ defer c.Close()
+
+ _, v, err := c.SeekExact(k[:])
+ if err != nil {
+ return 0, err
+ }
+ if len(v) == 0 {
+ _, v, err = c.Last()
+ if err != nil {
+ return 0, err
+ }
+ if len(v) == 0 {
+ return 0, nil
+ }
+ }
+ return binary.BigEndian.Uint64(v) + 1, nil
+}
+
+func (txNums) Append(tx kv.RwTx, blockNum, maxTxNum uint64) (err error) {
+ lastK, err := LastKey(tx, kv.MaxTxNum)
+ if err != nil {
+ return err
+ }
+ if len(lastK) != 0 {
+ lastBlockNum := binary.BigEndian.Uint64(lastK)
+ if lastBlockNum > 1 && lastBlockNum+1 != blockNum { //allow genesis
+ return fmt.Errorf("append with gap blockNum=%d, but current heigh=%d", blockNum, lastBlockNum)
+ }
+ }
+
+ var k, v [8]byte
+ binary.BigEndian.PutUint64(k[:], blockNum)
+ binary.BigEndian.PutUint64(v[:], maxTxNum)
+ if err := tx.Append(kv.MaxTxNum, k[:], v[:]); err != nil {
+ return err
+ }
+ return nil
+}
+func (txNums) WriteForGenesis(tx kv.RwTx, maxTxNum uint64) (err error) {
+ var k, v [8]byte
+ binary.BigEndian.PutUint64(k[:], 0)
+ binary.BigEndian.PutUint64(v[:], maxTxNum)
+ return tx.Put(kv.MaxTxNum, k[:], v[:])
+}
+func (txNums) Truncate(tx kv.RwTx, blockNum uint64) (err error) {
+ var seek [8]byte
+ binary.BigEndian.PutUint64(seek[:], blockNum)
+ c, err := tx.RwCursor(kv.MaxTxNum)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ for k, _, err := c.Seek(seek[:]); k != nil; k, _, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err = c.DeleteCurrent(); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+func (txNums) FindBlockNum(tx kv.Tx, endTxNumMinimax uint64) (ok bool, blockNum uint64, err error) {
+ var seek [8]byte
+ c, err := tx.Cursor(kv.MaxTxNum)
+ if err != nil {
+ return false, 0, err
+ }
+ defer c.Close()
+
+ cnt, err := c.Count()
+ if err != nil {
+ return false, 0, err
+ }
+
+ blockNum = uint64(sort.Search(int(cnt), func(i int) bool {
+ binary.BigEndian.PutUint64(seek[:], uint64(i))
+ var v []byte
+ _, v, err = c.SeekExact(seek[:])
+ return binary.BigEndian.Uint64(v) >= endTxNumMinimax
+ }))
+ if err != nil {
+ return false, 0, err
+ }
+ if blockNum == cnt {
+ return false, 0, nil
+ }
+ return true, blockNum, nil
+}
+func (txNums) Last(tx kv.Tx) (blockNum, txNum uint64, err error) {
+ c, err := tx.Cursor(kv.MaxTxNum)
+ if err != nil {
+ return 0, 0, err
+ }
+ defer c.Close()
+
+ lastK, lastV, err := c.Last()
+ if err != nil {
+ return 0, 0, err
+ }
+ if lastK == nil || lastV == nil {
+ return 0, 0, nil
+ }
+ return binary.BigEndian.Uint64(lastK), binary.BigEndian.Uint64(lastV), nil
+}
+func (txNums) First(tx kv.Tx) (blockNum, txNum uint64, err error) {
+ c, err := tx.Cursor(kv.MaxTxNum)
+ if err != nil {
+ return 0, 0, err
+ }
+ defer c.Close()
+
+ lastK, lastV, err := c.First()
+ if err != nil {
+ return 0, 0, err
+ }
+ if lastK == nil || lastV == nil {
+ return 0, 0, nil
+ }
+ return binary.BigEndian.Uint64(lastK), binary.BigEndian.Uint64(lastV), nil
+}
+
+// LastKey
+func LastKey(tx kv.Tx, table string) ([]byte, error) {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ k, _, err := c.Last()
+ if err != nil {
+ return nil, err
+ }
+ return k, nil
+}
+
+// Last - candidate on move to kv.Tx interface
+func Last(tx kv.Tx, table string) ([]byte, []byte, error) {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer c.Close()
+ k, v, err := c.Last()
+ if err != nil {
+ return nil, nil, err
+ }
+ return k, v, nil
+}
+
+// SecondKey - useful if table always has zero-key (for example genesis block)
+func SecondKey(tx kv.Tx, table string) ([]byte, error) {
+ c, err := tx.Cursor(table)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ _, _, err = c.First()
+ if err != nil {
+ return nil, err
+ }
+ k, _, err := c.Next()
+ if err != nil {
+ return nil, err
+ }
+ return k, nil
+}
diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go
new file mode 100644
index 00000000000..7df14ae88de
--- /dev/null
+++ b/erigon-lib/kv/remotedb/kv_remote.go
@@ -0,0 +1,737 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remotedb
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/ledgerwatch/erigon-lib/kv/iter"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+ "github.com/ledgerwatch/log/v3"
+ "golang.org/x/sync/semaphore"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/emptypb"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+// generate the messages and services
+type remoteOpts struct {
+ remoteKV remote.KVClient
+ log log.Logger
+ bucketsCfg kv.TableCfg
+ DialAddress string
+ version gointerfaces.Version
+}
+
+var _ kv.TemporalTx = (*tx)(nil)
+
+type DB struct {
+ remoteKV remote.KVClient
+ log log.Logger
+ buckets kv.TableCfg
+ roTxsLimiter *semaphore.Weighted
+ opts remoteOpts
+}
+
+type tx struct {
+ stream remote.KV_TxClient
+ ctx context.Context
+ streamCancelFn context.CancelFunc
+ db *DB
+ statelessCursors map[string]kv.Cursor
+ cursors []*remoteCursor
+ streams []kv.Closer
+ viewID, id uint64
+ streamingRequested bool
+}
+
+type remoteCursor struct {
+ ctx context.Context
+ stream remote.KV_TxClient
+ tx *tx
+ bucketName string
+ bucketCfg kv.TableCfgItem
+ id uint32
+}
+
+type remoteCursorDupSort struct {
+ *remoteCursor
+}
+
+func (opts remoteOpts) ReadOnly() remoteOpts {
+ return opts
+}
+
+func (opts remoteOpts) WithBucketsConfig(c kv.TableCfg) remoteOpts {
+ opts.bucketsCfg = c
+ return opts
+}
+
+func (opts remoteOpts) Open() (*DB, error) {
+ targetSemCount := int64(runtime.GOMAXPROCS(-1)) - 1
+ if targetSemCount <= 1 {
+ targetSemCount = 2
+ }
+
+ db := &DB{
+ opts: opts,
+ remoteKV: opts.remoteKV,
+ log: log.New("remote_db", opts.DialAddress),
+ buckets: kv.TableCfg{},
+ roTxsLimiter: semaphore.NewWeighted(targetSemCount), // 1 less than max to allow unlocking
+ }
+ customBuckets := opts.bucketsCfg
+ for name, cfg := range customBuckets { // copy map to avoid changing global variable
+ db.buckets[name] = cfg
+ }
+
+ return db, nil
+}
+
+func (opts remoteOpts) MustOpen() kv.RwDB {
+ db, err := opts.Open()
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// NewRemote defines new remove KV connection (without actually opening it)
+// version parameters represent the version the KV client is expecting,
+// compatibility check will be performed when the KV connection opens
+func NewRemote(v gointerfaces.Version, logger log.Logger, remoteKV remote.KVClient) remoteOpts {
+ return remoteOpts{bucketsCfg: kv.ChaindataTablesCfg, version: v, log: logger, remoteKV: remoteKV}
+}
+
+func (db *DB) PageSize() uint64 { panic("not implemented") }
+func (db *DB) ReadOnly() bool { return true }
+func (db *DB) AllTables() kv.TableCfg { return db.buckets }
+
+func (db *DB) EnsureVersionCompatibility() bool {
+ versionReply, err := db.remoteKV.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true))
+ if err != nil {
+ db.log.Error("getting Version", "error", err)
+ return false
+ }
+ if !gointerfaces.EnsureVersion(db.opts.version, versionReply) {
+ db.log.Error("incompatible interface versions", "client", db.opts.version.String(),
+ "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch))
+ return false
+ }
+ db.log.Info("interfaces compatible", "client", db.opts.version.String(),
+ "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch))
+ return true
+}
+
+func (db *DB) Close() {}
+
+func (db *DB) CHandle() unsafe.Pointer {
+ panic("CHandle not implemented")
+}
+
+func (db *DB) BeginRo(ctx context.Context) (txn kv.Tx, err error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ if semErr := db.roTxsLimiter.Acquire(ctx, 1); semErr != nil {
+ return nil, semErr
+ }
+
+ defer func() {
+ // ensure we release the semaphore on error
+ if txn == nil {
+ db.roTxsLimiter.Release(1)
+ }
+ }()
+
+ streamCtx, streamCancelFn := context.WithCancel(ctx) // We create child context for the stream so we can cancel it to prevent leak
+ stream, err := db.remoteKV.Tx(streamCtx)
+ if err != nil {
+ streamCancelFn()
+ return nil, err
+ }
+ msg, err := stream.Recv()
+ if err != nil {
+ streamCancelFn()
+ return nil, err
+ }
+ return &tx{ctx: ctx, db: db, stream: stream, streamCancelFn: streamCancelFn, viewID: msg.ViewId, id: msg.TxId}, nil
+}
+func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) {
+ t, err := db.BeginRo(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return t.(kv.TemporalTx), nil
+}
+func (db *DB) BeginRw(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method")
+}
+func (db *DB) BeginRwNosync(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method")
+}
+func (db *DB) BeginTemporalRw(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRw method")
+}
+func (db *DB) BeginTemporalRwNosync(ctx context.Context) (kv.RwTx, error) {
+ return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRwNosync method")
+}
+
+func (db *DB) View(ctx context.Context, f func(tx kv.Tx) error) (err error) {
+ tx, err := db.BeginRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ return f(tx)
+}
+func (db *DB) ViewTemporal(ctx context.Context, f func(tx kv.TemporalTx) error) (err error) {
+ tx, err := db.BeginTemporalRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ return f(tx)
+}
+
+func (db *DB) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) {
+ return fmt.Errorf("remote db provider doesn't support .Update method")
+}
+func (db *DB) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) {
+ return fmt.Errorf("remote db provider doesn't support .UpdateNosync method")
+}
+
+func (tx *tx) ViewID() uint64 { return tx.viewID }
+func (tx *tx) CollectMetrics() {}
+func (tx *tx) IncrementSequence(bucket string, amount uint64) (uint64, error) {
+ panic("not implemented yet")
+}
+func (tx *tx) ReadSequence(bucket string) (uint64, error) {
+ panic("not implemented yet")
+}
+func (tx *tx) Append(bucket string, k, v []byte) error { panic("no write methods") }
+func (tx *tx) AppendDup(bucket string, k, v []byte) error { panic("no write methods") }
+
+func (tx *tx) Commit() error {
+ panic("remote db is read-only")
+}
+
+func (tx *tx) Rollback() {
+ // don't close opened cursors - just close stream, server will cleanup everything well
+ tx.closeGrpcStream()
+ tx.db.roTxsLimiter.Release(1)
+ for _, c := range tx.streams {
+ c.Close()
+ }
+}
+func (tx *tx) DBSize() (uint64, error) { panic("not implemented") }
+
+func (tx *tx) statelessCursor(bucket string) (kv.Cursor, error) {
+ if tx.statelessCursors == nil {
+ tx.statelessCursors = make(map[string]kv.Cursor)
+ }
+ c, ok := tx.statelessCursors[bucket]
+ if !ok {
+ var err error
+ c, err = tx.Cursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ tx.statelessCursors[bucket] = c
+ }
+ return c, nil
+}
+
+func (tx *tx) BucketSize(name string) (uint64, error) { panic("not implemented") }
+
+func (tx *tx) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error {
+ it, err := tx.Range(bucket, fromPrefix, nil)
+ if err != nil {
+ return err
+ }
+ for it.HasNext() {
+ k, v, err := it.Next()
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (tx *tx) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
+ it, err := tx.Prefix(bucket, prefix)
+ if err != nil {
+ return err
+ }
+ for it.HasNext() {
+ k, v, err := it.Next()
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TODO: this must be deprecated
+func (tx *tx) ForAmount(bucket string, fromPrefix []byte, amount uint32, walker func(k, v []byte) error) error {
+ if amount == 0 {
+ return nil
+ }
+ c, err := tx.Cursor(bucket)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ for k, v, err := c.Seek(fromPrefix); k != nil && amount > 0; k, v, err = c.Next() {
+ if err != nil {
+ return err
+ }
+ if err := walker(k, v); err != nil {
+ return err
+ }
+ amount--
+ }
+ return nil
+}
+
+func (tx *tx) GetOne(bucket string, k []byte) (val []byte, err error) {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return nil, err
+ }
+ _, val, err = c.SeekExact(k)
+ return val, err
+}
+
+func (tx *tx) Has(bucket string, k []byte) (bool, error) {
+ c, err := tx.statelessCursor(bucket)
+ if err != nil {
+ return false, err
+ }
+ kk, _, err := c.Seek(k)
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(k, kk), nil
+}
+
+func (c *remoteCursor) SeekExact(k []byte) (key, val []byte, err error) {
+ return c.seekExact(k)
+}
+
+func (c *remoteCursor) Prev() ([]byte, []byte, error) {
+ return c.prev()
+}
+
+func (tx *tx) Cursor(bucket string) (kv.Cursor, error) {
+ b := tx.db.buckets[bucket]
+ c := &remoteCursor{tx: tx, ctx: tx.ctx, bucketName: bucket, bucketCfg: b, stream: tx.stream}
+ tx.cursors = append(tx.cursors, c)
+ if err := c.stream.Send(&remote.Cursor{Op: remote.Op_OPEN, BucketName: c.bucketName}); err != nil {
+ return nil, err
+ }
+ msg, err := c.stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ c.id = msg.CursorId
+ return c, nil
+}
+
+func (tx *tx) ListBuckets() ([]string, error) {
+ return nil, fmt.Errorf("function ListBuckets is not implemented for remoteTx")
+}
+
+// func (c *remoteCursor) Put(k []byte, v []byte) error { panic("not supported") }
+// func (c *remoteCursor) PutNoOverwrite(k []byte, v []byte) error { panic("not supported") }
+// func (c *remoteCursor) Append(k []byte, v []byte) error { panic("not supported") }
+// func (c *remoteCursor) Delete(k []byte) error { panic("not supported") }
+// func (c *remoteCursor) DeleteCurrent() error { panic("not supported") }
+func (c *remoteCursor) Count() (uint64, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_COUNT}); err != nil {
+ return 0, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(pair.V), nil
+
+}
+
+func (c *remoteCursor) first() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_FIRST}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+
+func (c *remoteCursor) next() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_NEXT}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) nextDup() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_NEXT_DUP}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) nextNoDup() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_NEXT_NO_DUP}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) prev() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_PREV}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) prevDup() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_PREV_DUP}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) prevNoDup() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_PREV_NO_DUP}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) last() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_LAST}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) setRange(k []byte) ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_SEEK, K: k}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) seekExact(k []byte) ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_SEEK_EXACT, K: k}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) getBothRange(k, v []byte) ([]byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_SEEK_BOTH, K: k, V: v}); err != nil {
+ return nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return pair.V, nil
+}
+func (c *remoteCursor) seekBothExact(k, v []byte) ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_SEEK_BOTH_EXACT, K: k, V: v}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+func (c *remoteCursor) firstDup() ([]byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_FIRST_DUP}); err != nil {
+ return nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return pair.V, nil
+}
+func (c *remoteCursor) lastDup() ([]byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_LAST_DUP}); err != nil {
+ return nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return pair.V, nil
+}
+func (c *remoteCursor) getCurrent() ([]byte, []byte, error) {
+ if err := c.stream.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_CURRENT}); err != nil {
+ return []byte{}, nil, err
+ }
+ pair, err := c.stream.Recv()
+ if err != nil {
+ return []byte{}, nil, err
+ }
+ return pair.K, pair.V, nil
+}
+
+func (c *remoteCursor) Current() ([]byte, []byte, error) {
+ return c.getCurrent()
+}
+
+// Seek - doesn't start streaming (because much of code does only several .Seek calls without reading sequence of data)
+// .Next() - does request streaming (if configured by user)
+func (c *remoteCursor) Seek(seek []byte) ([]byte, []byte, error) {
+ return c.setRange(seek)
+}
+
+func (c *remoteCursor) First() ([]byte, []byte, error) {
+ return c.first()
+}
+
+// Next - returns next data element from server, request streaming (if configured by user)
+func (c *remoteCursor) Next() ([]byte, []byte, error) {
+ return c.next()
+}
+
+func (c *remoteCursor) Last() ([]byte, []byte, error) {
+ return c.last()
+}
+
+func (tx *tx) closeGrpcStream() {
+ if tx.stream == nil {
+ return
+ }
+ defer tx.streamCancelFn() // hard cancel stream if graceful wasn't successful
+
+ if tx.streamingRequested {
+ // if streaming is in progress, can't use `CloseSend` - because
+ // server will not read it right not - it busy with streaming data
+ // TODO: set flag 'tx.streamingRequested' to false when got terminator from server (nil key or os.EOF)
+ tx.streamCancelFn()
+ } else {
+ // try graceful close stream
+ err := tx.stream.CloseSend()
+ if err != nil {
+ doLog := !grpcutil.IsEndOfStream(err)
+ if doLog {
+ log.Warn("couldn't send msg CloseSend to server", "err", err)
+ }
+ } else {
+ _, err = tx.stream.Recv()
+ if err != nil {
+ doLog := !grpcutil.IsEndOfStream(err)
+ if doLog {
+ log.Warn("received unexpected error from server after CloseSend", "err", err)
+ }
+ }
+ }
+ }
+ tx.stream = nil
+ tx.streamingRequested = false
+}
+
+func (c *remoteCursor) Close() {
+ if c.stream == nil {
+ return
+ }
+ st := c.stream
+ c.stream = nil
+ if err := st.Send(&remote.Cursor{Cursor: c.id, Op: remote.Op_CLOSE}); err == nil {
+ _, _ = st.Recv()
+ }
+}
+
+func (tx *tx) CursorDupSort(bucket string) (kv.CursorDupSort, error) {
+ b := tx.db.buckets[bucket]
+ c := &remoteCursor{tx: tx, ctx: tx.ctx, bucketName: bucket, bucketCfg: b, stream: tx.stream}
+ tx.cursors = append(tx.cursors, c)
+ if err := c.stream.Send(&remote.Cursor{Op: remote.Op_OPEN_DUP_SORT, BucketName: c.bucketName}); err != nil {
+ return nil, err
+ }
+ msg, err := c.stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ c.id = msg.CursorId
+ return &remoteCursorDupSort{remoteCursor: c}, nil
+}
+
+func (c *remoteCursorDupSort) SeekBothExact(k, v []byte) ([]byte, []byte, error) {
+ return c.seekBothExact(k, v)
+}
+
+func (c *remoteCursorDupSort) SeekBothRange(k, v []byte) ([]byte, error) {
+ return c.getBothRange(k, v)
+}
+
+func (c *remoteCursorDupSort) DeleteExact(k1, k2 []byte) error { panic("not supported") }
+func (c *remoteCursorDupSort) AppendDup(k []byte, v []byte) error { panic("not supported") }
+func (c *remoteCursorDupSort) PutNoDupData(k, v []byte) error { panic("not supported") }
+func (c *remoteCursorDupSort) DeleteCurrentDuplicates() error { panic("not supported") }
+func (c *remoteCursorDupSort) CountDuplicates() (uint64, error) { panic("not supported") }
+
+func (c *remoteCursorDupSort) FirstDup() ([]byte, error) { return c.firstDup() }
+func (c *remoteCursorDupSort) NextDup() ([]byte, []byte, error) { return c.nextDup() }
+func (c *remoteCursorDupSort) NextNoDup() ([]byte, []byte, error) { return c.nextNoDup() }
+func (c *remoteCursorDupSort) PrevDup() ([]byte, []byte, error) { return c.prevDup() }
+func (c *remoteCursorDupSort) PrevNoDup() ([]byte, []byte, error) { return c.prevNoDup() }
+func (c *remoteCursorDupSort) LastDup() ([]byte, error) { return c.lastDup() }
+
+// Temporal Methods
+func (tx *tx) DomainGetAsOf(name kv.Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) {
+ reply, err := tx.db.remoteKV.DomainGet(tx.ctx, &remote.DomainGetReq{TxId: tx.id, Table: string(name), K: k, K2: k2, Ts: ts})
+ if err != nil {
+ return nil, false, err
+ }
+ return reply.V, reply.Ok, nil
+}
+
+func (tx *tx) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, ok bool, err error) {
+ reply, err := tx.db.remoteKV.DomainGet(tx.ctx, &remote.DomainGetReq{TxId: tx.id, Table: string(name), K: k, K2: k2, Latest: true})
+ if err != nil {
+ return nil, false, err
+ }
+ return reply.V, reply.Ok, nil
+}
+
+func (tx *tx) DomainRange(name kv.Domain, fromKey, toKey []byte, ts uint64, asc order.By, limit int) (it iter.KV, err error) {
+ return iter.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) {
+ reply, err := tx.db.remoteKV.DomainRange(tx.ctx, &remote.DomainRangeReq{TxId: tx.id, Table: string(name), FromKey: fromKey, ToKey: toKey, Ts: ts, OrderAscend: bool(asc), Limit: int64(limit)})
+ if err != nil {
+ return nil, nil, "", err
+ }
+ return reply.Keys, reply.Values, reply.NextPageToken, nil
+ }), nil
+}
+func (tx *tx) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) {
+ reply, err := tx.db.remoteKV.HistoryGet(tx.ctx, &remote.HistoryGetReq{TxId: tx.id, Table: string(name), K: k, Ts: ts})
+ if err != nil {
+ return nil, false, err
+ }
+ return reply.V, reply.Ok, nil
+}
+func (tx *tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error) {
+ return iter.PaginateKV(func(pageToken string) (keys, vals [][]byte, nextPageToken string, err error) {
+ reply, err := tx.db.remoteKV.HistoryRange(tx.ctx, &remote.HistoryRangeReq{TxId: tx.id, Table: string(name), FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)})
+ if err != nil {
+ return nil, nil, "", err
+ }
+ return reply.Keys, reply.Values, reply.NextPageToken, nil
+ }), nil
+}
+
+func (tx *tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) {
+ return iter.PaginateU64(func(pageToken string) (arr []uint64, nextPageToken string, err error) {
+ req := &remote.IndexRangeReq{TxId: tx.id, Table: string(name), K: k, FromTs: int64(fromTs), ToTs: int64(toTs), OrderAscend: bool(asc), Limit: int64(limit)}
+ reply, err := tx.db.remoteKV.IndexRange(tx.ctx, req)
+ if err != nil {
+ return nil, "", err
+ }
+ return reply.Timestamps, reply.NextPageToken, nil
+ }), nil
+}
+
+func (tx *tx) Prefix(table string, prefix []byte) (iter.KV, error) {
+ nextPrefix, ok := kv.NextSubtree(prefix)
+ if !ok {
+ return tx.Range(table, prefix, nil)
+ }
+ return tx.Range(table, prefix, nextPrefix)
+}
+
+func (tx *tx) rangeOrderLimit(table string, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) {
+ return iter.PaginateKV(func(pageToken string) (keys [][]byte, values [][]byte, nextPageToken string, err error) {
+ req := &remote.RangeReq{TxId: tx.id, Table: table, FromPrefix: fromPrefix, ToPrefix: toPrefix, OrderAscend: bool(asc), Limit: int64(limit)}
+ reply, err := tx.db.remoteKV.Range(tx.ctx, req)
+ if err != nil {
+ return nil, nil, "", err
+ }
+ return reply.Keys, reply.Values, reply.NextPageToken, nil
+ }), nil
+}
+func (tx *tx) Range(table string, fromPrefix, toPrefix []byte) (iter.KV, error) {
+ return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, -1)
+}
+func (tx *tx) RangeAscend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Asc, limit)
+}
+func (tx *tx) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
+ return tx.rangeOrderLimit(table, fromPrefix, toPrefix, order.Desc, limit)
+}
+func (tx *tx) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) {
+ panic("not implemented yet")
+}
+
+func (tx *tx) CHandle() unsafe.Pointer {
+ panic("CHandle not implemented")
+}
diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go
new file mode 100644
index 00000000000..07191e55fe6
--- /dev/null
+++ b/erigon-lib/kv/remotedbserver/remotedbserver.go
@@ -0,0 +1,675 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remotedbserver
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ledgerwatch/log/v3"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/emptypb"
+
+ "github.com/ledgerwatch/erigon-lib/common"
+ "github.com/ledgerwatch/erigon-lib/common/dbg"
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/iter"
+ "github.com/ledgerwatch/erigon-lib/kv/order"
+)
+
+// MaxTxTTL - kv interface provide high-consistancy guaranties: Serializable Isolations Level https://en.wikipedia.org/wiki/Isolation_(database_systems)
+// But it comes with cost: DB will start grow if run too long read transactions (hours)
+// We decided limit TTL of transaction to `MaxTxTTL`
+//
+// It means you sill have `Serializable` if tx living < `MaxTxTTL`
+// You start have Read Committed Level if tx living > `MaxTxTTL`
+//
+// It's done by `renew` method: after `renew` call reader will see all changes committed after last `renew` call.
+//
+// Erigon has much Historical data - which is immutable: reading of historical data for hours still gives you consistant data.
+const MaxTxTTL = 60 * time.Second
+
+// KvServiceAPIVersion - use it to track changes in API
+// 1.1.0 - added pending transactions, add methods eth_getRawTransactionByHash, eth_retRawTransactionByBlockHashAndIndex, eth_retRawTransactionByBlockNumberAndIndex| Yes | |
+// 1.2.0 - Added separated services for mining and txpool methods
+// 2.0.0 - Rename all buckets
+// 3.0.0 - ??
+// 4.0.0 - Server send tx.ViewID() after open tx
+// 5.0 - BlockTransaction table now has canonical ids (txs of non-canonical blocks moving to NonCanonicalTransaction table)
+// 5.1.0 - Added blockGasLimit to the StateChangeBatch
+// 6.0.0 - Blocks now have system-txs - in the begin/end of block
+// 6.1.0 - Add methods Range, IndexRange, HistoryGet, HistoryRange
+// 6.2.0 - Add HistoryFiles to reply of Snapshots() method
+var KvServiceAPIVersion = &types.VersionReply{Major: 6, Minor: 2, Patch: 0}
+
+type KvServer struct {
+ remote.UnimplementedKVServer // must be embedded to have forward compatible implementations.
+
+ kv kv.RoDB
+ stateChangeStreams *StateChangePubSub
+ blockSnapshots Snapsthots
+ historySnapshots Snapsthots
+ ctx context.Context
+
+ //v3 fields
+ txIdGen atomic.Uint64
+ txsMapLock *sync.RWMutex
+ txs map[uint64]*threadSafeTx
+
+ trace bool
+ rangeStep int // make sure `s.with` has limited time
+ logger log.Logger
+}
+
+type threadSafeTx struct {
+ kv.Tx
+ sync.Mutex
+}
+
+type Snapsthots interface {
+ Files() []string
+}
+
+func NewKvServer(ctx context.Context, db kv.RoDB, snapshots Snapsthots, historySnapshots Snapsthots, logger log.Logger) *KvServer {
+ return &KvServer{
+ trace: false,
+ rangeStep: 1024,
+ kv: db, stateChangeStreams: newStateChangeStreams(), ctx: ctx,
+ blockSnapshots: snapshots, historySnapshots: historySnapshots,
+ txs: map[uint64]*threadSafeTx{}, txsMapLock: &sync.RWMutex{},
+ logger: logger,
+ }
+}
+
+// Version returns the service-side interface version number
+func (s *KvServer) Version(context.Context, *emptypb.Empty) (*types.VersionReply, error) {
+ dbSchemaVersion := &kv.DBSchemaVersion
+ if KvServiceAPIVersion.Major > dbSchemaVersion.Major {
+ return KvServiceAPIVersion, nil
+ }
+ if dbSchemaVersion.Major > KvServiceAPIVersion.Major {
+ return dbSchemaVersion, nil
+ }
+ if KvServiceAPIVersion.Minor > dbSchemaVersion.Minor {
+ return KvServiceAPIVersion, nil
+ }
+ if dbSchemaVersion.Minor > KvServiceAPIVersion.Minor {
+ return dbSchemaVersion, nil
+ }
+ return dbSchemaVersion, nil
+}
+
+func (s *KvServer) begin(ctx context.Context) (id uint64, err error) {
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] begin %d %s\n", id, dbg.Stack()))
+ }
+ s.txsMapLock.Lock()
+ defer s.txsMapLock.Unlock()
+ tx, errBegin := s.kv.BeginRo(ctx)
+ if errBegin != nil {
+ return 0, errBegin
+ }
+ id = s.txIdGen.Add(1)
+ s.txs[id] = &threadSafeTx{Tx: tx}
+ return id, nil
+}
+
+// renew - rollback and begin tx without changing it's `id`
+func (s *KvServer) renew(ctx context.Context, id uint64) (err error) {
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] renew %d %s\n", id, dbg.Stack()[:2]))
+ }
+ s.txsMapLock.Lock()
+ defer s.txsMapLock.Unlock()
+ tx, ok := s.txs[id]
+ if ok {
+ tx.Lock()
+ defer tx.Unlock()
+ tx.Rollback()
+ }
+ newTx, errBegin := s.kv.BeginRo(ctx)
+ if errBegin != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ s.txs[id] = &threadSafeTx{Tx: newTx}
+ return nil
+}
+
+func (s *KvServer) rollback(id uint64) {
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] rollback %d %s\n", id, dbg.Stack()[:2]))
+ }
+ s.txsMapLock.Lock()
+ defer s.txsMapLock.Unlock()
+ tx, ok := s.txs[id]
+ if ok {
+ tx.Lock()
+ defer tx.Unlock()
+ tx.Rollback() //nolint
+ delete(s.txs, id)
+ }
+}
+
+// with - provides exclusive access to `tx` object. Use it if you need open Cursor or run another method of `tx` object.
+// it's ok to use same `kv.RoTx` from different goroutines, but such use must be guarded by `with` method.
+//
+// !Important: client may open multiple Cursors and multiple Streams on same `tx` in same time
+// it means server must do limited amount of work inside `with` method (periodically release `tx` for other streams)
+// long-living server-side streams must read limited-portion of data inside `with`, send this portion to
+// client, portion of data it to client, then read next portion in another `with` call.
+// It will allow cooperative access to `tx` object
+func (s *KvServer) with(id uint64, f func(kv.Tx) error) error {
+ s.txsMapLock.RLock()
+ tx, ok := s.txs[id]
+ s.txsMapLock.RUnlock()
+ if !ok {
+ return fmt.Errorf("txn %d already rollback", id)
+ }
+
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] with %d try lock %s\n", id, dbg.Stack()[:2]))
+ }
+ tx.Lock()
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] with %d can lock %s\n", id, dbg.Stack()[:2]))
+ }
+ defer func() {
+ tx.Unlock()
+ if s.trace {
+ s.logger.Info(fmt.Sprintf("[kv_server] with %d unlock %s\n", id, dbg.Stack()[:2]))
+ }
+ }()
+ return f(tx.Tx)
+}
+
+func (s *KvServer) Tx(stream remote.KV_TxServer) error {
+ id, errBegin := s.begin(stream.Context())
+ if errBegin != nil {
+ return fmt.Errorf("server-side error: %w", errBegin)
+ }
+ defer s.rollback(id)
+
+ var viewID uint64
+ if err := s.with(id, func(tx kv.Tx) error {
+ viewID = tx.ViewID()
+ return nil
+ }); err != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ if err := stream.Send(&remote.Pair{ViewId: viewID, TxId: id}); err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+
+ var CursorID uint32
+ type CursorInfo struct {
+ bucket string
+ c kv.Cursor
+ k, v []byte //fields to save current position of cursor - used when Tx reopen
+ }
+ cursors := map[uint32]*CursorInfo{}
+
+ txTicker := time.NewTicker(MaxTxTTL)
+ defer txTicker.Stop()
+
+ // send all items to client, if k==nil - still send it to client and break loop
+ for {
+ in, recvErr := stream.Recv()
+ if recvErr != nil {
+ if errors.Is(recvErr, io.EOF) { // termination
+ return nil
+ }
+ return fmt.Errorf("server-side error: %w", recvErr)
+ }
+
+ //TODO: protect against client - which doesn't send any requests
+ select {
+ default:
+ case <-txTicker.C:
+ for _, c := range cursors { // save positions of cursor, will restore after Tx reopening
+ k, v, err := c.c.Current()
+ if err != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ c.k = bytesCopy(k)
+ c.v = bytesCopy(v)
+ }
+
+ if err := s.renew(stream.Context(), id); err != nil {
+ return err
+ }
+ if err := s.with(id, func(tx kv.Tx) error {
+ for _, c := range cursors { // restore all cursors position
+ var err error
+ c.c, err = tx.Cursor(c.bucket)
+ if err != nil {
+ return err
+ }
+ switch casted := c.c.(type) {
+ case kv.CursorDupSort:
+ v, err := casted.SeekBothRange(c.k, c.v)
+ if err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ if v == nil { // it may happen that key where we stopped disappeared after transaction reopen, then just move to next key
+ _, _, err = casted.Next()
+ if err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ }
+ case kv.Cursor:
+ if _, _, err := c.c.Seek(c.k); err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ }
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+
+ var c kv.Cursor
+ if in.BucketName == "" {
+ cInfo, ok := cursors[in.Cursor]
+ if !ok {
+ return fmt.Errorf("server-side error: unknown Cursor=%d, Op=%s", in.Cursor, in.Op)
+ }
+ c = cInfo.c
+ }
+ switch in.Op {
+ case remote.Op_OPEN:
+ CursorID++
+ var err error
+ if err := s.with(id, func(tx kv.Tx) error {
+ c, err = tx.Cursor(in.BucketName)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ cursors[CursorID] = &CursorInfo{
+ bucket: in.BucketName,
+ c: c,
+ }
+ if err := stream.Send(&remote.Pair{CursorId: CursorID}); err != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ continue
+ case remote.Op_OPEN_DUP_SORT:
+ CursorID++
+ var err error
+ if err := s.with(id, func(tx kv.Tx) error {
+ c, err = tx.CursorDupSort(in.BucketName)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("kvserver: %w", err)
+ }
+ cursors[CursorID] = &CursorInfo{
+ bucket: in.BucketName,
+ c: c,
+ }
+ if err := stream.Send(&remote.Pair{CursorId: CursorID}); err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ continue
+ case remote.Op_CLOSE:
+ cInfo, ok := cursors[in.Cursor]
+ if !ok {
+ return fmt.Errorf("server-side error: unknown Cursor=%d, Op=%s", in.Cursor, in.Op)
+ }
+ cInfo.c.Close()
+ delete(cursors, in.Cursor)
+ if err := stream.Send(&remote.Pair{}); err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ continue
+ default:
+ }
+
+ if err := handleOp(c, stream, in); err != nil {
+ return fmt.Errorf("server-side error: %w", err)
+ }
+ }
+}
+
+func handleOp(c kv.Cursor, stream remote.KV_TxServer, in *remote.Cursor) error {
+ var k, v []byte
+ var err error
+ switch in.Op {
+ case remote.Op_FIRST:
+ k, v, err = c.First()
+ case remote.Op_FIRST_DUP:
+ v, err = c.(kv.CursorDupSort).FirstDup()
+ case remote.Op_SEEK:
+ k, v, err = c.Seek(in.K)
+ case remote.Op_SEEK_BOTH:
+ v, err = c.(kv.CursorDupSort).SeekBothRange(in.K, in.V)
+ case remote.Op_CURRENT:
+ k, v, err = c.Current()
+ case remote.Op_LAST:
+ k, v, err = c.Last()
+ case remote.Op_LAST_DUP:
+ v, err = c.(kv.CursorDupSort).LastDup()
+ case remote.Op_NEXT:
+ k, v, err = c.Next()
+ case remote.Op_NEXT_DUP:
+ k, v, err = c.(kv.CursorDupSort).NextDup()
+ case remote.Op_NEXT_NO_DUP:
+ k, v, err = c.(kv.CursorDupSort).NextNoDup()
+ case remote.Op_PREV:
+ k, v, err = c.Prev()
+ //case remote.Op_PREV_DUP:
+ // k, v, err = c.(ethdb.CursorDupSort).Prev()
+ // if err != nil {
+ // return err
+ // }
+ //case remote.Op_PREV_NO_DUP:
+ // k, v, err = c.Prev()
+ // if err != nil {
+ // return err
+ // }
+ case remote.Op_SEEK_EXACT:
+ k, v, err = c.SeekExact(in.K)
+ case remote.Op_SEEK_BOTH_EXACT:
+ k, v, err = c.(kv.CursorDupSort).SeekBothExact(in.K, in.V)
+ case remote.Op_COUNT:
+ cnt, err := c.Count()
+ if err != nil {
+ return err
+ }
+ v = hexutility.EncodeTs(cnt)
+ default:
+ return fmt.Errorf("unknown operation: %s", in.Op)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := stream.Send(&remote.Pair{K: k, V: v}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func bytesCopy(b []byte) []byte {
+ if b == nil {
+ return nil
+ }
+ copiedBytes := make([]byte, len(b))
+ copy(copiedBytes, b)
+ return copiedBytes
+}
+
+func (s *KvServer) StateChanges(req *remote.StateChangeRequest, server remote.KV_StateChangesServer) error {
+ ch, remove := s.stateChangeStreams.Sub()
+ defer remove()
+ for {
+ select {
+ case reply := <-ch:
+ if err := server.Send(reply); err != nil {
+ return err
+ }
+ case <-s.ctx.Done():
+ return nil
+ case <-server.Context().Done():
+ return nil
+ }
+ }
+}
+
+func (s *KvServer) SendStateChanges(ctx context.Context, sc *remote.StateChangeBatch) {
+ s.stateChangeStreams.Pub(sc)
+}
+
+func (s *KvServer) Snapshots(ctx context.Context, _ *remote.SnapshotsRequest) (*remote.SnapshotsReply, error) {
+ if s.blockSnapshots == nil || reflect.ValueOf(s.blockSnapshots).IsNil() { // nolint
+ return &remote.SnapshotsReply{BlocksFiles: []string{}, HistoryFiles: []string{}}, nil
+ }
+
+ return &remote.SnapshotsReply{BlocksFiles: s.blockSnapshots.Files(), HistoryFiles: s.historySnapshots.Files()}, nil
+}
+
+type StateChangePubSub struct {
+ chans map[uint]chan *remote.StateChangeBatch
+ id uint
+ mu sync.RWMutex
+}
+
+func newStateChangeStreams() *StateChangePubSub {
+ return &StateChangePubSub{}
+}
+
+func (s *StateChangePubSub) Sub() (ch chan *remote.StateChangeBatch, remove func()) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.chans == nil {
+ s.chans = make(map[uint]chan *remote.StateChangeBatch)
+ }
+ s.id++
+ id := s.id
+ ch = make(chan *remote.StateChangeBatch, 8)
+ s.chans[id] = ch
+ return ch, func() { s.remove(id) }
+}
+
+func (s *StateChangePubSub) Pub(reply *remote.StateChangeBatch) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ for _, ch := range s.chans {
+ common.PrioritizedSend(ch, reply)
+ }
+}
+
+func (s *StateChangePubSub) Len() int {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return len(s.chans)
+}
+
+func (s *StateChangePubSub) remove(id uint) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ ch, ok := s.chans[id]
+ if !ok { // double-unsubscribe support
+ return
+ }
+ close(ch)
+ delete(s.chans, id)
+}
+
+// Temporal methods
+func (s *KvServer) DomainGet(ctx context.Context, req *remote.DomainGetReq) (reply *remote.DomainGetReply, err error) {
+ reply = &remote.DomainGetReply{}
+ if err := s.with(req.TxId, func(tx kv.Tx) error {
+ ttx, ok := tx.(kv.TemporalTx)
+ if !ok {
+ return fmt.Errorf("server DB doesn't implement kv.Temporal interface")
+ }
+ if req.Latest {
+ reply.V, reply.Ok, err = ttx.DomainGet(kv.Domain(req.Table), req.K, req.K2)
+ if err != nil {
+ return err
+ }
+ } else {
+ reply.V, reply.Ok, err = ttx.DomainGetAsOf(kv.Domain(req.Table), req.K, req.K2, req.Ts)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return reply, nil
+}
+func (s *KvServer) HistoryGet(ctx context.Context, req *remote.HistoryGetReq) (reply *remote.HistoryGetReply, err error) {
+ reply = &remote.HistoryGetReply{}
+ if err := s.with(req.TxId, func(tx kv.Tx) error {
+ ttx, ok := tx.(kv.TemporalTx)
+ if !ok {
+ return fmt.Errorf("server DB doesn't implement kv.Temporal interface")
+ }
+ reply.V, reply.Ok, err = ttx.HistoryGet(kv.History(req.Table), req.K, req.Ts)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return reply, nil
+}
+
+const PageSizeLimit = 4 * 4096
+
+func (s *KvServer) IndexRange(ctx context.Context, req *remote.IndexRangeReq) (*remote.IndexRangeReply, error) {
+ reply := &remote.IndexRangeReply{}
+ from, limit := int(req.FromTs), int(req.Limit)
+ if req.PageToken != "" {
+ var pagination remote.IndexPagination
+ if err := unmarshalPagination(req.PageToken, &pagination); err != nil {
+ return nil, err
+ }
+ from, limit = int(pagination.NextTimeStamp), int(pagination.Limit)
+ }
+ if req.PageSize <= 0 || req.PageSize > PageSizeLimit {
+ req.PageSize = PageSizeLimit
+ }
+
+ if err := s.with(req.TxId, func(tx kv.Tx) error {
+ ttx, ok := tx.(kv.TemporalTx)
+ if !ok {
+ return fmt.Errorf("server DB doesn't implement kv.Temporal interface")
+ }
+ it, err := ttx.IndexRange(kv.InvertedIdx(req.Table), req.K, from, int(req.ToTs), order.By(req.OrderAscend), limit)
+ if err != nil {
+ return err
+ }
+ for it.HasNext() {
+ v, err := it.Next()
+ if err != nil {
+ return err
+ }
+ reply.Timestamps = append(reply.Timestamps, v)
+ limit--
+ }
+ if len(reply.Timestamps) == PageSizeLimit && it.HasNext() {
+ next, err := it.Next()
+ if err != nil {
+ return err
+ }
+ reply.NextPageToken, err = marshalPagination(&remote.IndexPagination{NextTimeStamp: int64(next), Limit: int64(limit)})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return reply, nil
+}
+
+func (s *KvServer) Range(ctx context.Context, req *remote.RangeReq) (*remote.Pairs, error) {
+ from, limit := req.FromPrefix, int(req.Limit)
+ if req.PageToken != "" {
+ var pagination remote.ParisPagination
+ if err := unmarshalPagination(req.PageToken, &pagination); err != nil {
+ return nil, err
+ }
+ from, limit = pagination.NextKey, int(pagination.Limit)
+ }
+ if req.PageSize <= 0 || req.PageSize > PageSizeLimit {
+ req.PageSize = PageSizeLimit
+ }
+
+ reply := &remote.Pairs{}
+ var err error
+ if err = s.with(req.TxId, func(tx kv.Tx) error {
+ var it iter.KV
+ if req.OrderAscend {
+ it, err = tx.RangeAscend(req.Table, from, req.ToPrefix, limit)
+ if err != nil {
+ return err
+ }
+ } else {
+ it, err = tx.RangeDescend(req.Table, from, req.ToPrefix, limit)
+ if err != nil {
+ return err
+ }
+ }
+ for it.HasNext() {
+ k, v, err := it.Next()
+ if err != nil {
+ return err
+ }
+ reply.Keys = append(reply.Keys, k)
+ reply.Values = append(reply.Values, v)
+ limit--
+ }
+ if len(reply.Keys) == PageSizeLimit && it.HasNext() {
+ nextK, _, err := it.Next()
+ if err != nil {
+ return err
+ }
+ reply.NextPageToken, err = marshalPagination(&remote.ParisPagination{NextKey: nextK, Limit: int64(limit)})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return reply, nil
+}
+
+// see: https://cloud.google.com/apis/design/design_patterns
+func marshalPagination(m proto.Message) (string, error) {
+ pageToken, err := proto.Marshal(m)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(pageToken), nil
+}
+
+func unmarshalPagination(pageToken string, m proto.Message) error {
+ token, err := base64.StdEncoding.DecodeString(pageToken)
+ if err != nil {
+ return err
+ }
+ if err = proto.Unmarshal(token, m); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/erigon-lib/kv/remotedbserver/server_test.go b/erigon-lib/kv/remotedbserver/server_test.go
new file mode 100644
index 00000000000..fec193f0389
--- /dev/null
+++ b/erigon-lib/kv/remotedbserver/server_test.go
@@ -0,0 +1,97 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remotedbserver
+
+import (
+ "context"
+ "runtime"
+ "testing"
+
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/memdb"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestKvServer_renew(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("fix me on win please")
+ }
+
+ require, ctx, db := require.New(t), context.Background(), memdb.NewTestDB(t)
+ require.NoError(db.Update(ctx, func(tx kv.RwTx) error {
+ wc, err := tx.RwCursorDupSort(kv.PlainState)
+ require.NoError(err)
+ require.NoError(wc.Append([]byte{1}, []byte{1}))
+ require.NoError(wc.Append([]byte{1}, []byte{2}))
+ require.NoError(wc.Append([]byte{2}, []byte{1}))
+ require.NoError(wc.Append([]byte{3}, []byte{1}))
+ return nil
+ }))
+
+ s := NewKvServer(ctx, db, nil, nil, log.New())
+ g, ctx := errgroup.WithContext(ctx)
+ testCase := func() error {
+ id, err := s.begin(ctx)
+ if err != nil {
+ return err
+ }
+ var c, c2 kv.Cursor
+ if err = s.with(id, func(tx kv.Tx) error {
+ c, err = tx.Cursor(kv.PlainState)
+ return err
+ }); err != nil {
+ return err
+ }
+ k, v, err := c.First()
+ require.NoError(err)
+ require.Equal([]byte{1}, k)
+ require.Equal([]byte{1}, v)
+
+ if err = s.renew(ctx, id); err != nil {
+ return err
+ }
+
+ if err = s.with(id, func(tx kv.Tx) error {
+ c, err = tx.Cursor(kv.PlainState)
+ if err != nil {
+ return err
+ }
+ c2, err = tx.Cursor(kv.PlainState)
+ return err
+ }); err != nil {
+ return err
+ }
+
+ k, v, err = c.Next()
+ require.NoError(err)
+ require.Equal([]byte{1}, k)
+ require.Equal([]byte{1}, v)
+ k, v, err = c2.Next()
+ require.NoError(err)
+ require.Equal([]byte{1}, k)
+ require.Equal([]byte{1}, v)
+
+ s.rollback(id)
+ return nil
+ }
+ for i := 0; i < 10; i++ {
+ g.Go(testCase)
+ }
+ require.NoError(g.Wait())
+}
diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go
new file mode 100644
index 00000000000..423b7303464
--- /dev/null
+++ b/erigon-lib/kv/tables.go
@@ -0,0 +1,908 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package kv
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
+)
+
+// DBSchemaVersion versions list
+// 5.0 - BlockTransaction table now has canonical ids (txs of non-canonical blocks moving to NonCanonicalTransaction table)
+// 6.0 - BlockTransaction table now has system-txs before and after block (records are absent if block has no system-tx, but sequence increasing)
+// 6.1 - Canonical/NonCanonical/BadBlock transitions now stored in same table: kv.EthTx. Add kv.BadBlockNumber table
+var DBSchemaVersion = types.VersionReply{Major: 6, Minor: 1, Patch: 0}
+
+// ChaindataTables
+
+// Dictionary:
+// "Plain State" - state where keys arent' hashed. "CurrentState" - same, but keys are hashed. "PlainState" used for blocks execution. "CurrentState" used mostly for Merkle root calculation.
+// "incarnation" - uint64 number - how much times given account was SelfDestruct'ed.
+
+/*
+PlainState logical layout:
+
+ Contains Accounts:
+ key - address (unhashed)
+ value - account encoded for storage
+ Contains Storage:
+ key - address (unhashed) + incarnation + storage key (unhashed)
+ value - storage value(common.hash)
+
+Physical layout:
+
+ PlainState and HashedStorage utilises DupSort feature of MDBX (store multiple values inside 1 key).
+
+-------------------------------------------------------------
+
+ key | value
+
+-------------------------------------------------------------
+[acc_hash] | [acc_value]
+[acc_hash]+[inc] | [storage1_hash]+[storage1_value]
+
+ | [storage2_hash]+[storage2_value] // this value has no own key. it's 2nd value of [acc_hash]+[inc] key.
+ | [storage3_hash]+[storage3_value]
+ | ...
+
+[acc_hash]+[old_inc] | [storage1_hash]+[storage1_value]
+
+ | ...
+
+[acc2_hash] | [acc2_value]
+
+ ...
+*/
+const PlainState = "PlainState"
+
+// PlainContractCode -
+// key - address+incarnation
+// value - code hash
+const PlainContractCode = "PlainCodeHash"
+
+/*
+AccountChangeSet and StorageChangeSet - of block N store values of state before block N changed them.
+Because values "after" change stored in PlainState.
+Logical format:
+
+ key - blockNum_u64 + key_in_plain_state
+ value - value_in_plain_state_before_blockNum_changes
+
+Example: If block N changed account A from value X to Y. Then:
+
+ AccountChangeSet has record: bigEndian(N) + A -> X
+ PlainState has record: A -> Y
+
+See also: docs/programmers_guide/db_walkthrough.MD#table-history-of-accounts
+
+As you can see if block N changes much accounts - then all records have repetitive prefix `bigEndian(N)`.
+MDBX can store such prefixes only once - by DupSort feature (see `docs/programmers_guide/dupsort.md`).
+Both buckets are DupSort-ed and have physical format:
+AccountChangeSet:
+
+ key - blockNum_u64
+ value - address + account(encoded)
+
+StorageChangeSet:
+
+ key - blockNum_u64 + address + incarnation_u64
+ value - plain_storage_key + value
+*/
+const AccountChangeSet = "AccountChangeSet"
+const StorageChangeSet = "StorageChangeSet"
+
+const (
+
+ //HashedAccounts
+ // key - address hash
+ // value - account encoded for storage
+ // Contains Storage:
+ //key - address hash + incarnation + storage key hash
+ //value - storage value(common.hash)
+ HashedAccounts = "HashedAccount"
+ HashedStorage = "HashedStorage"
+)
+
+/*
+AccountsHistory and StorageHistory - indices designed to serve next 2 type of requests:
+1. what is smallest block number >= X where account A changed
+2. get last shard of A - to append there new block numbers
+
+Task 1. is part of "get historical state" operation (see `core/state:GetAsOf`):
+If `db.Seek(A+bigEndian(X))` returns non-last shard -
+
+ then get block number from shard value Y := RoaringBitmap(shard_value).GetGte(X)
+ and with Y go to ChangeSets: db.Get(ChangeSets, Y+A)
+
+If `db.Seek(A+bigEndian(X))` returns last shard -
+
+ then we go to PlainState: db.Get(PlainState, A)
+
+Format:
+ - index split to shards by 2Kb - RoaringBitmap encoded sorted list of block numbers
+ (to avoid performance degradation of popular accounts or look deep into history.
+ Also 2Kb allows avoid Overflow pages inside DB.)
+ - if shard is not last - then key has suffix 8 bytes = bigEndian(max_block_num_in_this_shard)
+ - if shard is last - then key has suffix 8 bytes = 0xFF
+
+It allows:
+ - server task 1. by 1 db operation db.Seek(A+bigEndian(X))
+ - server task 2. by 1 db operation db.Get(A+0xFF)
+
+see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets
+
+AccountsHistory:
+
+ key - address + shard_id_u64
+ value - roaring bitmap - list of block where it changed
+
+StorageHistory
+
+ key - address + storage_key + shard_id_u64
+ value - roaring bitmap - list of block where it changed
+*/
+const E2AccountsHistory = "AccountHistory"
+const E2StorageHistory = "StorageHistory"
+
+const (
+
+ //key - contract code hash
+ //value - contract code
+ Code = "Code"
+
+ //key - addressHash+incarnation
+ //value - code hash
+ ContractCode = "HashedCodeHash"
+
+ // IncarnationMap for deleted accounts
+ //key - address
+ //value - incarnation of account when it was last deleted
+ IncarnationMap = "IncarnationMap"
+
+ //TEVMCode -
+ //key - contract code hash
+ //value - contract TEVM code
+ ContractTEVMCode = "TEVMCode"
+)
+
+/*
+TrieOfAccounts and TrieOfStorage
+hasState,groups - mark prefixes existing in hashed_account table
+hasTree - mark prefixes existing in trie_account table (not related with branchNodes)
+hasHash - mark prefixes which hashes are saved in current trie_account record (actually only hashes of branchNodes can be saved)
+@see UnmarshalTrieNode
+@see integrity.Trie
+
++-----------------------------------------------------------------------------------------------------+
+| DB record: 0x0B, hasState: 0b1011, hasTree: 0b1001, hasHash: 0b1001, hashes: [x,x] |
++-----------------------------------------------------------------------------------------------------+
+
+ | | |
+ v | v
+
++---------------------------------------------+ | +--------------------------------------+
+| DB record: 0x0B00, hasState: 0b10001 | | | DB record: 0x0B03, hasState: 0b10010 |
+| hasTree: 0, hasHash: 0b10000, hashes: [x] | | | hasTree: 0, hasHash: 0, hashes: [] |
++---------------------------------------------+ | +--------------------------------------+
+
+ | | | | |
+ v v v v v
+
++------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
+| Account: | | BranchNode: 0x0B0004 | | Account: | | Account: | | Account: |
+| 0x0B0000... | | has no record in | | 0x0B01... | | 0x0B0301... | | 0x0B0304... |
+| in HashedAccount | | TrieAccount | | | | | | |
++------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
+
+ | |
+ v v
+ +---------------+ +---------------+
+ | Account: | | Account: |
+ | 0x0B000400... | | 0x0B000401... |
+ +---------------+ +---------------+
+
+Invariants:
+- hasTree is subset of hasState
+- hasHash is subset of hasState
+- first level in account_trie always exists if hasState>0
+- TrieStorage record of account.root (length=40) must have +1 hash - it's account.root
+- each record in TrieAccount table must have parent (may be not direct) and this parent must have correct bit in hasTree bitmap
+- if hasState has bit - then HashedAccount table must have record according to this bit
+- each TrieAccount record must cover some state (means hasState is always > 0)
+- TrieAccount records with length=1 can satisfy (hasBranch==0&&hasHash==0) condition
+- Other records in TrieAccount and TrieStorage must (hasTree!=0 || hasHash!=0)
+*/
+const TrieOfAccounts = "TrieAccount"
+const TrieOfStorage = "TrieStorage"
+
+// Mapping [block number] => [Verkle Root]
+const VerkleRoots = "VerkleRoots"
+
+// Mapping [Verkle Root] => [Rlp-Encoded Verkle Node]
+const VerkleTrie = "VerkleTrie"
+
+const (
+ // DatabaseInfo is used to store information about data layout.
+ DatabaseInfo = "DbInfo"
+
+ // Naming:
+ // NeaderNumber - Ethereum-specific block number. All nodes have same BlockNum.
+ // NeaderID - auto-increment ID. Depends on order in which node see headers.
+ // Invariant: for all headers in snapshots Number == ID. It means no reason to store Num/ID for this headers in DB.
+ // Same about: TxNum/TxID, BlockNum/BlockID
+ HeaderNumber = "HeaderNumber" // header_hash -> header_num_u64
+ BadHeaderNumber = "BadHeaderNumber" // header_hash -> header_num_u64
+ HeaderCanonical = "CanonicalHeader" // block_num_u64 -> header hash
+ Headers = "Header" // block_num_u64 + hash -> header (RLP)
+ HeaderTD = "HeadersTotalDifficulty" // block_num_u64 + hash -> td (RLP)
+
+ BlockBody = "BlockBody" // block_num_u64 + hash -> block body
+
+ // Naming:
+ // TxNum - Ethereum canonical transaction number - same across all nodes.
+ // TxnID - auto-increment ID - can be differrent across all nodes
+ // BlockNum/BlockID - same
+ //
+ // EthTx - stores all transactions of Canonical/NonCanonical/Bad blocks
+ // TxnID (auto-increment ID) - means nodes in network will have different ID of same transactions
+ // Snapshots (frozen data): using TxNum (not TxnID)
+ //
+ // During ReOrg - txs are not removed/updated
+ //
+ // Also this table has system-txs before and after block: if
+ // block has no system-tx - records are absent, but TxnID increasing
+ //
+ // In Erigon3: table MaxTxNum storing TxNum (not TxnID). History/Indices are using TxNum (not TxnID).
+ EthTx = "BlockTransaction" // tx_id_u64 -> rlp(tx)
+ NonCanonicalTxs = "NonCanonicalTransaction" // tbl_sequence_u64 -> rlp(tx)
+ MaxTxNum = "MaxTxNum" // block_number_u64 -> max_tx_num_in_block_u64
+
+ Receipts = "Receipt" // block_num_u64 -> canonical block receipts (non-canonical are not stored)
+ Log = "TransactionLog" // block_num_u64 + txId -> logs of transaction
+
+ // Stores bitmap indices - in which block numbers saw logs of given 'address' or 'topic'
+ // [addr or topic] + [2 bytes inverted shard number] -> bitmap(blockN)
+ // indices are sharded - because some bitmaps are >1Mb and when new incoming blocks process it
+ // updates ~300 of bitmaps - by append small amount new values. It cause much big writes (MDBX does copy-on-write).
+ //
+ // if last existing shard size merge it with delta
+ // if serialized size of delta > ShardLimit - break down to multiple shards
+ // shard number - it's biggest value in bitmap
+ LogTopicIndex = "LogTopicIndex"
+ LogAddressIndex = "LogAddressIndex"
+
+ // CallTraceSet is the name of the table that contain the mapping of block number to the set (sorted) of all accounts
+ // touched by call traces. It is DupSort-ed table
+ // 8-byte BE block number -> account address -> two bits (one for "from", another for "to")
+ CallTraceSet = "CallTraceSet"
+ // Indices for call traces - have the same format as LogTopicIndex and LogAddressIndex
+ // Store bitmap indices - in which block number we saw calls from (CallFromIndex) or to (CallToIndex) some addresses
+ CallFromIndex = "CallFromIndex"
+ CallToIndex = "CallToIndex"
+
+ // Cumulative indexes for estimation of stage execution
+ CumulativeGasIndex = "CumulativeGasIndex"
+ CumulativeTransactionIndex = "CumulativeTransactionIndex"
+
+ TxLookup = "BlockTransactionLookup" // hash -> transaction/receipt lookup metadata
+
+ ConfigTable = "Config" // config prefix for the db
+
+ // Progress of sync stages: stageName -> stageData
+ SyncStageProgress = "SyncStage"
+
+ Clique = "Clique"
+ CliqueSeparate = "CliqueSeparate"
+ CliqueSnapshot = "CliqueSnapshot"
+ CliqueLastSnapshot = "CliqueLastSnapshot"
+
+ // Proof-of-stake
+ // Beacon chain head that is been executed at the current time
+ CurrentExecutionPayload = "CurrentExecutionPayload"
+
+ // Node database tables (see nodedb.go)
+
+ // NodeRecords stores P2P node records (ENR)
+ NodeRecords = "NodeRecord"
+ // Inodes stores P2P discovery service info about the nodes
+ Inodes = "Inode"
+
+ // Transaction senders - stored separately from the block bodies
+ Senders = "TxSender" // block_num_u64 + blockHash -> sendersList (no serialization format, every 20 bytes is new sender)
+
+ // headBlockKey tracks the latest know full block's hash.
+ HeadBlockKey = "LastBlock"
+
+ HeadHeaderKey = "LastHeader"
+
+ // headBlockHash, safeBlockHash, finalizedBlockHash of the latest Engine API forkchoice
+ LastForkchoice = "LastForkchoice"
+
+ // TransitionBlockKey tracks the last proof-of-work block
+ TransitionBlockKey = "TransitionBlock"
+
+ // migrationName -> serialized SyncStageProgress and SyncStageUnwind buckets
+ // it stores stages progress to understand in which context was executed migration
+ // in case of bug-report developer can ask content of this bucket
+ Migrations = "Migration"
+
+ Sequence = "Sequence" // tbl_name -> seq_u64
+
+ Epoch = "DevEpoch" // block_num_u64+block_hash->transition_proof
+ PendingEpoch = "DevPendingEpoch" // block_num_u64+block_hash->transition_proof
+
+ Issuance = "Issuance" // block_num_u64->RLP(issuance+burnt[0 if < london])
+
+ StateAccounts = "StateAccounts"
+ StateStorage = "StateStorage"
+ StateCode = "StateCode"
+ StateCommitment = "StateCommitment"
+
+ // BOR
+ BorReceipts = "BorReceipt"
+ BorFinality = "BorFinality"
+ BorTxLookup = "BlockBorTransactionLookup" // transaction_hash -> block_num_u64
+ BorSeparate = "BorSeparate" // persisted snapshots of the Validator Sets, with their proposer priorities
+ BorEvents = "BorEvents" // event_id -> event_payload
+ BorEventNums = "BorEventNums" // block_num -> event_id (first event_id in that block)
+ BorSpans = "BorSpans" // span_id -> span (in JSON encoding)
+
+ // Downloader
+ BittorrentCompletion = "BittorrentCompletion"
+ BittorrentInfo = "BittorrentInfo"
+
+ // Domains/Histry/InvertedIndices
+ // Contants have "Tbl" prefix, to avoid collision with actual Domain names
+ // This constants is very rarely used in APP, but Domain/History/Idx names are widely used
+ TblAccountKeys = "AccountKeys"
+ TblAccountVals = "AccountVals"
+ TblAccountHistoryKeys = "AccountHistoryKeys"
+ TblAccountHistoryVals = "AccountHistoryVals"
+ TblAccountIdx = "AccountIdx"
+
+ TblStorageKeys = "StorageKeys"
+ TblStorageVals = "StorageVals"
+ TblStorageHistoryKeys = "StorageHistoryKeys"
+ TblStorageHistoryVals = "StorageHistoryVals"
+ TblStorageIdx = "StorageIdx"
+
+ TblCodeKeys = "CodeKeys"
+ TblCodeVals = "CodeVals"
+ TblCodeHistoryKeys = "CodeHistoryKeys"
+ TblCodeHistoryVals = "CodeHistoryVals"
+ TblCodeIdx = "CodeIdx"
+
+ TblCommitmentKeys = "CommitmentKeys"
+ TblCommitmentVals = "CommitmentVals"
+ TblCommitmentHistoryKeys = "CommitmentHistoryKeys"
+ TblCommitmentHistoryVals = "CommitmentHistoryVals"
+ TblCommitmentIdx = "CommitmentIdx"
+
+ TblLogAddressKeys = "LogAddressKeys"
+ TblLogAddressIdx = "LogAddressIdx"
+ TblLogTopicsKeys = "LogTopicsKeys"
+ TblLogTopicsIdx = "LogTopicsIdx"
+
+ TblTracesFromKeys = "TracesFromKeys"
+ TblTracesFromIdx = "TracesFromIdx"
+ TblTracesToKeys = "TracesToKeys"
+ TblTracesToIdx = "TracesToIdx"
+
+ Snapshots = "Snapshots" // name -> hash
+
+ //State Reconstitution
+ RAccountKeys = "RAccountKeys"
+ RAccountIdx = "RAccountIdx"
+ RStorageKeys = "RStorageKeys"
+ RStorageIdx = "RStorageIdx"
+ RCodeKeys = "RCodeKeys"
+ RCodeIdx = "RCodeIdx"
+
+ PlainStateR = "PlainStateR" // temporary table for PlainState reconstitution
+ PlainStateD = "PlainStateD" // temporary table for PlainStare reconstitution, deletes
+ CodeR = "CodeR" // temporary table for Code reconstitution
+ CodeD = "CodeD" // temporary table for Code reconstitution, deletes
+ PlainContractR = "PlainContractR" // temporary table for PlainContract reconstitution
+ PlainContractD = "PlainContractD" // temporary table for PlainContract reconstitution, deletes
+
+ // Erigon-CL Objects
+
+ // [slot] => [Beacon state]
+ BeaconState = "BeaconState"
+ // [slot] => [signature + block without execution payload]
+ BeaconBlocks = "BeaconBlock"
+ // [slot] => [attestation list (custom encoding)]
+ Attestetations = "Attestetations"
+
+ // [slot] => [Canonical block root]
+ CanonicalBlockRoots = "CanonicalBlockRoots"
+ // [Root (block root] => Slot
+ BlockRootToSlot = "BlockRootToSlot"
+ // [Block Root] => [State Root]
+ BlockRootToStateRoot = "BlockRootToStateRoot"
+ StateRootToBlockRoot = "StateRootToBlockRoot"
+
+ BlockRootToBlockNumber = "BlockRootToBlockNumber"
+ BlockRootToBlockHash = "BlockRootToBlockHash"
+
+ LastBeaconSnapshot = "LastBeaconSnapshot"
+ LastBeaconSnapshotKey = "LastBeaconSnapshotKey"
+
+ // [Block Root] => [Parent Root]
+ BlockRootToParentRoot = "BlockRootToParentRoot"
+
+ HighestFinalized = "HighestFinalized" // hash -> transaction/receipt lookup metadata
+
+ // BlockRoot => Beacon Block Header
+ BeaconBlockHeaders = "BeaconBlockHeaders"
+
+ // LightClientStore => LightClientStore object
+ // LightClientFinalityUpdate => latest finality update
+ // LightClientOptimisticUpdate => latest optimistic update
+ LightClient = "LightClient"
+ // Period (one every 27 hours) => LightClientUpdate
+ LightClientUpdates = "LightClientUpdates"
+ // Beacon historical data
+ // ValidatorIndex => [Field]
+ ValidatorPublicKeys = "ValidatorPublickeys"
+ // ValidatorIndex + Slot => [Field]
+ ValidatorEffectiveBalance = "ValidatorEffectiveBalance"
+ ValidatorSlashings = "ValidatorSlashings"
+ ValidatorBalance = "ValidatorBalance"
+ StaticValidators = "StaticValidators"
+ StateEvents = "StateEvents"
+
+ // External data
+ StateRoot = "StateRoot"
+ BlockRoot = "BlockRoot"
+ MinimalBeaconState = "MinimalBeaconState"
+ InactivityScores = "InactivityScores"
+ PreviousEpochParticipation = "PreviousEpochParticipation"
+ CurrentEpochParticipation = "CurrentEpochParticipation"
+ Checkpoints = "Checkpoints"
+ NextSyncCommittee = "NextSyncCommittee"
+ CurrentSyncCommittee = "CurrentSyncCommittee"
+ HistoricalRoots = "HistoricalRoots"
+ HistoricalSummaries = "HistoricalSummaries"
+ CurrentEpochAttestations = "EpochAttestations"
+ PreviousEpochAttestations = "PreviousAttestations"
+ Eth1DataVotes = "Eth1DataVotes"
+
+ IntraRandaoMixes = "IntraRandaoMixes" // [validator_index+slot] => [randao_mix]
+ RandaoMixes = "RandaoMixes" // [validator_index+slot] => [randao_mix]
+ Proposers = "BlockProposers" // epoch => proposers indicies
+
+ StatesProcessingProgress = "StatesProcessingProgress"
+)
+
+// Keys
+var (
+ //StorageModeTEVM - does not translate EVM to TEVM
+ StorageModeTEVM = []byte("smTEVM")
+
+ PruneTypeOlder = []byte("older")
+ PruneTypeBefore = []byte("before")
+
+ PruneHistory = []byte("pruneHistory")
+ PruneHistoryType = []byte("pruneHistoryType")
+ PruneReceipts = []byte("pruneReceipts")
+ PruneReceiptsType = []byte("pruneReceiptsType")
+ PruneTxIndex = []byte("pruneTxIndex")
+ PruneTxIndexType = []byte("pruneTxIndexType")
+ PruneCallTraces = []byte("pruneCallTraces")
+ PruneCallTracesType = []byte("pruneCallTracesType")
+
+ DBSchemaVersionKey = []byte("dbVersion")
+
+ BittorrentPeerID = "peerID"
+ CurrentHeadersSnapshotHash = []byte("CurrentHeadersSnapshotHash")
+ CurrentHeadersSnapshotBlock = []byte("CurrentHeadersSnapshotBlock")
+ CurrentBodiesSnapshotHash = []byte("CurrentBodiesSnapshotHash")
+ CurrentBodiesSnapshotBlock = []byte("CurrentBodiesSnapshotBlock")
+ PlainStateVersion = []byte("PlainStateVersion")
+
+ HighestFinalizedKey = []byte("HighestFinalized")
+ LightClientStore = []byte("LightClientStore")
+ LightClientFinalityUpdate = []byte("LightClientFinalityUpdate")
+ LightClientOptimisticUpdate = []byte("LightClientOptimisticUpdate")
+
+ StatesProcessingKey = []byte("StatesProcessing")
+)
+
+// ChaindataTables - list of all buckets. App will panic if some bucket is not in this list.
+// This list will be sorted in `init` method.
+// ChaindataTablesCfg - can be used to find index in sorted version of ChaindataTables list by name
+var ChaindataTables = []string{
+ E2AccountsHistory,
+ E2StorageHistory,
+ Code,
+ ContractCode,
+ HeaderNumber,
+ BadHeaderNumber,
+ BlockBody,
+ Receipts,
+ TxLookup,
+ ConfigTable,
+ CurrentExecutionPayload,
+ DatabaseInfo,
+ IncarnationMap,
+ ContractTEVMCode,
+ CliqueSeparate,
+ CliqueLastSnapshot,
+ CliqueSnapshot,
+ SyncStageProgress,
+ PlainState,
+ PlainContractCode,
+ AccountChangeSet,
+ StorageChangeSet,
+ Senders,
+ HeadBlockKey,
+ HeadHeaderKey,
+ LastForkchoice,
+ Migrations,
+ LogTopicIndex,
+ LogAddressIndex,
+ CallTraceSet,
+ CallFromIndex,
+ CallToIndex,
+ CumulativeGasIndex,
+ CumulativeTransactionIndex,
+ Log,
+ Sequence,
+ EthTx,
+ NonCanonicalTxs,
+ TrieOfAccounts,
+ TrieOfStorage,
+ HashedAccounts,
+ HashedStorage,
+ HeaderCanonical,
+ Headers,
+ HeaderTD,
+ Epoch,
+ PendingEpoch,
+ Issuance,
+ StateAccounts,
+ StateStorage,
+ StateCode,
+ StateCommitment,
+ BorReceipts,
+ BorFinality,
+ BorTxLookup,
+ BorSeparate,
+ BorEvents,
+ BorEventNums,
+ BorSpans,
+ TblAccountKeys,
+ TblAccountVals,
+ TblAccountHistoryKeys,
+ TblAccountHistoryVals,
+ TblAccountIdx,
+
+ TblStorageKeys,
+ TblStorageVals,
+ TblStorageHistoryKeys,
+ TblStorageHistoryVals,
+ TblStorageIdx,
+
+ TblCodeKeys,
+ TblCodeVals,
+ TblCodeHistoryKeys,
+ TblCodeHistoryVals,
+ TblCodeIdx,
+
+ TblCommitmentKeys,
+ TblCommitmentVals,
+ TblCommitmentHistoryKeys,
+ TblCommitmentHistoryVals,
+ TblCommitmentIdx,
+
+ TblLogAddressKeys,
+ TblLogAddressIdx,
+ TblLogTopicsKeys,
+ TblLogTopicsIdx,
+
+ TblTracesFromKeys,
+ TblTracesFromIdx,
+ TblTracesToKeys,
+ TblTracesToIdx,
+
+ Snapshots,
+ MaxTxNum,
+
+ RAccountKeys,
+ RAccountIdx,
+ RStorageKeys,
+ RStorageIdx,
+ RCodeKeys,
+ RCodeIdx,
+
+ VerkleRoots,
+ VerkleTrie,
+ // Beacon stuff
+ BeaconState,
+ BeaconBlocks,
+ CanonicalBlockRoots,
+ BlockRootToSlot,
+ BlockRootToStateRoot,
+ StateRootToBlockRoot,
+ BlockRootToParentRoot,
+ BeaconBlockHeaders,
+ HighestFinalized,
+ Attestetations,
+ LightClient,
+ LightClientUpdates,
+ BlockRootToBlockHash,
+ BlockRootToBlockNumber,
+ LastBeaconSnapshot,
+ // State Reconstitution
+ ValidatorPublicKeys,
+ ValidatorEffectiveBalance,
+ ValidatorBalance,
+ ValidatorSlashings,
+ StaticValidators,
+ StateEvents,
+ // Other stuff (related to state reconstitution)
+ BlockRoot,
+ StateRoot,
+ MinimalBeaconState,
+ RandaoMixes,
+ Proposers,
+ StatesProcessingProgress,
+ PreviousEpochParticipation,
+ CurrentEpochParticipation,
+ InactivityScores,
+ Checkpoints,
+ NextSyncCommittee,
+ CurrentSyncCommittee,
+ HistoricalRoots,
+ HistoricalSummaries,
+ CurrentEpochAttestations,
+ PreviousEpochAttestations,
+ Eth1DataVotes,
+ IntraRandaoMixes,
+}
+
+const (
+ RecentLocalTransaction = "RecentLocalTransaction" // sequence_u64 -> tx_hash
+ PoolTransaction = "PoolTransaction" // txHash -> sender_id_u64+tx_rlp
+ PoolInfo = "PoolInfo" // option_key -> option_value
+)
+
+var TxPoolTables = []string{
+ RecentLocalTransaction,
+ PoolTransaction,
+ PoolInfo,
+}
+var SentryTables = []string{}
+var DownloaderTables = []string{
+ BittorrentCompletion,
+ BittorrentInfo,
+}
+var ReconTables = []string{
+ PlainStateR,
+ PlainStateD,
+ CodeR,
+ CodeD,
+ PlainContractR,
+ PlainContractD,
+}
+
+// ChaindataDeprecatedTables - list of buckets which can be programmatically deleted - for example after migration
+var ChaindataDeprecatedTables = []string{
+ Clique,
+ TransitionBlockKey,
+}
+
+type CmpFunc func(k1, k2, v1, v2 []byte) int
+
+type TableCfg map[string]TableCfgItem
+type Bucket string
+
+type DBI uint
+type TableFlags uint
+
+const (
+ Default TableFlags = 0x00
+ ReverseKey TableFlags = 0x02
+ DupSort TableFlags = 0x04
+ IntegerKey TableFlags = 0x08
+ IntegerDup TableFlags = 0x20
+ ReverseDup TableFlags = 0x40
+)
+
+type TableCfgItem struct {
+ Flags TableFlags
+ // AutoDupSortKeysConversion - enables some keys transformation - to change db layout without changing app code.
+ // Use it wisely - it helps to do experiments with DB format faster, but better reduce amount of Magic in app.
+ // If good DB format found, push app code to accept this format and then disable this property.
+ AutoDupSortKeysConversion bool
+ IsDeprecated bool
+ DBI DBI
+ // DupFromLen - if user provide key of this length, then next transformation applied:
+ // v = append(k[DupToLen:], v...)
+ // k = k[:DupToLen]
+ // And opposite at retrieval
+ // Works only if AutoDupSortKeysConversion enabled
+ DupFromLen int
+ DupToLen int
+}
+
+var ChaindataTablesCfg = TableCfg{
+ HashedStorage: {
+ Flags: DupSort,
+ AutoDupSortKeysConversion: true,
+ DupFromLen: 72,
+ DupToLen: 40,
+ },
+ AccountChangeSet: {Flags: DupSort},
+ StorageChangeSet: {Flags: DupSort},
+ PlainState: {
+ Flags: DupSort,
+ AutoDupSortKeysConversion: true,
+ DupFromLen: 60,
+ DupToLen: 28,
+ },
+ CallTraceSet: {Flags: DupSort},
+
+ TblAccountKeys: {Flags: DupSort},
+ TblAccountHistoryKeys: {Flags: DupSort},
+ TblAccountHistoryVals: {Flags: DupSort},
+ TblAccountIdx: {Flags: DupSort},
+ TblStorageKeys: {Flags: DupSort},
+ TblStorageHistoryKeys: {Flags: DupSort},
+ TblStorageHistoryVals: {Flags: DupSort},
+ TblStorageIdx: {Flags: DupSort},
+ TblCodeKeys: {Flags: DupSort},
+ TblCodeHistoryKeys: {Flags: DupSort},
+ TblCodeIdx: {Flags: DupSort},
+ TblCommitmentKeys: {Flags: DupSort},
+ TblCommitmentHistoryKeys: {Flags: DupSort},
+ TblCommitmentIdx: {Flags: DupSort},
+ TblLogAddressKeys: {Flags: DupSort},
+ TblLogAddressIdx: {Flags: DupSort},
+ TblLogTopicsKeys: {Flags: DupSort},
+ TblLogTopicsIdx: {Flags: DupSort},
+ TblTracesFromKeys: {Flags: DupSort},
+ TblTracesFromIdx: {Flags: DupSort},
+ TblTracesToKeys: {Flags: DupSort},
+ TblTracesToIdx: {Flags: DupSort},
+ RAccountKeys: {Flags: DupSort},
+ RAccountIdx: {Flags: DupSort},
+ RStorageKeys: {Flags: DupSort},
+ RStorageIdx: {Flags: DupSort},
+ RCodeKeys: {Flags: DupSort},
+ RCodeIdx: {Flags: DupSort},
+}
+
+var BorTablesCfg = TableCfg{
+ BorReceipts: {Flags: DupSort},
+ BorFinality: {Flags: DupSort},
+ BorTxLookup: {Flags: DupSort},
+ BorEvents: {Flags: DupSort},
+ BorEventNums: {Flags: DupSort},
+ BorSpans: {Flags: DupSort},
+}
+
+var TxpoolTablesCfg = TableCfg{}
+var SentryTablesCfg = TableCfg{}
+var DownloaderTablesCfg = TableCfg{}
+var ReconTablesCfg = TableCfg{
+ PlainStateD: {Flags: DupSort},
+ CodeD: {Flags: DupSort},
+ PlainContractD: {Flags: DupSort},
+}
+
+func TablesCfgByLabel(label Label) TableCfg {
+ switch label {
+ case ChainDB:
+ return ChaindataTablesCfg
+ case TxPoolDB:
+ return TxpoolTablesCfg
+ case SentryDB:
+ return SentryTablesCfg
+ case DownloaderDB:
+ return DownloaderTablesCfg
+ default:
+ panic(fmt.Sprintf("unexpected label: %s", label))
+ }
+}
+func sortBuckets() {
+ sort.SliceStable(ChaindataTables, func(i, j int) bool {
+ return strings.Compare(ChaindataTables[i], ChaindataTables[j]) < 0
+ })
+}
+
+func init() {
+ reinit()
+}
+
+func reinit() {
+ sortBuckets()
+
+ for _, name := range ChaindataTables {
+ _, ok := ChaindataTablesCfg[name]
+ if !ok {
+ ChaindataTablesCfg[name] = TableCfgItem{}
+ }
+ }
+
+ for _, name := range ChaindataDeprecatedTables {
+ _, ok := ChaindataTablesCfg[name]
+ if !ok {
+ ChaindataTablesCfg[name] = TableCfgItem{}
+ }
+ tmp := ChaindataTablesCfg[name]
+ tmp.IsDeprecated = true
+ ChaindataTablesCfg[name] = tmp
+ }
+
+ for _, name := range TxPoolTables {
+ _, ok := TxpoolTablesCfg[name]
+ if !ok {
+ TxpoolTablesCfg[name] = TableCfgItem{}
+ }
+ }
+
+ for _, name := range SentryTables {
+ _, ok := SentryTablesCfg[name]
+ if !ok {
+ SentryTablesCfg[name] = TableCfgItem{}
+ }
+ }
+
+ for _, name := range DownloaderTables {
+ _, ok := DownloaderTablesCfg[name]
+ if !ok {
+ DownloaderTablesCfg[name] = TableCfgItem{}
+ }
+ }
+
+ for _, name := range ReconTables {
+ _, ok := ReconTablesCfg[name]
+ if !ok {
+ ReconTablesCfg[name] = TableCfgItem{}
+ }
+ }
+}
+
+// Temporal
+
+const (
+ AccountsDomain Domain = "AccountsDomain"
+ StorageDomain Domain = "StorageDomain"
+ CodeDomain Domain = "CodeDomain"
+)
+
+const (
+ AccountsHistory History = "AccountsHistory"
+ StorageHistory History = "StorageHistory"
+ CodeHistory History = "CodeHistory"
+)
+
+const (
+ AccountsHistoryIdx InvertedIdx = "AccountsHistoryIdx"
+ StorageHistoryIdx InvertedIdx = "StorageHistoryIdx"
+ CodeHistoryIdx InvertedIdx = "CodeHistoryIdx"
+
+ LogTopicIdx InvertedIdx = "LogTopicIdx"
+ LogAddrIdx InvertedIdx = "LogAddrIdx"
+ TracesFromIdx InvertedIdx = "TracesFromIdx"
+ TracesToIdx InvertedIdx = "TracesToIdx"
+)
diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset.go b/erigon-lib/kv/temporal/historyv2/account_changeset.go
new file mode 100644
index 00000000000..db09519f5f4
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/account_changeset.go
@@ -0,0 +1,78 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package historyv2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "sort"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type Encoder func(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error
+type Decoder func(dbKey, dbValue []byte) (blockN uint64, k, v []byte, err error)
+
+func NewAccountChangeSet() *ChangeSet {
+ return &ChangeSet{
+ Changes: make([]Change, 0),
+ keyLen: length.Addr,
+ }
+}
+
+func EncodeAccounts(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error {
+ sort.Sort(s)
+ newK := hexutility.EncodeTs(blockN)
+ for _, cs := range s.Changes {
+ newV := make([]byte, len(cs.Key)+len(cs.Value))
+ copy(newV, cs.Key)
+ copy(newV[len(cs.Key):], cs.Value)
+ if err := f(newK, newV); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func DecodeAccounts(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
+ blockN := binary.BigEndian.Uint64(dbKey)
+ if len(dbValue) < length.Addr {
+ return 0, nil, nil, fmt.Errorf("account changes purged for block %d", blockN)
+ }
+ k := dbValue[:length.Addr]
+ v := dbValue[length.Addr:]
+ return blockN, k, v, nil
+}
+
+func FindAccount(c kv.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error) {
+ k := hexutility.EncodeTs(blockNumber)
+ v, err := c.SeekBothRange(k, key)
+ if err != nil {
+ return nil, err
+ }
+ _, k, v, err = DecodeAccounts(k, v)
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.HasPrefix(k, key) {
+ return nil, nil
+ }
+ return v, nil
+}
diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go
new file mode 100644
index 00000000000..231ce569284
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go
@@ -0,0 +1,86 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package historyv2
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+func TestEncodingAccount(t *testing.T) {
+ bkt := kv.AccountChangeSet
+ m := Mapper[bkt]
+
+ ch := m.New()
+ // empty StorageChangeSset first
+ err := m.Encode(1, ch, func(k, v []byte) error {
+ return fmt.Errorf("must never call")
+ })
+ assert.NoError(t, err)
+
+ vals := [][]byte{
+ hexutility.MustDecodeHex("f7f6db1eb17c6d582078e0ffdd0c"),
+ hexutility.MustDecodeHex("b1e9b5c16355eede662031dd621d08faf4ea"),
+ hexutility.MustDecodeHex("862cf52b74f1cea41ddd8ffa4b3e7c7790"),
+ }
+ numOfElements := 3
+ for i := 0; i < numOfElements; i++ {
+ address := hexutility.MustDecodeHex(fmt.Sprintf("0xBe828AD8B538D1D691891F6c725dEdc5989abBc%d", i))
+ err2 := ch.Add(address, vals[i])
+ if err2 != nil {
+ t.Fatal(err)
+ }
+ }
+
+ ch2 := m.New()
+ err = m.Encode(1, ch, func(k, v []byte) error {
+ var err error
+ _, k, v, err = m.Decode(k, v)
+ if err != nil {
+ return err
+ }
+ return ch2.Add(k, v)
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(ch, ch2) {
+ fmt.Println("ch", len(ch.Changes), "ch2", len(ch2.Changes))
+ for i, v := range ch.Changes {
+ fmt.Println("Line ", i)
+
+ if !bytes.Equal(v.Key, ch2.Changes[i].Key) || !bytes.Equal(v.Value, ch2.Changes[i].Value) {
+ fmt.Println("Diff ", i)
+ fmt.Println("k1", hex.EncodeToString(v.Key), len(v.Key))
+ fmt.Println("k2", hex.EncodeToString(ch2.Changes[i].Key))
+ fmt.Println("v1", hex.EncodeToString(v.Value))
+ fmt.Println("v2", hex.EncodeToString(ch2.Changes[i].Value))
+ }
+ }
+ fmt.Printf("%+v %+v\n", ch, ch2)
+ t.Fatal("not equal")
+ }
+}
diff --git a/erigon-lib/kv/temporal/historyv2/changeset.go b/erigon-lib/kv/temporal/historyv2/changeset.go
new file mode 100644
index 00000000000..4ca0eeadef0
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/changeset.go
@@ -0,0 +1,260 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package historyv2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ math2 "math"
+ "reflect"
+
+ "github.com/ledgerwatch/erigon-lib/common/hexutility"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+func NewChangeSet() *ChangeSet {
+ return &ChangeSet{
+ Changes: make([]Change, 0),
+ }
+}
+
+type Change struct {
+ Key []byte
+ Value []byte
+}
+
+// ChangeSet is a map with keys of the same size.
+// Both keys and values are byte strings.
+type ChangeSet struct {
+ // Invariant: all keys are of the same size.
+ Changes []Change
+ keyLen int
+}
+
+// BEGIN sort.Interface
+
+func (s *ChangeSet) Len() int {
+ return len(s.Changes)
+}
+
+func (s *ChangeSet) Swap(i, j int) {
+ s.Changes[i], s.Changes[j] = s.Changes[j], s.Changes[i]
+}
+
+func (s *ChangeSet) Less(i, j int) bool {
+ cmp := bytes.Compare(s.Changes[i].Key, s.Changes[j].Key)
+ if cmp == 0 {
+ cmp = bytes.Compare(s.Changes[i].Value, s.Changes[j].Value)
+ }
+ return cmp < 0
+}
+
+// END sort.Interface
+func (s *ChangeSet) KeySize() int {
+ if s.keyLen != 0 {
+ return s.keyLen
+ }
+ for _, c := range s.Changes {
+ return len(c.Key)
+ }
+ return 0
+}
+
+func (s *ChangeSet) checkKeySize(key []byte) error {
+ if (s.Len() == 0 && s.KeySize() == 0) || (len(key) == s.KeySize() && len(key) > 0) {
+ return nil
+ }
+
+ return fmt.Errorf("wrong key size in AccountChangeSet: expected %d, actual %d", s.KeySize(), len(key))
+}
+
+// Add adds a new entry to the AccountChangeSet.
+// One must not add an existing key
+// and may add keys only of the same size.
+func (s *ChangeSet) Add(key []byte, value []byte) error {
+ if err := s.checkKeySize(key); err != nil {
+ return err
+ }
+
+ s.Changes = append(s.Changes, Change{
+ Key: key,
+ Value: value,
+ })
+ return nil
+}
+
+func (s *ChangeSet) ChangedKeys() map[string]struct{} {
+ m := make(map[string]struct{}, len(s.Changes))
+ for i := range s.Changes {
+ m[string(s.Changes[i].Key)] = struct{}{}
+ }
+ return m
+}
+
+func (s *ChangeSet) Equals(s2 *ChangeSet) bool {
+ return reflect.DeepEqual(s.Changes, s2.Changes)
+}
+
+// Encoded Method
+func FromDBFormat(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
+ if len(dbKey) == 8 {
+ return DecodeAccounts(dbKey, dbValue)
+ } else {
+ return DecodeStorage(dbKey, dbValue)
+ }
+}
+
+func AvailableFrom(tx kv.Tx) (uint64, error) {
+ c, err := tx.Cursor(kv.AccountChangeSet)
+ if err != nil {
+ return math2.MaxUint64, err
+ }
+ defer c.Close()
+ k, _, err := c.First()
+ if err != nil {
+ return math2.MaxUint64, err
+ }
+ if len(k) == 0 {
+ return math2.MaxUint64, nil
+ }
+ return binary.BigEndian.Uint64(k), nil
+}
+func AvailableStorageFrom(tx kv.Tx) (uint64, error) {
+ c, err := tx.Cursor(kv.StorageChangeSet)
+ if err != nil {
+ return math2.MaxUint64, err
+ }
+ defer c.Close()
+ k, _, err := c.First()
+ if err != nil {
+ return math2.MaxUint64, err
+ }
+ if len(k) == 0 {
+ return math2.MaxUint64, nil
+ }
+ return binary.BigEndian.Uint64(k), nil
+}
+
+func ForEach(db kv.Tx, bucket string, startkey []byte, walker func(blockN uint64, k, v []byte) error) error {
+ var blockN uint64
+ return db.ForEach(bucket, startkey, func(k, v []byte) error {
+ var err error
+ blockN, k, v, err = FromDBFormat(k, v)
+ if err != nil {
+ return err
+ }
+ return walker(blockN, k, v)
+ })
+}
+func ForPrefix(db kv.Tx, bucket string, startkey []byte, walker func(blockN uint64, k, v []byte) error) error {
+ var blockN uint64
+ return db.ForPrefix(bucket, startkey, func(k, v []byte) error {
+ var err error
+ blockN, k, v, err = FromDBFormat(k, v)
+ if err != nil {
+ return err
+ }
+ return walker(blockN, k, v)
+ })
+}
+
+func Truncate(tx kv.RwTx, from uint64) error {
+ keyStart := hexutility.EncodeTs(from)
+
+ {
+ c, err := tx.RwCursorDupSort(kv.AccountChangeSet)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() {
+ if err != nil {
+ return err
+ }
+ if err = tx.Delete(kv.AccountChangeSet, k); err != nil {
+ return err
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+ {
+ c, err := tx.RwCursorDupSort(kv.StorageChangeSet)
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+ for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() {
+ if err != nil {
+ return err
+ }
+ if err = tx.Delete(kv.StorageChangeSet, k); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type CSMapper struct {
+ IndexBucket string
+ IndexChunkKey func([]byte, uint64) []byte
+ Find func(cursor kv.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error)
+ New func() *ChangeSet
+ Encode Encoder
+ Decode Decoder
+}
+
+var Mapper = map[string]CSMapper{
+ kv.AccountChangeSet: {
+ IndexBucket: kv.E2AccountsHistory,
+ IndexChunkKey: AccountIndexChunkKey,
+ New: NewAccountChangeSet,
+ Find: FindAccount,
+ Encode: EncodeAccounts,
+ Decode: DecodeAccounts,
+ },
+ kv.StorageChangeSet: {
+ IndexBucket: kv.E2StorageHistory,
+ IndexChunkKey: StorageIndexChunkKey,
+ Find: FindStorage,
+ New: NewStorageChangeSet,
+ Encode: EncodeStorage,
+ Decode: DecodeStorage,
+ },
+}
+
+func AccountIndexChunkKey(key []byte, blockNumber uint64) []byte {
+ blockNumBytes := make([]byte, length.Addr+8)
+ copy(blockNumBytes, key)
+ binary.BigEndian.PutUint64(blockNumBytes[length.Addr:], blockNumber)
+
+ return blockNumBytes
+}
+
+func StorageIndexChunkKey(key []byte, blockNumber uint64) []byte {
+ //remove incarnation and add block number
+ blockNumBytes := make([]byte, length.Addr+length.Hash+8)
+ copy(blockNumBytes, key[:length.Addr])
+ copy(blockNumBytes[length.Addr:], key[length.Addr+length.Incarnation:])
+ binary.BigEndian.PutUint64(blockNumBytes[length.Addr+length.Hash:], blockNumber)
+
+ return blockNumBytes
+}
diff --git a/erigon-lib/kv/temporal/historyv2/find_by_history.go b/erigon-lib/kv/temporal/historyv2/find_by_history.go
new file mode 100644
index 00000000000..42c895f2f9f
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/find_by_history.go
@@ -0,0 +1,78 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package historyv2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/RoaringBitmap/roaring/roaring64"
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
+)
+
+func FindByHistory(indexC kv.Cursor, changesC kv.CursorDupSort, storage bool, key []byte, timestamp uint64) ([]byte, bool, error) {
+ var csBucket string
+ if storage {
+ csBucket = kv.StorageChangeSet
+ } else {
+ csBucket = kv.AccountChangeSet
+ }
+
+ k, v, seekErr := indexC.Seek(Mapper[csBucket].IndexChunkKey(key, timestamp))
+ if seekErr != nil {
+ return nil, false, seekErr
+ }
+
+ if k == nil {
+ return nil, false, nil
+ }
+ if storage {
+ if !bytes.Equal(k[:length.Addr], key[:length.Addr]) ||
+ !bytes.Equal(k[length.Addr:length.Addr+length.Hash], key[length.Addr+length.Incarnation:]) {
+ return nil, false, nil
+ }
+ } else {
+ if !bytes.HasPrefix(k, key) {
+ return nil, false, nil
+ }
+ }
+ index := roaring64.New()
+ if _, err := index.ReadFrom(bytes.NewReader(v)); err != nil {
+ return nil, false, err
+ }
+ found, ok := bitmapdb.SeekInBitmap64(index, timestamp)
+ changeSetBlock := found
+
+ var data []byte
+ var err error
+ if ok {
+ data, err = Mapper[csBucket].Find(changesC, changeSetBlock, key)
+ if err != nil {
+ if !errors.Is(err, ErrNotFound) {
+ return nil, false, fmt.Errorf("finding %x in the changeset %d: %w", key, changeSetBlock, err)
+ }
+ return nil, false, nil
+ }
+ } else {
+ return nil, false, nil
+ }
+
+ return data, true, nil
+}
diff --git a/erigon-lib/kv/temporal/historyv2/readme.md b/erigon-lib/kv/temporal/historyv2/readme.md
new file mode 100644
index 00000000000..5b35b0d8083
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/readme.md
@@ -0,0 +1,65 @@
+#Changesets encoding
+## Storage changeset encoding
+Storage encoding contains several blocks: Address hashes, Incarnations, Length of values, Values. AccountChangeSet is serialized in the following manner in order to facilitate binary search
+### Address hashes
+There are a lot of address hashes duplication in storage changeset when we have multiple changes in one contract. To avoid it we can store only unique address hashes.
+First 4 bytes contains number or unique contract hashes in one changeset.
+Then we store address hashes with sum of key hashes from first element.
+
+For example: for `addrHash1Inc1Key1, addrHash1Inc1Key2, addrHash2Inc1Key1, addrHash2Inc1Key3` it stores
+`2,addrHash1,2,addrHash2,4`
+### Incarnations
+Currently, there are a few not default incarnations(!=1) in current state. That was the reason why we store incarnation only if it's not equal to fffffffe(inverted 1).
+First part is 4 byte that contains number of not default incarnations
+Then we store array of id of address hash(4 byte) plus incarnation(8 byte)
+For example: for `addrHash1fffffffe..., addrHash1fffffffd...` it stores
+`1,1,fffffffd`
+
+### Values lengths
+The default value length is 32(common.Hash), but if we remove leading 0 then average size became ~7. Because a length of value could be from 0 to 32 we need this section to be able to find quite fast value by key.
+It is contiguous array of accumulating value indexes like `len(val0), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{N-1})`
+To reduce cost of it we have three numbers: numOfUint8, numOfUint16, numOfUint32. They can answer to the question: How many lengths of values we can put to uint8, uint16, uint32.
+This number could be huge if one of the contracts was suicided during block execution. Then we could have thousands of empty values, and we are able to store them in uint8(but it depends).
+For example for values: "ffa","","faa" it stores `3,0,0,3,3,6`
+
+### Values
+Contiguous array of values.
+
+### Finally
+Value | Type | Comment
+------------ | ------------- | -------------
+numOfUniqueElements | uint32 |
+Address hashes | [numOfUniqueElements]{[32]byte+[4]byte} | [numOfUniqueElements](common.Hash + uint32)
+numOfNotDefaultIncarnations | uint32 | mostly - 0
+Incarnations | [numOfNotDefaultIncarnations]{[4]byte + [8]byte} | []{idOfAddrHash(uint32) + incarnation(uint64)}
+Keys | [][32]byte | []common.Hash
+numOfUint8 | uint32 |
+numOfUint16 | uint32 |
+numOfUint32 | uint32 |
+Values lengths in uint8 | [numOfUint8]uint8 |
+Values lengths in uint16 | [numOfUint16]uint16 |
+Values lengths in uint32 | [numOfUint32]uint32 |
+Values | [][]byte |
+
+
+
+
+## Account changeset encoding
+AccountChangeSet is serialized in the following manner in order to facilitate binary search. Account changeset encoding contains several blocks: Keys, Length of values, Values. Key is address hash of account. Value is CBOR encoded account without storage root and code hash.
+
+### Keys
+The number of keys N (uint32, 4 bytes)
+Contiguous array of keys (N*32 bytes)
+### Values lengths
+ Contiguous array of accumulating value indexes:
+len(val0), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{N-1})
+(4*N bytes since the lengths are treated as uint32).
+### Values
+Contiguous array of values.
+### Finally
+Value | Type | Comment
+------------ | ------------- | -------------
+num of keys | uint32 |
+address hashes | [num of keys][32]byte | [num of keys]common.Hash
+values lengths | [num of keys]uint32
+values | [num of keys][]byte
diff --git a/erigon-lib/kv/temporal/historyv2/storage_changeset.go b/erigon-lib/kv/temporal/historyv2/storage_changeset.go
new file mode 100644
index 00000000000..61b435394c2
--- /dev/null
+++ b/erigon-lib/kv/temporal/historyv2/storage_changeset.go
@@ -0,0 +1,87 @@
+/*
+ Copyright 2022 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package historyv2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "sort"
+
+ "github.com/ledgerwatch/erigon-lib/common/length"
+ "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+var (
+ ErrNotFound = errors.New("not found")
+)
+
+func NewStorageChangeSet() *ChangeSet {
+ return &ChangeSet{
+ Changes: make([]Change, 0),
+ keyLen: length.Addr + length.Hash + length.Incarnation,
+ }
+}
+
+func EncodeStorage(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error {
+ sort.Sort(s)
+ keyPart := length.Addr + length.Incarnation
+ for _, cs := range s.Changes {
+ newK := make([]byte, length.BlockNum+keyPart)
+ binary.BigEndian.PutUint64(newK, blockN)
+ copy(newK[8:], cs.Key[:keyPart])
+ newV := make([]byte, 0, length.Hash+len(cs.Value))
+ newV = append(append(newV, cs.Key[keyPart:]...), cs.Value...)
+ if err := f(newK, newV); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func DecodeStorage(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
+ blockN := binary.BigEndian.Uint64(dbKey)
+ if len(dbValue) < length.Hash {
+ return 0, nil, nil, fmt.Errorf("storage changes purged for block %d", blockN)
+ }
+ k := make([]byte, length.Addr+length.Incarnation+length.Hash)
+ dbKey = dbKey[length.BlockNum:] // remove BlockN bytes
+ copy(k, dbKey)
+ copy(k[len(dbKey):], dbValue[:length.Hash])
+ v := dbValue[length.Hash:]
+ if len(v) == 0 {
+ v = nil
+ }
+
+ return blockN, k, v, nil
+}
+
+func FindStorage(c kv.CursorDupSort, blockNumber uint64, k []byte) ([]byte, error) {
+ addWithInc, loc := k[:length.Addr+length.Incarnation], k[length.Addr+length.Incarnation:]
+ seek := make([]byte, length.BlockNum+length.Addr+length.Incarnation)
+ binary.BigEndian.PutUint64(seek, blockNumber)
+ copy(seek[8:], addWithInc)
+ v, err := c.SeekBothRange(seek, loc)
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.HasPrefix(v, loc) {
+ return nil, ErrNotFound
+ }
+ return v[length.Hash:], nil
+}
diff --git a/erigon-lib/metrics/counter.go b/erigon-lib/metrics/counter.go
new file mode 100644
index 00000000000..3fd0415015b
--- /dev/null
+++ b/erigon-lib/metrics/counter.go
@@ -0,0 +1,65 @@
+package metrics
+
+import (
+ "fmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type Counter interface {
+ prometheus.Counter
+ ValueGetter
+ AddInt(v int)
+ AddUint64(v uint64)
+}
+
+type counter struct {
+ prometheus.Counter
+}
+
+// GetValue returns native float64 value stored by this counter
+func (c *counter) GetValue() float64 {
+ var m dto.Metric
+ if err := c.Write(&m); err != nil {
+ panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
+ }
+
+ return m.GetCounter().GetValue()
+}
+
+// GetValueUint64 returns native float64 value stored by this counter cast to
+// an uint64 value for convenience
+func (c *counter) GetValueUint64() uint64 {
+ return uint64(c.GetValue())
+}
+
+// AddInt adds an int value to the native float64 value stored by this counter.
+//
+// This is a convenience function for better UX which is safe for int values up
+// to 2^53 (mantissa bits).
+//
+// This is fine for all usages in our codebase, and it is
+// unlikely we will hit issues with this.
+//
+// If, however there is a new requirement that requires accuracy for more than
+// 2^53 we can implement our own simple intCounter that satisfies the Counter
+// interface.
+func (c *counter) AddInt(v int) {
+ c.Add(float64(v))
+}
+
+// AddUint64 adds an uint64 value to the native float64 value stored by this counter.
+//
+// This is a convenience function for better UX which is safe for int values up
+// to 2^53 (mantissa bits).
+//
+// This is fine for all usages in our codebase, and it is
+// unlikely we will hit issues with this.
+//
+// If, however there is a new requirement that requires accuracy for more than
+// 2^53 we can implement our own simple uintCounter that satisfies the Counter
+// interface.
+func (c *counter) AddUint64(v uint64) {
+ c.Add(float64(v))
+}
diff --git a/erigon-lib/metrics/duration_observer.go b/erigon-lib/metrics/duration_observer.go
new file mode 100644
index 00000000000..1b6b91d9060
--- /dev/null
+++ b/erigon-lib/metrics/duration_observer.go
@@ -0,0 +1,14 @@
+package metrics
+
+import (
+ "time"
+)
+
+type DurationObserver interface {
+ // ObserveDuration observes duration since start time
+ ObserveDuration(start time.Time)
+}
+
+func secondsSince(start time.Time) float64 {
+ return time.Since(start).Seconds()
+}
diff --git a/erigon-lib/metrics/gauge.go b/erigon-lib/metrics/gauge.go
new file mode 100644
index 00000000000..f9f7bc07751
--- /dev/null
+++ b/erigon-lib/metrics/gauge.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "fmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type Gauge interface {
+ prometheus.Gauge
+ ValueGetter
+ SetUint32(v uint32)
+ SetUint64(v uint64)
+ SetInt(v int)
+}
+
+type gauge struct {
+ prometheus.Gauge
+}
+
+// GetValue returns native float64 value stored by this gauge
+func (g *gauge) GetValue() float64 {
+ var m dto.Metric
+ if err := g.Write(&m); err != nil {
+ panic(fmt.Errorf("calling GetValue with invalid metric: %w", err))
+ }
+
+ return m.GetGauge().GetValue()
+}
+
+// GetValueUint64 returns native float64 value stored by this gauge cast to
+// an uint64 value for convenience
+func (g *gauge) GetValueUint64() uint64 {
+ return uint64(g.GetValue())
+}
+
+// SetUint32 sets gauge using an uint32 value. Note under the hood this
+// is a cast to float64 which is the native type of prometheus gauges.
+//
+// This is a convenience function for better UX.
+func (g *gauge) SetUint32(v uint32) {
+ g.Set(float64(v))
+}
+
+// SetUint64 sets gauge using an uint64 value. Note under the hood this
+// is a cast to float64 which is the native type of prometheus gauges.
+//
+// This is a convenience function for better UX which is safe for uints up
+// to 2^53 (mantissa bits).
+//
+// This is fine for all usages in our codebase, and it is
+// unlikely we will hit issues with this.
+//
+// If, however there is a new requirement that requires accuracy for more than
+// 2^53 we can implement our own simple uintGauge that satisfies the Gauge
+// interface.
+func (g *gauge) SetUint64(v uint64) {
+ g.Set(float64(v))
+}
+
+// SetInt sets gauge using an int value. Note under the hood this
+// is a cast to float64 which is the native type of prometheus gauges.
+//
+// This is a convenience function for better UX which is safe for uints up
+// to 2^53 (mantissa bits).
+//
+// This is fine for all usages in our codebase, and it is
+// unlikely we will hit issues with this.
+//
+// If, however there is a new requirement that requires accuracy for more than
+// 2^53 we can implement our own simple intGauge that satisfies the Gauge
+// interface.
+func (g *gauge) SetInt(v int) {
+ g.Set(float64(v))
+}
diff --git a/erigon-lib/metrics/histogram.go b/erigon-lib/metrics/histogram.go
new file mode 100644
index 00000000000..3477570eb27
--- /dev/null
+++ b/erigon-lib/metrics/histogram.go
@@ -0,0 +1,20 @@
+package metrics
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type Histogram interface {
+ prometheus.Histogram
+ DurationObserver
+}
+
+type histogram struct {
+ prometheus.Summary
+}
+
+func (h *histogram) ObserveDuration(start time.Time) {
+ h.Observe(secondsSince(start))
+}
diff --git a/erigon-lib/metrics/parsing.go b/erigon-lib/metrics/parsing.go
new file mode 100644
index 00000000000..34e23ccccb2
--- /dev/null
+++ b/erigon-lib/metrics/parsing.go
@@ -0,0 +1,111 @@
+package metrics
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func parseMetric(s string) (string, prometheus.Labels, error) {
+ if len(s) == 0 {
+ return "", nil, fmt.Errorf("metric cannot be empty")
+ }
+ n := strings.IndexByte(s, '{')
+ if n < 0 {
+ if err := validateIdent(s); err != nil {
+ return "", nil, err
+ }
+
+ return s, nil, nil
+ }
+ ident := s[:n]
+ s = s[n+1:]
+ if err := validateIdent(ident); err != nil {
+ return "", nil, err
+ }
+ if len(s) == 0 || s[len(s)-1] != '}' {
+ return "", nil, fmt.Errorf("missing closing curly brace at the end of %q", ident)
+ }
+
+ tags, err := parseTags(s[:len(s)-1])
+
+ if err != nil {
+ return "", nil, err
+ }
+
+ return ident, tags, nil
+}
+
+func parseTags(s string) (prometheus.Labels, error) {
+ if len(s) == 0 {
+ return nil, nil
+ }
+
+ var labels prometheus.Labels
+
+ for {
+ n := strings.IndexByte(s, '=')
+ if n < 0 {
+ return nil, fmt.Errorf("missing `=` after %q", s)
+ }
+ ident := s[:n]
+ s = s[n+1:]
+ if err := validateIdent(ident); err != nil {
+ return nil, err
+ }
+ if len(s) == 0 || s[0] != '"' {
+ return nil, fmt.Errorf("missing starting `\"` for %q value; tail=%q", ident, s)
+ }
+ s = s[1:]
+
+ value := ""
+
+ for {
+ n = strings.IndexByte(s, '"')
+ if n < 0 {
+ return nil, fmt.Errorf("missing trailing `\"` for %q value; tail=%q", ident, s)
+ }
+ m := n
+ for m > 0 && s[m-1] == '\\' {
+ m--
+ }
+ if (n-m)%2 == 1 {
+ value = value + s[:n]
+ s = s[n+1:]
+ continue
+ }
+ value = value + s[:n]
+ if labels == nil {
+ labels = prometheus.Labels{}
+ }
+ labels[ident] = value
+ s = s[n+1:]
+ if len(s) == 0 {
+ return labels, nil
+ }
+ if !strings.HasPrefix(s, ",") {
+ return nil, fmt.Errorf("missing `,` after %q value; tail=%q", ident, s)
+ }
+ s = skipSpace(s[1:])
+ break
+ }
+ }
+}
+
+func skipSpace(s string) string {
+ for len(s) > 0 && s[0] == ' ' {
+ s = s[1:]
+ }
+ return s
+}
+
+func validateIdent(s string) error {
+ if !identRegexp.MatchString(s) {
+ return fmt.Errorf("invalid identifier %q", s)
+ }
+ return nil
+}
+
+var identRegexp = regexp.MustCompile("^[a-zA-Z_:.][a-zA-Z0-9_:.]*$")
diff --git a/erigon-lib/metrics/register.go b/erigon-lib/metrics/register.go
new file mode 100644
index 00000000000..2ac13a6b4ca
--- /dev/null
+++ b/erigon-lib/metrics/register.go
@@ -0,0 +1,173 @@
+package metrics
+
+import (
+ "fmt"
+)
+
+// NewCounter registers and returns new counter with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned counter is safe to use from concurrent goroutines.
+func NewCounter(name string) Counter {
+ c, err := defaultSet.NewCounter(name)
+ if err != nil {
+ panic(fmt.Errorf("could not create new counter: %w", err))
+ }
+
+ return &counter{c}
+}
+
+// GetOrCreateCounter returns registered counter with the given name
+// or creates new counter if the registry doesn't contain counter with
+// the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned counter is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
+func GetOrCreateCounter(name string) Counter {
+ c, err := defaultSet.GetOrCreateCounter(name)
+ if err != nil {
+ panic(fmt.Errorf("could not get or create new counter: %w", err))
+ }
+
+ return &counter{c}
+}
+
+// NewGauge registers and returns gauge with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned gauge is safe to use from concurrent goroutines.
+func NewGauge(name string) Gauge {
+ g, err := defaultSet.NewGauge(name)
+ if err != nil {
+ panic(fmt.Errorf("could not create new gauge: %w", err))
+ }
+
+ return &gauge{g}
+}
+
+// GetOrCreateGauge returns registered gauge with the given name
+// or creates new gauge if the registry doesn't contain gauge with
+// the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned gauge is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
+func GetOrCreateGauge(name string) Gauge {
+ g, err := defaultSet.GetOrCreateGauge(name)
+ if err != nil {
+ panic(fmt.Errorf("could not get or create new gauge: %w", err))
+ }
+
+ return &gauge{g}
+}
+
+// NewSummary creates and returns new summary with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+func NewSummary(name string) Summary {
+ s, err := defaultSet.NewSummary(name)
+ if err != nil {
+ panic(fmt.Errorf("could not create new summary: %w", err))
+ }
+
+ return &summary{s}
+}
+
+// GetOrCreateSummary returns registered summary with the given name
+// or creates new summary if the registry doesn't contain summary with
+// the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
+func GetOrCreateSummary(name string) Summary {
+ s, err := defaultSet.GetOrCreateSummary(name)
+ if err != nil {
+ panic(fmt.Errorf("could not get or create new summary: %w", err))
+ }
+
+ return &summary{s}
+}
+
+// NewHistogram creates and returns new histogram with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned histogram is safe to use from concurrent goroutines.
+func NewHistogram(name string) Histogram {
+ h, err := defaultSet.NewHistogram(name)
+ if err != nil {
+ panic(fmt.Errorf("could not create new histogram: %w", err))
+ }
+
+ return &histogram{h}
+}
+
+// GetOrCreateHistogram returns registered histogram with the given name
+// or creates new histogram if the registry doesn't contain histogram with
+// the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned histogram is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram.
+func GetOrCreateHistogram(name string) Histogram {
+ h, err := defaultSet.GetOrCreateHistogram(name)
+ if err != nil {
+ panic(fmt.Errorf("could not get or create new histogram: %w", err))
+ }
+
+ return &histogram{h}
+}
diff --git a/erigon-lib/metrics/set.go b/erigon-lib/metrics/set.go
new file mode 100644
index 00000000000..2b0418fd2bd
--- /dev/null
+++ b/erigon-lib/metrics/set.go
@@ -0,0 +1,508 @@
+package metrics
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type namedMetric struct {
+ name string
+ metric prometheus.Metric
+ isAux bool
+}
+
+// Set is a set of metrics.
+//
+// Metrics belonging to a set are exported separately from global metrics.
+//
+// Set.WritePrometheus must be called for exporting metrics from the set.
+type Set struct {
+ mu sync.Mutex
+ a []*namedMetric
+ m map[string]*namedMetric
+}
+
+var defaultSet = NewSet()
+
+// NewSet creates new set of metrics.
+//
+// Pass the set to RegisterSet() function in order to export its metrics via global WritePrometheus() call.
+func NewSet() *Set {
+ return &Set{
+ m: make(map[string]*namedMetric),
+ }
+}
+
+func (s *Set) Describe(ch chan<- *prometheus.Desc) {
+ lessFunc := func(i, j int) bool {
+ return s.a[i].name < s.a[j].name
+ }
+ s.mu.Lock()
+ if !sort.SliceIsSorted(s.a, lessFunc) {
+ sort.Slice(s.a, lessFunc)
+ }
+ sa := append([]*namedMetric(nil), s.a...)
+ s.mu.Unlock()
+ for _, nm := range sa {
+ ch <- nm.metric.Desc()
+ }
+}
+
+func (s *Set) Collect(ch chan<- prometheus.Metric) {
+ lessFunc := func(i, j int) bool {
+ return s.a[i].name < s.a[j].name
+ }
+ s.mu.Lock()
+ if !sort.SliceIsSorted(s.a, lessFunc) {
+ sort.Slice(s.a, lessFunc)
+ }
+ sa := append([]*namedMetric(nil), s.a...)
+ s.mu.Unlock()
+ for _, nm := range sa {
+ ch <- nm.metric
+ }
+}
+
+// NewHistogram creates and returns new histogram in s with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned histogram is safe to use from concurrent goroutines.
+func (s *Set) NewHistogram(name string, help ...string) (prometheus.Histogram, error) {
+ h, err := newHistogram(name, help...)
+ if err != nil {
+ return nil, err
+ }
+
+ s.registerMetric(name, h)
+ return h, nil
+}
+
+func newHistogram(name string, help ...string) (prometheus.Histogram, error) {
+ name, labels, err := parseMetric(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: name,
+ ConstLabels: labels,
+ Help: strings.Join(help, " "),
+ }), nil
+}
+
+// GetOrCreateHistogram returns registered histogram in s with the given name
+// or creates new histogram if s doesn't contain histogram with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned histogram is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram.
+func (s *Set) GetOrCreateHistogram(name string, help ...string) (prometheus.Histogram, error) {
+ s.mu.Lock()
+ nm := s.m[name]
+ s.mu.Unlock()
+ if nm == nil {
+ metric, err := newHistogram(name, help...)
+ if err != nil {
+ return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
+ }
+
+ nmNew := &namedMetric{
+ name: name,
+ metric: metric,
+ }
+
+ s.mu.Lock()
+ nm = s.m[name]
+ if nm == nil {
+ nm = nmNew
+ s.m[name] = nm
+ s.a = append(s.a, nm)
+ }
+ s.mu.Unlock()
+ }
+
+ h, ok := nm.metric.(prometheus.Histogram)
+ if !ok {
+ return nil, fmt.Errorf("metric %q isn't a Histogram. It is %T", name, nm.metric)
+ }
+
+ return h, nil
+}
+
+// NewCounter registers and returns new counter with the given name in the s.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned counter is safe to use from concurrent goroutines.
+func (s *Set) NewCounter(name string, help ...string) (prometheus.Counter, error) {
+ c, err := newCounter(name, help...)
+ if err != nil {
+ return nil, err
+ }
+
+ s.registerMetric(name, c)
+ return c, nil
+}
+
+func newCounter(name string, help ...string) (prometheus.Counter, error) {
+ name, labels, err := parseMetric(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.NewCounter(prometheus.CounterOpts{
+ Name: name,
+ Help: strings.Join(help, " "),
+ ConstLabels: labels,
+ }), nil
+}
+
+// GetOrCreateCounter returns registered counter in s with the given name
+// or creates new counter if s doesn't contain counter with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned counter is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
+func (s *Set) GetOrCreateCounter(name string, help ...string) (prometheus.Counter, error) {
+ s.mu.Lock()
+ nm := s.m[name]
+ s.mu.Unlock()
+ if nm == nil {
+ // Slow path - create and register missing counter.
+ metric, err := newCounter(name, help...)
+ if err != nil {
+ return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
+ }
+
+ nmNew := &namedMetric{
+ name: name,
+ metric: metric,
+ }
+ s.mu.Lock()
+ nm = s.m[name]
+ if nm == nil {
+ nm = nmNew
+ s.m[name] = nm
+ s.a = append(s.a, nm)
+ }
+ s.mu.Unlock()
+ }
+
+ c, ok := nm.metric.(prometheus.Counter)
+ if !ok {
+ return nil, fmt.Errorf("metric %q isn't a Counter. It is %T", name, nm.metric)
+ }
+
+ return c, nil
+}
+
+// NewGauge registers and returns gauge with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// f must be safe for concurrent calls.
+//
+// The returned gauge is safe to use from concurrent goroutines.
+func (s *Set) NewGauge(name string, help ...string) (prometheus.Gauge, error) {
+ g, err := newGauge(name, help...)
+ if err != nil {
+ return nil, err
+ }
+
+ s.registerMetric(name, g)
+ return g, nil
+}
+
+func newGauge(name string, help ...string) (prometheus.Gauge, error) {
+ name, labels, err := parseMetric(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: name,
+ Help: strings.Join(help, " "),
+ ConstLabels: labels,
+ }), nil
+}
+
+// GetOrCreateGauge returns registered gauge with the given name in s
+// or creates new gauge if s doesn't contain gauge with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned gauge is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
+func (s *Set) GetOrCreateGauge(name string, help ...string) (prometheus.Gauge, error) {
+ s.mu.Lock()
+ nm := s.m[name]
+ s.mu.Unlock()
+ if nm == nil {
+ // Slow path - create and register missing gauge.
+ metric, err := newGauge(name, help...)
+ if err != nil {
+ return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
+ }
+
+ nmNew := &namedMetric{
+ name: name,
+ metric: metric,
+ }
+ s.mu.Lock()
+ nm = s.m[name]
+ if nm == nil {
+ nm = nmNew
+ s.m[name] = nm
+ s.a = append(s.a, nm)
+ }
+ s.mu.Unlock()
+ }
+
+ g, ok := nm.metric.(prometheus.Gauge)
+ if !ok {
+ return nil, fmt.Errorf("metric %q isn't a Gauge. It is %T", name, nm.metric)
+ }
+
+ return g, nil
+}
+
+const defaultSummaryWindow = 5 * time.Minute
+
+var defaultSummaryQuantiles = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.97: 0.003, 0.99: 0.001}
+
+// NewSummary creates and returns new summary with the given name in s.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+func (s *Set) NewSummary(name string, help ...string) (prometheus.Summary, error) {
+ return s.NewSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles, help...)
+}
+
+func newSummary(name string, window time.Duration, quantiles map[float64]float64, help ...string) (prometheus.Summary, error) {
+ name, labels, err := parseMetric(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.NewSummary(prometheus.SummaryOpts{
+ Name: name,
+ ConstLabels: labels,
+ Objectives: quantiles,
+ MaxAge: window,
+ Help: strings.Join(help, " "),
+ }), nil
+}
+
+// GetOrCreateSummary returns registered summary with the given name in s
+// or creates new summary if s doesn't contain summary with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
+func (s *Set) GetOrCreateSummary(name string, help ...string) (prometheus.Summary, error) {
+ return s.GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles, help...)
+}
+
+// NewSummaryExt creates and returns new summary in s with the given name,
+// window and quantiles.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles map[float64]float64, help ...string) (prometheus.Summary, error) {
+ metric, err := newSummary(name, window, quantiles, help...)
+ if err != nil {
+ return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
+ }
+
+ s.registerMetric(name, metric)
+ return metric, nil
+}
+
+// GetOrCreateSummaryExt returns registered summary with the given name,
+// window and quantiles in s or creates new summary if s doesn't
+// contain summary with the given name.
+//
+// name must be valid Prometheus-compatible metric with possible labels.
+// For instance,
+//
+// - foo
+// - foo{bar="baz"}
+// - foo{bar="baz",aaa="b"}
+//
+// The returned summary is safe to use from concurrent goroutines.
+//
+// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt.
+func (s *Set) GetOrCreateSummaryExt(name string, window time.Duration, quantiles map[float64]float64, help ...string) (prometheus.Summary, error) {
+ s.mu.Lock()
+ nm := s.m[name]
+ s.mu.Unlock()
+ if nm == nil {
+ // Slow path - create and register missing summary.
+ metric, err := newSummary(name, window, quantiles, help...)
+ if err != nil {
+ return nil, fmt.Errorf("invalid metric name %q: %w", name, err)
+ }
+
+ nmNew := &namedMetric{
+ name: name,
+ metric: metric,
+ }
+ s.mu.Lock()
+ nm = s.m[name]
+ if nm == nil {
+ nm = nmNew
+ s.m[name] = nm
+ s.a = append(s.a, nm)
+ }
+ s.mu.Unlock()
+ }
+
+ sm, ok := nm.metric.(prometheus.Summary)
+ if !ok {
+ return nil, fmt.Errorf("metric %q isn't a Summary. It is %T", name, nm.metric)
+ }
+
+ return sm, nil
+}
+
+func (s *Set) registerMetric(name string, m prometheus.Metric) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.mustRegisterLocked(name, m)
+}
+
+// mustRegisterLocked registers given metric with the given name.
+//
+// Panics if the given name was already registered before.
+func (s *Set) mustRegisterLocked(name string, m prometheus.Metric) {
+ _, ok := s.m[name]
+ if !ok {
+ nm := &namedMetric{
+ name: name,
+ metric: m,
+ }
+ s.m[name] = nm
+ s.a = append(s.a, nm)
+ }
+ if ok {
+ panic(fmt.Errorf("metric %q is already registered", name))
+ }
+}
+
+// UnregisterMetric removes metric with the given name from s.
+//
+// True is returned if the metric has been removed.
+// False is returned if the given metric is missing in s.
+func (s *Set) UnregisterMetric(name string) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ nm, ok := s.m[name]
+ if !ok {
+ return false
+ }
+ return s.unregisterMetricLocked(nm)
+}
+
+func (s *Set) unregisterMetricLocked(nm *namedMetric) bool {
+ name := nm.name
+ delete(s.m, name)
+
+ deleteFromList := func(metricName string) {
+ for i, nm := range s.a {
+ if nm.name == metricName {
+ s.a = append(s.a[:i], s.a[i+1:]...)
+ return
+ }
+ }
+ panic(fmt.Errorf("cannot find metric %q in the list of registered metrics", name))
+ }
+
+ // remove metric from s.a
+ deleteFromList(name)
+
+ return true
+}
+
+// UnregisterAllMetrics de-registers all metrics registered in s.
+func (s *Set) UnregisterAllMetrics() {
+ metricNames := s.ListMetricNames()
+ for _, name := range metricNames {
+ s.UnregisterMetric(name)
+ }
+}
+
+// ListMetricNames returns sorted list of all the metrics in s.
+func (s *Set) ListMetricNames() []string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ metricNames := make([]string, 0, len(s.m))
+ for _, nm := range s.m {
+ if nm.isAux {
+ continue
+ }
+ metricNames = append(metricNames, nm.name)
+ }
+ sort.Strings(metricNames)
+ return metricNames
+}
diff --git a/erigon-lib/metrics/setup.go b/erigon-lib/metrics/setup.go
new file mode 100644
index 00000000000..e3c6a5e4ef2
--- /dev/null
+++ b/erigon-lib/metrics/setup.go
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/ledgerwatch/log/v3"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var EnabledExpensive = false
+
+// Setup starts a dedicated metrics server at the given address.
+// This function enables metrics reporting separate from pprof.
+func Setup(address string, logger log.Logger) *http.ServeMux {
+ prometheus.DefaultRegisterer.MustRegister(defaultSet)
+
+ prometheusMux := http.NewServeMux()
+ prometheusMux.Handle("/debug/metrics/prometheus", promhttp.Handler())
+
+ promServer := &http.Server{
+ Addr: address,
+ Handler: prometheusMux,
+ }
+
+ go func() {
+ if err := promServer.ListenAndServe(); err != nil {
+ logger.Error("Failure in running Prometheus server", "err", err)
+ }
+ }()
+
+ logger.Info("Enabling metrics export to prometheus", "path", fmt.Sprintf("http://%s/debug/metrics/prometheus", address))
+ return prometheusMux
+}
diff --git a/erigon-lib/metrics/summary.go b/erigon-lib/metrics/summary.go
new file mode 100644
index 00000000000..615c487edef
--- /dev/null
+++ b/erigon-lib/metrics/summary.go
@@ -0,0 +1,20 @@
+package metrics
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type Summary interface {
+ prometheus.Summary
+ DurationObserver
+}
+
+type summary struct {
+ prometheus.Summary
+}
+
+func (s *summary) ObserveDuration(start time.Time) {
+ s.Observe(secondsSince(start))
+}
diff --git a/metrics/methelp/timer.go b/erigon-lib/metrics/timer.go
similarity index 75%
rename from metrics/methelp/timer.go
rename to erigon-lib/metrics/timer.go
index 3aefda89d1b..1608e666d51 100644
--- a/metrics/methelp/timer.go
+++ b/erigon-lib/metrics/timer.go
@@ -1,48 +1,47 @@
-package methelp
+package metrics
import (
"fmt"
"strings"
"time"
-
- "github.com/VictoriaMetrics/metrics"
)
type HistTimer struct {
- *metrics.Histogram
-
+ Histogram
start time.Time
-
- name string
+ name string
}
func NewHistTimer(name string) *HistTimer {
rawName := strings.Split(name, "{")
return &HistTimer{
- Histogram: metrics.GetOrCreateCompatibleHistogram(name),
+ Histogram: GetOrCreateHistogram(name),
start: time.Now(),
name: rawName[0],
}
}
func (h *HistTimer) PutSince() {
- h.Histogram.UpdateDuration(h.start)
+ h.Histogram.ObserveDuration(h.start)
}
func (h *HistTimer) Tag(pairs ...string) *HistTimer {
if len(pairs)%2 != 0 {
pairs = append(pairs, "UNEQUAL_KEY_VALUE_TAGS")
}
- toJoin := []string{}
+
+ var toJoin []string
for i := 0; i < len(pairs); i = i + 2 {
toJoin = append(toJoin, fmt.Sprintf(`%s="%s"`, pairs[i], pairs[i+1]))
}
+
tags := ""
if len(toJoin) > 0 {
tags = "{" + strings.Join(toJoin, ",") + "}"
}
+
return &HistTimer{
- Histogram: metrics.GetOrCreateCompatibleHistogram(h.name + tags),
+ Histogram: GetOrCreateHistogram(h.name + tags),
start: time.Now(),
name: h.name,
}
diff --git a/erigon-lib/metrics/value_getter.go b/erigon-lib/metrics/value_getter.go
new file mode 100644
index 00000000000..e4f24e20505
--- /dev/null
+++ b/erigon-lib/metrics/value_getter.go
@@ -0,0 +1,6 @@
+package metrics
+
+type ValueGetter interface {
+ GetValue() float64
+ GetValueUint64() uint64
+}
diff --git a/erigon-lib/mmap/mmap_unix.go b/erigon-lib/mmap/mmap_unix.go
new file mode 100644
index 00000000000..4c98a05b071
--- /dev/null
+++ b/erigon-lib/mmap/mmap_unix.go
@@ -0,0 +1,112 @@
+//go:build !windows
+
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mmap
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const MaxMapSize = 0xFFFFFFFFFFFF
+
+// mmap memory maps a DB's data file.
+func MmapRw(f *os.File, size int) ([]byte, *[MaxMapSize]byte, error) {
+ // Map the data file to memory.
+ mmapHandle1, err := unix.Mmap(int(f.Fd()), 0, size, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = unix.Madvise(mmapHandle1, syscall.MADV_RANDOM)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return nil, nil, fmt.Errorf("madvise: %w", err)
+ }
+ mmapHandle2 := (*[MaxMapSize]byte)(unsafe.Pointer(&mmapHandle1[0]))
+ return mmapHandle1, mmapHandle2, nil
+}
+func Mmap(f *os.File, size int) ([]byte, *[MaxMapSize]byte, error) {
+ // Map the data file to memory.
+ mmapHandle1, err := unix.Mmap(int(f.Fd()), 0, size, syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = unix.Madvise(mmapHandle1, syscall.MADV_RANDOM)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return nil, nil, fmt.Errorf("madvise: %w", err)
+ }
+ mmapHandle2 := (*[MaxMapSize]byte)(unsafe.Pointer(&mmapHandle1[0]))
+ return mmapHandle1, mmapHandle2, nil
+}
+
+func MadviseSequential(mmapHandle1 []byte) error {
+ err := unix.Madvise(mmapHandle1, syscall.MADV_SEQUENTIAL)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %w", err)
+ }
+ return nil
+}
+
+func MadviseNormal(mmapHandle1 []byte) error {
+ err := unix.Madvise(mmapHandle1, syscall.MADV_NORMAL)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %w", err)
+ }
+ return nil
+}
+
+func MadviseWillNeed(mmapHandle1 []byte) error {
+ err := unix.Madvise(mmapHandle1, syscall.MADV_WILLNEED)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %w", err)
+ }
+ return nil
+}
+
+func MadviseRandom(mmapHandle1 []byte) error {
+ err := unix.Madvise(mmapHandle1, syscall.MADV_RANDOM)
+ if err != nil && !errors.Is(err, syscall.ENOSYS) {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %w", err)
+ }
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func Munmap(mmapHandle1 []byte, _ *[MaxMapSize]byte) error {
+ // Ignore the unmap if we have no mapped data.
+ if mmapHandle1 == nil {
+ return nil
+ }
+ // Unmap using the original byte slice.
+ err := unix.Munmap(mmapHandle1)
+ return err
+}
diff --git a/erigon-lib/mmap/mmap_windows.go b/erigon-lib/mmap/mmap_windows.go
new file mode 100644
index 00000000000..b343ebb4024
--- /dev/null
+++ b/erigon-lib/mmap/mmap_windows.go
@@ -0,0 +1,68 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mmap
+
+import (
+ "os"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const MaxMapSize = 0xFFFFFFFFFFFF
+
+func Mmap(f *os.File, size int) ([]byte, *[MaxMapSize]byte, error) {
+ // Open a file mapping handle.
+ sizelo := uint32(size >> 32)
+ sizehi := uint32(size) & 0xffffffff
+ h, errno := windows.CreateFileMapping(windows.Handle(f.Fd()), nil, windows.PAGE_READONLY, sizelo, sizehi, nil)
+ if h == 0 {
+ return nil, nil, os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Create the memory map.
+ addr, errno := windows.MapViewOfFile(h, windows.FILE_MAP_READ, 0, 0, uintptr(size))
+ if addr == 0 {
+ return nil, nil, os.NewSyscallError("MapViewOfFile", errno)
+ }
+
+ // Close mapping handle.
+ if err := windows.CloseHandle(windows.Handle(h)); err != nil {
+ return nil, nil, os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Convert to a byte array.
+ mmapHandle2 := ((*[MaxMapSize]byte)(unsafe.Pointer(addr)))
+ return mmapHandle2[:size], mmapHandle2, nil
+}
+
+func MadviseSequential(mmapHandle1 []byte) error { return nil }
+func MadviseNormal(mmapHandle1 []byte) error { return nil }
+func MadviseWillNeed(mmapHandle1 []byte) error { return nil }
+func MadviseRandom(mmapHandle1 []byte) error { return nil }
+
+func Munmap(_ []byte, mmapHandle2 *[MaxMapSize]byte) error {
+ if mmapHandle2 == nil {
+ return nil
+ }
+
+ addr := (uintptr)(unsafe.Pointer(&mmapHandle2[0]))
+ if err := windows.UnmapViewOfFile(addr); err != nil {
+ return os.NewSyscallError("UnmapViewOfFile", err)
+ }
+ return nil
+}
diff --git a/erigon-lib/mmap/total_memory.go b/erigon-lib/mmap/total_memory.go
new file mode 100644
index 00000000000..75a2f38d7d8
--- /dev/null
+++ b/erigon-lib/mmap/total_memory.go
@@ -0,0 +1,22 @@
+package mmap
+
+import (
+ "runtime/debug"
+
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/pbnjay/memory"
+)
+
+func TotalMemory() uint64 {
+ mem := memory.TotalMemory()
+
+ if cgroupsMemLimit, err := cgroupsMemoryLimit(); (err == nil) && (cgroupsMemLimit > 0) {
+ mem = cmp.Min(mem, cgroupsMemLimit)
+ }
+
+ if goMemLimit := debug.SetMemoryLimit(-1); goMemLimit > 0 {
+ mem = cmp.Min(mem, uint64(goMemLimit))
+ }
+
+ return mem
+}
diff --git a/erigon-lib/mmap/total_memory_cgroups.go b/erigon-lib/mmap/total_memory_cgroups.go
new file mode 100644
index 00000000000..dbca502d02f
--- /dev/null
+++ b/erigon-lib/mmap/total_memory_cgroups.go
@@ -0,0 +1,118 @@
+//go:build linux
+
+/*
+https://github.com/raulk/go-watchdog
+https://github.com/elee1766/go-watchdog
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+package mmap
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/containerd/cgroups/v3"
+ "github.com/containerd/cgroups/v3/cgroup1"
+ "github.com/containerd/cgroups/v3/cgroup2"
+)
+
+// cgroupsMemoryLimit will try to discover
+// the memory limit from the cgroup of the process (derived from /proc/self/cgroup),
+// or from the root cgroup path if the PID == 1 (which indicates that the process
+// is running in a container).
+//
+// Memory usage is calculated by querying the cgroup stats.
+//
+// This function will return an error immediately if the OS does not support cgroups,
+// or if another error occurs during initialization.
+func cgroupsMemoryLimit() (uint64, error) {
+ switch cgroups.Mode() {
+ case cgroups.Unified:
+ return cgroupsV2MemoryLimit()
+ case cgroups.Legacy:
+ return cgroupsV1MemoryLimit()
+ case cgroups.Unavailable:
+ fallthrough
+ default:
+ return 0, errors.New("cgroups not supported in this environment")
+ }
+}
+
+func cgroupsV1MemoryLimit() (uint64, error) {
+ // use self path unless our PID is 1, in which case we're running inside
+ // a container and our limits are in the root path.
+ path := cgroup1.NestedPath("")
+ if pid := os.Getpid(); pid == 1 {
+ path = cgroup1.RootPath
+ }
+
+ cgroup, err := cgroup1.Load(path, cgroup1.WithHiearchy(func() ([]cgroup1.Subsystem, error) {
+ system, err := cgroup1.Default()
+ if err != nil {
+ return nil, err
+ }
+ var out []cgroup1.Subsystem
+ for _, v := range system {
+ switch v.Name() {
+ case cgroup1.Memory:
+ out = append(out, v)
+ }
+ }
+ return out, nil
+ }))
+ if err != nil {
+ return 0, fmt.Errorf("failed to load cgroup1 for process: %w", err)
+ }
+
+ if stat, err := cgroup.Stat(); err != nil {
+ return 0, fmt.Errorf("failed to load memory cgroup1 stats: %w", err)
+ } else if stat.Memory == nil || stat.Memory.Usage == nil {
+ return 0, fmt.Errorf("cgroup1 memory stats are nil; aborting")
+ } else {
+ return stat.Memory.Usage.Limit, nil
+ }
+}
+
+func cgroupsV2MemoryLimit() (uint64, error) {
+ // use self path unless our PID is 1, in which case we're running inside
+ // a container and our limits are in the root path.
+ pid := os.Getpid()
+ path, err := cgroup2.PidGroupPath(pid)
+ if err != nil {
+ return 0, fmt.Errorf("failed to load cgroup2 path for process pid %d: %w", pid, err)
+ }
+
+ cgroup, err := cgroup2.Load(path)
+ if err != nil {
+ return 0, fmt.Errorf("failed to load cgroup2 for process: %w", err)
+ }
+
+ if stat, err := cgroup.Stat(); err != nil {
+ return 0, fmt.Errorf("failed to load cgroup2 memory stats: %w", err)
+ } else if stat.Memory == nil {
+ return 0, fmt.Errorf("cgroup2 memory stats are nil; aborting")
+ } else {
+ return stat.Memory.UsageLimit, nil
+ }
+}
diff --git a/erigon-lib/mmap/total_memory_cgroups_stub.go b/erigon-lib/mmap/total_memory_cgroups_stub.go
new file mode 100644
index 00000000000..0d921aa905b
--- /dev/null
+++ b/erigon-lib/mmap/total_memory_cgroups_stub.go
@@ -0,0 +1,11 @@
+//go:build !linux
+
+package mmap
+
+import (
+ "errors"
+)
+
+func cgroupsMemoryLimit() (uint64, error) {
+ return 0, errors.New("cgroups not supported in this environment")
+}
diff --git a/erigon-lib/patricia/patricia.go b/erigon-lib/patricia/patricia.go
new file mode 100644
index 00000000000..f2ccc86c51b
--- /dev/null
+++ b/erigon-lib/patricia/patricia.go
@@ -0,0 +1,795 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package patricia
+
+import (
+ "fmt"
+ "math/bits"
+ "strings"
+
+ "github.com/ledgerwatch/erigon-lib/common/cmp"
+ "github.com/ledgerwatch/erigon-lib/sais"
+ "golang.org/x/exp/slices"
+)
+
+// Implementation of paticia tree for efficient search of substrings from a dictionary in a given string
+type node struct {
+ val interface{} // value associated with the key
+ n0 *node
+ n1 *node
+ p0 uint32
+ p1 uint32
+}
+
+func tostr(x uint32) string {
+ str := fmt.Sprintf("%b", x)
+ for len(str) < 32 {
+ str = "0" + str
+ }
+ return str[:x&0x1f]
+}
+
+// print assumes values are byte slices
+func (n *node) print(sb *strings.Builder, indent string) {
+ sb.WriteString(indent)
+ fmt.Fprintf(sb, "%p ", n)
+ sb.WriteString(tostr(n.p0))
+ sb.WriteString("\n")
+ if n.n0 != nil {
+ n.n0.print(sb, indent+" ")
+ }
+ sb.WriteString(indent)
+ fmt.Fprintf(sb, "%p ", n)
+ sb.WriteString(tostr(n.p1))
+ sb.WriteString("\n")
+ if n.n1 != nil {
+ n.n1.print(sb, indent+" ")
+ }
+ if n.val != nil {
+ sb.WriteString(indent)
+ sb.WriteString("val:")
+ fmt.Fprintf(sb, " %x", n.val.([]byte))
+ sb.WriteString("\n")
+ }
+}
+
+func (n *node) String() string {
+ var sb strings.Builder
+ n.print(&sb, "")
+ return sb.String()
+}
+
+// state represent a position anywhere inside patricia tree
+// position can be identified by combination of node, and the partitioning
+// of that node's p0 or p1 into head and tail.
+// As with p0 and p1, head and tail are encoded as follows:
+// lowest 5 bits encode the length in bits, and the remaining 27 bits
+// encode the actual head or tail.
+// For example, if the position is at the beginning of a node,
+// head would be zero, and tail would be equal to either p0 or p1,
+// depending on whether the position corresponds to going left (0) or right (1).
+type state struct {
+ n *node
+ head uint32
+ tail uint32
+}
+
+func (s *state) String() string {
+ return fmt.Sprintf("%p head %s tail %s", s.n, tostr(s.head), tostr(s.tail))
+}
+
+func (s *state) reset(n *node) {
+ s.n = n
+ s.head = 0
+ s.tail = 0
+}
+
+func makestate(n *node) *state {
+ return &state{n: n, head: 0, tail: 0}
+}
+
+// transition consumes next byte of the key, moves the state to corresponding
+// node of the patricia tree and returns divergence prefix (0 if there is no divergence)
+func (s *state) transition(b byte, readonly bool) uint32 {
+ bitsLeft := 8 // Bits in b to process
+ b32 := uint32(b) << 24
+ for bitsLeft > 0 {
+ if s.head == 0 {
+ // tail has not been determined yet, do it now
+ if b32&0x80000000 == 0 {
+ s.tail = s.n.p0
+ } else {
+ s.tail = s.n.p1
+ }
+ }
+ if s.tail == 0 {
+ // state positioned at the end of the current node
+ return b32 | uint32(bitsLeft)
+ }
+ tailLen := int(s.tail & 0x1f)
+ firstDiff := bits.LeadingZeros32(s.tail ^ b32) // First bit where b32 and tail are different
+ if firstDiff < bitsLeft {
+ // divergence (where the key being searched and the existing structure of patricia tree becomes incompatible) is within currently supplied byte of the search key, b
+ if firstDiff >= tailLen {
+ // divergence is within currently supplied byte of the search key, b, but outside of the current node
+ bitsLeft -= tailLen
+ b32 <<= tailLen
+ // Need to switch to the next node
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ if s.n.n0 == nil {
+ panic("")
+ }
+ s.n = s.n.n0
+ } else {
+ if s.n.n1 == nil {
+ panic("")
+ }
+ s.n = s.n.n1
+ }
+ s.head = 0
+ s.tail = 0
+ } else {
+ // divergence is within currently supplied byte of the search key, b, and within the current node
+ bitsLeft -= firstDiff
+ b32 <<= firstDiff
+ // there is divergence, move head and tail
+ mask := ^(uint32(1)<<(32-firstDiff) - 1)
+ s.head |= (s.tail & mask) >> (s.head & 0x1f)
+ s.head += uint32(firstDiff)
+ s.tail = (s.tail&0xffffffe0)<> (s.head & 0x1f)
+ s.head += uint32(bitsLeft)
+ s.tail = (s.tail&0xffffffe0)< 27 {
+ mask := ^(uint32(1)<<(headLen+5) - 1)
+ //fmt.Printf("mask = %b\n", mask)
+ s.head |= (d32 & mask) >> headLen
+ s.head += uint32(27 - headLen)
+ //fmt.Printf("s.head %s\n", tostr(s.head))
+ var dn node
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ s.n.p0 = s.head
+ s.n.n0 = &dn
+ } else {
+ s.n.p1 = s.head
+ s.n.n1 = &dn
+ }
+ s.n = &dn
+ s.head = 0
+ s.tail = 0
+ d32 <<= 27 - headLen
+ dLen -= (27 - headLen)
+ headLen = 0
+ }
+ //fmt.Printf("headLen %d + dLen %d = %d\n", headLen, dLen, headLen+dLen)
+ mask := ^(uint32(1)<<(32-dLen) - 1)
+ //fmt.Printf("mask = %b\n", mask)
+ s.head |= (d32 & mask) >> headLen
+ s.head += uint32(dLen)
+ //fmt.Printf("s.head %s\n", tostr(s.head))
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ s.n.p0 = s.head
+ } else {
+ s.n.p1 = s.head
+ }
+ return
+ }
+ // create a new node
+ var dn node
+ if divergence&0x80000000 == 0 {
+ dn.p0 = divergence
+ dn.p1 = s.tail
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ dn.n1 = s.n.n0
+ } else {
+ dn.n1 = s.n.n1
+ }
+ } else {
+ dn.p1 = divergence
+ dn.p0 = s.tail
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ dn.n0 = s.n.n0
+ } else {
+ dn.n0 = s.n.n1
+ }
+ }
+ if (s.head == 0 && s.tail&0x80000000 == 0) || (s.head != 0 && s.head&0x80000000 == 0) {
+ s.n.n0 = &dn
+ s.n.p0 = s.head
+ } else {
+ s.n.n1 = &dn
+ s.n.p1 = s.head
+ }
+ s.n = &dn
+ s.head = divergence
+ s.tail = 0
+}
+
+func (n *node) insert(key []byte, value interface{}) {
+ s := makestate(n)
+ for _, b := range key {
+ divergence := s.transition(b, false /* readonly */)
+ if divergence != 0 {
+ s.diverge(divergence)
+ }
+ }
+ s.insert(value)
+}
+
+func (s *state) insert(value interface{}) {
+ if s.tail != 0 {
+ s.diverge(0)
+ }
+ if s.head != 0 {
+ var dn node
+ if s.head&0x80000000 == 0 {
+ s.n.n0 = &dn
+ } else {
+ s.n.n1 = &dn
+ }
+ s.n = &dn
+ s.head = 0
+ }
+ //fmt.Printf("set val to %p\n", s.n)
+ s.n.val = value
+}
+
+func (n *node) get(key []byte) (interface{}, bool) {
+ s := makestate(n)
+ for _, b := range key {
+ divergence := s.transition(b, true /* readonly */)
+ //fmt.Printf("get %x, b = %x, divergence = %s\nstate=%s\n", key, b, tostr(divergence), s)
+ if divergence != 0 {
+ return nil, false
+ }
+ }
+ if s.tail != 0 {
+ return nil, false
+ }
+ return s.n.val, s.n.val != nil
+}
+
+type PatriciaTree struct {
+ root node
+}
+
+func (pt *PatriciaTree) Insert(key []byte, value interface{}) {
+ //fmt.Printf("%p Insert [%x]\n", pt, key)
+ pt.root.insert(key, value)
+}
+
+func (pt *PatriciaTree) Get(key []byte) (interface{}, bool) {
+ return pt.root.get(key)
+}
+
+type Match struct {
+ Val interface{}
+ Start int
+ End int
+}
+
+type Matches []Match
+
+func (m Matches) Len() int {
+ return len(m)
+}
+
+func (m Matches) Less(i, j int) bool {
+ return m[i].Start < m[j].Start
+}
+
+func (m *Matches) Swap(i, j int) {
+ (*m)[i], (*m)[j] = (*m)[j], (*m)[i]
+}
+
+type MatchFinder struct {
+ pt *PatriciaTree
+ s state
+ matches []Match
+}
+
+func NewMatchFinder(pt *PatriciaTree) *MatchFinder {
+ return &MatchFinder{pt: pt}
+}
+
+type MatchFinder2 struct {
+ top *node // Top of nodeStack
+ pt *PatriciaTree
+ nodeStack []*node
+ matchStack []Match
+ matches Matches
+ sa []int32
+ lcp []int32
+ inv []int32
+ headLen int
+ tailLen int
+ side int // 0, 1, or 2 (if side is not determined yet)
+}
+
+func NewMatchFinder2(pt *PatriciaTree) *MatchFinder2 {
+ return &MatchFinder2{pt: pt, top: &pt.root, nodeStack: []*node{&pt.root}, side: 2}
+}
+
+// unfold consumes next byte of the key, moves the state to corresponding
+// node of the patricia tree and returns divergence prefix (0 if there is no divergence)
+func (mf2 *MatchFinder2) unfold(b byte) uint32 {
+ //fmt.Printf("unfold %x, headLen = %d, tailLen = %d, nodeStackLen = %d\n", b, mf2.headLen, mf2.tailLen, len(mf2.nodeStack))
+ //var sb strings.Builder
+ bitsLeft := 8 // Bits in b to process
+ b32 := uint32(b) << 24
+ for bitsLeft > 0 {
+ if mf2.side == 2 {
+ // tail has not been determined yet, do it now
+ if b32&0x80000000 == 0 {
+ mf2.side = 0
+ mf2.headLen = 0
+ mf2.tailLen = int(mf2.top.p0 & 0x1f)
+ } else {
+ mf2.side = 1
+ mf2.headLen = 0
+ mf2.tailLen = int(mf2.top.p1 & 0x1f)
+ }
+ if mf2.tailLen == 0 {
+ // state positioned at the end of the current node
+ mf2.side = 2
+ //fmt.Fprintf(&sb, "1 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ }
+ if mf2.tailLen == 0 {
+ // Need to switch to the next node
+ if mf2.side == 0 {
+ if mf2.top.n0 == nil {
+ //fmt.Fprintf(&sb, "2 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ mf2.nodeStack = append(mf2.nodeStack, mf2.top.n0)
+ mf2.top = mf2.top.n0
+ //fmt.Fprintf(&sb, "a1,0,bl=%d ", bitsLeft)
+ } else if mf2.side == 1 {
+ if mf2.top.n1 == nil {
+ //fmt.Fprintf(&sb, "3 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ mf2.nodeStack = append(mf2.nodeStack, mf2.top.n1)
+ mf2.top = mf2.top.n1
+ //fmt.Fprintf(&sb, "a1,1,bl=%d ", bitsLeft)
+ } else {
+ panic("")
+ }
+ mf2.headLen = 0
+ mf2.side = 2
+ }
+ var tail uint32
+ if mf2.side == 0 {
+ tail = (mf2.top.p0 & 0xffffffe0) << mf2.headLen
+ } else if mf2.side == 1 {
+ tail = (mf2.top.p1 & 0xffffffe0) << mf2.headLen
+ } else {
+ return b32 | uint32(bitsLeft)
+ }
+ firstDiff := bits.LeadingZeros32(tail ^ b32) // First bit where b32 and tail are different
+ if firstDiff < bitsLeft {
+ // divergence (where the key being searched and the existing structure of patricia tree becomes incompatible) is within currently supplied byte of the search key, b
+ if firstDiff >= mf2.tailLen {
+ // divergence is within currently supplied byte of the search key, b, but outside of the current node
+ //fmt.Fprintf(&sb, "4,tl=%d ", mf2.tailLen)
+ bitsLeft -= mf2.tailLen
+ b32 <<= mf2.tailLen
+ mf2.headLen += mf2.tailLen
+ mf2.tailLen = 0
+ } else {
+ // divergence is within currently supplied byte of the search key, b, and within the current node
+ bitsLeft -= firstDiff
+ b32 <<= firstDiff
+ // there is divergence, move head and tail
+ mf2.tailLen -= firstDiff
+ mf2.headLen += firstDiff
+ //fmt.Fprintf(&sb, "5 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ } else if mf2.tailLen < bitsLeft {
+ // divergence is outside of currently supplied byte of the search key, b
+ bitsLeft -= mf2.tailLen
+ b32 <<= mf2.tailLen
+ mf2.headLen += mf2.tailLen
+ mf2.tailLen = 0
+ //fmt.Fprintf(&sb, "6 ")
+ } else {
+ // key byte is consumed, but stay on the same node
+ //fmt.Fprintf(&sb, "7,bl=%d ", bitsLeft)
+ mf2.tailLen -= bitsLeft
+ mf2.headLen += bitsLeft
+ bitsLeft = 0
+ b32 = 0
+ }
+ if mf2.tailLen == 0 {
+ // Need to switch to the next node
+ if mf2.side == 0 {
+ if mf2.top.n0 == nil {
+ //fmt.Fprintf(&sb, "8 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ mf2.nodeStack = append(mf2.nodeStack, mf2.top.n0)
+ mf2.top = mf2.top.n0
+ //fmt.Fprintf(&sb, "a2,0,bl=%d ", bitsLeft)
+ } else if mf2.side == 1 {
+ if mf2.top.n1 == nil {
+ //fmt.Fprintf(&sb, "9 ")
+ //fmt.Printf("%s\n", sb.String())
+ return b32 | uint32(bitsLeft)
+ }
+ mf2.nodeStack = append(mf2.nodeStack, mf2.top.n1)
+ mf2.top = mf2.top.n1
+ //fmt.Fprintf(&sb, "a2,1,bl=%d ", bitsLeft)
+ } else {
+ panic("")
+ }
+ mf2.headLen = 0
+ mf2.side = 2
+ }
+ }
+ //fmt.Printf("%s\n", sb.String())
+ return 0
+}
+
+// unfold moves the match finder back up the stack by specified number of bits
+func (mf2 *MatchFinder2) fold(bits int) {
+ //fmt.Printf("fold %d, headLen = %d, tailLen = %d, nodeStackLen = %d\n", bits, mf2.headLen, mf2.tailLen, len(mf2.nodeStack))
+ bitsLeft := bits
+ for bitsLeft > 0 {
+ //fmt.Printf("headLen = %d, bitsLeft = %d, head = %b, tail = %b, nodeStackLen = %d\n", headLen, bitsLeft, mf2.head, mf2.tail, len(mf2.nodeStack))
+ if mf2.headLen == bitsLeft {
+ mf2.headLen = 0
+ mf2.tailLen = 0
+ mf2.side = 2
+ bitsLeft = 0
+ } else if mf2.headLen >= bitsLeft {
+ // folding only affects top node, take bits from end of the head and prepend it to the tail
+ mf2.headLen -= bitsLeft
+ mf2.tailLen += bitsLeft
+ bitsLeft = 0
+ } else {
+ // folding affects not only top node, remove top node
+ bitsLeft -= mf2.headLen
+ mf2.nodeStack = mf2.nodeStack[:len(mf2.nodeStack)-1]
+ prevTop := mf2.top
+ mf2.top = mf2.nodeStack[len(mf2.nodeStack)-1]
+ if mf2.top.n0 == prevTop {
+ mf2.side = 0
+ mf2.headLen = int(mf2.top.p0 & 0x1f)
+ //fmt.Printf("mf2.head = p0 %b\n", mf2.head)
+ } else if mf2.top.n1 == prevTop {
+ mf2.side = 1
+ mf2.headLen = int(mf2.top.p1 & 0x1f)
+ //fmt.Printf("mf2.head = p1 %b\n", mf2.head)
+ } else {
+ panic("")
+ }
+ mf2.tailLen = 0
+ }
+ }
+}
+
+func (mf2 *MatchFinder2) FindLongestMatches(data []byte) []Match {
+ //fmt.Printf("mf2=%p pt=%p data=[%x]\n", mf2, mf2.pt, data)
+ mf2.matches = mf2.matches[:0]
+ if len(data) < 2 {
+ return mf2.matches
+ }
+ mf2.nodeStack = append(mf2.nodeStack[:0], &mf2.pt.root)
+ mf2.matchStack = mf2.matchStack[:0]
+ mf2.top = &mf2.pt.root
+ mf2.side = 2
+ mf2.tailLen = 0
+ mf2.headLen = 0
+ n := len(data)
+ if cap(mf2.sa) < n {
+ mf2.sa = make([]int32, n)
+ } else {
+ mf2.sa = mf2.sa[:n]
+ }
+ if err := sais.Sais(data, mf2.sa); err != nil {
+ panic(err)
+ }
+ if cap(mf2.inv) < n {
+ mf2.inv = make([]int32, n)
+ } else {
+ mf2.inv = mf2.inv[:n]
+ }
+ for i := 0; i < n; i++ {
+ mf2.inv[mf2.sa[i]] = int32(i)
+ }
+ var k int
+ // Process all suffixes one by one starting from
+ // first suffix in txt[]
+ if cap(mf2.lcp) < n {
+ mf2.lcp = make([]int32, n)
+ } else {
+ mf2.lcp = mf2.lcp[:n]
+ }
+ for i := 0; i < n; i++ {
+ /* If the current suffix is at n-1, then we don’t
+ have next substring to consider. So lcp is not
+ defined for this substring, we put zero. */
+ if mf2.inv[i] == int32(n-1) {
+ k = 0
+ continue
+ }
+
+ /* j contains index of the next substring to
+ be considered to compare with the present
+ substring, i.e., next string in suffix array */
+ j := int(mf2.sa[mf2.inv[i]+1])
+
+ // Directly start matching from k'th index as
+ // at-least k-1 characters will match
+ for i+k < n && j+k < n && data[i+k] == data[j+k] {
+ k++
+ }
+ mf2.lcp[mf2.inv[i]] = int32(k) // lcp for the present suffix.
+
+ // Deleting the starting character from the string.
+ if k > 0 {
+ k--
+ }
+ }
+ //fmt.Printf("sa=[%d]\n", mf2.sa)
+ //fmt.Printf("lcp=[%d]\n", mf2.lcp)
+ depth := 0 // Depth in bits
+ var lastMatch *Match
+ for i := 0; i < n; i++ {
+ // lcp[i] is the Longest Common Prefix of suffixes starting from sa[i] and sa[i+1]
+ //fmt.Printf("Suffix [%x], depth = %d\n", data[mf2.sa[i]:n], depth)
+ if i > 0 {
+ lcp := int(mf2.lcp[i-1])
+ // lcp[i-1] is the Longest Common Prefix of suffixes starting from sa[i-1] and sa[i]
+ if depth > 8*lcp {
+ //fmt.Printf("before fold depth = %d, mf2.lcp[i-1] = %d\n", depth, mf2.lcp[i-1])
+ mf2.fold(depth - 8*lcp)
+ depth = 8 * lcp
+ //b1, d1 := mf2.Current()
+ //fmt.Printf("current: [%x] %d, depth = %d\n", b1, d1, depth)
+ //fmt.Printf("after fold depth = %d\n", depth)
+ for lastMatch != nil && lastMatch.End-lastMatch.Start > lcp {
+ //fmt.Printf("Popped %d: [%d-%d] [%x]\n", len(mf2.matchStack)-1, lastMatch.Start, lastMatch.End, data[lastMatch.Start:lastMatch.End])
+ mf2.matchStack = mf2.matchStack[:len(mf2.matchStack)-1]
+ if len(mf2.matchStack) == 0 {
+ lastMatch = nil
+ } else {
+ lastMatch = &mf2.matchStack[len(mf2.matchStack)-1]
+ }
+ }
+ } else {
+ r := depth % 8
+ if r > 0 {
+ mf2.fold(r)
+ depth -= r
+ //b1, d1 := mf2.Current()
+ //fmt.Printf("current: [%x] %d, depth = %d\n", b1, d1, depth)
+ }
+ }
+ }
+ sa := int(mf2.sa[i])
+ start := sa + depth/8
+ for end := start + 1; end <= n; end++ {
+ //fmt.Printf("Looking at [%d-%d] [%x]\n", sa, end, data[sa:end])
+ d := mf2.unfold(data[end-1])
+ depth += 8 - int(d&0x1f)
+ //fmt.Printf("after unfold depth=%d\n", depth)
+ //b1, d1 := mf2.Current()
+ //fmt.Printf("current: [%x][%x] %d, depth = %d\n", b1, data[sa:end], d1, depth)
+ if d != 0 {
+ //fmt.Printf("divergence found: %b\n", d)
+ break
+ }
+ if mf2.tailLen != 0 || mf2.top.val == nil {
+ //fmt.Printf("tailLen = %d, val == nil %t, p=%p\n", mf2.tailLen, mf2.top.val == nil, mf2.top)
+ continue
+ }
+ if cap(mf2.matchStack) == len(mf2.matchStack) {
+ mf2.matchStack = append(mf2.matchStack, Match{})
+ } else {
+ mf2.matchStack = mf2.matchStack[:len(mf2.matchStack)+1]
+ }
+ lastMatch = &mf2.matchStack[len(mf2.matchStack)-1]
+ // This possibly overwrites previous match for the same start position
+ //fmt.Printf("Push on the match stack [%d-%d] [%x]\n", sa, end, data[sa:end])
+ lastMatch.Start = sa
+ lastMatch.End = end
+ lastMatch.Val = mf2.top.val
+ }
+ if lastMatch != nil {
+ mf2.matches = append(mf2.matches, Match{})
+ m := &mf2.matches[len(mf2.matches)-1]
+ m.Start = sa
+ m.End = sa + lastMatch.End - lastMatch.Start
+ m.Val = lastMatch.Val
+ //fmt.Printf("Added new Match: [%d-%d] [%x]\n", m.Start, m.End, data[m.Start:m.End])
+ }
+ }
+ //fmt.Printf("before sorting %d matches\n", len(mf2.matches))
+ if len(mf2.matches) < 2 {
+ return mf2.matches
+ }
+ //sort.Sort(&mf2.matches)
+ slices.SortFunc(mf2.matches, func(i, j Match) int { return cmp.Compare(i.Start, j.Start) })
+
+ lastEnd := mf2.matches[0].End
+ j := 1
+ for i, m := range mf2.matches {
+ if i > 0 {
+ if m.End > lastEnd {
+ if i != j {
+ mf2.matches[j] = m
+ }
+ lastEnd = m.End
+ j++
+ }
+ }
+ }
+ return mf2.matches[:j]
+}
+
+func (mf2 *MatchFinder2) Current() ([]byte, int) {
+ var b []byte
+ var depth int
+ last := len(mf2.nodeStack) - 1
+ for i, n := range mf2.nodeStack {
+ var p uint32
+ if i < last {
+ next := mf2.nodeStack[i+1]
+ if n.n0 == next {
+ p = n.p0
+ } else if n.n1 == next {
+ p = n.p1
+ } else {
+ panic("")
+ }
+ } else {
+ if mf2.side == 0 {
+ p = n.p0
+ } else if mf2.side == 1 {
+ p = n.p1
+ }
+ p = (p & 0xffffffe0) | uint32(mf2.headLen)
+ }
+ fmt.Printf("i,p=%d, %b\n", i, p)
+ // Add bit by bit
+ for (p & 0x1f) > 0 {
+ if depth >= 8*len(b) {
+ b = append(b, 0)
+ }
+ if p&0x80000000 != 0 {
+ b[depth/8] |= uint8(1) << (7 - (depth % 8))
+ }
+ depth++
+ p = ((p & 0xffffffe0) << 1) | (p & 0x1f) - 1
+ }
+ }
+ return b, depth
+}
+
+func (mf *MatchFinder) FindLongestMatches(data []byte) []Match {
+ matchCount := 0
+ s := &mf.s
+ lastEnd := 0
+ for start := 0; start < len(data); start++ {
+ s.reset(&mf.pt.root)
+ emitted := false
+ for end := start + 1; end <= len(data); end++ {
+ if d := s.transition(data[end-1], true /* readonly */); d != 0 {
+ break
+ }
+ if s.tail != 0 || s.n.val == nil || end <= lastEnd {
+ continue
+ }
+ var m *Match
+ if emitted {
+ m = &mf.matches[matchCount-1]
+ } else {
+ if matchCount == len(mf.matches) {
+ mf.matches = append(mf.matches, Match{})
+ m = &mf.matches[len(mf.matches)-1]
+ } else {
+ m = &mf.matches[matchCount]
+ }
+ matchCount++
+ emitted = true
+ }
+ // This possibly overwrites previous match for the same start position
+ m.Start = start
+ m.End = end
+ m.Val = s.n.val
+ lastEnd = end
+ }
+ }
+ return mf.matches[:matchCount]
+}
diff --git a/erigon-lib/patricia/patricia_fuzz_test.go b/erigon-lib/patricia/patricia_fuzz_test.go
new file mode 100644
index 00000000000..240096daf68
--- /dev/null
+++ b/erigon-lib/patricia/patricia_fuzz_test.go
@@ -0,0 +1,149 @@
+//go:build !nofuzz
+
+/*
+Copyright 2021 Erigon contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package patricia
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "testing"
+)
+
+// go test -trimpath -v -fuzz=FuzzPatricia -fuzztime=10s ./patricia
+
+func FuzzPatricia(f *testing.F) {
+ f.Fuzz(func(t *testing.T, build []byte, test []byte) {
+ var n node
+ keyMap := make(map[string][]byte)
+ i := 0
+ for i < len(build) {
+ keyLen := int(build[i]>>4) + 1
+ valLen := int(build[i]&15) + 1
+ i++
+ var key []byte
+ var val []byte
+ for keyLen > 0 && i < len(build) {
+ key = append(key, build[i])
+ i++
+ keyLen--
+ }
+ for valLen > 0 && i < len(build) {
+ val = append(val, build[i])
+ i++
+ valLen--
+ }
+ n.insert(key, val)
+ keyMap[string(key)] = val
+ }
+ var testKeys [][]byte
+ i = 0
+ for i < len(test) {
+ keyLen := int(test[i]>>4) + 1
+ i++
+ var key []byte
+ for keyLen > 0 && i < len(test) {
+ key = append(key, test[i])
+ i++
+ keyLen--
+ }
+ if _, ok := keyMap[string(key)]; !ok {
+ testKeys = append(testKeys, key)
+ }
+ }
+ // Test for keys
+ for key, vals := range keyMap {
+ v, ok := n.get([]byte(key))
+ if ok {
+ if !bytes.Equal(vals, v.([]byte)) {
+ t.Errorf("for key %x expected value %x, got %x", key, vals, v.([]byte))
+ }
+ }
+ }
+ // Test for non-existent keys
+ for _, key := range testKeys {
+ _, ok := n.get(key)
+ if ok {
+ t.Errorf("unexpected key found [%x]", key)
+ }
+ }
+ })
+}
+
+func FuzzLongestMatch(f *testing.F) {
+ f.Fuzz(func(t *testing.T, build []byte, test []byte) {
+ var pt PatriciaTree
+ keyMap := make(map[string][]byte)
+ i := 0
+ for i < len(build) {
+ keyLen := int(build[i]>>4) + 1
+ valLen := int(build[i]&15) + 1
+ i++
+ var key []byte
+ var val []byte
+ for keyLen > 0 && i < len(build) {
+ key = append(key, build[i])
+ i++
+ keyLen--
+ }
+ for valLen > 0 && i < len(build) {
+ val = append(val, build[i])
+ i++
+ valLen--
+ }
+ pt.Insert(key, val)
+ keyMap[string(key)] = val
+ }
+ var keys []string
+ for key := range keyMap {
+ keys = append(keys, key)
+ }
+ if len(keys) == 0 {
+ t.Skip()
+ }
+ var data []byte
+ for i := 0; i < 4*(len(test)/4); i += 4 {
+ keyIdx := int(binary.BigEndian.Uint32(test[i : i+4]))
+ keyIdx = keyIdx % len(keys)
+ key := []byte(keys[keyIdx])
+ data = append(data, key...)
+ for j := 0; j < len(key); j++ {
+ data = append(data, key[len(key)-1-j])
+ }
+ }
+ mf := NewMatchFinder(&pt)
+ m1 := mf.FindLongestMatches(data)
+ mf2 := NewMatchFinder2(&pt)
+ m2 := mf2.FindLongestMatches(data)
+ if len(m1) == len(m2) {
+ for i, m := range m1 {
+ mm := m2[i]
+ if m.Start != mm.Start || m.End != mm.End {
+ t.Errorf("mismatch, expected %+v, got %+v", m, mm)
+ }
+ }
+ } else {
+ t.Errorf("matches %d, expected %d", len(m2), len(m1))
+ for _, m := range m1 {
+ fmt.Printf("%+v, match1: [%x]\n", m, data[m.Start:m.End])
+ }
+ for _, m := range m2 {
+ fmt.Printf("%+v, match2: [%x]\n", m, data[m.Start:m.End])
+ }
+ }
+ })
+}
diff --git a/erigon-lib/patricia/patricia_test.go b/erigon-lib/patricia/patricia_test.go
new file mode 100644
index 00000000000..1cf2e85a0e4
--- /dev/null
+++ b/erigon-lib/patricia/patricia_test.go
@@ -0,0 +1,1644 @@
+/*
+ Copyright 2021 Erigon contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package patricia
+
+import (
+ "encoding/hex"
+ "fmt"
+ "testing"
+)
+
+func TestInserts1(t *testing.T) {
+ n := &node{}
+ s := makestate(n)
+ d := s.transition(0x34, true)
+ fmt.Printf("1 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ s.diverge(d)
+ fmt.Printf("2 tree:\n%sstate: %s\n\n", n, s)
+ d = s.transition(0x56, true)
+ fmt.Printf("3 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ s.diverge(d)
+ fmt.Printf("4 tree:\n%sstate: %s\n\n", n, s)
+ d = s.transition(0xff, true)
+ fmt.Printf("5 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ s.diverge(d)
+ fmt.Printf("6 tree:\n%sstate: %s\n\n", n, s)
+ d = s.transition(0xcc, true)
+ fmt.Printf("7 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ s.diverge(d)
+ fmt.Printf("8 tree:\n%sstate: %s\n\n", n, s)
+ s.insert(nil)
+ s = makestate(n)
+ d = s.transition(0x34, true)
+ fmt.Printf("9 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ d = s.transition(0x66, true)
+ fmt.Printf("10 tree:\n%sstate: %s\ndivergence %s\n\n", n, s, tostr(d))
+ s.diverge(d)
+ fmt.Printf("11 tree:\n%sstate: %s\n\n", n, s)
+
+ n.insert([]byte{0xff, 0xff, 0xff, 0xff, 0xff}, []byte{0x01})
+ fmt.Printf("12 tree:\n%s\n", n)
+
+ n.insert([]byte{0xff, 0xff, 0xff, 0xff, 0x0f}, []byte{0x02})
+ fmt.Printf("13 tree:\n%s\n", n)
+
+ n.insert([]byte{0xff, 0xff, 0xff, 0xff, 0xff}, []byte{0x03})
+ fmt.Printf("14 tree:\n%s\n", n)
+
+ vs, ok := n.get([]byte{0xff, 0xff, 0xff, 0xff, 0x0f})
+ fmt.Printf("15 vs = %v, ok = %t\n", vs, ok)
+
+ vs, ok = n.get([]byte{0xff, 0xff, 0xff, 0xff, 0xff})
+ fmt.Printf("16 vs = %v, ok = %t\n", vs, ok)
+
+ vs, ok = n.get([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x56})
+ fmt.Printf("17 vs = %v, ok = %t\n", vs, ok)
+
+ vs, ok = n.get([]byte{0x34, 0x56, 0xff, 0xcc})
+ fmt.Printf("18 vs = %v, ok = %t\n", vs, ok)
+
+ vs, ok = n.get([]byte{})
+ fmt.Printf("19 vs = %v, ok = %t\n", vs, ok)
+}
+
+func TestInserts2(t *testing.T) {
+ var n node
+ n.insert([]byte{0xff}, []byte{0x03, 0x03, 0x03, 0x1a, 0xed, 0xed})
+ n.insert([]byte{0xed}, []byte{})
+ fmt.Printf("tree:\n%s", &n)
+
+ vs, ok := n.get([]byte{0xff})
+ fmt.Printf("vs = %v, ok = %t\n", vs, ok)
+
+ vs, ok = n.get([]byte{0xed})
+ fmt.Printf("vs = %v, ok = %t\n", vs, ok)
+}
+
+func TestFindMatches1(t *testing.T) {
+ var pt PatriciaTree
+ pt.Insert([]byte("wolf"), []byte{1})
+ pt.Insert([]byte("winter"), []byte{2})
+ pt.Insert([]byte("wolfs"), []byte{3})
+ fmt.Printf("n\n%s", &pt.root)
+ mf := NewMatchFinder2(&pt)
+ data := []byte("Who lives here in winter, wolfs")
+ matches := mf.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match [%s]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 2 {
+ t.Errorf("expected matches: %d, got %d", 2, len(matches))
+ }
+}
+
+func TestFindMatches2(t *testing.T) {
+ var pt PatriciaTree
+ pt.Insert([]byte("wolf"), []byte{1})
+ pt.Insert([]byte("winter"), []byte{2})
+ pt.Insert([]byte("wolfs?"), []byte{3})
+ fmt.Printf("n\n%s", &pt.root)
+ mf2 := NewMatchFinder2(&pt)
+ data := []byte("Who lives here in winter, wolfs?")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%s]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 2 {
+ t.Errorf("expected matches: %d, got %d", 2, len(matches))
+ }
+}
+
+func decodeHex(in string) []byte {
+ payload, err := hex.DecodeString(in)
+ if err != nil {
+ panic(err)
+ }
+ return payload
+}
+
+func TestFindMatches3(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ pt.Insert(decodeHex("00000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("00000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0100000000000000000000003b30000001000003"), v)
+ pt.Insert(decodeHex("0000000000000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("000000000000000000003b300000010000030001"), v)
+ pt.Insert(decodeHex("00000000000000000000003b3000000100000300"), v)
+ pt.Insert(decodeHex("00000000000000000000000000"), v)
+ pt.Insert(decodeHex("00000000000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("000000000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("0000000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("00000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("000000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("0000003b30000001000003000100"), v)
+ pt.Insert(decodeHex("00003b30000001000003000100"), v)
+ pt.Insert(decodeHex("0100000000000000"), v)
+ pt.Insert(decodeHex("003b30000001000003000100"), v)
+ pt.Insert(decodeHex("3b30000001000003000100"), v)
+ pt.Insert(decodeHex("00000000000000003b3000000100000300010000"), v)
+ pt.Insert(decodeHex("0100000000000000000000003a30000001000000"), v)
+ pt.Insert(decodeHex("000000003a300000010000000000010010000000"), v)
+ pt.Insert(decodeHex("00000000003a3000000100000000000100100000"), v)
+ pt.Insert(decodeHex("0000000000003a30000001000000000001001000"), v)
+ pt.Insert(decodeHex("000000000000003a300000010000000000010010"), v)
+ pt.Insert(decodeHex("00000000000000003a3000000100000000000100"), v)
+ pt.Insert(decodeHex("0000000000000000003a30000001000000000001"), v)
+ pt.Insert(decodeHex("000000000000000000003a300000010000000000"), v)
+ pt.Insert(decodeHex("00000000000000000000003a3000000100000000"), v)
+ mf2 := NewMatchFinder2(&pt)
+ data := decodeHex("0100000000000000000000003a30000001000000000001001000000044004500")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 9 {
+ t.Errorf("expected matches: %d, got %d", 9, len(matches))
+ }
+}
+
+func TestFindMatches4(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ pt.Insert(decodeHex("00000000000000000000000000000000000000"), v)
+ mf2 := NewMatchFinder2(&pt)
+ data := decodeHex("01")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 0 {
+ t.Errorf("expected matches: %d, got %d", 0, len(matches))
+ }
+}
+
+func TestFindMatches5(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ pt.Insert(decodeHex("0434e37673a8e0aaa536828f0d5b0ddba12fece1"), v)
+ pt.Insert(decodeHex("e28e72fcf78647adce1f1252f240bbfaebd63bcc"), v)
+ pt.Insert(decodeHex("34e28e72fcf78647adce1f1252f240bbfaebd63b"), v)
+ pt.Insert(decodeHex("0434e28e72fcf78647adce1f1252f240bbfaebd6"), v)
+ pt.Insert(decodeHex("090bdc64a7e3632cde8f4689f47acfc0760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("00090bdc64a7e3632cde8f4689f47acfc0760e35bce43af50d4b1f5973463bde"), v)
+ pt.Insert(decodeHex("0000000000"), v)
+ pt.Insert(decodeHex("00000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000"), v)
+ pt.Insert(decodeHex("00000000000000000000000000"), v)
+ pt.Insert(decodeHex("000000000000000000000000"), v)
+ pt.Insert(decodeHex("f47acfc0760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("e3632cde8f4689f47acfc0760e35bce43af50d4b"), v)
+ pt.Insert(decodeHex("de8f4689f47acfc0760e35bce43af50d4b1f5973"), v)
+ pt.Insert(decodeHex("dc64a7e3632cde8f4689f47acfc0760e35bce43a"), v)
+ pt.Insert(decodeHex("a7e3632cde8f4689f47acfc0760e35bce43af50d"), v)
+ pt.Insert(decodeHex("8f4689f47acfc0760e35bce43af50d4b1f597346"), v)
+ pt.Insert(decodeHex("89f47acfc0760e35bce43af50d4b1f5973463bde"), v)
+ pt.Insert(decodeHex("64a7e3632cde8f4689f47acfc0760e35bce43af5"), v)
+ pt.Insert(decodeHex("632cde8f4689f47acfc0760e35bce43af50d4b1f"), v)
+ pt.Insert(decodeHex("4689f47acfc0760e35bce43af50d4b1f5973463b"), v)
+ pt.Insert(decodeHex("2cde8f4689f47acfc0760e35bce43af50d4b1f59"), v)
+ pt.Insert(decodeHex("0bdc64a7e3632cde8f4689f47acfc0760e35bce4"), v)
+ pt.Insert(decodeHex("7acfc0760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("0000000000000000000000"), v)
+ pt.Insert(decodeHex("cfc0760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("00000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("c0760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("00000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("760e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0e35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("35bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("bce43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("e43af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("1090bdc64a7e3632cde8f4689f47acfc0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("3af50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("f50d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130193790f769"), v)
+ pt.Insert(decodeHex("ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db1"), v)
+ pt.Insert(decodeHex("df415bb7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e"), v)
+ pt.Insert(decodeHex("d417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130193790f7698ee4"), v)
+ pt.Insert(decodeHex("cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130193790"), v)
+ pt.Insert(decodeHex("c2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130193790f7"), v)
+ pt.Insert(decodeHex("bb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126"), v)
+ pt.Insert(decodeHex("b7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7"), v)
+ pt.Insert(decodeHex("ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dc"), v)
+ pt.Insert(decodeHex("6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f1"), v)
+ pt.Insert(decodeHex("63ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22d"), v)
+ pt.Insert(decodeHex("5f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f13019"), v)
+ pt.Insert(decodeHex("5bb7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fc"), v)
+ pt.Insert(decodeHex("595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130"), v)
+ pt.Insert(decodeHex("415bb7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28"), v)
+ pt.Insert(decodeHex("34df415bb7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d303"), v)
+ pt.Insert(decodeHex("2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf2"), v)
+ pt.Insert(decodeHex("0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f130193790f7698e"), v)
+ pt.Insert(decodeHex("07cbc2fc0fd417d3a29f2962b8badecbf4d3036e28fcd7dcf22db126f1301937"), v)
+ pt.Insert(decodeHex("0434df415bb7ae2363ecbb6a595f07cbc2fc0fd417d3a29f2962b8badecbf4d3"), v)
+ pt.Insert(decodeHex("0d4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dce4"), v)
+ pt.Insert(decodeHex("ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970"), v)
+ pt.Insert(decodeHex("df415bb7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198f"), v)
+ pt.Insert(decodeHex("d417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dce41b48"), v)
+ pt.Insert(decodeHex("d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dce41b48747f"), v)
+ pt.Insert(decodeHex("cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9"), v)
+ pt.Insert(decodeHex("c2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dc"), v)
+ pt.Insert(decodeHex("bb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bc"), v)
+ pt.Insert(decodeHex("b7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0f"), v)
+ pt.Insert(decodeHex("ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0fed"), v)
+ pt.Insert(decodeHex("6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcda"), v)
+ pt.Insert(decodeHex("63ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded39"), v)
+ pt.Insert(decodeHex("5f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3a"), v)
+ pt.Insert(decodeHex("5bb7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd"), v)
+ pt.Insert(decodeHex("595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd"), v)
+ pt.Insert(decodeHex("415bb7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8"), v)
+ pt.Insert(decodeHex("34df415bb7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c19"), v)
+ pt.Insert(decodeHex("2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded"), v)
+ pt.Insert(decodeHex("17d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dce41b4874"), v)
+ pt.Insert(decodeHex("0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3adaa9dce41b"), v)
+ pt.Insert(decodeHex("07cbc2fc0fd417d37e04bc63768597761b6c198fd8bd0feded3970bcdafd3ada"), v)
+ pt.Insert(decodeHex("0434df415bb7ae2363ecbb6a595f07cbc2fc0fd417d37e04bc63768597761b6c"), v)
+ pt.Insert(decodeHex("df415bb7ae2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("34df415bb7ae2363ecbb6a595f07cbc2fc0fd417"), v)
+ pt.Insert(decodeHex("0434df415bb7ae2363ecbb6a595f07cbc2fc0fd4"), v)
+ pt.Insert(decodeHex("4b1f5973463bde62"), v)
+ pt.Insert(decodeHex("415bb7ae2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("5bb7ae2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("f4689f47acfc0760e35bce43af50d4b1f5973463"), v)
+ pt.Insert(decodeHex("e8f4689f47acfc0760e35bce43af50d4b1f59734"), v)
+ pt.Insert(decodeHex("cde8f4689f47acfc0760e35bce43af50d4b1f597"), v)
+ pt.Insert(decodeHex("c64a7e3632cde8f4689f47acfc0760e35bce43af"), v)
+ pt.Insert(decodeHex("bdc64a7e3632cde8f4689f47acfc0760e35bce43"), v)
+ pt.Insert(decodeHex("9f47acfc0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("90bdc64a7e3632cde8f4689f47acfc0760e35bce"), v)
+ pt.Insert(decodeHex("7e3632cde8f4689f47acfc0760e35bce43af50d4"), v)
+ pt.Insert(decodeHex("689f47acfc0760e35bce43af50d4b1f5973463bd"), v)
+ pt.Insert(decodeHex("4a7e3632cde8f4689f47acfc0760e35bce43af50"), v)
+ pt.Insert(decodeHex("3632cde8f4689f47acfc0760e35bce43af50d4b1"), v)
+ pt.Insert(decodeHex("32cde8f4689f47acfc0760e35bce43af50d4b1f5"), v)
+ pt.Insert(decodeHex("b7ae2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("47acfc0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("ae2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("acfc0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("2363ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("fc0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("63ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000001"), v)
+ pt.Insert(decodeHex("0760e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("bc63768597761b6c198fd8bd0feded3970bcdafd"), v)
+ pt.Insert(decodeHex("97761b6c198fd8bd0feded3970bcdafd3adaa9dc"), v)
+ pt.Insert(decodeHex("8fd8bd0feded3970bcdafd3adaa9dce41b48747f"), v)
+ pt.Insert(decodeHex("8597761b6c198fd8bd0feded3970bcdafd3adaa9"), v)
+ pt.Insert(decodeHex("7e04bc63768597761b6c198fd8bd0feded3970bc"), v)
+ pt.Insert(decodeHex("768597761b6c198fd8bd0feded3970bcdafd3ada"), v)
+ pt.Insert(decodeHex("761b6c198fd8bd0feded3970bcdafd3adaa9dce4"), v)
+ pt.Insert(decodeHex("6c198fd8bd0feded3970bcdafd3adaa9dce41b48"), v)
+ pt.Insert(decodeHex("63768597761b6c198fd8bd0feded3970bcdafd3a"), v)
+ pt.Insert(decodeHex("1b6c198fd8bd0feded3970bcdafd3adaa9dce41b"), v)
+ pt.Insert(decodeHex("198fd8bd0feded3970bcdafd3adaa9dce41b4874"), v)
+ pt.Insert(decodeHex("04bc63768597761b6c198fd8bd0feded3970bcda"), v)
+ pt.Insert(decodeHex("00000000000000000000000000000000000001"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000000000000000000000000000002"), v)
+ pt.Insert(decodeHex("ecbb6a595f07cbc2fc0fd417d3"), v)
+ pt.Insert(decodeHex("60e35bce43af50d4b1f5973463bde6"), v)
+ pt.Insert(decodeHex("d8bd0feded3970bcdafd3adaa9dce41b48747f"), v)
+ pt.Insert(decodeHex("000000000000000000000000000000000001"), v)
+ pt.Insert(decodeHex("60e3997d5a409c25fe09d77351b6"), v)
+ pt.Insert(decodeHex("bd0feded3970bcdafd3adaa9dce41b48747f"), v)
+ mf2 := NewMatchFinder2(&pt)
+ data := decodeHex("9d7d9d7d082073e2920896915d0e0239a7e852d86b26e03a188bc5b947972aeec206d63b6744043493d38e72c5281e78f6b364eacac6fa907ecba1640000000000000000000000000000000000000000000000000000000007bfa482043493d38e72c5281e78f6b364eacac6fa907ecba1640000000000000000000000000000000000000000000000000000000000000011043493d38e72c5281e78f6b364eacac6fa907ecba1640000000000000000000000000000000000000000000000000000000000000002043493d38e72c5281e78f6b364eacac6fa907ecba164000000000000000000000000000000000000000000000000000000000000001e0820a516e4eeef0852f3c4ee0f11237e5e5127ed67a64e43a2f2ebef2d6bc26bb384082073404b8fb6bb42e5a0c9bb7d6253d9d72084bed3991df1efd25512e7f713e796043493d38e72c5281e78f6b364eacac6fa907ecba164000000000000000000000000000000000000000000000000000000000000001f043493d38e72c5281e78f6b364eacac6fa907ecba1640000000000000000000000000000000000000000000000000000000000000012082010db8a472df5096168436e756dbf37edce306a01f4fa7a889f7ad8195e1154a9043493d38e72c5281e78f6b364eacac6fa907ecba1640000000000000000000000000000000000000000000000000000000000000006")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 88 {
+ t.Errorf("expected matches: %d, got %d", 88, len(matches))
+ }
+}
+
+func TestFindMatches6(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("600160a060020a03"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1681"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168156"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1681565b"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff168152602001908152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff16815260200190815260200160"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1681526020019081526020016000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168152602001908152602001600020"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16815260200190815260200160002060"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffffff1681526020019081526020"), v)
+ pt.Insert(decodeHex("81526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168152"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16815260"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681526020"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("6020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("8152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("80806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561"), v)
+ pt.Insert(decodeHex("60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16"), v)
+ pt.Insert(decodeHex("5b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f"), v)
+ pt.Insert(decodeHex("5180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015"), v)
+ pt.Insert(decodeHex("405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680"), v)
+ pt.Insert(decodeHex("20019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("52602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("9081526020016000206000"), v)
+ pt.Insert(decodeHex("5050604051849350600080"), v)
+ pt.Insert(decodeHex("2001908152602001600020600050"), v)
+ pt.Insert(decodeHex("505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("6000506000600060005054815260200190815260200160002060006101000a81"), v)
+ pt.Insert(decodeHex("00506000600060005054815260200190815260200160002060006101000a8154"), v)
+ pt.Insert(decodeHex("ffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffff168152602001"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a4578082"), v)
+ pt.Insert(decodeHex("81038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a457808203"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a4"), v)
+ pt.Insert(decodeHex("60005060003373ffffffffffffffffffffffffffffffffffffffff1681526020"), v)
+ pt.Insert(decodeHex("60003373ffffffffffffffffffffffffffffffffffffffff1681526020019081"), v)
+ pt.Insert(decodeHex("5060003373ffffffffffffffffffffffffffffffffffffffff16815260200190"), v)
+ pt.Insert(decodeHex("3373ffffffffffffffffffffffffffffffffffffffff16815260200190815260"), v)
+ pt.Insert(decodeHex("20018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a457"), v)
+ pt.Insert(decodeHex("038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a45780820380"), v)
+ pt.Insert(decodeHex("018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a45780"), v)
+ pt.Insert(decodeHex("005060003373ffffffffffffffffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("003373ffffffffffffffffffffffffffffffffffffffff168152602001908152"), v)
+ pt.Insert(decodeHex("01908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff16815260200190"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1681526020019081"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff168152602001908152"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff16815260200190815260"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1681526020019081526020"), v)
+ pt.Insert(decodeHex("ffffffffffffffff168152602001908152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffff16815260200190815260200160"), v)
+ pt.Insert(decodeHex("ffffffffffff1681526020019081526020016000"), v)
+ pt.Insert(decodeHex("ffffffffff168152602001908152602001600020"), v)
+ pt.Insert(decodeHex("ffffffff16815260200190815260200160002060"), v)
+ pt.Insert(decodeHex("ffffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("81526020016000206000"), v)
+ pt.Insert(decodeHex("50604051849350600080"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("5060405180910390f35b"), v)
+ pt.Insert(decodeHex("168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("67ffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("6040518082815260200191505060405180910390"), v)
+ pt.Insert(decodeHex("ffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("908152602001600020600050"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390"), v)
+ pt.Insert(decodeHex("81038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f3"), v)
+ pt.Insert(decodeHex("80806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050506040"), v)
+ pt.Insert(decodeHex("806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050604051"), v)
+ pt.Insert(decodeHex("60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180"), v)
+ pt.Insert(decodeHex("5b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250"), v)
+ pt.Insert(decodeHex("5180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060"), v)
+ pt.Insert(decodeHex("405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050"), v)
+ pt.Insert(decodeHex("20018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050506040518091"), v)
+ pt.Insert(decodeHex("038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b"), v)
+ pt.Insert(decodeHex("018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050604051809103"), v)
+ pt.Insert(decodeHex("ffffffff168152602001"), v)
+ pt.Insert(decodeHex("5b60405180828152602001915050604051809103"), v)
+ pt.Insert(decodeHex("565b604051808281526020019150506040518091"), v)
+ pt.Insert(decodeHex("518082815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("40518082815260200191505060405180910390f3"), v)
+ pt.Insert(decodeHex("ff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("808281526102cb94909290828280156100d757"), v)
+ pt.Insert(decodeHex("ff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("526020016000206000"), v)
+ pt.Insert(decodeHex("f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090509081019060"), v)
+ pt.Insert(decodeHex("a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284209192849261"), v)
+ pt.Insert(decodeHex("90f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190"), v)
+ pt.Insert(decodeHex("8252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60"), v)
+ pt.Insert(decodeHex("60a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282842091928492"), v)
+ pt.Insert(decodeHex("602081815260408083206002548452825282208354815467ffffffffffffffff191667ffffffffffffffff919091161781556001848101805491830180548382"), v)
+ pt.Insert(decodeHex("600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284"), v)
+ pt.Insert(decodeHex("600160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a090810190945260808281529293909190"), v)
+ pt.Insert(decodeHex("600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284209192"), v)
+ pt.Insert(decodeHex("60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("52838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040"), v)
+ pt.Insert(decodeHex("52600160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a0908101909452608082815292939091"), v)
+ pt.Insert(decodeHex("526000828152604090208054829081101561000257506000908152602090200154905081565b610315600435600160a060020a03331660009081526020818152"), v)
+ pt.Insert(decodeHex("35600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282842091"), v)
+ pt.Insert(decodeHex("2081815260408083206002548452825282208354815467ffffffffffffffff191667ffffffffffffffff91909116178155600184810180549183018054838255"), v)
+ pt.Insert(decodeHex("0435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452828420"), v)
+ pt.Insert(decodeHex("0160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a09081019094526080828152929390919082"), v)
+ pt.Insert(decodeHex("0160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452828420919284"), v)
+ pt.Insert(decodeHex("009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("60405180910390f35b"), v)
+ pt.Insert(decodeHex("8082815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ffffffffffffff1682"), v)
+ pt.Insert(decodeHex("600160a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("8152602001600020600050"), v)
+ pt.Insert(decodeHex("90600052602060002090"), v)
+ pt.Insert(decodeHex("928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782"), v)
+ pt.Insert(decodeHex("822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260608481526100"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080"), v)
+ pt.Insert(decodeHex("818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160"), v)
+ pt.Insert(decodeHex("8152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040"), v)
+ pt.Insert(decodeHex("8152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040526060"), v)
+ pt.Insert(decodeHex("815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020"), v)
+ pt.Insert(decodeHex("80822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084815261"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c0908490"), v)
+ pt.Insert(decodeHex("80519060200190808383829060006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c0908490801561"), v)
+ pt.Insert(decodeHex("6101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060"), v)
+ pt.Insert(decodeHex("604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260608481"), v)
+ pt.Insert(decodeHex("60243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590"), v)
+ pt.Insert(decodeHex("6020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201"), v)
+ pt.Insert(decodeHex("60200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b"), v)
+ pt.Insert(decodeHex("60043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de"), v)
+ pt.Insert(decodeHex("60006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c09084"), v)
+ pt.Insert(decodeHex("52928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052"), v)
+ pt.Insert(decodeHex("52604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084"), v)
+ pt.Insert(decodeHex("5260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052602060002090"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103"), v)
+ pt.Insert(decodeHex("4080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040526060848152"), v)
+ pt.Insert(decodeHex("3560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185"), v)
+ pt.Insert(decodeHex("3560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490"), v)
+ pt.Insert(decodeHex("243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004"), v)
+ pt.Insert(decodeHex("2090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084815261006c"), v)
+ pt.Insert(decodeHex("20818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501"), v)
+ pt.Insert(decodeHex("20036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191"), v)
+ pt.Insert(decodeHex("200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("1916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052602060"), v)
+ pt.Insert(decodeHex("16815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060005260206000"), v)
+ pt.Insert(decodeHex("0a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060005260"), v)
+ pt.Insert(decodeHex("043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c09084908015"), v)
+ pt.Insert(decodeHex("036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190"), v)
+ pt.Insert(decodeHex("031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020"), v)
+ pt.Insert(decodeHex("0191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57"), v)
+ pt.Insert(decodeHex("01000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000"), v)
+ pt.Insert(decodeHex("006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402"), v)
+ pt.Insert(decodeHex("000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052"), v)
+ pt.Insert(decodeHex("ffffffff1681565b60"), v)
+ pt.Insert(decodeHex("60006005600a4306101561032f57600190506103"), v)
+ pt.Insert(decodeHex("600160a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("ffffff168152602001"), v)
+ pt.Insert(decodeHex("82815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("60006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000"), v)
+ pt.Insert(decodeHex("8352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301828280"), v)
+ pt.Insert(decodeHex("83526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091"), v)
+ pt.Insert(decodeHex("81810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293"), v)
+ pt.Insert(decodeHex("810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392"), v)
+ pt.Insert(decodeHex("808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183018282"), v)
+ pt.Insert(decodeHex("8051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280"), v)
+ pt.Insert(decodeHex("60408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095"), v)
+ pt.Insert(decodeHex("602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552"), v)
+ pt.Insert(decodeHex("60043560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184"), v)
+ pt.Insert(decodeHex("600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183018282801561"), v)
+ pt.Insert(decodeHex("6000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301"), v)
+ pt.Insert(decodeHex("6000526020600020"), v)
+ pt.Insert(decodeHex("52600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091830182828015"), v)
+ pt.Insert(decodeHex("526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183"), v)
+ pt.Insert(decodeHex("51602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085"), v)
+ pt.Insert(decodeHex("408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552"), v)
+ pt.Insert(decodeHex("3560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190"), v)
+ pt.Insert(decodeHex("2081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292"), v)
+ pt.Insert(decodeHex("043560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401"), v)
+ pt.Insert(decodeHex("0183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290"), v)
+ pt.Insert(decodeHex("0160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301828280156101"), v)
+ pt.Insert(decodeHex("00808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091830182"), v)
+ pt.Insert(decodeHex("0160a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("006005600a4306101561032f57600190506103"), v)
+ pt.Insert(decodeHex("6020016000206000"), v)
+ pt.Insert(decodeHex("000000000000000000"), v)
+ pt.Insert(decodeHex("7c01000000000000000000000000000000000000000000000000000000009004"), v)
+ pt.Insert(decodeHex("6000357c01000000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("357c010000000000000000000000000000000000000000000000000000000090"), v)
+ pt.Insert(decodeHex("0100000000000000000000000000000000000000000000000000000000900480"), v)
+ pt.Insert(decodeHex("00357c0100000000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000000000000000000000090048063"), v)
+ pt.Insert(decodeHex("0160a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("405180910390f35b"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("52602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffff1682"), v)
+ pt.Insert(decodeHex("600460209081526040808320849055600590"), v)
+ pt.Insert(decodeHex("928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381"), v)
+ pt.Insert(decodeHex("822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182"), v)
+ pt.Insert(decodeHex("818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601"), v)
+ pt.Insert(decodeHex("8152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185"), v)
+ pt.Insert(decodeHex("8152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096"), v)
+ pt.Insert(decodeHex("815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191508054"), v)
+ pt.Insert(decodeHex("80822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001"), v)
+ pt.Insert(decodeHex("806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156104"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103"), v)
+ pt.Insert(decodeHex("6101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152"), v)
+ pt.Insert(decodeHex("604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285"), v)
+ pt.Insert(decodeHex("60243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101"), v)
+ pt.Insert(decodeHex("6020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181"), v)
+ pt.Insert(decodeHex("60200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001915080548015"), v)
+ pt.Insert(decodeHex("60043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6040805160206024803560048181013560"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252"), v)
+ pt.Insert(decodeHex("60006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020"), v)
+ pt.Insert(decodeHex("52928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501"), v)
+ pt.Insert(decodeHex("52604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652"), v)
+ pt.Insert(decodeHex("5260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150805480"), v)
+ pt.Insert(decodeHex("5260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382"), v)
+ pt.Insert(decodeHex("4080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585"), v)
+ pt.Insert(decodeHex("3560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81"), v)
+ pt.Insert(decodeHex("3560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590"), v)
+ pt.Insert(decodeHex("243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185"), v)
+ pt.Insert(decodeHex("2090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100"), v)
+ pt.Insert(decodeHex("20818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286"), v)
+ pt.Insert(decodeHex("20036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154"), v)
+ pt.Insert(decodeHex("200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191508054801561"), v)
+ pt.Insert(decodeHex("1916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150"), v)
+ pt.Insert(decodeHex("16815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001915080"), v)
+ pt.Insert(decodeHex("0a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001"), v)
+ pt.Insert(decodeHex("043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281"), v)
+ pt.Insert(decodeHex("036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481"), v)
+ pt.Insert(decodeHex("031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191"), v)
+ pt.Insert(decodeHex("0191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150805480156104"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283"), v)
+ pt.Insert(decodeHex("01000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260"), v)
+ pt.Insert(decodeHex("006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485"), v)
+ pt.Insert(decodeHex("000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020"), v)
+ pt.Insert(decodeHex("600052602060002090"), v)
+ pt.Insert(decodeHex("820191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("00000000000000000000000000"), v)
+ pt.Insert(decodeHex("ffffff1681565b60"), v)
+ pt.Insert(decodeHex("60a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("ffff168152602001"), v)
+ pt.Insert(decodeHex("0000000000000000"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("000000000000000000000000"), v)
+ pt.Insert(decodeHex("60a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("600160a060020a033316"), v)
+ pt.Insert(decodeHex("0460209081526040808320849055600590"), v)
+ pt.Insert(decodeHex("f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083116103c1"), v)
+ pt.Insert(decodeHex("9250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001"), v)
+ pt.Insert(decodeHex("910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190602001808311"), v)
+ pt.Insert(decodeHex("90f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083116103"), v)
+ pt.Insert(decodeHex("9081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467"), v)
+ pt.Insert(decodeHex("900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffff"), v)
+ pt.Insert(decodeHex("8252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60"), v)
+ pt.Insert(decodeHex("81900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ff"), v)
+ pt.Insert(decodeHex("80910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083"), v)
+ pt.Insert(decodeHex("606060405260e060020a60003504630a3b0a4f81146100315780634e71d92d14"), v)
+ pt.Insert(decodeHex("6060405260e060020a60003504630a3b0a4f81146100315780634e71d92d1461"), v)
+ pt.Insert(decodeHex("60405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020"), v)
+ pt.Insert(decodeHex("60200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffff"), v)
+ pt.Insert(decodeHex("52519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b6002"), v)
+ pt.Insert(decodeHex("519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b600254"), v)
+ pt.Insert(decodeHex("5180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060200180"), v)
+ pt.Insert(decodeHex("5060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060"), v)
+ pt.Insert(decodeHex("505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190"), v)
+ pt.Insert(decodeHex("50505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101"), v)
+ pt.Insert(decodeHex("405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190602001"), v)
+ pt.Insert(decodeHex("200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffffff"), v)
+ pt.Insert(decodeHex("0390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("0360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffff"), v)
+ pt.Insert(decodeHex("0190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffffffff"), v)
+ pt.Insert(decodeHex("91906000526020600020905b8154815290600101"), v)
+ pt.Insert(decodeHex("906000526020600020905b815481529060010190"), v)
+ pt.Insert(decodeHex("820191906000526020600020905b815481529060"), v)
+ pt.Insert(decodeHex("6020600020905b81548152906001019060200180"), v)
+ pt.Insert(decodeHex("60200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("6000526020600020905b81548152906001019060"), v)
+ pt.Insert(decodeHex("600020905b815481529060010190602001808311"), v)
+ pt.Insert(decodeHex("57820191906000526020600020905b8154815290"), v)
+ pt.Insert(decodeHex("526020600020905b815481529060010190602001"), v)
+ pt.Insert(decodeHex("20600020905b8154815290600101906020018083"), v)
+ pt.Insert(decodeHex("0191906000526020600020905b81548152906001"), v)
+ pt.Insert(decodeHex("00526020600020905b8154815290600101906020"), v)
+ pt.Insert(decodeHex("0020905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("50505060405180910390"), v)
+ pt.Insert(decodeHex("57823582600050559160200191906001019061"), v)
+ pt.Insert(decodeHex("0191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("602001600020600050"), v)
+ pt.Insert(decodeHex("a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("04602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("ffff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b"), v)
+ pt.Insert(decodeHex("9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681"), v)
+ pt.Insert(decodeHex("81815233600160a060020a031682526001602090815260409283902080549182"), v)
+ pt.Insert(decodeHex("6101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("60606040526000357c0100000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("606040526000357c010000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("6040526000357c01000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("565b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192"), v)
+ pt.Insert(decodeHex("54906101000a900473ffffffffffffffffffffffffffffffffffffffff168156"), v)
+ pt.Insert(decodeHex("526000357c010000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("40526000357c0100000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("018367ffffffffffffffff16815260200150600360005060003373ffffffffff"), v)
+ pt.Insert(decodeHex("20905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("0052602060002090"), v)
+ pt.Insert(decodeHex("a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("9081101561000257"), v)
+ pt.Insert(decodeHex("0000000000000000000000"), v)
+ pt.Insert(decodeHex("200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("0160a060020a033316"), v)
+ pt.Insert(decodeHex("91906000526020600020905b81"), v)
+ pt.Insert(decodeHex("602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("546802b5e3af16b187ff"), v)
+ pt.Insert(decodeHex("905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392"), v)
+ pt.Insert(decodeHex("ffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290"), v)
+ pt.Insert(decodeHex("ffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092"), v)
+ pt.Insert(decodeHex("ffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201"), v)
+ pt.Insert(decodeHex("ffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191"), v)
+ pt.Insert(decodeHex("ffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181"), v)
+ pt.Insert(decodeHex("ffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190"), v)
+ pt.Insert(decodeHex("ff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084"), v)
+ pt.Insert(decodeHex("f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000600460208460"), v)
+ pt.Insert(decodeHex("f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060"), v)
+ pt.Insert(decodeHex("935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795"), v)
+ pt.Insert(decodeHex("9250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083"), v)
+ pt.Insert(decodeHex("918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302"), v)
+ pt.Insert(decodeHex("915080519060200190808383829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190819003"), v)
+ pt.Insert(decodeHex("910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000600460"), v)
+ pt.Insert(decodeHex("90f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084"), v)
+ pt.Insert(decodeHex("90935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077"), v)
+ pt.Insert(decodeHex("9081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("90815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035"), v)
+ pt.Insert(decodeHex("90808383829060006004602084601f0104600302600f01f15090509081019060"), v)
+ pt.Insert(decodeHex("906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffff"), v)
+ pt.Insert(decodeHex("90602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040"), v)
+ pt.Insert(decodeHex("9060200190808383829060006004602084601f0104600302600f01f150905090"), v)
+ pt.Insert(decodeHex("9060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190"), v)
+ pt.Insert(decodeHex("9060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f150905090810190601f16801561"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281"), v)
+ pt.Insert(decodeHex("9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673"), v)
+ pt.Insert(decodeHex("9050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001"), v)
+ pt.Insert(decodeHex("900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffff"), v)
+ pt.Insert(decodeHex("900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("84601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f01f150905090810190601f16"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01f150905090810190601f1680"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001"), v)
+ pt.Insert(decodeHex("838181518152602001915080519060200190808383829060006004602084601f"), v)
+ pt.Insert(decodeHex("838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f3"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f150905090810190601f168015"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190808383829060006004"), v)
+ pt.Insert(decodeHex("82565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284"), v)
+ pt.Insert(decodeHex("8252838181518152602001915080519060200190808383829060006004602084"), v)
+ pt.Insert(decodeHex("8252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b50925050506040"), v)
+ pt.Insert(decodeHex("81900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("8181518152602001915080519060200190808383829060006004602084601f01"), v)
+ pt.Insert(decodeHex("8181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051"), v)
+ pt.Insert(decodeHex("8152602001915080519060200190808383829060006004602084601f01046003"), v)
+ pt.Insert(decodeHex("8152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252"), v)
+ pt.Insert(decodeHex("815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594"), v)
+ pt.Insert(decodeHex("81518152602001915080519060200190808383829060006004602084601f0104"), v)
+ pt.Insert(decodeHex("81518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191"), v)
+ pt.Insert(decodeHex("8103825283818151815260200191508051906020019080838382906000600460"), v)
+ pt.Insert(decodeHex("80910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f0104600302600f01f150905090810190601f"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060"), v)
+ pt.Insert(decodeHex("805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494"), v)
+ pt.Insert(decodeHex("8051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f010460"), v)
+ pt.Insert(decodeHex("80519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251908190036020"), v)
+ pt.Insert(decodeHex("8051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180"), v)
+ pt.Insert(decodeHex("8035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550505050505060"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffff"), v)
+ pt.Insert(decodeHex("6c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282"), v)
+ pt.Insert(decodeHex("67ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093"), v)
+ pt.Insert(decodeHex("6101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffff"), v)
+ pt.Insert(decodeHex("61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084"), v)
+ pt.Insert(decodeHex("60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f01"), v)
+ pt.Insert(decodeHex("60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094"), v)
+ pt.Insert(decodeHex("60405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060"), v)
+ pt.Insert(decodeHex("60248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094955050505050"), v)
+ pt.Insert(decodeHex("602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381"), v)
+ pt.Insert(decodeHex("602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495505050"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b"), v)
+ pt.Insert(decodeHex("602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051"), v)
+ pt.Insert(decodeHex("602001915080519060200190808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190"), v)
+ pt.Insert(decodeHex("60200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090"), v)
+ pt.Insert(decodeHex("60200190808383829060006004602084601f0104600302600f01f15090509081"), v)
+ pt.Insert(decodeHex("60200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f3"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190808383829060"), v)
+ pt.Insert(decodeHex("601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181"), v)
+ pt.Insert(decodeHex("600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252"), v)
+ pt.Insert(decodeHex("600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b5092505050604051809103"), v)
+ pt.Insert(decodeHex("60010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382"), v)
+ pt.Insert(decodeHex("60006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103"), v)
+ pt.Insert(decodeHex("6000600050600060016000506000868152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("5b61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590"), v)
+ pt.Insert(decodeHex("5b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f"), v)
+ pt.Insert(decodeHex("5b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b5092505050"), v)
+ pt.Insert(decodeHex("565b61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255"), v)
+ pt.Insert(decodeHex("565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437"), v)
+ pt.Insert(decodeHex("54906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ff"), v)
+ pt.Insert(decodeHex("5467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490"), v)
+ pt.Insert(decodeHex("5290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590"), v)
+ pt.Insert(decodeHex("5283818151815260200191508051906020019080838382906000600460208460"), v)
+ pt.Insert(decodeHex("52602001915080519060200190808383829060006004602084601f0104600302"), v)
+ pt.Insert(decodeHex("52602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251"), v)
+ pt.Insert(decodeHex("52519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("5220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460"), v)
+ pt.Insert(decodeHex("51918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f01046003"), v)
+ pt.Insert(decodeHex("519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("519060200190808383829060006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190819003602001"), v)
+ pt.Insert(decodeHex("518152602001915080519060200190808383829060006004602084601f010460"), v)
+ pt.Insert(decodeHex("518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182"), v)
+ pt.Insert(decodeHex("5180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290600060"), v)
+ pt.Insert(decodeHex("51602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094955050"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b50925050506040518091"), v)
+ pt.Insert(decodeHex("509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020"), v)
+ pt.Insert(decodeHex("5080519060200190808383829060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("5080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360"), v)
+ pt.Insert(decodeHex("5060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290"), v)
+ pt.Insert(decodeHex("505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382"), v)
+ pt.Insert(decodeHex("50505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383"), v)
+ pt.Insert(decodeHex("50019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190"), v)
+ pt.Insert(decodeHex("408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104"), v)
+ pt.Insert(decodeHex("408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495"), v)
+ pt.Insert(decodeHex("405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000"), v)
+ pt.Insert(decodeHex("248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495505050505050"), v)
+ pt.Insert(decodeHex("2084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181"), v)
+ pt.Insert(decodeHex("20805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044"), v)
+ pt.Insert(decodeHex("2060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550505050"), v)
+ pt.Insert(decodeHex("2002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180"), v)
+ pt.Insert(decodeHex("2001915080519060200190808383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("2001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081"), v)
+ pt.Insert(decodeHex("200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090509081"), v)
+ pt.Insert(decodeHex("200190808383829060006004602084601f0104600302600f01f1509050908101"), v)
+ pt.Insert(decodeHex("200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b"), v)
+ pt.Insert(decodeHex("2001828103825283818151815260200191508051906020019080838382906000"), v)
+ pt.Insert(decodeHex("1f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152"), v)
+ pt.Insert(decodeHex("169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401"), v)
+ pt.Insert(decodeHex("0f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051"), v)
+ pt.Insert(decodeHex("0a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffff"), v)
+ pt.Insert(decodeHex("0473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffff"), v)
+ pt.Insert(decodeHex("04602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283"), v)
+ pt.Insert(decodeHex("04600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020"), v)
+ pt.Insert(decodeHex("0390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290600060046020"), v)
+ pt.Insert(decodeHex("0382528381815181526020019150805190602001908083838290600060046020"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b5092505050604051"), v)
+ pt.Insert(decodeHex("0360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("0302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191"), v)
+ pt.Insert(decodeHex("02808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080"), v)
+ pt.Insert(decodeHex("02600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150"), v)
+ pt.Insert(decodeHex("01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190"), v)
+ pt.Insert(decodeHex("019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080"), v)
+ pt.Insert(decodeHex("01915080519060200190808383829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("01915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251908190"), v)
+ pt.Insert(decodeHex("0190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1509050908101"), v)
+ pt.Insert(decodeHex("0190808383829060006004602084601f0104600302600f01f150905090810190"), v)
+ pt.Insert(decodeHex("0190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390"), v)
+ pt.Insert(decodeHex("0182810382528381815181526020019150805190602001908083838290600060"), v)
+ pt.Insert(decodeHex("0182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082"), v)
+ pt.Insert(decodeHex("0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260"), v)
+ pt.Insert(decodeHex("010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280"), v)
+ pt.Insert(decodeHex("01000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffff"), v)
+ pt.Insert(decodeHex("006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382"), v)
+ pt.Insert(decodeHex("0060005060006001600050600086815260200190815260200160002060005060"), v)
+ pt.Insert(decodeHex("000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffff"), v)
+
+ mf2 := NewMatchFinder2(&pt)
+ data := decodeHex("6060604052361561008a576000357c01000000000000000000000000000000000000000000000000000000009004806301cb3b20146102e357806329dcb0cf14" +
+ "6102f057806338af3eed146103115780636e66f6e9146103485780637a3a0e841461037f5780637b3e5e7b146103a0578063a035b1fe146103c1578063dc0d3d" +
+ "ff146103e25761008a565b6102e15b60006000600660005060066000508054600101908181548183558181151161011957600202816002028360005260206000" +
+ "20918201910161011891906100cf565b808211156101145760006000820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690" +
+ "556001820160005060009055506001016100cf565b5090565b5b5050508154811015610002579060005260206000209060020201600091509150338282506000" +
+ "0160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055503482825060010160005081905550818150600101600050" +
+ "546002600082828250540192505081905550600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffff" +
+ "ffffffffffffffffffff166390b98a11336004600050548585506001016000505404604051837c01000000000000000000000000000000000000000000000000" +
+ "00000000028152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001828152602001925050506020604051808303816000876161da" +
+ "5a03f1156100025750505060405151507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf682825060000160009054906101000a" +
+ "900473ffffffffffffffffffffffffffffffffffffffff16838350600101600050546001604051808473ffffffffffffffffffffffffffffffffffffffff1681" +
+ "52602001838152602001828152602001935050505060405180910390a15b5050565b005b6102ee6004506104ec565b005b6102fb60045061045e565b60405180" +
+ "82815260200191505060405180910390f35b61031c600450610426565b604051808273ffffffffffffffffffffffffffffffffffffffff168152602001915050" +
+ "60405180910390f35b610353600450610470565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b" +
+ "61038a60045061044c565b6040518082815260200191505060405180910390f35b6103ab600450610455565b6040518082815260200191505060405180910390" +
+ "f35b6103cc600450610467565b6040518082815260200191505060405180910390f35b6103f3600480359060200150610496565b604051808373ffffffffffff" +
+ "ffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffff" +
+ "ffffffffffffffffff1681565b60016000505481565b60026000505481565b60036000505481565b60046000505481565b600560009054906101000a900473ff" +
+ "ffffffffffffffffffffffffffffffffffffff1681565b6006600050818154811015610002579060005260206000209060020201600091509050806000016000" +
+ "9054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010160005054905082565b6000600360005054421015156107fc57600160" +
+ "0050546002600050541015156105f357600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffff" +
+ "ffffffffffffffff166000600260005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf6" +
+ "60cf39e8eec506112bbdf6600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff166002600050546000604051808473ffffffff" +
+ "ffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a16107c1565b7fe842aea7a5f1b01049d752" +
+ "008c53c52890b1a6daf660cf39e8eec506112bbdf66000600b600060405180848152602001838152602001828152602001935050505060405180910390a16000" +
+ "90505b6006600050548110156107c057600660005081815481101561000257906000526020600020906002020160005060000160009054906101000a900473ff" +
+ "ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660006006600050838154811015610002579060005260" +
+ "20600020906002020160005060010160005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6" +
+ "daf660cf39e8eec506112bbdf6600660005082815481101561000257906000526020600020906002020160005060000160009054906101000a900473ffffffff" +
+ "ffffffffffffffffffffffffffffffff166006600050838154811015610002579060005260206000209060020201600050600101600050546000604051808473" +
+ "ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a15b806001019050805061064256" +
+ "5b5b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b5056")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 234 {
+ t.Errorf("expected matches: %d, got %d", 234, len(matches))
+ }
+}
+
+func TestFindMatches7(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("600160a060020a03"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1681"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168156"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1681565b"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff168152602001908152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff16815260200190815260200160"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1681526020019081526020016000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168152602001908152602001600020"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16815260200190815260200160002060"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffffff1681526020019081526020"), v)
+ pt.Insert(decodeHex("81526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff168152"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16815260"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1681526020"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("6020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("8152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("80806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561"), v)
+ pt.Insert(decodeHex("60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16"), v)
+ pt.Insert(decodeHex("5b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f"), v)
+ pt.Insert(decodeHex("5180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015"), v)
+ pt.Insert(decodeHex("405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680"), v)
+ pt.Insert(decodeHex("20019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("52602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("9081526020016000206000"), v)
+ pt.Insert(decodeHex("5050604051849350600080"), v)
+ pt.Insert(decodeHex("2001908152602001600020600050"), v)
+ pt.Insert(decodeHex("505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("6000506000600060005054815260200190815260200160002060006101000a81"), v)
+ pt.Insert(decodeHex("00506000600060005054815260200190815260200160002060006101000a8154"), v)
+ pt.Insert(decodeHex("ffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("ffffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffff168152602001"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a4578082"), v)
+ pt.Insert(decodeHex("81038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a457808203"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a4"), v)
+ pt.Insert(decodeHex("60005060003373ffffffffffffffffffffffffffffffffffffffff1681526020"), v)
+ pt.Insert(decodeHex("60003373ffffffffffffffffffffffffffffffffffffffff1681526020019081"), v)
+ pt.Insert(decodeHex("5060003373ffffffffffffffffffffffffffffffffffffffff16815260200190"), v)
+ pt.Insert(decodeHex("3373ffffffffffffffffffffffffffffffffffffffff16815260200190815260"), v)
+ pt.Insert(decodeHex("20018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a457"), v)
+ pt.Insert(decodeHex("038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a45780820380"), v)
+ pt.Insert(decodeHex("018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100a45780"), v)
+ pt.Insert(decodeHex("005060003373ffffffffffffffffffffffffffffffffffffffff168152602001"), v)
+ pt.Insert(decodeHex("003373ffffffffffffffffffffffffffffffffffffffff168152602001908152"), v)
+ pt.Insert(decodeHex("01908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff16815260200190"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1681526020019081"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff168152602001908152"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff16815260200190815260"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1681526020019081526020"), v)
+ pt.Insert(decodeHex("ffffffffffffffff168152602001908152602001"), v)
+ pt.Insert(decodeHex("ffffffffffffff16815260200190815260200160"), v)
+ pt.Insert(decodeHex("ffffffffffff1681526020019081526020016000"), v)
+ pt.Insert(decodeHex("ffffffffff168152602001908152602001600020"), v)
+ pt.Insert(decodeHex("ffffffff16815260200190815260200160002060"), v)
+ pt.Insert(decodeHex("ffffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("81526020016000206000"), v)
+ pt.Insert(decodeHex("50604051849350600080"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("5060405180910390f35b"), v)
+ pt.Insert(decodeHex("168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("ffff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("67ffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("ffff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("6040518082815260200191505060405180910390"), v)
+ pt.Insert(decodeHex("ffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("908152602001600020600050"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390"), v)
+ pt.Insert(decodeHex("81038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f3"), v)
+ pt.Insert(decodeHex("80806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050506040"), v)
+ pt.Insert(decodeHex("806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050604051"), v)
+ pt.Insert(decodeHex("60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180"), v)
+ pt.Insert(decodeHex("5b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250"), v)
+ pt.Insert(decodeHex("5180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060"), v)
+ pt.Insert(decodeHex("405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050"), v)
+ pt.Insert(decodeHex("20018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050506040518091"), v)
+ pt.Insert(decodeHex("038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b"), v)
+ pt.Insert(decodeHex("018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192505050604051809103"), v)
+ pt.Insert(decodeHex("ffffffff168152602001"), v)
+ pt.Insert(decodeHex("5b60405180828152602001915050604051809103"), v)
+ pt.Insert(decodeHex("565b604051808281526020019150506040518091"), v)
+ pt.Insert(decodeHex("518082815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("40518082815260200191505060405180910390f3"), v)
+ pt.Insert(decodeHex("ff16815260200191505060405180910390a1"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1682"), v)
+ pt.Insert(decodeHex("808281526102cb94909290828280156100d757"), v)
+ pt.Insert(decodeHex("ff1681526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("526020016000206000"), v)
+ pt.Insert(decodeHex("f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090509081019060"), v)
+ pt.Insert(decodeHex("a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284209192849261"), v)
+ pt.Insert(decodeHex("90f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190"), v)
+ pt.Insert(decodeHex("8252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60"), v)
+ pt.Insert(decodeHex("60a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282842091928492"), v)
+ pt.Insert(decodeHex("602081815260408083206002548452825282208354815467ffffffffffffffff191667ffffffffffffffff919091161781556001848101805491830180548382"), v)
+ pt.Insert(decodeHex("600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284"), v)
+ pt.Insert(decodeHex("600160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a090810190945260808281529293909190"), v)
+ pt.Insert(decodeHex("600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084528284209192"), v)
+ pt.Insert(decodeHex("60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("52838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040"), v)
+ pt.Insert(decodeHex("52600160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a0908101909452608082815292939091"), v)
+ pt.Insert(decodeHex("526000828152604090208054829081101561000257506000908152602090200154905081565b610315600435600160a060020a03331660009081526020818152"), v)
+ pt.Insert(decodeHex("35600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282842091"), v)
+ pt.Insert(decodeHex("2081815260408083206002548452825282208354815467ffffffffffffffff191667ffffffffffffffff91909116178155600184810180549183018054838255"), v)
+ pt.Insert(decodeHex("0435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452828420"), v)
+ pt.Insert(decodeHex("0160a060020a0333168252602082815260408084208585528252928390206001018054601f810183900490920260a09081019094526080828152929390919082"), v)
+ pt.Insert(decodeHex("0160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452828420919284"), v)
+ pt.Insert(decodeHex("009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("60405180910390f35b"), v)
+ pt.Insert(decodeHex("8082815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ffffffffffffff1682"), v)
+ pt.Insert(decodeHex("600160a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("8152602001600020600050"), v)
+ pt.Insert(decodeHex("90600052602060002090"), v)
+ pt.Insert(decodeHex("928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782"), v)
+ pt.Insert(decodeHex("822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260608481526100"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080"), v)
+ pt.Insert(decodeHex("818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160"), v)
+ pt.Insert(decodeHex("8152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040"), v)
+ pt.Insert(decodeHex("8152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040526060"), v)
+ pt.Insert(decodeHex("815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020"), v)
+ pt.Insert(decodeHex("80822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084815261"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c0908490"), v)
+ pt.Insert(decodeHex("80519060200190808383829060006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c0908490801561"), v)
+ pt.Insert(decodeHex("6101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060"), v)
+ pt.Insert(decodeHex("604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402850160405260608481"), v)
+ pt.Insert(decodeHex("60243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590"), v)
+ pt.Insert(decodeHex("6020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201"), v)
+ pt.Insert(decodeHex("60200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b"), v)
+ pt.Insert(decodeHex("60043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de"), v)
+ pt.Insert(decodeHex("60006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c09084"), v)
+ pt.Insert(decodeHex("52928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052"), v)
+ pt.Insert(decodeHex("52604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084"), v)
+ pt.Insert(decodeHex("5260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052602060002090"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103"), v)
+ pt.Insert(decodeHex("4080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490940285016040526060848152"), v)
+ pt.Insert(decodeHex("3560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185"), v)
+ pt.Insert(decodeHex("3560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f810185900490"), v)
+ pt.Insert(decodeHex("243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004"), v)
+ pt.Insert(decodeHex("2090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501604052606084815261006c"), v)
+ pt.Insert(decodeHex("20818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f81018590049094028501"), v)
+ pt.Insert(decodeHex("20036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191"), v)
+ pt.Insert(decodeHex("200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("1916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052602060"), v)
+ pt.Insert(decodeHex("16815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060005260206000"), v)
+ pt.Insert(decodeHex("0a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de578201919060005260"), v)
+ pt.Insert(decodeHex("043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c09084908015"), v)
+ pt.Insert(decodeHex("036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190"), v)
+ pt.Insert(decodeHex("031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020"), v)
+ pt.Insert(decodeHex("0191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57"), v)
+ pt.Insert(decodeHex("01000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000"), v)
+ pt.Insert(decodeHex("006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6080602060248035600481810135601f8101859004909402"), v)
+ pt.Insert(decodeHex("000a031916815260200191505b509250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de5782019190600052"), v)
+ pt.Insert(decodeHex("ffffffff1681565b60"), v)
+ pt.Insert(decodeHex("60006005600a4306101561032f57600190506103"), v)
+ pt.Insert(decodeHex("600160a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("ffffff168152602001"), v)
+ pt.Insert(decodeHex("82815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("60006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("815260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000"), v)
+ pt.Insert(decodeHex("8352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301828280"), v)
+ pt.Insert(decodeHex("83526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091"), v)
+ pt.Insert(decodeHex("81810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293"), v)
+ pt.Insert(decodeHex("810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392"), v)
+ pt.Insert(decodeHex("808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183018282"), v)
+ pt.Insert(decodeHex("8051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280"), v)
+ pt.Insert(decodeHex("60408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095"), v)
+ pt.Insert(decodeHex("602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552"), v)
+ pt.Insert(decodeHex("60043560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184"), v)
+ pt.Insert(decodeHex("600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183018282801561"), v)
+ pt.Insert(decodeHex("6000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301"), v)
+ pt.Insert(decodeHex("6000526020600020"), v)
+ pt.Insert(decodeHex("52600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091830182828015"), v)
+ pt.Insert(decodeHex("526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552808552929392909183"), v)
+ pt.Insert(decodeHex("51602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085"), v)
+ pt.Insert(decodeHex("408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401909552"), v)
+ pt.Insert(decodeHex("3560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190"), v)
+ pt.Insert(decodeHex("2081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292"), v)
+ pt.Insert(decodeHex("043560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f8201849004840281018401"), v)
+ pt.Insert(decodeHex("0183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290"), v)
+ pt.Insert(decodeHex("0160a060020a0333168152808252838120858252825283902060010180548451601f820184900484028101840190955280855292939290918301828280156101"), v)
+ pt.Insert(decodeHex("00808352600160a060020a0333168152808252838120858252825283902060010180548451601f82018490048402810184019095528085529293929091830182"), v)
+ pt.Insert(decodeHex("0160a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("006005600a4306101561032f57600190506103"), v)
+ pt.Insert(decodeHex("6020016000206000"), v)
+ pt.Insert(decodeHex("000000000000000000"), v)
+ pt.Insert(decodeHex("7c01000000000000000000000000000000000000000000000000000000009004"), v)
+ pt.Insert(decodeHex("6000357c01000000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("357c010000000000000000000000000000000000000000000000000000000090"), v)
+ pt.Insert(decodeHex("0100000000000000000000000000000000000000000000000000000000900480"), v)
+ pt.Insert(decodeHex("00357c0100000000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("0000000000000000000000000000000000000000000000000000000090048063"), v)
+ pt.Insert(decodeHex("0160a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("405180910390f35b"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("52602001600020600050"), v)
+ pt.Insert(decodeHex("ffffffffffff1682"), v)
+ pt.Insert(decodeHex("600460209081526040808320849055600590"), v)
+ pt.Insert(decodeHex("928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381"), v)
+ pt.Insert(decodeHex("822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182"), v)
+ pt.Insert(decodeHex("818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601"), v)
+ pt.Insert(decodeHex("8152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185"), v)
+ pt.Insert(decodeHex("8152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096"), v)
+ pt.Insert(decodeHex("815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191508054"), v)
+ pt.Insert(decodeHex("80822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001"), v)
+ pt.Insert(decodeHex("806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156104"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103"), v)
+ pt.Insert(decodeHex("6101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152"), v)
+ pt.Insert(decodeHex("604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285"), v)
+ pt.Insert(decodeHex("60243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101"), v)
+ pt.Insert(decodeHex("6020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181"), v)
+ pt.Insert(decodeHex("60200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001915080548015"), v)
+ pt.Insert(decodeHex("60043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b6040805160206024803560048181013560"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252"), v)
+ pt.Insert(decodeHex("60006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020"), v)
+ pt.Insert(decodeHex("52928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501"), v)
+ pt.Insert(decodeHex("52604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652"), v)
+ pt.Insert(decodeHex("5260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150805480"), v)
+ pt.Insert(decodeHex("5260200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382"), v)
+ pt.Insert(decodeHex("4080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585"), v)
+ pt.Insert(decodeHex("3560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81"), v)
+ pt.Insert(decodeHex("3560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590"), v)
+ pt.Insert(decodeHex("243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185"), v)
+ pt.Insert(decodeHex("2090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100"), v)
+ pt.Insert(decodeHex("20818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286"), v)
+ pt.Insert(decodeHex("20036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154"), v)
+ pt.Insert(decodeHex("200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191508054801561"), v)
+ pt.Insert(decodeHex("1916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150"), v)
+ pt.Insert(decodeHex("16815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001915080"), v)
+ pt.Insert(decodeHex("0a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281038252838181548152602001"), v)
+ pt.Insert(decodeHex("043560243560006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff168152602001806020018281"), v)
+ pt.Insert(decodeHex("036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481"), v)
+ pt.Insert(decodeHex("031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260200191"), v)
+ pt.Insert(decodeHex("0191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020019150805480156104"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283"), v)
+ pt.Insert(decodeHex("01000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180602001828103825283818154815260"), v)
+ pt.Insert(decodeHex("006020818152928152604080822090935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485"), v)
+ pt.Insert(decodeHex("000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff1681526020018060200182810382528381815481526020"), v)
+ pt.Insert(decodeHex("600052602060002090"), v)
+ pt.Insert(decodeHex("820191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("00000000000000000000000000"), v)
+ pt.Insert(decodeHex("ffffff1681565b60"), v)
+ pt.Insert(decodeHex("60a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("ffff168152602001"), v)
+ pt.Insert(decodeHex("0000000000000000"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("000000000000000000000000"), v)
+ pt.Insert(decodeHex("60a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("600160a060020a033316"), v)
+ pt.Insert(decodeHex("0460209081526040808320849055600590"), v)
+ pt.Insert(decodeHex("f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083116103c1"), v)
+ pt.Insert(decodeHex("9250505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001"), v)
+ pt.Insert(decodeHex("910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190602001808311"), v)
+ pt.Insert(decodeHex("90f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083116103"), v)
+ pt.Insert(decodeHex("9081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467"), v)
+ pt.Insert(decodeHex("900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffff"), v)
+ pt.Insert(decodeHex("8252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60"), v)
+ pt.Insert(decodeHex("81900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ff"), v)
+ pt.Insert(decodeHex("80910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020018083"), v)
+ pt.Insert(decodeHex("606060405260e060020a60003504630a3b0a4f81146100315780634e71d92d14"), v)
+ pt.Insert(decodeHex("6060405260e060020a60003504630a3b0a4f81146100315780634e71d92d1461"), v)
+ pt.Insert(decodeHex("60405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101906020"), v)
+ pt.Insert(decodeHex("60200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffff"), v)
+ pt.Insert(decodeHex("52519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b6002"), v)
+ pt.Insert(decodeHex("519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b600254"), v)
+ pt.Insert(decodeHex("5180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060200180"), v)
+ pt.Insert(decodeHex("5060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060"), v)
+ pt.Insert(decodeHex("505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190"), v)
+ pt.Insert(decodeHex("50505060405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b8154815290600101"), v)
+ pt.Insert(decodeHex("405180910390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b815481529060010190602001"), v)
+ pt.Insert(decodeHex("200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffffff"), v)
+ pt.Insert(decodeHex("0390f35b606082815260406080908152825460a081905260c090849080156103de57820191906000526020600020905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("0360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffff"), v)
+ pt.Insert(decodeHex("0190f35b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b50815b60025467ffffffffffff"), v)
+ pt.Insert(decodeHex("91906000526020600020905b8154815290600101"), v)
+ pt.Insert(decodeHex("906000526020600020905b815481529060010190"), v)
+ pt.Insert(decodeHex("820191906000526020600020905b815481529060"), v)
+ pt.Insert(decodeHex("6020600020905b81548152906001019060200180"), v)
+ pt.Insert(decodeHex("60200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("6000526020600020905b81548152906001019060"), v)
+ pt.Insert(decodeHex("600020905b815481529060010190602001808311"), v)
+ pt.Insert(decodeHex("57820191906000526020600020905b8154815290"), v)
+ pt.Insert(decodeHex("526020600020905b815481529060010190602001"), v)
+ pt.Insert(decodeHex("20600020905b8154815290600101906020018083"), v)
+ pt.Insert(decodeHex("0191906000526020600020905b81548152906001"), v)
+ pt.Insert(decodeHex("00526020600020905b8154815290600101906020"), v)
+ pt.Insert(decodeHex("0020905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("50505060405180910390"), v)
+ pt.Insert(decodeHex("57823582600050559160200191906001019061"), v)
+ pt.Insert(decodeHex("0191906000526020600020905b81"), v)
+ pt.Insert(decodeHex("602001600020600050"), v)
+ pt.Insert(decodeHex("a060020a033316600090815261"), v)
+ pt.Insert(decodeHex("04602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("ffff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b"), v)
+ pt.Insert(decodeHex("9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681"), v)
+ pt.Insert(decodeHex("81815233600160a060020a031682526001602090815260409283902080549182"), v)
+ pt.Insert(decodeHex("6101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60"), v)
+ pt.Insert(decodeHex("60606040526000357c0100000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("606040526000357c010000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("6040526000357c01000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("565b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f15090500192"), v)
+ pt.Insert(decodeHex("54906101000a900473ffffffffffffffffffffffffffffffffffffffff168156"), v)
+ pt.Insert(decodeHex("526000357c010000000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("40526000357c0100000000000000000000000000000000000000000000000000"), v)
+ pt.Insert(decodeHex("018367ffffffffffffffff16815260200150600360005060003373ffffffffff"), v)
+ pt.Insert(decodeHex("20905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("0052602060002090"), v)
+ pt.Insert(decodeHex("a060020a033316600090815260"), v)
+ pt.Insert(decodeHex("9081101561000257"), v)
+ pt.Insert(decodeHex("0000000000000000000000"), v)
+ pt.Insert(decodeHex("200191505060405180910390f35b"), v)
+ pt.Insert(decodeHex("ff168152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("0160a060020a033316"), v)
+ pt.Insert(decodeHex("91906000526020600020905b81"), v)
+ pt.Insert(decodeHex("602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("546802b5e3af16b187ff"), v)
+ pt.Insert(decodeHex("905b81548152906001019060200180831161"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392"), v)
+ pt.Insert(decodeHex("ffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290"), v)
+ pt.Insert(decodeHex("ffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092"), v)
+ pt.Insert(decodeHex("ffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201"), v)
+ pt.Insert(decodeHex("ffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191"), v)
+ pt.Insert(decodeHex("ffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181"), v)
+ pt.Insert(decodeHex("ffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190"), v)
+ pt.Insert(decodeHex("ff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084"), v)
+ pt.Insert(decodeHex("f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000600460208460"), v)
+ pt.Insert(decodeHex("f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060"), v)
+ pt.Insert(decodeHex("935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795"), v)
+ pt.Insert(decodeHex("9250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083"), v)
+ pt.Insert(decodeHex("918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302"), v)
+ pt.Insert(decodeHex("915080519060200190808383829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190819003"), v)
+ pt.Insert(decodeHex("910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000600460"), v)
+ pt.Insert(decodeHex("90f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084"), v)
+ pt.Insert(decodeHex("90935290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077"), v)
+ pt.Insert(decodeHex("9081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1"), v)
+ pt.Insert(decodeHex("90815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035"), v)
+ pt.Insert(decodeHex("90808383829060006004602084601f0104600302600f01f15090509081019060"), v)
+ pt.Insert(decodeHex("906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffff"), v)
+ pt.Insert(decodeHex("90602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040"), v)
+ pt.Insert(decodeHex("9060200190808383829060006004602084601f0104600302600f01f150905090"), v)
+ pt.Insert(decodeHex("9060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190"), v)
+ pt.Insert(decodeHex("9060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f150905090810190601f16801561"), v)
+ pt.Insert(decodeHex("9060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281"), v)
+ pt.Insert(decodeHex("9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673"), v)
+ pt.Insert(decodeHex("9050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001"), v)
+ pt.Insert(decodeHex("900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffff"), v)
+ pt.Insert(decodeHex("900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090"), v)
+ pt.Insert(decodeHex("84601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f01f150905090810190601f16"), v)
+ pt.Insert(decodeHex("8383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01f150905090810190601f1680"), v)
+ pt.Insert(decodeHex("83829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001"), v)
+ pt.Insert(decodeHex("838181518152602001915080519060200190808383829060006004602084601f"), v)
+ pt.Insert(decodeHex("838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080"), v)
+ pt.Insert(decodeHex("836020036101000a031916815260200191505b509250505060405180910390f3"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f150905090810190601f168015"), v)
+ pt.Insert(decodeHex("829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182"), v)
+ pt.Insert(decodeHex("8281038252838181518152602001915080519060200190808383829060006004"), v)
+ pt.Insert(decodeHex("82565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284"), v)
+ pt.Insert(decodeHex("8252838181518152602001915080519060200190808383829060006004602084"), v)
+ pt.Insert(decodeHex("8252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("820380516001836020036101000a031916815260200191505b50925050506040"), v)
+ pt.Insert(decodeHex("81900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("8181518152602001915080519060200190808383829060006004602084601f01"), v)
+ pt.Insert(decodeHex("8181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051"), v)
+ pt.Insert(decodeHex("8152602001915080519060200190808383829060006004602084601f01046003"), v)
+ pt.Insert(decodeHex("8152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252"), v)
+ pt.Insert(decodeHex("815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594"), v)
+ pt.Insert(decodeHex("81518152602001915080519060200190808383829060006004602084601f0104"), v)
+ pt.Insert(decodeHex("81518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191"), v)
+ pt.Insert(decodeHex("8103825283818151815260200191508051906020019080838382906000600460"), v)
+ pt.Insert(decodeHex("80910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f0104600302600f01f150905090810190601f"), v)
+ pt.Insert(decodeHex("808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060"), v)
+ pt.Insert(decodeHex("80820380516001836020036101000a031916815260200191505b509250505060"), v)
+ pt.Insert(decodeHex("805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494"), v)
+ pt.Insert(decodeHex("8051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f010460"), v)
+ pt.Insert(decodeHex("80519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251908190036020"), v)
+ pt.Insert(decodeHex("8051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550"), v)
+ pt.Insert(decodeHex("80516001836020036101000a031916815260200191505b509250505060405180"), v)
+ pt.Insert(decodeHex("8035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550505050505060"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffff"), v)
+ pt.Insert(decodeHex("6c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590845282"), v)
+ pt.Insert(decodeHex("67ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093"), v)
+ pt.Insert(decodeHex("6101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffff"), v)
+ pt.Insert(decodeHex("61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff191681556001810180548482559084"), v)
+ pt.Insert(decodeHex("60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f01"), v)
+ pt.Insert(decodeHex("60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094"), v)
+ pt.Insert(decodeHex("60405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060"), v)
+ pt.Insert(decodeHex("60248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094955050505050"), v)
+ pt.Insert(decodeHex("602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381"), v)
+ pt.Insert(decodeHex("602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495505050"), v)
+ pt.Insert(decodeHex("6020036101000a031916815260200191505b509250505060405180910390f35b"), v)
+ pt.Insert(decodeHex("602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051"), v)
+ pt.Insert(decodeHex("602001915080519060200190808383829060006004602084601f010460030260"), v)
+ pt.Insert(decodeHex("602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190"), v)
+ pt.Insert(decodeHex("60200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090"), v)
+ pt.Insert(decodeHex("60200190808383829060006004602084601f0104600302600f01f15090509081"), v)
+ pt.Insert(decodeHex("60200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f3"), v)
+ pt.Insert(decodeHex("6020018281038252838181518152602001915080519060200190808383829060"), v)
+ pt.Insert(decodeHex("601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181"), v)
+ pt.Insert(decodeHex("600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080"), v)
+ pt.Insert(decodeHex("6004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252"), v)
+ pt.Insert(decodeHex("600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001"), v)
+ pt.Insert(decodeHex("6001836020036101000a031916815260200191505b5092505050604051809103"), v)
+ pt.Insert(decodeHex("60010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382"), v)
+ pt.Insert(decodeHex("60006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103"), v)
+ pt.Insert(decodeHex("6000600050600060016000506000868152602001908152602001600020600050"), v)
+ pt.Insert(decodeHex("5b61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff1916815560018101805484825590"), v)
+ pt.Insert(decodeHex("5b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f"), v)
+ pt.Insert(decodeHex("5b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750"), v)
+ pt.Insert(decodeHex("5780820380516001836020036101000a031916815260200191505b5092505050"), v)
+ pt.Insert(decodeHex("565b61006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255"), v)
+ pt.Insert(decodeHex("565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437"), v)
+ pt.Insert(decodeHex("54906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ff"), v)
+ pt.Insert(decodeHex("5467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460449490"), v)
+ pt.Insert(decodeHex("5290815220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590"), v)
+ pt.Insert(decodeHex("5283818151815260200191508051906020019080838382906000600460208460"), v)
+ pt.Insert(decodeHex("52602001915080519060200190808383829060006004602084601f0104600302"), v)
+ pt.Insert(decodeHex("52602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251"), v)
+ pt.Insert(decodeHex("52519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("5220805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100779590359460"), v)
+ pt.Insert(decodeHex("51918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f01046003"), v)
+ pt.Insert(decodeHex("519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("519060200190808383829060006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182525190819003602001"), v)
+ pt.Insert(decodeHex("518152602001915080519060200190808383829060006004602084601f010460"), v)
+ pt.Insert(decodeHex("518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b604080519182"), v)
+ pt.Insert(decodeHex("5180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290600060"), v)
+ pt.Insert(decodeHex("51602060248035600481810135601f81018590048502860185019096528585526100779590359460449490939290920191819084018382808284375094955050"), v)
+ pt.Insert(decodeHex("516001836020036101000a031916815260200191505b50925050506040518091"), v)
+ pt.Insert(decodeHex("509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020"), v)
+ pt.Insert(decodeHex("5080519060200190808383829060006004602084601f0104600302600f01f150"), v)
+ pt.Insert(decodeHex("5080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360"), v)
+ pt.Insert(decodeHex("5060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290"), v)
+ pt.Insert(decodeHex("505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382"), v)
+ pt.Insert(decodeHex("50505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383"), v)
+ pt.Insert(decodeHex("50019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190"), v)
+ pt.Insert(decodeHex("408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104"), v)
+ pt.Insert(decodeHex("408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495"), v)
+ pt.Insert(decodeHex("405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080838382906000"), v)
+ pt.Insert(decodeHex("248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280828437509495505050505050"), v)
+ pt.Insert(decodeHex("2084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181"), v)
+ pt.Insert(decodeHex("20805467ffffffffffffffff169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044"), v)
+ pt.Insert(decodeHex("2060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082843750949550505050"), v)
+ pt.Insert(decodeHex("2002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180"), v)
+ pt.Insert(decodeHex("2001915080519060200190808383829060006004602084601f0104600302600f"), v)
+ pt.Insert(decodeHex("2001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081"), v)
+ pt.Insert(decodeHex("200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f15090509081"), v)
+ pt.Insert(decodeHex("200190808383829060006004602084601f0104600302600f01f1509050908101"), v)
+ pt.Insert(decodeHex("200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b"), v)
+ pt.Insert(decodeHex("1f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60405180806020018281038252838181518152"), v)
+ pt.Insert(decodeHex("169060010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401"), v)
+ pt.Insert(decodeHex("0f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051"), v)
+ pt.Insert(decodeHex("0a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffff"), v)
+ pt.Insert(decodeHex("0473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffff"), v)
+ pt.Insert(decodeHex("04602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283"), v)
+ pt.Insert(decodeHex("04600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020"), v)
+ pt.Insert(decodeHex("0390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290600060046020"), v)
+ pt.Insert(decodeHex("0382528381815181526020019150805190602001908083838290600060046020"), v)
+ pt.Insert(decodeHex("0380516001836020036101000a031916815260200191505b5092505050604051"), v)
+ pt.Insert(decodeHex("0360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1509050"), v)
+ pt.Insert(decodeHex("0302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191"), v)
+ pt.Insert(decodeHex("02808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080"), v)
+ pt.Insert(decodeHex("02600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150"), v)
+ pt.Insert(decodeHex("01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382528381815181526020019150805190"), v)
+ pt.Insert(decodeHex("019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260200191508051906020019080"), v)
+ pt.Insert(decodeHex("01915080519060200190808383829060006004602084601f0104600302600f01"), v)
+ pt.Insert(decodeHex("01915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040805191825251908190"), v)
+ pt.Insert(decodeHex("0190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f1509050908101"), v)
+ pt.Insert(decodeHex("0190808383829060006004602084601f0104600302600f01f150905090810190"), v)
+ pt.Insert(decodeHex("0190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b60"), v)
+ pt.Insert(decodeHex("01836020036101000a031916815260200191505b509250505060405180910390"), v)
+ pt.Insert(decodeHex("0182810382528381815181526020019150805190602001908083838290600060"), v)
+ pt.Insert(decodeHex("0182565b60408051602060248035600481810135601f810185900485028601850190965285855261007795903594604494909392909201918190840183828082"), v)
+ pt.Insert(decodeHex("0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b6040518080602001828103825283818151815260"), v)
+ pt.Insert(decodeHex("010182565b60408051602060248035600481810135601f8101859004850286018501909652858552610077959035946044949093929092019181908401838280"), v)
+ pt.Insert(decodeHex("01000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffff"), v)
+ pt.Insert(decodeHex("006c600435600160a060020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452"), v)
+ pt.Insert(decodeHex("006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051808060200182810382"), v)
+ pt.Insert(decodeHex("0060005060006001600050600086815260200190815260200160002060005060"), v)
+ pt.Insert(decodeHex("000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffff"), v)
+ mf2 := NewMatchFinder2(&pt)
+ data := decodeHex("606060405236156100615760e060020a60003504630a936fe5811461006357806315853113146100d857806339bfc4a11461010b5780636939cd971461014357" +
+ "8063c36948b5146101d3578063de6f24bb14610205578063fe1e3eca146102f5575b005b6040805160208181018352600080835233600160a060020a03168152" +
+ "60018252839020805484518184028101840190955280855261036094928301828280156100ce57602002820191906000526020600020905b8160005054815260" +
+ "200190600101908083116100b7575b5050505050905090565b6103aa600435602435600160205260008281526040902080548290811015610002575060009081" +
+ "52602090200154905081565b6103aa600435600160a060020a03331660009081526020818152604080832084845290915290205467ffffffffffffffff165b91" +
+ "9050565b6103bc60043560408051602081810183526000808352600160a060020a0333168152808252838120858252825283902060010180548451601f820184" +
+ "900484028101840190955280855292939290918301828280156101c757820191906000526020600020905b8154815290600101906020018083116101aa578290" +
+ "03601f168201915b5050505050905061013e565b61042a60043560243560006020818152928152604080822090935290815220805467ffffffffffffffff1690" +
+ "60010182565b60408051602060248035600481810135601f81018590048502860185019096528585526100619590359460449490939290920191819084018382" +
+ "808284375094955050505050506000600082604051808280519060200190808383829060006004602084601f0104600302600f01f15090910182900390912083" +
+ "5467ffffffffffffffff191642178455915160018054828255818652939550926020601f91909101047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2" +
+ "b0732d9fcbe2b7fa0cf690810192821561052a579182015b8281111561052a5782518260005055916020019190600101906102d7565b610061600435600160a0" +
+ "60020a03331660009081526020818152604080832084845282528220805467ffffffffffffffff19168155600181018054848255908452828420919284926104" +
+ "9592601f01919091048101905b80821115610522576000815560010161034c565b60405180806020018281038252838181518152602001915080519060200190" +
+ "602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b60408051918252519081900360200190f35b604051" +
+ "80806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561" +
+ "041c5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604051808367ffffffffffffffff16815260200180" +
+ "602001828103825283818154815260200191508054801561048557820191906000526020600020905b8154815290600101906020018083116104685782900360" +
+ "1f168201915b5050935050505060405180910390f35b505050600090505b600160a060020a033316600090815260016020526040902054811015610526576040" +
+ "6000208054839190839081101561000257600091825260209091200154141561051a576001600050600033600160a060020a0316815260200190815260200160" +
+ "00206000508181548110156100025760009182526020822001555b60010161049d565b5090565b5050565b5061053692915061034c565b5050600160a060020a" +
+ "038416600090815260208181526040808320858452825282208354815467ffffffffffffffff191667ffffffffffffffff919091161781556001848101805491" +
+ "8301805483825581875295859020879694959194601f01919091048101929182156105d257600052602060002091601f016020900482015b828111156105d257" +
+ "82548255916001019190600101906105b7565b506105de92915061034c565b50505050600160a060020a03841660009081526001602052604090208054839190" +
+ "61000256")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 201 {
+ t.Errorf("expected matches: %d, got %d", 201, len(matches))
+ }
+}
+
+func TestFindMatches8(t *testing.T) {
+ var pt PatriciaTree
+ v := []byte{1}
+ mf2 := NewMatchFinder2(&pt)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffffff16"), v)
+ pt.Insert(decodeHex("ffffffffffffff16"), v)
+ pt.Insert(decodeHex("01000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000"), v)
+ pt.Insert(decodeHex("81526020019081526020016000208190"), v)
+ pt.Insert(decodeHex("81526020019081526020016000206000"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff"), v)
+ pt.Insert(decodeHex("ffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16"), v)
+ pt.Insert(decodeHex("906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffff"), v)
+ pt.Insert(decodeHex("9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673"), v)
+ pt.Insert(decodeHex("900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffff"), v)
+ pt.Insert(decodeHex("73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffff"), v)
+ pt.Insert(decodeHex("6101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffff"), v)
+ pt.Insert(decodeHex("54906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ff"), v)
+ pt.Insert(decodeHex("0a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffff"), v)
+ pt.Insert(decodeHex("0473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffff"), v)
+ pt.Insert(decodeHex("01000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffff"), v)
+ pt.Insert(decodeHex("000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffff"), v)
+ pt.Insert(decodeHex("815260016020526040"), v)
+ pt.Insert(decodeHex("526020019081526020016000208190"), v)
+ data := decodeHex("60606040523615610103576000357c01000000000000000000000000000000000000000000000000000000009004806311bc5478146103bc578063353d90ec14" +
+ "6103dd57806343743d93146103fe5780634faa2d541461041f57806350b44712146104405780635c0ecfad1461047d5780635d80c2781461049e578063602a6c" +
+ "a1146104bf578063806b984f146104d85780638b7bcc86146104f9578063a2fb11751461051a578063a457c2ae14610541578063a59d698614610562578063a5" +
+ "e01f371461056f578063f000c30914610590578063f18d20be146105b1578063f56f48f2146105be578063f6f0b074146105df578063f961ec87146106005761" +
+ "0103565b6103ba5b600060006801639e49bba16280003410151561016e57670e398811bec680003404915081503373ffffffffffffffffffffffffffffffffff" +
+ "ffffff166000670e398811bec680003406604051809050600060405180830381858888f1935050505050610281565b6791b77e5e5d9a0000341015156101d057" +
+ "670e92596fd62900003404915081503373ffffffffffffffffffffffffffffffffffffffff166000670e92596fd6290000340660405180905060006040518083" +
+ "0381858888f1935050505050610280565b674c53ecdc18a600003410151561023257670f43fc2c04ee00003404915081503373ffffffffffffffffffffffffff" +
+ "ffffffffffffff166000670f43fc2c04ee00003406604051809050600060405180830381858888f193505050505061027f565b670ff59ee833b3000034049150" +
+ "81503373ffffffffffffffffffffffffffffffffffffffff166000670ff59ee833b300003406604051809050600060405180830381858888f19350505050505b" +
+ "5b5b6007600050543334604051808481526020018373ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140182" +
+ "8152602001935050505060405180910390206007600050819055506103e88260036000505401046103e86003600050540414151561031b574360066000508190" +
+ "5550600760005054600a6000508190555042600b600050819055505b600060010260016006600050540140141515610345576001600660005054014060096000" +
+ "50819055505b600190505b818160ff161115156103b5573360016000506000600360008181505480929190600101919050558152602001908152602001600020" +
+ "60006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b600181019050805061034a565b5b5050565b005b6103c7" +
+ "600450610af4565b6040518082815260200191505060405180910390f35b6103e8600450610b5c565b6040518082815260200191505060405180910390f35b61" +
+ "0409600450610b00565b6040518082815260200191505060405180910390f35b61042a600450610a65565b6040518082815260200191505060405180910390f3" +
+ "5b610451600480359060200150610b09565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6104" +
+ "88600450610ad5565b6040518082815260200191505060405180910390f35b6104a9600450610ade565b6040518082815260200191505060405180910390f35b" +
+ "6104d6600480359060200180359060200150610627565b005b6104e3600450610aba565b6040518082815260200191505060405180910390f35b610504600450" +
+ "610b65565b6040518082815260200191505060405180910390f35b61052b600480359060200150610b41565b6040518082815260200191505060405180910390f" +
+ "35b61054c600450610ac3565b6040518082815260200191505060405180910390f35b61056d600450610838565b005b61057a600450610b6e565b604051808281" +
+ "5260200191505060405180910390f35b61059b600450610acc565b6040518082815260200191505060405180910390f35b6105bc60045061094e565b005b6105c" +
+ "9600450610aed565b6040518082815260200191505060405180910390f35b6105ea600450610ae7565b6040518082815260200191505060405180910390f35b61" +
+ "0611600480359060200150610a96565b6040518082815260200191505060405180910390f35b60006000600060009054906101000a900473fffffffffffffffff" +
+ "fffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561068757" +
+ "610832565b600860005054846040518082815260200191505060405180910390201415806106b857506000600102600860005054145b156106c257610832565b6" +
+ "103e860036000505410806106e457506103e860036000505403600560005054115b156106ee57610832565b83600a600050546040518083815260200182815260" +
+ "200192505050604051809103902091506103e86009600050548318600190040660056000505401905060016000506000828152602001908152602001600020600" +
+ "09054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166000670de0b6b3a7640000" +
+ "6103e802604051809050600060405180830381858888f19350505050507f909c57d5c6ac08245cf2a6de3900e2b868513fa59099b92b27d8db823d92df9c816040" +
+ "518082815260200191505060405180910390a180600260005060006004600081815054809291906001019190505581526020019081526020016000206000508190" +
+ "55506103e860056000828282505401925050819055506000600b60005081905550826008600050819055505b50505050565b600060006000600b60005054141561" +
+ "084f5761094a565b62015180600b60005054420310156108665761094a565b60006001026008600050819055506000915060056000505490505b60036000505481" +
+ "1015610916573373ffffffffffffffffffffffffffffffffffffffff166001600050600083815260200190815260200160002060009054906101000a900473ffff" +
+ "ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16141561090857670de0b6b3a76400008201915081505b5b80" +
+ "80600101915050610881565b3373ffffffffffffffffffffffffffffffffffffffff16600083604051809050600060405180830381858888f19350505050505b50" +
+ "50565b6000600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ff" +
+ "ffffffffffffffffffffffffffffffffffffff161415156109ac57610a62565b670de0b6b3a7640000600560005054600360005054030290507f909c57d5c6ac08" +
+ "245cf2a6de3900e2b868513fa59099b92b27d8db823d92df9c813073ffffffffffffffffffffffffffffffffffffffff1631036040518082815260200191505060" +
+ "405180910390a13373ffffffffffffffffffffffffffffffffffffffff166000823073ffffffffffffffffffffffffffffffffffffffff16310360405180905060" +
+ "0060405180830381858888f19350505050505b50565b60006000600b600050541415610a83576105399050610a9356610a92565b600b6000505442039050610a93" +
+ "565b5b90565b6000816040518082815260200191505060405180910390209050610ab5565b919050565b60066000505481565b60076000505481565b6008600050" +
+ "5481565b60096000505481565b600a6000505481565b6103e881565b6201518081565b670de0b6b3a764000081565b600b6000505481565b600160005060205280" +
+ "600052604060002060009150909054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600260005060205280600052604060002060" +
+ "00915090505481565b60036000505481565b60046000505481565b6005600050548156")
+ matches := mf2.FindLongestMatches(data)
+ for _, m := range matches {
+ fmt.Printf("%+v, match: [%x]\n", m, data[m.Start:m.End])
+ }
+ if len(matches) != 144 {
+ t.Errorf("expected matches: %d, got %d", 144, len(matches))
+ }
+}
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/3a5198b65396851670329467bf211856973858cf006ef30532d6871ea859a12a b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/3a5198b65396851670329467bf211856973858cf006ef30532d6871ea859a12a
new file mode 100644
index 00000000000..bcfb37e95ee
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/3a5198b65396851670329467bf211856973858cf006ef30532d6871ea859a12a
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("")
+[]byte("T\x05\xcd\x11")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/50e6d6e88241b5d113eeb578e3f53211f9d4c2605391a92b5314b1522ddd6613 b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/50e6d6e88241b5d113eeb578e3f53211f9d4c2605391a92b5314b1522ddd6613
new file mode 100644
index 00000000000..fb3b0189f33
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/50e6d6e88241b5d113eeb578e3f53211f9d4c2605391a92b5314b1522ddd6613
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("q}")
+[]byte("q}}}u\xa9\xa9\xa9")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/a6e7cfd5b704609ef4eae0891c8bd6f60cfbe3da1bf98f71ce0c3e107042154e b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/a6e7cfd5b704609ef4eae0891c8bd6f60cfbe3da1bf98f71ce0c3e107042154e
new file mode 100644
index 00000000000..0b21cf8c0bf
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/a6e7cfd5b704609ef4eae0891c8bd6f60cfbe3da1bf98f71ce0c3e107042154e
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("\xf9\x97")
+[]byte("\xf9\x97\x01\xa0")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/eae7318dcf13903566ac6ce58a3188dd26cc3216cdb8a4c398871feb71d79749 b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/eae7318dcf13903566ac6ce58a3188dd26cc3216cdb8a4c398871feb71d79749
new file mode 100644
index 00000000000..42ead0a2558
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzLongestMatch/eae7318dcf13903566ac6ce58a3188dd26cc3216cdb8a4c398871feb71d79749
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("\xa8")
+[]byte("\xaa")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/1ac0f70817537550272339767003fa71f827da8ab9b1466b539a97b48b0bec89 b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/1ac0f70817537550272339767003fa71f827da8ab9b1466b539a97b48b0bec89
new file mode 100644
index 00000000000..e25022b4483
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/1ac0f70817537550272339767003fa71f827da8ab9b1466b539a97b48b0bec89
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("\x15\xff\x03\x03\x03\x1a\xed\xed\xed\xed")
+[]byte("\x15")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/77fc7eba78cd0b1fa2a157aa2cc7e164eed8ca2c71f13d4e103e5a76887a341b b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/77fc7eba78cd0b1fa2a157aa2cc7e164eed8ca2c71f13d4e103e5a76887a341b
new file mode 100644
index 00000000000..1460fd6e438
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/77fc7eba78cd0b1fa2a157aa2cc7e164eed8ca2c71f13d4e103e5a76887a341b
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("\xbb")
+[]byte("")
diff --git a/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/82c51172146d16d565cd6de38398aba6284e6acc17a97edccb0be3a97624f967 b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/82c51172146d16d565cd6de38398aba6284e6acc17a97edccb0be3a97624f967
new file mode 100644
index 00000000000..1b53508c228
--- /dev/null
+++ b/erigon-lib/patricia/testdata/fuzz/FuzzPatricia/82c51172146d16d565cd6de38398aba6284e6acc17a97edccb0be3a97624f967
@@ -0,0 +1,3 @@
+go test fuzz v1
+[]byte("\xb0\\:'p\xb3")
+[]byte("\xb0\\:")
diff --git a/erigon-lib/pedersen_hash/LICENSE b/erigon-lib/pedersen_hash/LICENSE
new file mode 100644
index 00000000000..b037585c15f
--- /dev/null
+++ b/erigon-lib/pedersen_hash/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2020 StarkWare Industries Ltd.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/erigon-lib/pedersen_hash/README.md b/erigon-lib/pedersen_hash/README.md
new file mode 100644
index 00000000000..e33ed828652
--- /dev/null
+++ b/erigon-lib/pedersen_hash/README.md
@@ -0,0 +1,2 @@
+This code comes from StarkWare crypto-cpp library:
+https://github.com/starkware-libs/crypto-cpp/blob/master/src/starkware/crypto/pedersen_hash.h
diff --git a/erigon-lib/pedersen_hash/big_int.h b/erigon-lib/pedersen_hash/big_int.h
new file mode 100644
index 00000000000..d90226be25f
--- /dev/null
+++ b/erigon-lib/pedersen_hash/big_int.h
@@ -0,0 +1,140 @@
+#ifndef STARKWARE_ALGEBRA_BIG_INT_H_
+#define STARKWARE_ALGEBRA_BIG_INT_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "gsl-lite.hpp"
+
+#include "error_handling.h"
+#include "prng.h"
+
+namespace starkware {
+
+static constexpr inline __uint128_t Umul128(uint64_t x, uint64_t y) {
+ return static_cast<__uint128_t>(x) * static_cast<__uint128_t>(y);
+}
+
+template
+class BigInt {
+ public:
+ static constexpr size_t kDigits = N * std::numeric_limits::digits;
+
+ BigInt() = default;
+
+ template
+ constexpr BigInt(const BigInt& v) noexcept; // NOLINT implicit cast.
+ constexpr explicit BigInt(const std::array& v) noexcept : value_(v) {}
+ constexpr explicit BigInt(uint64_t v) noexcept : value_(std::array({v})) {}
+
+ static constexpr BigInt One() { return BigInt(std::array({1})); }
+ static constexpr BigInt Zero() { return BigInt(std::array({0})); }
+
+ static BigInt RandomBigInt(Prng* prng);
+
+ /*
+ Returns pair of the form (result, overflow_occurred).
+ */
+ static constexpr std::pair Add(const BigInt& a, const BigInt& b);
+ constexpr BigInt operator+(const BigInt& other) const { return Add(*this, other).first; }
+ constexpr BigInt operator-(const BigInt& other) const { return Sub(*this, other).first; }
+ constexpr BigInt operator-() const { return Zero() - *this; }
+
+ /*
+ Multiplies two BigInt numbers, this and other. Returns the result as a
+ BigInt<2*N>.
+ */
+ constexpr BigInt<2 * N> operator*(const BigInt& other) const;
+
+ /*
+ Multiplies two BigInt numbers modulo a third.
+ */
+ static BigInt MulMod(const BigInt& a, const BigInt& b, const BigInt& modulus);
+
+ /*
+ Computes the inverse of *this in the field GF(prime).
+ If prime is not a prime number, the behavior is undefined.
+ */
+ BigInt InvModPrime(const BigInt& prime) const;
+
+ /*
+ Return pair of the form (result, underflow_occurred).
+ */
+ static constexpr std::pair Sub(const BigInt& a, const BigInt& b);
+
+ constexpr bool operator<(const BigInt& b) const;
+
+ constexpr bool operator>=(const BigInt& b) const { return !(*this < b); }
+
+ constexpr bool operator>(const BigInt& b) const { return b < *this; }
+
+ constexpr bool operator<=(const BigInt& b) const { return !(*this > b); }
+
+ /*
+ Returns the pair (q, r) such that this == q*divisor + r and r < divisor.
+ */
+ std::pair Div(const BigInt& divisor) const;
+
+ /*
+ Returns the representation of the number as a string of the form "0x...".
+ */
+ std::string ToString() const;
+
+ std::vector ToBoolVector() const;
+
+ /*
+ Returns (x % target) assuming x is in the range [0, 2*target).
+
+ The function assumes that target.NumLeadingZeros() > 0.
+
+ Typically used after a Montgomery reduction which produces an output that
+ satisfies the range requirement above.
+ */
+ static constexpr BigInt ReduceIfNeeded(const BigInt& x, const BigInt& target);
+
+ /*
+ Calculates x*y/2^256 mod modulus, assuming that montgomery_mprime is
+ (-(modulus^-1)) mod 2^64. Assumes that modulus.NumLeadingZeros() > 0.
+ */
+ static constexpr BigInt MontMul(
+ const BigInt& x, const BigInt& y, const BigInt& modulus, uint64_t montgomery_mprime);
+
+ constexpr bool operator==(const BigInt& other) const;
+
+ constexpr bool operator!=(const BigInt& other) const { return !(*this == other); }
+
+ constexpr uint64_t& operator[](int i) { return gsl::at(value_, i); }
+
+ constexpr const uint64_t& operator[](int i) const { return gsl::at(value_, i); }
+
+ static constexpr size_t LimbCount() { return N; }
+
+ /*
+ Returns the number of leading zero's.
+ */
+ constexpr size_t NumLeadingZeros() const;
+
+ private:
+ std::array value_;
+};
+
+template
+std::ostream& operator<<(std::ostream& os, const BigInt& bigint);
+
+} // namespace starkware
+
+/*
+ Implements the user defined _Z literal that constructs a BigInt of an
+ arbitrary size. For example: BigInt<4> a =
+ 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001_Z;
+*/
+template
+static constexpr auto operator"" _Z();
+
+#include "big_int.inl"
+
+#endif // STARKWARE_ALGEBRA_BIG_INT_H_
diff --git a/erigon-lib/pedersen_hash/big_int.inl b/erigon-lib/pedersen_hash/big_int.inl
new file mode 100644
index 00000000000..3c8607945dd
--- /dev/null
+++ b/erigon-lib/pedersen_hash/big_int.inl
@@ -0,0 +1,284 @@
+#include
+#include
+#include
+#include
+#include
+
+#include "math.h"
+
+namespace starkware {
+
+template
+BigInt BigInt::RandomBigInt(Prng* prng) {
+ std::array value{};
+ for (size_t i = 0; i < N; ++i) {
+ gsl::at(value, i) = prng->RandomUint64();
+ }
+ return BigInt(value);
+}
+
+template
+template
+constexpr BigInt::BigInt(const BigInt& v) noexcept : value_{} {
+ static_assert(N > K, "trimming is not supported");
+ for (size_t i = 0; i < K; ++i) {
+ gsl::at(value_, i) = v[i];
+ }
+
+ for (size_t i = K; i < N; ++i) {
+ gsl::at(value_, i) = 0;
+ }
+}
+
+template
+constexpr std::pair, bool> BigInt::Add(const BigInt& a, const BigInt& b) {
+ bool carry{};
+ BigInt r{0};
+
+ for (size_t i = 0; i < N; ++i) {
+ __uint128_t res = static_cast<__uint128_t>(a[i]) + b[i] + carry;
+ carry = (res >> 64) != static_cast<__uint128_t>(0);
+ r[i] = static_cast(res);
+ }
+
+ return std::make_pair(r, carry);
+}
+
+template
+constexpr BigInt<2 * N> BigInt::operator*(const BigInt& other) const {
+ constexpr auto kResSize = 2 * N;
+ BigInt final_res = BigInt::Zero();
+ // Multiply this by other using long multiplication algorithm.
+ for (size_t i = 0; i < N; ++i) {
+ uint64_t carry = static_cast(0U);
+ for (size_t j = 0; j < N; ++j) {
+ // For M == UINT64_MAX, we have: a*b+c+d <= M*M + 2M = (M+1)^2 - 1 ==
+ // UINT128_MAX. So we can do a multiplication and an addition without an
+ // overflow.
+ __uint128_t res = Umul128((*this)[j], other[i]) + final_res[i + j] + carry;
+ carry = gsl::narrow_cast(res >> 64);
+ final_res[i + j] = gsl::narrow_cast(res);
+ }
+ final_res[i + N] = static_cast(carry);
+ }
+ return final_res;
+}
+
+template
+BigInt BigInt::MulMod(const BigInt& a, const BigInt& b, const BigInt& modulus) {
+ const BigInt<2 * N> mul_res = a * b;
+ const BigInt<2 * N> mul_res_mod = mul_res.Div(BigInt<2 * N>(modulus)).second;
+
+ BigInt res = Zero();
+
+ // Trim mul_res_mod to the N lower limbs (this is possible since it must be smaller than modulus).
+ for (size_t i = 0; i < N; ++i) {
+ res[i] = mul_res_mod[i];
+ }
+
+ return res;
+}
+
+template
+BigInt BigInt::InvModPrime(const BigInt& prime) const {
+ ASSERT(*this != BigInt::Zero(), "Inverse of 0 is not defined.");
+ return GenericPow(
+ *this, (prime - BigInt(2)).ToBoolVector(), BigInt::One(),
+ [&prime](const BigInt& multiplier, BigInt* dst) { *dst = MulMod(*dst, multiplier, prime); });
+}
+
+template
+constexpr std::pair, bool> BigInt::Sub(const BigInt& a, const BigInt& b) {
+ bool carry{};
+ BigInt r{};
+
+ for (size_t i = 0; i < N; ++i) {
+ __uint128_t res = static_cast<__uint128_t>(a[i]) - b[i] - carry;
+ carry = (res >> 127) != static_cast<__uint128_t>(0);
+ r[i] = static_cast(res);
+ }
+
+ return std::make_pair(r, carry);
+}
+
+template
+constexpr bool BigInt::operator<(const BigInt& b) const {
+ return Sub(*this, b).second;
+}
+
+template
+std::pair, BigInt> BigInt::Div(const BigInt& divisor) const {
+ // This is a simple long-division implementation. It is not very efficient and can be improved
+ // if this function becomes a bottleneck.
+ ASSERT(divisor != BigInt::Zero(), "Divisor must not be zero.");
+
+ bool carry{};
+ BigInt res{};
+ BigInt shifted_divisor{}, tmp{};
+ BigInt a = *this;
+
+ while (a >= divisor) {
+ tmp = divisor;
+ int shift = -1;
+ do {
+ shifted_divisor = tmp;
+ shift++;
+ std::tie(tmp, carry) = Add(shifted_divisor, shifted_divisor);
+ } while (!carry && tmp <= a);
+
+ a = Sub(a, shifted_divisor).first;
+ res[shift / 64] |= Pow2(shift % 64);
+ }
+
+ return {res, a};
+}
+
+template
+std::string BigInt::ToString() const {
+ std::ostringstream res;
+ res << "0x";
+ for (int i = N - 1; i >= 0; --i) {
+ res << std::setfill('0') << std::setw(16) << std::hex << (*this)[i];
+ }
+ return res.str();
+}
+
+template
+std::vector BigInt::ToBoolVector() const {
+ std::vector res;
+ for (uint64_t value : value_) {
+ for (int i = 0; i < std::numeric_limits::digits; ++i) {
+ res.push_back((value & 1) != 0);
+ value >>= 1;
+ }
+ }
+ return res;
+}
+
+template
+constexpr bool BigInt::operator==(const BigInt& other) const {
+ for (size_t i = 0; i < N; ++i) {
+ if (gsl::at(value_, i) != gsl::at(other.value_, i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template
+constexpr BigInt BigInt::ReduceIfNeeded(const BigInt& x, const BigInt& target) {
+ ASSERT(target.NumLeadingZeros() > 0, "target must have at least one leading zero.");
+ return (x >= target) ? x - target : x;
+}
+
+template
+constexpr BigInt BigInt::MontMul(
+ const BigInt& x, const BigInt& y, const BigInt& modulus, uint64_t montgomery_mprime) {
+ BigInt res{};
+ ASSERT(modulus.NumLeadingZeros() > 0, "We require at least one leading zero in the modulus");
+ ASSERT(y < modulus, "y is supposed to be smaller then the modulus");
+ ASSERT(x < modulus, "x is supposed to be smaller then the modulus.");
+ for (size_t i = 0; i < N; ++i) {
+ __uint128_t temp = Umul128(x[i], y[0]) + res[0];
+ uint64_t u_i = gsl::narrow_cast(temp) * montgomery_mprime;
+ uint64_t carry1 = 0, carry2 = 0;
+
+ for (size_t j = 0; j < N; ++j) {
+ if (j != 0) {
+ temp = Umul128(x[i], y[j]) + res[j];
+ }
+ uint64_t low = carry1 + gsl::narrow_cast(temp);
+ carry1 = gsl::narrow_cast(temp >> 64) + static_cast(low < carry1);
+ temp = Umul128(modulus[j], u_i) + carry2;
+ res[j] = low + gsl::narrow_cast(temp);
+ carry2 = gsl::narrow_cast(temp >> 64) + static_cast(res[j] < low);
+ }
+ for (size_t j = 0; j < N - 1; ++j) {
+ res[j] = res[j + 1];
+ }
+ res[N - 1] = carry1 + carry2;
+ ASSERT(res[N - 1] >= carry1, "There shouldn't be a carry here.");
+ }
+ return ReduceIfNeeded(res, modulus);
+}
+
+template
+constexpr size_t BigInt::NumLeadingZeros() const {
+ int i = value_.size() - 1;
+ size_t res = 0;
+
+ while (i >= 0 && (gsl::at(value_, i) == 0)) {
+ i--;
+ res += std::numeric_limits::digits;
+ }
+
+ if (i >= 0) {
+ res += __builtin_clzll(gsl::at(value_, i));
+ }
+
+ return res;
+}
+
+template
+std::ostream& operator<<(std::ostream& os, const BigInt& bigint) {
+ return os << bigint.ToString();
+}
+
+namespace bigint {
+namespace details {
+/*
+ Converts an hex digit ASCII char to the corresponding int.
+ Assumes the input is an hex digit.
+*/
+inline constexpr uint64_t HexCharToUint64(char c) {
+ if ('0' <= c && c <= '9') {
+ return c - '0';
+ }
+
+ if ('A' <= c && c <= 'F') {
+ return c - 'A' + 10;
+ }
+
+ // The function assumes that the input is an hex digit, so we can assume 'a'
+ // <= c && c <= 'f' here.
+ return c - 'a' + 10;
+}
+
+template
+constexpr auto HexCharArrayToBigInt() {
+ constexpr size_t kLen = sizeof...(Chars);
+ constexpr std::array kDigits{Chars...};
+ static_assert(kDigits[0] == '0' && kDigits[1] == 'x', "Only hex input is currently supported");
+
+ constexpr size_t kNibblesPerUint64 = 2 * sizeof(uint64_t);
+ constexpr size_t kResLen = (kLen - 2 + kNibblesPerUint64 - 1) / (kNibblesPerUint64);
+ std::array res{};
+
+ for (size_t i = 0; i < kDigits.size() - 2; ++i) {
+ const size_t limb = i / kNibblesPerUint64;
+ const size_t nibble_offset = i % kNibblesPerUint64;
+ const uint64_t nibble = HexCharToUint64(gsl::at(kDigits, kDigits.size() - i - 1));
+
+ gsl::at(res, limb) |= nibble << (4 * nibble_offset);
+ }
+
+ return BigInt(res);
+}
+} // namespace details
+} // namespace bigint
+
+template
+static constexpr auto operator"" _Z() {
+ // This function is implemented as wrapper that calls the actual
+ // implementation and stores it in a constexpr variable as we want to force
+ // the evaluation to be done in compile time. We need to have the function
+ // call because "constexpr auto kRes = BigInt(res);" won't work
+ // unless res is constexpr.
+
+ // Note that the compiler allows HEX and decimal literals but in any case
+ // it enforces that Chars... contains only HEX (or decimal) characters.
+ constexpr auto kRes = bigint::details::HexCharArrayToBigInt();
+ return kRes;
+}
+
+} // namespace starkware
diff --git a/erigon-lib/pedersen_hash/elliptic_curve.h b/erigon-lib/pedersen_hash/elliptic_curve.h
new file mode 100644
index 00000000000..ea9adc0300b
--- /dev/null
+++ b/erigon-lib/pedersen_hash/elliptic_curve.h
@@ -0,0 +1,79 @@
+#ifndef STARKWARE_ALGEBRA_ELLIPTIC_CURVE_H_
+#define STARKWARE_ALGEBRA_ELLIPTIC_CURVE_H_
+
+#include
+#include
+#include
+#include
+
+#include "gsl-lite.hpp"
+
+#include "big_int.h"
+
+namespace starkware {
+
+using std::size_t;
+
+/*
+ Represents a point on an elliptic curve of the form: y^2 = x^3 + alpha*x + beta.
+*/
+template
+class EcPoint {
+ public:
+ constexpr EcPoint(const FieldElementT& x, const FieldElementT& y) : x(x), y(y) {}
+
+ bool operator==(const EcPoint& rhs) const { return x == rhs.x && y == rhs.y; }
+ bool operator!=(const EcPoint& rhs) const { return !(*this == rhs); }
+
+ /*
+ Computes the point added to itself.
+ */
+ EcPoint Double(const FieldElementT& alpha) const;
+
+ /*
+ Returns the sum of two points. The added point must be different than both the original point
+ and its negation.
+ */
+ EcPoint operator+(const EcPoint& rhs) const;
+ EcPoint operator-() const { return EcPoint(x, -y); }
+ EcPoint operator-(const EcPoint& rhs) const { return (*this) + (-rhs); }
+
+ /*
+ Returns a random point on the curve: y^2 = x^3 + alpha*x + beta.
+ */
+ static EcPoint Random(const FieldElementT& alpha, const FieldElementT& beta, Prng* prng);
+
+ /*
+ Returns one of the two points with the given x coordinate or nullopt if there is no such point.
+ */
+ static std::optional GetPointFromX(
+ const FieldElementT& x, const FieldElementT& alpha, const FieldElementT& beta);
+
+ template
+ EcPoint ConvertTo() const;
+
+ /*
+ Given the bool vector representing a scalar, and the alpha of the elliptic curve
+ "y^2 = x^3 + alpha * x + beta" the point is on, returns scalar*point.
+ */
+ template
+ EcPoint MultiplyByScalar(
+ const BigInt& scalar, const FieldElementT& alpha) const;
+
+ FieldElementT x;
+ FieldElementT y;
+
+ private:
+ /*
+ Returns the sum of this point with a point in the form of std::optional, where std::nullopt
+ represents the curve's zero element.
+ */
+ std::optional> AddOptionalPoint(
+ const std::optional>& point, const FieldElementT& alpha) const;
+};
+
+} // namespace starkware
+
+#include "elliptic_curve.inl"
+
+#endif // STARKWARE_ALGEBRA_ELLIPTIC_CURVE_H_
diff --git a/erigon-lib/pedersen_hash/elliptic_curve.inl b/erigon-lib/pedersen_hash/elliptic_curve.inl
new file mode 100644
index 00000000000..895a1905459
--- /dev/null
+++ b/erigon-lib/pedersen_hash/elliptic_curve.inl
@@ -0,0 +1,129 @@
+#include "error_handling.h"
+
+namespace starkware {
+
+template
+auto EcPoint::Double(const FieldElementT& alpha) const -> EcPoint {
+ // Doubling a point cannot be done by adding the point to itself with the function AddPoints
+ // because this function assumes that it gets distinct points. Usually, in order to sum two
+ // points, one should draw a straight line containing these points, find the third point in the
+ // intersection of the line and the curve, and then negate the y coordinate. In the special case
+ // where the two points are the same point, one should draw the line that intersects the elliptic
+ // curve "twice" at that point. This means that the slope of the line should be equal to the slope
+ // of the curve at this point. That is, the derivative of the function
+ // y = sqrt(x^3 + alpha * x + beta), which is slope = dy/dx = (3 * x^2 + alpha)/(2 * y). Note that
+ // if y = 0 then the point is a 2-torsion (doubling it gives infinity). The line is then given by
+ // y = slope * x + y_intercept. The third intersection point is found using the equation that is
+ // true for all cases: slope^2 = x_1 + x_2 + x_3 (where x_1, x_2 and x_3 are the x coordinates of
+ // three points in the intersection of the curve with a line).
+ ASSERT(y != FieldElementT::Zero(), "Tangent slope of 2 torsion point is infinite.");
+ const auto x_squared = x * x;
+ const FieldElementT tangent_slope = (x_squared + x_squared + x_squared + alpha) / (y + y);
+ const FieldElementT x2 = tangent_slope * tangent_slope - (x + x);
+ const FieldElementT y2 = tangent_slope * (x - x2) - y;
+ return {x2, y2};
+}
+
+template
+auto EcPoint::operator+(const EcPoint& rhs) const -> EcPoint {
+ ASSERT(this->x != rhs.x, "x values should be different for arbitrary points");
+ // To sum two points, one should draw a straight line containing these points, find the
+ // third point in the intersection of the line and the curve, and then negate the y coordinate.
+ // Notice that if x_1 = x_2 then either they are the same point or their sum is infinity. This
+ // function doesn't deal with these cases. The straight line is given by the equation:
+ // y = slope * x + y_intercept. The x coordinate of the third point is found by solving the system
+ // of equations:
+
+ // y = slope * x + y_intercept
+ // y^2 = x^3 + alpha * x + beta
+
+ // These equations yield:
+ // (slope * x + y_intercept)^2 = x^3 + alpha * x + beta
+ // ==> x^3 - slope^2 * x^2 + (alpha - 2 * slope * y_intercept) * x + (beta - y_intercept^2) = 0
+
+ // This is a monic polynomial in x whose roots are exactly the x coordinates of the three
+ // intersection points of the line with the curve. Thus it is equal to the polynomial:
+ // (x - x_1) * (x - x_2) * (x - x_3)
+ // where x1, x2, x3 are the x coordinates of those points.
+ // Notice that the equality of the coefficient of the x^2 term yields:
+ // slope^2 = x_1 + x_2 + x_3.
+ const FieldElementT slope = (this->y - rhs.y) / (this->x - rhs.x);
+ const FieldElementT x3 = slope * slope - this->x - rhs.x;
+ const FieldElementT y3 = slope * (this->x - x3) - this->y;
+ return {x3, y3};
+}
+
+template
+auto EcPoint::GetPointFromX(
+ const FieldElementT& x, const FieldElementT& alpha, const FieldElementT& beta)
+ -> std::optional {
+ const FieldElementT y_squared = x * x * x + alpha * x + beta;
+ if (!y_squared.IsSquare()) {
+ return std::nullopt;
+ }
+ return {{x, y_squared.Sqrt()}};
+}
+
+template
+auto EcPoint::Random(
+ const FieldElementT& alpha, const FieldElementT& beta, Prng* prng) -> EcPoint {
+ // Each iteration has probability of ~1/2 to fail. Thus the probability of failing 100 iterations
+ // is negligible.
+ for (size_t i = 0; i < 100; ++i) {
+ const FieldElementT x = FieldElementT::RandomElement(prng);
+ const std::optional pt = GetPointFromX(x, alpha, beta);
+ if (pt.has_value()) {
+ // Change the sign of the returned y coordinate with probability 1/2.
+ if (prng->RandomUint64(0, 1) == 1) {
+ return -*pt;
+ }
+ return *pt;
+ }
+ }
+ ASSERT(false, "No random point found.");
+}
+
+template
+template
+EcPoint EcPoint::ConvertTo() const {
+ return EcPoint(OtherFieldElementT(x), OtherFieldElementT(y));
+}
+
+template
+template
+EcPoint EcPoint::MultiplyByScalar(
+ const BigInt& scalar, const FieldElementT& alpha) const {
+ std::optional> res;
+ EcPoint power = *this;
+ for (const auto& b : scalar.ToBoolVector()) {
+ if (b) {
+ res = power.AddOptionalPoint(res, alpha);
+ }
+ // If power == -power, then power + power == zero, and will remain zero (so res will not
+ // change) until the end of the for loop. Therefore there is no point to keep looping.
+ if (power == -power) {
+ break;
+ }
+ power = power.Double(alpha);
+ }
+ ASSERT(res.has_value(), "Result of multiplication is the curve's zero element.");
+ return *res;
+}
+
+template
+std::optional> EcPoint::AddOptionalPoint(
+ const std::optional>& point, const FieldElementT& alpha) const {
+ if (!point) {
+ return *this;
+ }
+ // If a == -b, then a+b == zero element.
+ if (*point == -*this) {
+ return std::nullopt;
+ }
+ if (*point == *this) {
+ return point->Double(alpha);
+ }
+ return *point + *this;
+}
+
+} // namespace starkware
diff --git a/erigon-lib/pedersen_hash/elliptic_curve_constants.cc b/erigon-lib/pedersen_hash/elliptic_curve_constants.cc
new file mode 100644
index 00000000000..5f88af74d34
--- /dev/null
+++ b/erigon-lib/pedersen_hash/elliptic_curve_constants.cc
@@ -0,0 +1,2546 @@
+#include "elliptic_curve_constants.h"
+
+#include "big_int.h"
+#include "prime_field_element.h"
+
+namespace starkware {
+
+const EllipticCurveConstants& GetEcConstants() {
+ static auto* prime_field_ec0 = new EllipticCurveConstants(
+ // k_alpha
+ 0x1_Z,
+ // k_beta
+ 0x6f21413efbe40de150e596d72f7a8c5609ad26c15c915c1f4cdfcb99cee9e89_Z,
+ // k_order
+ 0x800000000000010ffffffffffffffffb781126dcae7b2321e66a241adc64d2f_Z,
+ // k_points
+ {
+ {0x49ee3eba8c1600700ee1b87eb599f16716b0b1022947733551fde4050ca6804_Z,
+ 0x3ca0cfe4b3bc6ddf346d49d06ea0ed34e621062c0e056c1d0405d266e10268a_Z},
+ {0x1ef15c18599971b7beced415a40f0c7deacfd9b0d1819e03d723d8bc943cfca_Z,
+ 0x5668060aa49730b7be4801df46ec62de53ecd11abe43a32873000c36e8dc1f_Z},
+ {0x234287dcbaffe7f969c748655fca9e58fa8120b6d56eb0c1080d17957ebe47b_Z,
+ 0x3b056f100f96fb21e889527d41f4e39940135dd7a6c94cc6ed0268ee89e5615_Z},
+ {0x3909690e1123c80678a7ba0fde0e8447f6f02b3f6b960034d1e93524f8b476_Z,
+ 0x7122e9063d239d89d4e336753845b76f2b33ca0d7f0c1acd4b9fe974994cc19_Z},
+ {0x40fd002e38ea01a01b2702eb7c643e9decc2894cbf31765922e281939ab542c_Z,
+ 0x109f720a79e2a41471f054ca885efd90c8cfbbec37991d1b6343991e0a3e740_Z},
+ {0x2f52066635c139fc2f64eb0bd5e3fd7a705f576854ec4f00aa60361fddb981b_Z,
+ 0x6d78a24d8a5f97fc600318ce16b3c840315979c3273078ec1a285f217ee6a26_Z},
+ {0x6a0767a1fd60d5b9027a35af1b68e57a1c366ebcde2006cdd07af27043ef674_Z,
+ 0x606b72c0ca0498b8c1817ed7922d550894c324f5efdfc85a19a1ae382411ca2_Z},
+ {0x7fa463ee2a2d6a585d5c3358918270f6c28c66df1f86803374d1edf3819cc62_Z,
+ 0xa996edf01598832e644e1cae9a37288865ad80e2787f9bf958aceccc99afae_Z},
+ {0x3d4da70d1540da597dbae1651d28487604a4e66a4a1823b97e8e9639393dbec_Z,
+ 0x45cdef70c35d3b6f0a2273a9886ccb6306d813e8204bdfd30b4efee63c8a3f9_Z},
+ {0x1e448fdbcd9896c6fbf5f36cb7e7fcb77a751ff2d942593cae023363cc7750e_Z,
+ 0x30c81da0f3a8cb64468eaa491c7ae7b4842b62cb4148820da211afc4caffb3a_Z},
+ {0x6531acf1a7cb90a4eb27de0b7f915e387a3b0fd063ba6e1289b91f48411be26_Z,
+ 0x31330f5daa091889981a3ea782ae997f5f171336ed0487a03f051551a2cafa2_Z},
+ {0x54be016394d5662d67d7e82f5e889ed2f97ccf95d911f57dd2362c4040ed4f4_Z,
+ 0xc6cb184053f054d6a59c1bf0986d17090d25089b3fdcdaf185edc87ef113e5_Z},
+ {0x35b9ecd0499ca1d5d42dcbb0c6b4042b3733c64b607ca711e706e786ef2afc6_Z,
+ 0x5624b476a5b21c3a544f0712d4817b06ad380a5a6529d323bf64da8ef862d8d_Z},
+ {0x4ce0378e3ee8f77ed58f2ddbd8bb7676c8a38bfb1d3694c275254bd8ca38e23_Z,
+ 0x5a16fcbff0769c9cf2b02c31621878ec819fff4b8231bff82c6183db2746820_Z},
+ {0x648d5c6f98680a1b926bfeb01c00224c56fdcf751b251c4449c8a94f425cfcf_Z,
+ 0x72c05ac793cd1620a833fbe2214d36900ebe446e095c62fcb740937f98cca8c_Z},
+ {0xbd09be3e4e1af8a14189977e334f097c18e4a8bf42577ef5aafa0f807bd89b_Z,
+ 0x6e0e72ed7eb65c86cee29c411fb4761122558ee81013344ba8509c49de9f9b6_Z},
+ {0x35ea4e339b44ae7724419bdfbe07022253137a4afb7cbaffad341ea61249357_Z,
+ 0x3665d676a026a174f367bb4417780e53a7803cb02d0db32eb4545c267c42f14_Z},
+ {0x36457bc744f42e697b825c2d1afd8f4029d696a4514710f81da52d88e178643_Z,
+ 0x7c93715896735492a68c7969a024b3a8fd538bffc1521538107de1a5f13ce9c_Z},
+ {0x5b3a08ebcf9c109cc9082f70d9df2b9c11b5428ee23917b4e790c4c10f6e661_Z,
+ 0x9d7b42ab0c20f5510df7ea5e196eec99342739077e9a168198c89da859753_Z},
+ {0x21883ef8580fc06e59481955d52ece3aca6e82c8c9fc58e216dcf46f96990c6_Z,
+ 0x51a6423543e6e8a43e71da34cd90f5b520b8d33b67c4bf857573ab9e301aa4c_Z},
+ {0x19e86b77f9b581e81092b305c852faf53940a8f15f0a6990c414f04c0fa7ef9_Z,
+ 0x515630e35d4398c9c79fc4ee08e1023fa47d8e03c6e7819c6d2ccef45398fa_Z},
+ {0x888ab8eb4c31bb2ac5b54aa320dbe1a69c96b864e8a5f54d89c1d1a6b86c24_Z,
+ 0x730e148467f6a55ce22c5296f5380df88f38de76ef0b2de844cd3094aaaf3ea_Z},
+ {0x75e79ff13a894e7120dac17b7429c0c32ce7828f726c9973728c0977a5f5977_Z,
+ 0x4960526e59c1c736561a201bc56f7d762641b39f609d273cc996f5d9197cfb8_Z},
+ {0x640fe009249115d7254f72ecafb3006139e4bed7e9041af51458c737282d1d5_Z,
+ 0x3cc6c978a575246e2ce4f7ef1fcc7f63085db9ff98a1b1f3fe374087c0332c_Z},
+ {0x6d6fd09ccab7c26de9b3906191235deb5c34685580c488275356a05e209ca96_Z,
+ 0x7157f81a34213dd8f91dea4f6df1bcfabc4ee091a3049eeeb3b7923d39b8645_Z},
+ {0x5531ca1d00f151d71da820918f74caf2985b24dca20e124721fff507b5a5876_Z,
+ 0x518529643d3f25e47f72c322223ba60a63d6bfe78cf3f612215d9c19bf29200_Z},
+ {0x6192d454e4f8fe212bdfccd5b15dd5056d7622ffe456c6c67e5a7265aea49c4_Z,
+ 0x2377a45dc630017ae863cb968ddb38333a70c7946d8684e6d7a6213f634b7bc_Z},
+ {0x542fb44b4ef3640a64fdb22a2560fb26668065c069cf31d1df424819a39ff18_Z,
+ 0x5dbae9b0948e0361aea443503840341c322aa1a1366ce5390e71bf161f78f8c_Z},
+ {0x299ff3e3412a7eb4cb4a3051b07b1be2e7b1c4b789f39ffb52cba3d048b71de_Z,
+ 0x1951d3175c02761b291d86b6c0a08387ad5e2a2130ccc33c852530572cb3958_Z},
+ {0x628ce3f5367dadc1411133e55eb25e2e3c2880d6e28754a5cb1c5d109627e73_Z,
+ 0xae3e9b7d50964e28bd15380400b7659b87affdef5d2586cbefcd9be7d67c0d_Z},
+ {0x6ea54aff064895eccf9db2283225d62044ae67621192b3346338948382f5933_Z,
+ 0x6431507e51aadacfaf39f102a8ff387756e9b5e1bc8323d44acae55130d93db_Z},
+ {0x28097d50d175a6235320fe8cfe138dd9e46895d189582e472c38ad7a67d923a_Z,
+ 0x7f9eab4133d7d09a7ff63368d6135c26262b62336eca1b5ca33f2096ce388ba_Z},
+ {0x619fd09cdd6ff4323973f256c2cbdcb224f7f25b8aef623af2d4a0105e62e02_Z,
+ 0x2c95f0ae11d47eeae1bc7f1350f75f9185c5bc840382ceb38a797cae9c40308_Z},
+ {0x641c18982ced304512a3f2395942a38add0d6a7156229c2a7c8b8dfbe9beb96_Z,
+ 0x6f6288c9c659b6af5ac975f4180deffe53d516399b2cc62f31732e9d4ba9837_Z},
+ {0x58ab546e51fe49fc5a382e4064a2bd6cfc268904412f86c26de14f28a71d0f2_Z,
+ 0x124b7217943e7e328408e8afdfa7da00dcbc94a2bb85fd8e01fb162d2c2c0a9_Z},
+ {0xa82c2fdedbb26c3c762a12f7e86b0e01e65320e0a25a8399d665f6e266bf74_Z,
+ 0x1a1de28e253f3e10f44d0111e8074f882d7f42e5900780ccbdc31da372d3fd8_Z},
+ {0x744c725a7455a992e3cf5bd007bc234dd4668dba285f553f38350ad94c1615b_Z,
+ 0x7f721a87f48798bdc4a9c0eb88559e2ad7a74112fd901e70ea159e67a9c33f_Z},
+ {0x434df142ddaa60f7881b6348d91687de40457de7ccfb07f0304b9e820705d0c_Z,
+ 0x7fae425e3b53f97dd1f5b20e49ed9fe24ff1efc341ba5e017ac89cf8df0cc39_Z},
+ {0x7a1e2b809dff46277021cbc376f79c37e1b683bbd6bca5317014f0dc0e1ae73_Z,
+ 0x56790278a231912c334eff05281e08af1558e85516b4411ef64647c13bea431_Z},
+ {0x4931b7990348d41cf8907be79f45bb7991fd18f8a57868351c92fa7a34cbcd7_Z,
+ 0xca35091815cdf0837d396e25aad6052ad32d497a33b123256cffdc008bc50e_Z},
+ {0x250b815d352fd89f8210b624b147ea7d0a4f47bcac49f3ac9b777840da93ebe_Z,
+ 0x1173f10e9691948b7da7632f328520455aadcba46e017f891e0a1d7da2bef04_Z},
+ {0x2223b85032fa67292f6e1f822628e6756e5c3cc08fc252ab88d63d624e4dfb2_Z,
+ 0x55619ba96a7dcec77832fcb22cd5c21c7dcebc0280d730cba0002b67e0a8c63_Z},
+ {0x249b131e04de73af9820d3e22492d9ec51bdc0c4c4f34d95352fa44dd61f245_Z,
+ 0x7576d3b5d136368ff01170a77d8286d0d1c7c40688862fb40813b4af3c6065e_Z},
+ {0x6777915d9b4769027eb7e04733f8a2d669c84fe06080f55e8a55674dfbf9efb_Z,
+ 0x640d0ff384c9635e1af364760f104e058e3c86209fa9d2320aeac887b2e02d8_Z},
+ {0x2abe3f237681052f002414399111cf07f8421535af41251edc427a36b5b19c9_Z,
+ 0x636ce4deaf468a503ab20ccb2f7e5bdc98551656ebf53e9c7786b11dd9090be_Z},
+ {0x4d5cc5414758ea1be55be779bd7da296c7e11f1564d9e8797ceea347c16f8ea_Z,
+ 0x1a680c4c410cf5ddc74e95ff2897c193edaaecce5b2cde4e96bbae5c0054eff_Z},
+ {0x46c375c684b30adf4d51de81e92afee52b1a3847e177403372c82109373edca_Z,
+ 0x1eaadc5783c90a0261306423d52009e991126b3f620e9cb6cffca41ca096f4f_Z},
+ {0x2ddfb71f51205888118cbabba8fd07d460a810289bfdeeb7118707e310cb152_Z,
+ 0x1fd905d07b3933be886f2518246bdafa6f33259a174668808223cd7c28183c7_Z},
+ {0x386f3879960713d41fdb3b1e41bbebf26b1c0e27a9a75bb1adcc1a0d3e8547b_Z,
+ 0x2b21498c0f34ec6f17c720334dc0f36021c2f87afbbbc8847d0bd536eb265e5_Z},
+ {0x407eae62c6c4de3b942195afec3f45efec71ddb5e6edee3d427631bcdbf9b90_Z,
+ 0x436e7f2d78268ef62c4172d2ff1469028bad1f1d0f97ab007064418e61caa8f_Z},
+ {0x1b881175e21201d17e095e9b3966b354f47de8c1acee5177f5909e0fd72328f_Z,
+ 0x69954b1a9b8bfccf8ec384d32924518a935758f3d3662ef754bcc88f1f6f3ec_Z},
+ {0x7d545a82bff003b8115be32a0c437f7c0a98f776bcf7fddb0392822844f3c5e_Z,
+ 0x34b6e53a9565a7daa010711f5bf72254a4e61da3e6a562210a9abc9e8b66d69_Z},
+ {0x299b9fcd4fadfc4b6141457a3036aaa68501c23df579de26df69d4def89b913_Z,
+ 0xb95bf2c2bb303c38bb396382edc798ca6a4847e573ce19b7b08533d1912675_Z},
+ {0x551f5a4dae4a341a3e20336a7d2f365ddd45849351ec6dd4fcbedfe4806d5d5_Z,
+ 0x5865c977a0ecf13ce85ae14c5c316872080bd36f0f614f56b6dfc7ece83792e_Z},
+ {0x7a1d69c08e68c80ad8b310736e6247a53bcba0183b9b8798833bc696a0fb6e2_Z,
+ 0x3ce803a20ebb3b120d5eaf0ad64bed0522fad1a0f2ce39a5c5cbae98c4438f6_Z},
+ {0x28acacc0bc41d84e83663f02b36981a2c8272ecd72d3901164be2affb09c504_Z,
+ 0x7a5aee0b160eaff5b5968ab1a0304ce58c3d5ae0148d9191c39e87668229e5b_Z},
+ {0x1f78cfdbcc767b68e69a224a077468cdfcb0afd6952b85bccbdb96d1fb8500b_Z,
+ 0x4772ba173c6b583284eb001cfc2a124104833f464ff9df096443e10ef3e9dd4_Z},
+ {0x2774108962ca9897e7f22c064d2ccedac4fef5fc9569331c27cdc336c95774b_Z,
+ 0x9e13d79b68e8dc8091c019618f5b07283a710ddf1733dc674a99fc32c12911_Z},
+ {0x770d116415cd2c4ace0d8b721dd77e4a2ef766591f9ec9fa0b61304548994ed_Z,
+ 0x42165d93c82f687635aa2b68492b3adffd516beb4baa94520efa11467a209fd_Z},
+ {0x5e6e4ece6621e2275415e1fda1e7c4f496de498b77c0b913073c6a6099394b9_Z,
+ 0x3d92ce044fc77fa227adc31f6fc17ef8b4ec1c5aafc44630c0d9195075bf56d_Z},
+ {0x6e69c717b5d98807ff1e404a5187a9ceaf0110b83aa15a84f930928b1171825_Z,
+ 0x1ee7cfc3a9744d7fa380ba28604af9df33ac077724374c04588bd71fa16b177_Z},
+ {0x404318f2d2ceb44f549c80f9d7de9879d8f7da4b81e7350c00e974ebf2daef1_Z,
+ 0x3934831b5af70d17a3f1da9d2931bd757e6acf2893236264fc7e0d92ff1a1cb_Z},
+ {0x20dcb6f394fea6d549b2e75748f61b7ec03b6e52319cb14163373a9c22bb9dc_Z,
+ 0x106a8c96cfb95a331618b7416d1498554730499e194a58fbf63019890480fc7_Z},
+ {0x119000f277ccee013e6bb121194ec1ab5460fb6a96eb702a14079865f4170aa_Z,
+ 0x1737a32f5415e8720a5606ec1dd4756f02e7c6817e3723b453d091f2d192773_Z},
+ {0x45d0fb5cd95db76d05dec3faa12e467a308eabaad363a062353db3cd2d9b749_Z,
+ 0xae08691b5b0cdd19ec499132421638f470f493320e4003d123ab1da761b965_Z},
+ {0x1257b3e65cdfb6367c6d0942327e799bc66eb221e70c6573a9862889eb51c38_Z,
+ 0x593309fd45755dd2cc4afd2b9316bc4638b0c5ddb3009694fcb7b250d0c8a2f_Z},
+ {0x186dcf9950f72e868014a8accf14aa36e82a7a2a29f86ba37f6632da4189db3_Z,
+ 0x55684c9f7a043fc523ed78f756f834b4db823d5e4161bd79602c17d55a5cd8c_Z},
+ {0x58791d5569f282f5c3b01ecdc9388df7ba3ca223a2dc1eed5edaf2a1d302fb9_Z,
+ 0x6298d7dd51561a045bb4089deda9f40b2865589ed433e56d54554f8b45e79f0_Z},
+ {0x13fd87144aa5aa4b24d5a7bf907d8280d15937fed262d41084898cb688fc28b_Z,
+ 0x3fa54367770cc4479a857411ddcabe86627b405ce1cd14ad3b2863bde13abe4_Z},
+ {0x48118139445415f0c1879224e2dee744ed35280ff00537260402a1741ec3676_Z,
+ 0x4dfa39dadaabecfc54ecb7a25319444f8e952782d863790e42a9887064fc0c1_Z},
+ {0x4ad031bb9eda84f2fe5d354c7948d41558ca657a04508654721810ee72ef158_Z,
+ 0x620ebd5d0086b92c6009a42777b946a351c2c7ba852b57d3c9905fc337459ef_Z},
+ {0x4a34abb016ad8cb4575ea5bd28385d2348e5bcc0cbba90059f90f9c71f86e8b_Z,
+ 0x4f781829ad83f9ed1e1b6de0e5f4ac60dfdfe7f23cb4411e815817e705e52c8_Z},
+ {0x7fc632d7512aab5356b7915dca854c8b12b369ab54f524fbce352f00eb9b9f9_Z,
+ 0x2ce80b944fc9158005f630b34385d50c3ad84450a9e1e529925b3211dd2a1de_Z},
+ {0x65ed10347503cbc0216ca03f7536cca16b6abd18d332a9258685907f2e5c23f_Z,
+ 0x3be1a18c6bfa6f2f4898ebefad5a8e844c74626d5baa04a820d407fe28bbca6_Z},
+ {0x1a8abba1be2e276cdd1f28c912280833a5ede1ec121738fcca47dc070dcc71d_Z,
+ 0x21b724378bc029a5199799df005922590d4e59cae52976f8e437bf6693eec4a_Z},
+ {0x3a99c22dafcfe9004ebb674805736a26aeed7ed5d465ae37226dcbe270a972b_Z,
+ 0x5bf67552af08e1e6e2a24bf562c23225e89869cab9bef8becb3669175a3c94f_Z},
+ {0x4a6a5e4b3501f2b7bbdd8da73ea81ffca347170bdfb6776a037cdd74c560fb4_Z,
+ 0x5af167ebb259c2da88740ec559ee04052bb66480b836cadd0e2590c32d7111b_Z},
+ {0x6890d95308525f0bac9dc25cc1189eb92d29d4b3fe61bc8aee1c716ac17b1e8_Z,
+ 0xe6f23f78e882026b53ea4fac6950e56e3da461e52339eb43d2fdb2dade7ca9_Z},
+ {0x748f4cf4f027efdeaed7c7f91ef3730ff2f2bb0bfc2db8f27aadde947f7d4d5_Z,
+ 0x3a1cbc550699411052c76293b8c41a3a8a1ecf12cbbc029a1b2b6ea986fca93_Z},
+ {0x7321f3f581690922cd0dec40c9c352aae412ec2ccdf718f137f7786ab452cd3_Z,
+ 0x5be5130c9277cdb76d7409452438ec15d246b211dd1e276ee58e82a81c98fd4_Z},
+ {0x6c4d6cb7e7ae70955224b8a912ff57ca218635a2436b36cee25dce8a5cdf51f_Z,
+ 0x32f8c03c6db3246946e432e4148e69f5628b200c6d7d72449df6eeac0998039_Z},
+ {0x1dad5f2e795ea6fa5177f110989516eacf8fb37bd6a091c7c93f1d73a2fe309_Z,
+ 0x56b2298c538180e99dea3e171dbb5c6fba0bd0a9ed40537277c0c2373a8e2c4_Z},
+ {0x1610605baacc9bc62c4cc923dc943347cfece7ae241e746fbe6c2c878221dbd_Z,
+ 0x431a82d657e0d109d00dea88cf3fa9b999845221b7b5590a20c40fc71368c1c_Z},
+ {0x6a4f5c787fb09a5be2b04d2eafa1e6f3d3c863ee22960eb0b64f6eaf6659162_Z,
+ 0x14dbc3eaea6146ee7eaace5a91ed9430dad3a47e9ca2f68b455171f8fe6a7b3_Z},
+ {0x738415b73e55412b0e582e45ff0d7bf4b1bf2922db581783fdcc75559f40e_Z,
+ 0x33825aeb3fd8459999eb418d15102ba5864b069c6ea517f0c6e9eab8d9aca47_Z},
+ {0x2603e72ce53985c70782774057a17944f7b4ce224a809be4e2b5af3606aa1d8_Z,
+ 0x92822921809c42318f42dac4d773325f41c43069e990adac7818a45e2554dc_Z},
+ {0x181cd967ab4615357cc96c82eae9152ce7598c1a1dfdd91a458bddb016ae9fe_Z,
+ 0x5d562fdaeb0e12647e230e50eaf216bed52fa73c6b7378821a3bfc4cd66d4ff_Z},
+ {0x1121726069b9ef5954ba6490100b226e0be53fef3e071b7c58a1286174b789a_Z,
+ 0x4b25594cf4e9eb2d14b3f52f2661a9992234fc222c0a0d44517cb77deb9c16f_Z},
+ {0xe543663969b915337f105f80995a77b356f1a51d8b4a4fb12d44364130e873_Z,
+ 0x34b2e3c009fdab4cb7349a580df2e64c0098a123280078e5da6623a9ec6b44f_Z},
+ {0x4e2f8909bb62de5ef65600e61bbf969293815296b6e23702875e049b3ce5c45_Z,
+ 0x3cb81f2c21f22a7add26fa38a9ce5d9cce1bb251bd2698f90c34ff0a84f7af_Z},
+ {0x37b546e403a1ba970c17b67c2f1361ab9c803f8d2b5cd93803014faa08861ed_Z,
+ 0x37079184ea46272f5809b523d060686633f7995167897a153be1772fd6566f6_Z},
+ {0x27bddca77f7bd7f66b3693567a4238f2e6751d95b0bcb409f6b24d08f84798c_Z,
+ 0x6417a85cbfd6fc02df560d3963a241a986baacdfa423f65d7227ce49a96c57d_Z},
+ {0x2de71a39aa043057d1bc66e45f804542acddf18f7a6d88c0d7fb0ca240debdf_Z,
+ 0x306c1ce39ab46300f7cca0f3a2fbfa77296a27e24bc66b0b8044968ec0ee413_Z},
+ {0x307c877154364c0c03534e7327d5a88e1380ceef6481567ade37a14ee7c1a72_Z,
+ 0x3404bc7dbfb33b95d922d0693aaf9358f77888d7d95e773c38d83dbe2e5f995_Z},
+ {0x79f09ff7c60850e5f5ea020722659a1ed27db4c95dca131f99552f785c8afbc_Z,
+ 0x40429528c099349b426ddbf129497176951a64a53db5f9d8bd2be0252cb22b2_Z},
+ {0x4027dc6b56d446e5972f35464eeac85c5254ef377c902d9fe37aea841bb5292_Z,
+ 0x7c3ea37689ef679fa2f5c7e031a78e23d484a8317990fd34d44d95cc1db3717_Z},
+ {0x645dbf78a3c228c4b7151450b5e65edb58e71f37e1e4bc5f471e0f1abd6d9c2_Z,
+ 0x15cfe7850f327b256e23b00627451560c5c6ab60db78d45b7ab286afb6f13ab_Z},
+ {0x1503ca373757677ad1d911a2b599d01c46eb879d1ce21ae171c7e439846a85f_Z,
+ 0x583eb269b7030da6a0c324026919de3f9489d2ff6ae0e6320c36f05469ad66c_Z},
+ {0x66e1819ba3ec4ad4ae9f7d7588d23baa004e29d3aad2393d52af204a81626ca_Z,
+ 0x505249980cbe6273b82ad5038fe04a981896f4117345ac1abcc67e2525c0ee4_Z},
+ {0x5ec20dbb290254545f9292c0a8e4fbbfb80ad9aab0a0e0e9e9923f784d70ed1_Z,
+ 0xbdb1ca3a859227cf5d00eaae1f22584e826ed83b7ccdb65483ed5213dc4323_Z},
+ {0xa5c1a5011f4b81c5c01ef0b07c0fbf0a166de77280f0ae241f2db6cba15194_Z,
+ 0x4444521fb9b33d7dfeb1247d0ee1a2b854ad166cb663d9dd2e686909362a689_Z},
+ {0x1f35335de40e00c62642dac2fda8b30f071986ce4f11db849df11bc45ad4e0c_Z,
+ 0x7801a2c761b90fd4477ba0be9a775003d5dfcd959b1ed198b4681f15e7acbf_Z},
+ {0x48db4798cf6821c1ffb8178b1d3bb6020e04186c96aaf4670972d367f4ed5f_Z,
+ 0x781019494df95b888f1578f1b4a3f8e125ea60eca47ef9207a10630671217a3_Z},
+ {0x17f653d904210148a8e74d8e719a3061683c164aa6d79c902a19f185ab437bd_Z,
+ 0x6780e97985932c3860d810af1e065d454b1cb4be0e7ffe2d8cea7d52526e223_Z},
+ {0x5c4d0c7432f9b0070436240f9855adae1467cdc9826952ae01b68cd52a3ad89_Z,
+ 0x1c5747f968ed91261b7ae9bf1023c999da9816e37de602d6a1a50d397752bff_Z},
+ {0x6fedd7639fdaa2f7bad4ca0b391710f6f8a7e890250ae8ae4252bb8b39a1e58_Z,
+ 0x436a215f655a3fd3778b2335ffdc9aca6b98474e43d764c1f8362830b084f0e_Z},
+ {0x7fbd45a889c5e9d127bb4f8474d6be7cb9796bbfff923b75e42a1ad4cae37d6_Z,
+ 0x484bd12622a6ba81cd53049c550d9ed682a8e765b656b1cbff9bbea637bd1f4_Z},
+ {0x17d984d47937263f7966a3e7b1eea04071e678494bd749c9e02b48b3234f06d_Z,
+ 0x7b341ff08722c4e161005d0037204a7a2001fdda7af2cc1a0b04a027f115a0f_Z},
+ {0x7f1822045db45ea07e1519c3ee1f7705915f35fe4dd8db1e8921b5d1c740edf_Z,
+ 0x33d41e06b93320ad1b3d9580380ec797a05dac3f1cc8008899110ebefde2f78_Z},
+ {0x7b19453ecb74b7d0e2a66b9890ff73bfbbcd61a266abd6d82dbe665bf32f34d_Z,
+ 0x6dba2355420dac582b1f349609ea1c89b89bba2d1a68a0642f1dd12d86e73cb_Z},
+ {0x273e82a15f395ddf2489a95685bec8bac62c4b459d1b28987d3cb27e4bc9128_Z,
+ 0x653375b48a4cf5d5b101c9ef533039bedce5dbeef3f59e8f168bdc99b06ca5f_Z},
+ {0x3006c9e7fc6a553d8eb4e8a47ce9f10d1a39576ac255ae9e0a4ce3869e76212_Z,
+ 0x65fe9e2ef2aae608be309332d464f57e28f1df5de1a6a519751b056971f932e_Z},
+ {0x5e8f384c8a4607fbe9789fcc52d54249d304d698562597d114c1d81452d3dee_Z,
+ 0x3c8bc78066b5d947dc1e405e326ee55ea606c7988f666748d259850fa259a22_Z},
+ {0x7841b2102e9aa103fb53a642b3e167b21113ea44751ab38e0b5ef8312654db9_Z,
+ 0x71bf5c8308fcf9c4a7847494cd9bdd946fddf7d3a37e8bb0b201ff2343deb8e_Z},
+ {0x40f68027420c11e3ade9aae041978dc18081c4f94943463aac92d887f922a62_Z,
+ 0x499c6062594a6c7e21a3cb91ea451813393bff365a27a08f1a515439b83cf42_Z},
+ {0x6ce77a50d038b222634e87948df0590b79d66087b01e42b9b6d8fa30ebb1465_Z,
+ 0x35f5c46bb1be8555a93f155a174d54ec048c2ac8676e7c743054ddc52709d37_Z},
+ {0x604f8b9f2dacb13d569262864063c2d4bb2b2cd716db6eeb2b1eeabc57746f6_Z,
+ 0x68c6799e24f3b44eec3049973445174727a66970f1614a782efa2b91ab1e457_Z},
+ {0x73d620f3bfe77f672943d448d7dc05327adf64b8e7af50039c469d7f7c994c4_Z,
+ 0x4859deb36eaf0c802f0d1514602368143a33ec6ce8fd55248b59025debc6afb_Z},
+ {0x3fd2bcd1c89d706a3647fbd354097f09c76636e93ae504973f944d8fc3bcc1_Z,
+ 0x677ef842cf5eb2444941f527abec567725e469469192354ad509a26ebb3d0e0_Z},
+ {0x39222ea924ac17b533c72ffb2c47ffdc11d6a7f7c70fbde3a10fb0b8f35eb2f_Z,
+ 0x20dc4bd1089019bc1d7379b4feb3eae6eb5af59e9f253845da9fd633057e952_Z},
+ {0x326f58994e1347f62e4102183215b5db956378d2f61f14aba4dec94577f53c_Z,
+ 0x7a03284c296003bbe05178a1d82efdb7b8125511d63e20e50aed789c2e52e1_Z},
+ {0x53aa8939c74d4ee58f03bc88bace5a45c7bfcf27466201da05dc6723a5f5632_Z,
+ 0x2e32535ca7732904a048183247b04b426ecf9b39fc393a9cebe92fb1dc7a7f1_Z},
+ {0x6cee1a03145e93b3e826e6067005f09c06099c98198c91c222407ba5c8c132e_Z,
+ 0xbeaecad1274e7c6e5476a100c271aa1a6f86ee5a9fa5c2f26124d5886fa63_Z},
+ {0x3ec659b8175e1be1bd5a252108714776b813e330393f587814f5f1f32a73332_Z,
+ 0x529a5cf9f8c237ae69a94217d173c8d19c156952041f5c980da557990863fa7_Z},
+ {0x3d66ec5963d0c534d4139c8cef2e1ac48b3e7965fafabf58be26f903318af4e_Z,
+ 0x3d3f2de7a95f59b683725ee6283cbaf31f97c4b600df9a4621413223a468740_Z},
+ {0x7fb38ace8e0932fac2ea0d3eb676db8d684db1817e2e4d59da7996ce398b4a_Z,
+ 0x68f92bd5768cdd4710249f9d49ef1d5654e497b9a4ba10bd2971366d83fb400_Z},
+ {0x1c4a49314d6b4969cdd142c76ceb7682bfb868ace7f7568b0fc8635bda5a9fb_Z,
+ 0x5fc0519f1f4cc10b5771312458748c036313b87707ed0540026ac64a5955aa9_Z},
+ {0x3073c95d08d3b97caea5f0be16b2789bee766f76b7e5499f8ce8f96abb0f344_Z,
+ 0x52a8974b4eb9a1f6a0ae2c83cb4715bf18d73f057255fcb3f63b74f7e78f590_Z},
+ {0x44485b16d597a5de3604df6f7ed7e00b8aeef9e7e8dea8688255153b8bb16aa_Z,
+ 0x6cccb0ba170123266f24b5d93a744397dc2c44820edc4f8f5b9a0f5c9b3b940_Z},
+ {0x7618f77b7b32d512688dd62e0b48231d9574c6361e8be353a7dc04f7c3a115e_Z,
+ 0x78ffcd16d80636381ca231aae70d99c9e20298b4f5388fd823ea9fa2b8ddfd9_Z},
+ {0x7dc82fee1ef95cf5b3720fcc07f63246654bfe39762627839da40e51c75654d_Z,
+ 0x4c0ccdd70955da74558de20c88352df8a02aa97e4d5971c500e884740a8cb62_Z},
+ {0x7fa5d460dc10cbb418b444d9bde97e92c70a99a222b99f244dccee7e62cc04c_Z,
+ 0x636163901baa5b7576c38c43407af578b8c4607e01e86011ae2dde587a89f84_Z},
+ {0x758930d46006623a756c89bd0cc378f6a3c1f43c9a0edbb42274c35e75c16d2_Z,
+ 0x1d74dd9f81c2fec811b8cbd6168a745b0a111932b2a345265ef2853b50b6245_Z},
+ {0x7332ee0626b044d664ef228f8cb84df7c643e52f6a2591ae1c9007ad61ec16e_Z,
+ 0x229bd8e630572cbdee54283234cf3e9f060e6382f99943bf234119d47b54470_Z},
+ {0x78a16ef803aa20a075bb2f66c61bb2dae5698bebb94a0995fa74c3d53de1614_Z,
+ 0x246d588b68edb6fed96c128349908c42dcd64c46341b205e79f4aed9b5d3675_Z},
+ {0x6e1933939bd03b67bba753cc0cbe7d2f25bad68c993887ef8c9e2fcd59b0647_Z,
+ 0x599413f7c204a11a5ce315eab11299ab7326603412bb00bc1c59ff75a37d6b4_Z},
+ {0x4a79957a5a1888ad063b51c69565a2b48e8eb917183e220a1c8d3374526d30e_Z,
+ 0x1f092de0e069bba7fc5386e2e9a114c1618f88c4b95e220cd35ffe96f99fcad_Z},
+ {0x3148aa3df9ece39aca84f59489f2710522216f14be6055ee0027529d1d55e2d_Z,
+ 0x617e9a52a92975db0ba1977f71116f7058a0d31b869ac7f3ee2fd80b0c5100c_Z},
+ {0x5c1188e72384160ae39d07328346cda4f6c12d227448e6236f04dc971625287_Z,
+ 0x1643006eb3a3bc6aafd5f685cf054f2a572e6ca58c0118bcec0b833741f116d_Z},
+ {0x3f72efc93c9b71adc4c51d8fc69d3940b20d08733af2b7d05140fdb1d1c1004_Z,
+ 0x7399259987c8f4ebfab46e522380707e58427d3962ee0c2a91760813f76d232_Z},
+ {0x3129b34c03c51aa8f611e91d5cfcc9bd3ef108ee66e6d3ee35a0e0e50055bb_Z,
+ 0x563b18b5650085efb4cf179a029e6afff27b1d3091cd28eaa68d24fa1f801c6_Z},
+ {0x16eac0f9fb4c67cf89a7fa4ee615bbe731d8edcb709a1b9b50c7d873a530f52_Z,
+ 0x7ff8288b6e199ca8f316192881424a37fb080c29daa76b1f0edaccaf580a80e_Z},
+ {0x75f6b6028c43ce832f65d7e8e620d43b16cba215b4b94df5b60fc24e9655ee4_Z,
+ 0x35e9ccfaed2293a8b94b28de03bcb13eb64a26c831e26cc61a39b97969a2ff0_Z},
+ {0x3c6152fe093bd6316897917ec56a218640ec1b2148f21db9b14fc7a5ff362e8_Z,
+ 0x6eef2df27ae7d63a28856b07b73e7aad7ca94f317201a1e675ffc6f9a1710dd_Z},
+ {0x54e01b5fe4fd96052aad55b3f26b1d254dfc7e2525fffb9ae0a77eb8cc5579_Z,
+ 0x7c3d39232ab333675b219abc766ed9b4782c840e6b046614dedb8a619696eb0_Z},
+ {0xd1e63f8ea8a76429cf254a6d3b668761f0dc572d4bfac4fd56d9eaf58fb6c0_Z,
+ 0x2bd0a84d3908a63085824c9329a0983913006ba155b56a58eb3f9becab29c45_Z},
+ {0x2d6122f2a702edd4da7385b1580796a71d13bd72be94cfb3fec01149c006c2d_Z,
+ 0x70eb282fae992efa6f5915e578b640653549f23385ef3a29ab29b1b9b8ad63b_Z},
+ {0x752fec14beaadb5ddbba6b3a17fcb86579fa588ef407fad0ea07dbb22a640d3_Z,
+ 0x3feb6728eca21a1e84e8f9f23010387a53a96a1cb62d86fb37996150a1299ef_Z},
+ {0x63f94a92f27acde8f5ed949b459506f51d70c85bcc61a34d647264ecc53c65e_Z,
+ 0x37e5dce0646ee66f4fdb93b82d54d83a054948fa7d7fa74ab6b36246fc7383e_Z},
+ {0xd6aa909287a2f05b9528690c741702c4c5f4d486c19a46c38215f52ef79c7b_Z,
+ 0x5ebe1128dd81093df4aca0df365d58adab848d1be1a94b95eeb649afd66a018_Z},
+ {0x12866812b3053e2f7a9572bdaf5ef2b48c6fb62a0eed9ff0356df50e7d05557_Z,
+ 0x6785f7eb2cd1c120e4c7167b46861d10117040a2e9f2ca86a71e9d67df90613_Z},
+ {0x46a730d05330b1b13673cb8a1b8f45460035e4a9f1a1751cfba099c4355c1c_Z,
+ 0x76fb0ec6cd16a8141cdcd875c8b2de9fce42d296072643d148ac7e7fa7472df_Z},
+ {0x4bd4380a22900bd34835e0a908eacf4b6edb61eda0cf483f9212453b37e7516_Z,
+ 0x5e9551cd20d8d7ddbf4366880b7d5267385afa1966ff30da4baaf273b009d29_Z},
+ {0x71f1994ad40baa2922424ae222663a64f93d8b67929e9a10f9e4c1ab19f3833_Z,
+ 0x85320fe68ec0d37cc19fdfd03589d66906ffa4046c80e1b094a85f27676346_Z},
+ {0x5a63b1bf5232f28f808765c6be7ce1f81c52145b39f01c879fae0f4303bee61_Z,
+ 0x3bc5d6df68bb6d0577bf9ae2ae59ec0e9b2dc7dd56ea179fb38a41e853db950_Z},
+ {0x161ded55ff1087032381e6c1449704f63ad2d88df82dfc44a71890fa09b3941_Z,
+ 0x78a52e0013842037274ea75daaf8eb4afc04ccc4b07bfaf3f5ee47d165e01b_Z},
+ {0x1bfce5229c5fbff5c0f452a22317fcfcd9262f23df41840f84fe7d44cfba1a1_Z,
+ 0x66b387872c00e63c73006a955d42cf49c46c5708fc9d1579b9ae38341b24a3d_Z},
+ {0x56d47dadc9cbd1dcb2ee3efcd5d4af5e6aea71df10815c68b54a14e81d11b44_Z,
+ 0x47e966ba54df48e9b612a903685e0060a67e4725402e8cb4cf654e54e813a3e_Z},
+ {0x4b1c44438afd4ddf20a2cf612df2ee494ce84c7274c5529e857693e73018491_Z,
+ 0x430403bd31d8f0677e06abff7159384560f27b9622943fea1a3192f14bf40d4_Z},
+ {0x7f7281728fc2214aa1dbf13176a4624b53814734abd570eb6ef7c7e32379606_Z,
+ 0x312da47be347fb3fa2c9089b38df372560dcace2effeeacab4d96ab11567295_Z},
+ {0x16a28884a1be8183e0d3fc0db84a9afbf47126fd3be548c2a584aaafbfa7dfe_Z,
+ 0x7c3f57b3b895564ba562c1cd80b71fda6d2e611665c6ab87744f5390858fe24_Z},
+ {0x323339f37b327a731232a9580e79952063c7c232bd1380146d8a83c285f4b8b_Z,
+ 0x4f16be1d983c7232f92cce6b9690695978d42cecc8eeb8c206e125d1098a265_Z},
+ {0x624d26cbaa197e104eb83cebf2adeed09a5cdad359993fe5e3529d4d0def21d_Z,
+ 0x261b7da3cfb55c788977e0d8d640e3e93ae5a325d962ce85c816d7d32cfc430_Z},
+ {0xf24ecb7ee83a3e28dab54a330dc93d0429a7aea36412e922dce8fbff40d60d_Z,
+ 0xb043e36a258d1df1d21b0cc7be9c4dcae1bd4ed326c110e668ac23d86805a6_Z},
+ {0x686cea46b710bde1231483bfdbc700cfa3da6ecd5841c0e0c782f9ea24328ec_Z,
+ 0x7eb7407aa58edd6911c7c7e8d1e03bb52ead4a2415a0c33325872ff3a521dd6_Z},
+ {0x3866ee1186264549df3dfcdf8705c0380c9372eef6d4081c2454d3aded1720e_Z,
+ 0x634c6d3e8eb8af652a4be73e3b613452c2213104ca875b66b4b15ee5b1716af_Z},
+ {0x484c687cd2969a1d20a58cdfb9a60f280a473284503b1ecff5de514aaf8206b_Z,
+ 0x34d44d26b7427e51a646d1b924084762f5b461685450f21d6a472de565bebd8_Z},
+ {0x203561333771fa0fe22c4033349f7b877d15b0542a5598e81e067968768247a_Z,
+ 0x2b6a533aff6e2163a36a2a89cb7415848bef48db40f952ffd380f47676707c2_Z},
+ {0x2ffa6cca6233695760251206fc5e34c8d3692498589478cdd3d5b09f0b7c05d_Z,
+ 0x6c57d605478fa9626c4ed769554d075daa53e1a1d0bd4d94174d3bfeeb11ad6_Z},
+ {0x5dccf0fa46a5571f204d0b033b45f299cbb3d9f80fded57253ea4f1c64faaef_Z,
+ 0x30a38e131ee8756ee5ea2a3e16618a5dbc28b5b9311308bf037ecc2039dfc7d_Z},
+ {0x57b0a2eaebeafd950221facdd24790d7d1ab8883e5c5d55635f0d14a1ee4741_Z,
+ 0x7b41cc478fa6be38417271db8ed12efc0da6982552c1496025d2df0576bf4ad_Z},
+ {0x611b5725101f611c387ccaa13889ecf3bb5595071a179ce350029bfca4ad7f1_Z,
+ 0x3129755977abc8995fec7eec1123a1561e429fde37ff36af002d3211831ecf4_Z},
+ {0x1c06bbd0c52fdab9fcaf680c7a93fb821e538a2ed79f00f3c34d5afb9ea6b31_Z,
+ 0x3873d3bdfe0be0157bbc141198dc95497823cc222986d24c594b87bd48dc527_Z},
+ {0x275cdbabc989c615130d36dabfa55ca9d539ed5f67c187444b0a9a12e5b7234_Z,
+ 0x2b7f723e68e579e551115d56f0ae71a3b787b843cc04a35b9f11084b006521_Z},
+ {0x6cc702eb20f8b5940c7da71f8b1801f55c8c2d8e2e4a3c6c983f00bc1ffdd95_Z,
+ 0x5d15b3727bc66f3aba6d589acdd139fae115232eb845abe61fbdfc51341352e_Z},
+ {0x44defb418700cee8c9bd696b872adb005490512d8bba081f8f99a9f15cc981c_Z,
+ 0x3b2072cdb1d919b2b65b5cb3557f0a3381d7ca293c267ca4a38f83e77bcc96e_Z},
+ {0xfd83ce77b1578b3a9b8c3cbeaddb1504d2fd4a19c901c21ac65961224e4966_Z,
+ 0x110cbe64fc10c6b9c66f15ca406a35f50b723b35d83c5eb9797a57f8395f4f9_Z},
+ {0x9dc6ff90e341875e113bbfb507724dc7095a280d2f32cb6ba61a1e0c2d2aef_Z,
+ 0x4aeb622896c852c2747454e8f172c9482955a42ecbe522d6ce07ecde79d0a51_Z},
+ {0x71c58b0e47b9dd9107ebd8a8c8fa9f0534e78231bac612c1ddc7a94edf33eb7_Z,
+ 0x7f90edaf4792bf8334adbaa0f4ee7c654312725af188682d75f34874c4eccb9_Z},
+ {0x1f6de1f14988778ceb2dfe844f92394f1f1e72fd1581ceb3bf336c95ce50345_Z,
+ 0x4f6007ed4e022d2ee9fe4ca8207c5f6c766c4f3b85260e941fb24ad0dcbf0bc_Z},
+ {0x3ddc3ac25ede4a67a97547ed27dc920239b585fb3624177e2e8d59eba678115_Z,
+ 0xa9afd8f8bb759cbd1dff2addc63f47da4ba1291ea34229c09c0637dc5c8d24_Z},
+ {0xc56b0269d8431556e471cab9d70edda3a37b391696f107b2dc370631de51d_Z,
+ 0x729c52f6b134f733eb750c14bd9f95c077f0f6f6ff4005701e5bedc6544599d_Z},
+ {0x44d32ce19ac6807cb22e4f25fe1486a36a13926f147fbfa054b63ff0446177d_Z,
+ 0x212a21e8c124c9cd37c80d2dd66913ceaa6b6f666522f115c39382b2d5925e8_Z},
+ {0x35dfc16f3ae6ccc06a267bf6d931601e52f3e45359ffc513570b65b96adc4f_Z,
+ 0x74311d10f4bece01b5ae65a6affe5c931463aa1b73a3320eeb41bbb7bb1ff62_Z},
+ {0xe0acd9d2d907031b319b80121dc90699d003d220ea785d50e5033cdb3b1a03_Z,
+ 0x3911ba78d6e507485d6374b0f7d2e6198f6462a7d6d3cf046404a07af690357_Z},
+ {0x3c57918ca254c0cb7dac251ef4e10c7d82327969552eae15d26c4c52660922a_Z,
+ 0x5fd5f5ff3f14e671548074114c72c48409df8a2e71fc8aa3c8acb506e2a88df_Z},
+ {0x222ad8b61e219ba2b581f606b7c996516850a46a3db72fe1f72b5a9be6c324c_Z,
+ 0x72015a5e2db648112abd284fd867b59fc5606645177d26cf6e9a655c9912d42_Z},
+ {0x3c86d5d774bc614469768ad38f7be9a53e9a233942c5c553b82e49aae684764_Z,
+ 0x480febea8229e130dedffff89c11f3c43e11724e6bd89d5566d78752859d41c_Z},
+ {0xadb73bb8352d0c10175df371f7868ef2c9e0c79ac788430c480c0f7d85c187_Z,
+ 0x60b564785248111502e6f39c4994d6293fac22bc25f4d764b2fb1957d3c9bd8_Z},
+ {0x3836ab8b46cf4f453a22532c886940b982029b29c42adca90ded5bf77e6bcb9_Z,
+ 0x7b15e91d6355f147b171a90b064a9d8b2d7bf3699bbf4987664c61c950d8996_Z},
+ {0x12ed96af1a97c45ec31f1531e96f6fb28a03ba52ab8484545fbe0dddc97bb32_Z,
+ 0x6d1f522b6c6cad0940cff8e23decc72bb8d4164696af031415508b025aa8be1_Z},
+ {0x27382994ae5878223ef802e9b4882f481a1b4008f1eec8484483471f7aa742b_Z,
+ 0xc31750d242b3975b0026a0e86ccdd17d0f680a8c6f53f197fc25eb1f777917_Z},
+ {0x431677eba3715455bc235557518a74f3b111a88844ef13e159ad44bc16de3e6_Z,
+ 0x30000e1eb6a17d9df776981e65c6e500fded1ac12003adc9446b269812c9197_Z},
+ {0x4b563e6f42589671579eabfa2cda5502b361c46a5ac8d45c8ed44741a925b33_Z,
+ 0x627bdb41678443fdd1aa607709e9699b652308615f4bea760a3b79ee0d9ab5c_Z},
+ {0x2932fd3f81fc973ca9def6b7f1bb50f980fe589187cfe9e9f52ba4d356cf2c8_Z,
+ 0x1e6bfd00fa976c4770263a227048214c38850fe0f059e7b3d2c7871ef07d68f_Z},
+ {0xe44e4f3d96d9dec775b996be57e57fdc28e7c68023109b221c414a244a0dbc_Z,
+ 0x58b1e52fa274812e5184e00e9ad812bec2463140adfb4bea3b2d665867dcc9_Z},
+ {0x7fcb89be1f4bec745887bb891e53fefd665c53d00a9e74de16b8a7e1f7adfb5_Z,
+ 0x74af0b06633f779897e199609c71cc5649bbb65bc2c0abd4c678f0480c198d1_Z},
+ {0x62a381ffb904ea3ff4d451d4c8459457cdbc3dc2fd2da646a95d8c1e90c0b7b_Z,
+ 0x1ba058658e09db9e319fa73de8ab4a992b71e4efc22c273725bdcab84e2a315_Z},
+ {0x1b0fbb7a84c67e668450a54449c7a46261a2d355589f8b84ebfbaf9a77ee938_Z,
+ 0x44f8fffa33dd33a6146c35d196595e22cc4a215f61ee9197cd751400970a1b_Z},
+ {0x78fe920bd96a356d4d95ee34adafe8fecf071d3107c36f047b4024ddc4b3eea_Z,
+ 0x6162f29607fdbec10181fbac6e57d5cb41b922c5791fb24bd28bcdd75d16c41_Z},
+ {0x5629b849e026e65d119ac11821d7ab7efd9c52226f75c7427505d6818bb0c8d_Z,
+ 0x1539c0f90970ee8b490e45bbe5568170e5708521a0e59f976be680595906feb_Z},
+ {0x62bc853f349bac8c6e5921d27ba85dbd9ba20a375d70a7bc008928f3e123b04_Z,
+ 0x6acfeb1de05ba43c3ef1a9110a983a320e77b3ca294abbc04aeca19b194f26f_Z},
+ {0x4cf4bed663464418285cbae359b5d84ec76b5997d24f3640984c7663421190f_Z,
+ 0x941f818e3e3e8fb1568da85217d17f9250ebc948379014d900a7b1a848494_Z},
+ {0x52ff3d9ffe9a302f6dfaaf74bab57c08027d5cb699a69b30830540c0a2d47a1_Z,
+ 0x987dd8876873778d933fbfed37aab2f7d6f669c37024f926b1edcb2ca55782_Z},
+ {0x1109ee32f0bc53de6bfa457060b366e909d7c18061ec9845f46ac715496897f_Z,
+ 0x38f36f172bdfd454b9285f86e6bdece8fdffc95182c7d801b03c671cc55139b_Z},
+ {0x4b4482f1d84efe23dadf3bb10df3dcaa251312dcdd604f616f1eb540e1f3232_Z,
+ 0x7c9c149dcae9135f940fb54482f9c3cd8193721643a6e23157b8020410d439c_Z},
+ {0x69cb459b9e415b7581ca163611c470d875971d5d7949de732d1f0f200544a73_Z,
+ 0xa7136fa9dd00c0469863b7def3f83a5611ed628810d7e807e7a873da5a9897_Z},
+ {0xb66a4e32ac9a4baa8f64780acd94ed3628b2b0ea874ba4dece629af65f9e62_Z,
+ 0x24328ba9996a24389658e3467b8b90dc3927ef8419fe28b3f55b1c1aaa51915_Z},
+ {0x5ecc3080062dd451236de0e4eb91c5c75100733364bc5469f5fa76f79021ecb_Z,
+ 0x6da4abb9031a27b5be94529324fad8026e7d871570780081b0f424d4fe543c9_Z},
+ {0x1e3146f00880bb22486d5bc73e54367d54251f4002bcf342d0393b05a4b9ce0_Z,
+ 0x23b6fb8e945d3205f633ba724202db5a99305f807137edf942cd60eef867699_Z},
+ {0x2e1da8013285598b899f026c6974185db12c97b4c63509769d3d4ad1d18a4e5_Z,
+ 0x1e7e7b668674d1593c39d58bc7bccbf568208732b3519bc2cdf93db34366862_Z},
+ {0xd26c3f389d81709506f184b53871497c8d36c5c9eee8e3737358204c1acba3_Z,
+ 0x34649c3d39f3b825947fedbca215ae30c5a5995e93b1c8efca4944cf85a082a_Z},
+ {0x91300478a83595d548f32f259033291fc7d083953b0b8bde88c7559660c563_Z,
+ 0xe5d2bff57fc6551e9b80c06ac7314a71907cdcc66ce82f2cce721a670df10a_Z},
+ {0x1f7abcb9d462c63ffe92aa56619ae8590089cca4d93ee3e5f34a63882452cc7_Z,
+ 0x7e9f85c7b7ca6e9a4f3a026d1048adbeef69ea9d876c6f647c257b879a81bdd_Z},
+ {0x4d2caa1323012e4c83b0ad387308b8aef5637bc35ddd882e7f5e41cf2ca410f_Z,
+ 0x47150e808c81a540b6f8864e9d6636589cacaa516f82caaa96506edfbd6f0e_Z},
+ {0x3c10a6083c38351deb3e6d1b386827d0acf48979b66b95249eb8700ec26b069_Z,
+ 0x47e34bfe561d903cffdd1d849b85aa3cbd31cb4a9bbd8cc2e5fd2f95016cabc_Z},
+ {0x758bd54868eec045d0b4d3d2bc415d24bce13fee47cefdfda46425c109b657_Z,
+ 0x3392a7c66ea3bd7b044680bbe9f78ae86752097404c067e9d2572f55330df83_Z},
+ {0x19e718e0ca1d2d6fadbc6006ee7dda7a385430e29f5e239cdd4bb7c3fdcb2f8_Z,
+ 0x5c68249b7fe03ea2e13481a63b6cd4bf74ce42009a89fee0b3f8f968b3ec709_Z},
+ {0x28077f57ea62401806367e6d54fe45d02de5b072db787ffdcc3854e12a3e855_Z,
+ 0x14f3762689072f5fb41d03e94b01808c739f6d42b7b785b0e464100b150efd2_Z},
+ {0x3b8a8cefd017363ce867265af3293cec081fa589fe561830f0078778cbd338f_Z,
+ 0x69ccf2383cb7b4f9c806d72535812483e7c5e9a1a5928529d64ca7e085e758d_Z},
+ {0x77878f388d22161a2953e5aca6bac1ea480e102f329574b4b201640d44a296b_Z,
+ 0x7eb35706a90a03aff7c2fecca72659136547cee98038746db5aba16fd7178df_Z},
+ {0x97332e6da70961f2ef31b7b628f1018d21db8db015922a301fca7d6fc6a8e6_Z,
+ 0x2e37b06f639fc7a82601b744570a2619e543cbfaf60e474107fcaf4686d3223_Z},
+ {0xa81518d452d3aac48bf0386c3ff170ef4e684a4def242c964e129c64f4d647_Z,
+ 0x37506e44c85908ec7b7adda9547fbdcc2e3605151fefa77fbf127ce3bc938f2_Z},
+ {0xe80336b2220b1d666074f6b0dac85353d0e4c2e8bd0f37055a2236a6a9fadc_Z,
+ 0x1cae76d73eda7a5964c5d9d3ad6748aff51f5543c56441d2fdb7b444a39846a_Z},
+ {0x2c01fd8430ecb44e066f352c4f697fc9fda177dbe162f82862d7b9ea8c918de_Z,
+ 0x6e1dfa99640fdf5b30603d34c7c97c1aa6e6b7f3a2c52a21fc64b0fcac7d591_Z},
+ {0x744e37b511cd0ddcfe15f3581947014c159de81ed055d15a13c7a2d1fa39f0f_Z,
+ 0x685caa8ff6979a6c63640ac638a3f9c75737f2031bd55322a47384357af164d_Z},
+ {0x40e627ff84e1a7a9068b4368770f5956128a4d9e9e33e9cf5e24d9a242149fd_Z,
+ 0x2465bd6cb20bbdf810e2bc5c3c458cecf4f3aa163a7ac99c2579e5f33417f2e_Z},
+ {0x5f635af7f554a17bceb6ccb6e637abf89ab6dadd399189b0a0390e87b1896bc_Z,
+ 0x2aa6238a69f89665646c0e3ca2ba5f709cc6e14351cf71e1b00ec45201417a2_Z},
+ {0x5edad3063c9fa8305978d7e6a4e037c9fa519b8023c7608dfc3b66e5c1e8985_Z,
+ 0x49f405d07d7d01919da51159ecdad1031a5ac208c026fdfc14d38f633d92183_Z},
+ {0x2fdf2e8a45858c12926a1f25a62255fb2d02d0149a15ef669f859806683e649_Z,
+ 0x61cfb686bb31e2524470d4ad2ae09e3cc91b16305a21d748098feb1d8ce3b3d_Z},
+ {0xecdbd7c37f1dffa3943977278da3bb429afdf948b4ea6cdebace3d3be82381_Z,
+ 0x190b67fb34f7f3ad6afd3d6b6427aa327547d8ac0fb4deeb0feeba1f63d6c60_Z},
+ {0x233021b483f578dfa5222f8cccba5766ceee0ac65f6d4a3b1673b302a21fb3c_Z,
+ 0x7d4b6d44d175d4b593f06f5a6dcba2cdbc4eaa2097abaf613123546866cf4ef_Z},
+ {0x42db4e953c2a7a743de9fe20c5798f2247f51db4eabc6f40e86c13909a310ce_Z,
+ 0x12c1a0764a0b9f3666e431923ce15e7fcd0ded5ab153f0b48d362cca1604e65_Z},
+ {0x30d539e2b545fb957e40e2255f6463b52d227c9808472cee6a3d521aa283a44_Z,
+ 0x5f9eccf747fe6313570f99e845db32b40070acee9ce9e34da7f3c29ca53a07a_Z},
+ {0x4bd64e5ade3e2733580a6116b4af328751198e7128f9acfe3a3496b545efb5a_Z,
+ 0x4d584768900dabfc0dbaa086632b8051bb3905ef79b84d96c01514441d0cc93_Z},
+ {0x62d6e771f02e591557197d13c3e77dfa2d1794ac1808407bd8227c4be31b466_Z,
+ 0x5c6f5607c1808e899ba36a425911fa8566b7ea9cc80de8a80538c0fceb837c0_Z},
+ {0x5ce406218cb2852b1d2fe1836b19462f664631785216e87ffbce26030e2101f_Z,
+ 0x5225f107743c255ab50e7be4a090fe39478d1ef4ff558468559d8cfa87bb94_Z},
+ {0x670286486e8dda3dc66b0ed3149be7697d3e06c8279844079daa7e42d5af728_Z,
+ 0x26becabe7430380c56e320f5ae3329569cae7b0af06fd5327ee23979d200eb0_Z},
+ {0x3ef448df33a4394c43e93e5850cd0c5a6dcb18ae1cd865d00fe8ede9336a9f5_Z,
+ 0x56711f6ab7e0e4f7365ac34e284ac2879f40208c46f6febcc1dcf7146ecf015_Z},
+ {0x4b63fc130288e92f2d6ba238caa7a6364804e29829ac037c57df32fbf762bc3_Z,
+ 0x1eb8c80af55278b4113286c038fff2bfad2da62763bb03426506b869139da0e_Z},
+ {0x4e7e998557b29a95f805a6e2e26efc1e970108272d4755738c04f28572295c0_Z,
+ 0x97cfcc2f447bde61bde71049d8200a74a3028b21703bc139143d81a3623f09_Z},
+ {0x574b67898f02964c408f68e9470e7b615be037e40b824e6617f89cb56c21219_Z,
+ 0x49392d5f8e6740a1b0b7444f56d7a17363f8656c6e4c628678c86223f2e46c8_Z},
+ {0x7e8cb50ea5d5c1b09e219e7305bcb601d99b6d7185b1c388aa8e36fe1e56554_Z,
+ 0x47fefa308645455c12ccb5817da338f0c4f423b341aff4a9d158891a4fd69ba_Z},
+ {0x67266dea9e71b4ed2bf24a597a823dd048cf31e725db511edceac72998c9ef6_Z,
+ 0x39babd65850befde1f7c28e41dbdbb4caf82bbcf3bcb5b33161f1c2960b2d8_Z},
+ {0x63e99c2cb9c74eb9227d48065e27abb8f606df8fc83b2c44e4ea38b046bad2b_Z,
+ 0x60494a53dd13ecf34e08079d343c88fb655d6d810785af81f08d5aa9bcdcf9_Z},
+ {0x3cf0600b0f5a2a4eb78c487cd385350e8c7848e3f6983231881d7f1bbe28543_Z,
+ 0x56dee4288528de609976ef6b903b652127c37b0590e91a2fdbebc3f11df2628_Z},
+ {0x758f09245fa4b8b23d290ee2b3bfcede199b4fdb11f3cf2502a8ceedd61b129_Z,
+ 0x622d9baadfde781e985d9722e0a04715666769a4cc7a9bea0b96d6386be1746_Z},
+ {0x38e1a45b81492aa95d7abea2b08b8c14dc0b8a41108b036871fb737910ae18c_Z,
+ 0x145c611262656385e5ed6243568cd3f9f59dbfed7a01ba11e22bb8bb272e08e_Z},
+ {0x206e54ca53a2f155bd4fc45bf2edb77798ae6623defd4cf22f2dd4a7d119dad_Z,
+ 0x6c94e7f0825ad81680e4cdbcaaaf4df806d57a0d1fb2331926c3fe2b79d22e8_Z},
+ {0x56e98d2862893caebf66180e84badf19ffc8b53041eaaa313ae7286a8fac3d_Z,
+ 0x526306f9c01afd6e0c1198ea5de17630f5a39c4ecd02d8e6f0d613c355995c6_Z},
+ {0x4fa56f376c83db33f9dab2656558f3399099ec1de5e3018b7a6932dba8aa378_Z,
+ 0x3fa0984c931c9e38113e0c0e47e4401562761f92a7a23b45168f4e80ff5b54d_Z},
+ {0x450cfaadfecdb8a2fbd4b95c44cb1db723ee5ac9677c9c188b3d7c8eff4ca58_Z,
+ 0x1a552bdfc0c81be734f1f6ca9a6dd3ab4daa61c11fb53ebb7046eee25d617c7_Z},
+ {0x6fe20e5c8a8004e33eafc84d16ef770f2f0b7bace19adaaa150f987d295a34d_Z,
+ 0x28a35040a2ebe9a14a162d3208d5eabc6e2f3a8310f926bd80be65aa71775e2_Z},
+ {0x1bd65f45a35bf62ae8f9ffcbd7de2976b90518b6820c219f039c50043bb1edf_Z,
+ 0xfb5f0f8659f9b6ed7cb0ddd7999506d0c20b26bbe69d1915a31842cfac41eb_Z},
+ {0x4ba4cc166be8dec764910f75b45f74b40c690c74709e90f3aa372f0bd2d6997_Z,
+ 0x40301cf5c1751f4b971e46c4ede85fcac5c59a5ce5ae7c48151f27b24b219c_Z},
+ {0x21cfbc678f5a279ebb6ed124273c8df37eaf12a2d04180403ae6b5ec0b1e1ef_Z,
+ 0x4478ed6a346d899ad7b0b10350270aad39ddd5b68529297e4c91a54357f0a7f_Z},
+ {0x350bfefbe3d864eaadac9cc1195c14159bb736be743aed7380d2384cadd2046_Z,
+ 0x5e2a4b3ad0e1d7b9b8ef72b10d68a80e5ee691d7db591fcfbaad6240d41da8b_Z},
+ {0x529acd569127f73c8d34345f87e96cebfb48ee12a00a3861cda209337ed94e6_Z,
+ 0x3120671a89b705e5bfd99b0e7fd2118b4914a3ac309b3d74527cacb5ad7491_Z},
+ {0x55d3d7956a97d10e65a4d8ffeba40deaf0db0b57f8e022cdb3df6df613f5c6d_Z,
+ 0x159e59a6f92f48fcf85aa96c1a03749a4c4e2cf9e2bc94dd36796daebd9b8b9_Z},
+ {0x405f019ee8f2e972a005c549b0884b5051f63d1e78480b73208dc07d8c65a1f_Z,
+ 0x4301a3d0c285ad309ff24a12c100ead7f48ba1368143712f32ac141ab4d9e8d_Z},
+ {0x376d59b298d982f02dccad0edd5bbd4e5e8fad7898750675ed0856850a7babe_Z,
+ 0x5233b12bbc50564eb61cc098a17d3d97f06ec7a230380e4c5d3b725cc318eba_Z},
+ {0x2f55624af6109ef04b2ed035a44a904ace8627f55889f011f768aabf4de9a38_Z,
+ 0x7f64209ce7dfb63337ccf3d8c14f4093295f86996cabfee23b1655549aca089_Z},
+ {0x3b8965e942bed2714bc2e685fb103496e1e3595ac6a343d6df45fb5ef6979ed_Z,
+ 0x5b7cac7a165cb69ae103dd9052fb39c00ed0aad47989005aee53972d82d45b5_Z},
+ {0x7abfe3accdec1eae1a50049efdd9a8eb7c2921a08e8bf1fe606e9d5a4039ec4_Z,
+ 0x3af178e7e831f8148244d2d2b284a32991852db6212ad0a9d77540ef648a5fe_Z},
+ {0x4983196df6ad7d6f0a8d76f86af3863ad8611374a03fc0fd00793181dbde9d_Z,
+ 0x204c1f91b70f975a21d24a8face664e496f00f602daaafa69a3b56098a4cf89_Z},
+ {0x79e2b91c1531a3b16dbd53e72d94e16bf265cbec261658151acfaea3718ea72_Z,
+ 0x3d9bdb47e8b148c1c5e9e694ffbc2cf71aac74ae1a85e8d8c3f77e580f962eb_Z},
+ {0x297efceec61b3be17565843cae465c52524b4ecd9331a4170f54f7de8c4556c_Z,
+ 0x6ccef1733624cc8b973ac63dd54e7a53604929affe81c3439525ae5ed6af993_Z},
+ {0x44f04b1966264a23ccdc870c8563ad2efcd4c8087b5469b90e792287a5581c7_Z,
+ 0x1c417f0e9829fa3d3cbb7c3cf4dc7aac04c5bf66ff3f86b833a42c533aed1fc_Z},
+ {0x6ff83f5d8b51db3be0bda80eed2e2adb7037f2f58f705e88f0f98197431ac26_Z,
+ 0x64f59b8428894c2b7afd740866065ded42e716c7d48accd3f117f22768ed9fd_Z},
+ {0x14aa8187c9559f77cd1cf96b2dfc949182529936f2b0b4050ea56e134073b24_Z,
+ 0x5f36508c68b1dc586f3fd3f4e2bd29c6d8258491b8a6aa19ede811ce0d3d0a1_Z},
+ {0x95e8882a68c5000d1c2be7c0b43e7f2a6f8de906485241f0285a5c73a27a83_Z,
+ 0x1e4cb67207ab73bc1e5d19fa2146fde6d03021393b77a55df4ddda1fd28f5b1_Z},
+ {0x2ae0704dacb3da47d564514b4c3543505b403ba09a248c6e74593cba1867ff5_Z,
+ 0x5a4b5818088dc9ef4066b90a8893ae80fc89584f987ec1928ef9d72cea2bd67_Z},
+ {0x61a10898a76fb99989e51c0e823cb60b95ec7ccccb917c42b2b28014f5fd94d_Z,
+ 0x23d8ec1de45366d3b86c64c2da05a2ce3d171adf52ca5522e652ffd0eeee795_Z},
+ {0x79884133c879cf07734976fd64de220c5a972e04c2a3afb74c362d6c3beecbf_Z,
+ 0x2aaa0e6d4891b792b5643fdf09873343cd0e3fbba3cbd0601b481a4083f32b6_Z},
+ {0x45f73d2fa82be6c5ccd0f62d2237efe8727c479967d27cce28e42b9a44bad5b_Z,
+ 0x2fa4932215f72d56d8be5205c5851c9b3e5f2a14468e4a7acace5437c6b27dd_Z},
+ {0x37f53f771850f52f9c8f87b53c6bf0c93c2bed76f5fd1d5697356d0b2325007_Z,
+ 0x50f1a052b79b446fbc7b93ffa1a4515f6c3be3a76a2b0bc5eb8ff327549960c_Z},
+ {0x71bd6d23e0d2f312d47582efa609101f15b9ccc571fca8ac4fe3457c67fbc9b_Z,
+ 0x3b3fdf86bd4c7fc26d60540a6439b4d179dcbf7b91efb0ddc60dfbff9a148c6_Z},
+ {0x78219ba049438385b829c13a4993874a4a326c4143de0dd581c7b9956f99b06_Z,
+ 0x5505f1268dcdd4ee01b77abac3bfdcbf3f0513ab097c69ff777b4a631aaf256_Z},
+ {0xb81e924a86536dcf68bc5a2ca2065a61103ba6c9eb0ae4cf8cce9dbe286f15_Z,
+ 0x653a6dfb51acfe8a844fb8362795e5549d424aed88d3a090366a44f840b5b83_Z},
+ {0x441c0d7b7aa705046dc0e07ba5f33a7d9df23f694a05192ff8c2d7be2aa3fdc_Z,
+ 0x4c06568c0902bb99d428bfa0a946ed0f0ca0a51fbf07cad88e06e9c78e38a59_Z},
+ {0x2569c8c78b6d6b92533f29f767c95720d377fa63ad5a3b9827ee0a74b0488aa_Z,
+ 0x4b59c81d3cfe08834f946d9d57614f5366e0bcd9349475aaaebe01341196fe0_Z},
+ {0x3f2fa285a0471647b214eac652bbad9d58a9f2dd2e812aff0210d0d8a6eb32f_Z,
+ 0x4cdb18e1c2848c2b52c1a6557165bd1a8f55c2f7562f5cc0b326f73c25b696c_Z},
+ {0x5bb5141ab4fcc5290ae9151b8045a2cd8391547ce7b3b33cbbb10f8fb538092_Z,
+ 0x5a36bfd52acc6a83a9913b937ec086cc27fed030b5fa70dbc5d3c12c9515f56_Z},
+ {0x3f3fed272edf91aa7f8ca5d70005d390fbc67830ffc69c5fa3ae17582d2771_Z,
+ 0x459057e0883c44d8776fa217405f443e5954f08c4a5db68e437becaa664a999_Z},
+ {0x5237ca6656237a717a739a4509f70db1b9dedbb6cd232f60c9bd8c4563a6b1f_Z,
+ 0x56c7799dd02896dbe7d69dd8bb9718270549592099569d107b7b49c34bf5a49_Z},
+ {0x1cf6b8499ac881e0b2fc7def9bc1a28937033b2fc52de99e75909a620c7a281_Z,
+ 0x5769cf4f735366fa386b6858043dc99a100f86fbc77b16d57d77766197ba27a_Z},
+ {0x1b74b8a6b86dbf9638cdb0601e1a332b8d880753423d38c3394902c57f15e40_Z,
+ 0x6bb2dc10d2ecbb913219d0ebdc8d3337d644ed8b6c4e70637ef4c7e50887488_Z},
+ {0x61e4da415661bba52a4737e2bcde1a837787c4796b2e1854778534f1582c29b_Z,
+ 0x27c43e632cb7652e8508c9c38e3b4ad0d3dd6ba748d42dc84ec2685e64b9aad_Z},
+ {0x7c460a204d23f20ce86596dae6ac9b36734e4a9f7c5b43262c97a36c6a41c6e_Z,
+ 0x481a11f9300ab4c4bf6924c5ca884728cc361247377065920966785d043fbbf_Z},
+ {0x124ff5e55e4effa40daa5b9618d75c49c8b6fad95cbe8c0bfdd83cb9bed8316_Z,
+ 0x33a2ea15d0f71f58a00de71acd7f22ccf9002115e49dd1f7631faa0d32f9987_Z},
+ {0x61c9f8fc86715e95ff43583a865c5a6515f93381839d557ef884a68637eaf4c_Z,
+ 0x5877daaa42bbab9083b571e12648a9d62ced4470d71653092b6546f4a5acceb_Z},
+ {0x70a6b9a9e5d1fcc07dd9ebef6d8f5fcf04c6cb34932d0fe2335330ac6dc8d3d_Z,
+ 0x3f0cbd332ac56922e886656bee74f6e9bb4bb88f7af7bba9098678af1f38fc_Z},
+ {0x41db8a0f1ea78443a39e08a54323743c8897eed1ddc28f41aec6f2655040d9f_Z,
+ 0x7d4bf32f8f4719c2e4af8b7889f3b65cfdd033dc2f971798a12170f2b26efce_Z},
+ {0x62f035e01acdfe841104942d6c8c07f0fbd618cb85998ea24bcc24cfac1f8_Z,
+ 0x1caa886104b7d753fda93645a746989794cd825c62473b526ea34b3d51b5771_Z},
+ {0x441c6f016d270e86c19843727b83b864cec060cafc813b23d7e41e5abb1a60a_Z,
+ 0x29fece4e40400f3acae0586f4fc8ed535e805e472123ec38d662d8a0b01c086_Z},
+ {0x2c791ba0fb0b66177815c98191fa6188dba9c795e34a7c3c8a19086215e3cee_Z,
+ 0x11123151389d4b330db6a665a560407e7cd8c3807c749e2b0cffd9c3074ba77_Z},
+ {0x5292da4ca71ae75ed0554c267747e39c7a129b3b863e1af3ebb3e368439c4ea_Z,
+ 0x63af6a5016deea8cc674c44f16c63c1db31f09af4fb4d2ea7917c28116661fc_Z},
+ {0x3367388d5d1b7758dc3d92e244f227bb8a54e3d9909e7b7dd62ab5965e3efc7_Z,
+ 0x7ffb4833071e4b03ea755ccb9938487a478248fe9b1158a08f1ac298801c092_Z},
+ {0x95c863314b7f18090f8eee602403be823a367a1b416d54c32e5f914e67d922_Z,
+ 0x159c2824f899171deee23e0ed520d4825bd667983df0a8d45d3a1f7156d91f9_Z},
+ {0x621c6e08b3c57404644ad49ac7629832c141273fa1f323781b3395393fe985c_Z,
+ 0x65d1eb0140652958c4371ebec791e03317d6b2e689d90e304666f1b610783dd_Z},
+ {0x54313129bf13993952cd2b31ed06013aba85e74c1b8a00e062031f32188a84e_Z,
+ 0x680129efc9eb8ec07fc180e8f6877e5f0f9f44e3000a2c586ed4ce49d12a313_Z},
+ {0x21ea57a1c8286bb45872e78617853c47b89091670ba51c124afa3362e7260d_Z,
+ 0x7087e5c1536df233ec9bfe2f983e8d7622892b9bf64c450c9823898e2cc2fc8_Z},
+ {0x3793b05b99e7a57d88db4ed0dbc3b771285abcd9052da50f88595354409f3f3_Z,
+ 0x12164105041c056f127e737c7cd63981e05f246bd2b6b65d1f427019c7c3801_Z},
+ {0xbefd345cef5fcae22ac37dacd6b9128cc58cbba3e3fd774e11b421c2ba392_Z,
+ 0x6209d25f24f88f7876ca604db23d05f78e6b3b67fb033f2f1bee221f352b8c8_Z},
+ {0x15fa536045fda4c65ff74f10b4e669ce88b9996c6772288289d3ad725987fa6_Z,
+ 0x30e0c2124a35e265e931ccc66ce5ac3697d982814beb407144ff6762cb691df_Z},
+ {0x38b795bd77ac573576dc204857a488cac2cce19809882631ca2069598c577c8_Z,
+ 0x786ba555d55ebef688b068bb9186a34a08cb00bdfef51619bbf911890ae9a13_Z},
+ {0x6c66853592196c3eb8d9526dc155205e2c64097adf8684bb0e15eb460ce1c72_Z,
+ 0x1bb4ebf654f4250c8dd1061a4e1b464b31a8a9999ac9960446ef8108a66871a_Z},
+ {0x5b08dfbc87ad9c00b88e78816973ad2f9c10c70f2156908892cc7b7a2a1fd30_Z,
+ 0x1151f407a77e2556073173d8f5c9ff561d8a23742121ca15f7d0ac391af50ea_Z},
+ {0x309190eba106aa6ead54b5ca5817969aa68b4b4c627700799a49fc6bdd32ba1_Z,
+ 0x505b6a2bc7b0d78ca6ce2abe7dfb7312369918a4599cccf8a615f6701cfd851_Z},
+ {0x89cc205966af08acc8910d563af7443d5dfbb5d88dae79c013c678c65dcecc_Z,
+ 0x1f8cf955694b246a423ac725791231257b88936e00347ecaa1e17045c0ab540_Z},
+ {0x480086b61a80c36cf1e1a350baf554e58ee8d9333186b70c9c512fb9e9d5a84_Z,
+ 0x511edfe58f8d36a6170df743731da1ff525cfd5108be20e30ac4183d1281570_Z},
+ {0x3caf14fb1d2e90a13ad4eb091250fe37133aabf6029633e905e5a93ead41dbb_Z,
+ 0x49122aff6059dfda19e4b973aba5ebe3804c91728936c6381c1ed1ea9380920_Z},
+ {0x66d1b8fb2cabc46cd79741ce1cb7326077ad8ea3227a6427244bdd3806bdadd_Z,
+ 0x4a52eb74f4d5371ba3265dffd61c844f9e68d4ff0b44dc4936182f9280bb66b_Z},
+ {0x373330c5afd53c31257fcc9050fef873e15ea9f81d9810f30744309b04e02b3_Z,
+ 0x5889806607b3dc97a9c5b0c8a2f16d1792099a22866b879ca480cb89a11ef5c_Z},
+ {0x26840d0ec69a22c6818ff64b8b14633b531508c866e21d1dc9239778ae9e8c7_Z,
+ 0x157971f9a6e3a24d3b307be0e7c8cd352e2eb5cad33cf276270c0f309ee63fc_Z},
+ {0xebb84848f1c38c19a754d1b5d9460e39624dadbb30800987c9419c0f933b9f_Z,
+ 0x517b297cf32f4064e6d6c8e761ba8db89809604a701c7b3aa1a9c6beb370ea7_Z},
+ {0x25780380bc0795ed0dca727c55240f1d63593e552d224adb40df2d3721c0f66_Z,
+ 0x10215fb5a893e0275e9f1f66b217dde35addee91ed0e8f7d79531a2ff57b8c8_Z},
+ {0x243e1581cd1abfbf18c31c19a4c3d1cedfe69a40bb57b607c9af2717eefc742_Z,
+ 0x1296c27929f14535718c3a4ebe045f00afdc60afc74c7d398d8ce1b6609dc0f_Z},
+ {0x48babb8649e054bc8e0b902c89e6940c265f48464520649502ef1064eb94562_Z,
+ 0x3235be7852b0526d1a16f6969ec0e5b0e09cedaadc65863dea4e47f4f398264_Z},
+ {0x592db7c27e63489ef4bcef2eafce89f40067cd9a1ba48bc3dc76b5fc62ad9ca_Z,
+ 0x48b7711b570cd9ac65910e75e752f4b751fdbfb4091a28f59b8c046d3d9f8bc_Z},
+ {0x31d133456222586ae42a9ec7ce8539ee04afbe0b2ed00a2564dab0798d9b55d_Z,
+ 0xa77c52fa1fd718db5c83e7fda6d7d4d9aafef9ad95cad621470f2b753729e5_Z},
+ {0x4651668379883521e7983aafcb93811b4a72ef2975b3277773746708ef3e3fc_Z,
+ 0x512507f3f544d80ba5d47f73b571881e8d70d7b1d305b9704bdad036b7abc47_Z},
+ {0x26069e359b2e847affaef604f772f36224608b7642245d0e643889ed231bddc_Z,
+ 0x75ae1ec379f074ebc91270077c74b4d34347ce183b676b4dbe100bfff143b9e_Z},
+ {0x3196d01d1fa11dc3803b4813c4bbc6326869f61410f2bd14bc0f570d875aebe_Z,
+ 0x20313217cac79875bd2a503db1e86d1e5559911667a02524759344468d9561d_Z},
+ {0x483256607f75f06fb126addc60cadddd602154cc4782bcc08351a48745d0b97_Z,
+ 0x2950a7e500ebbe9775f08be37cc2e62ccf9030de18948d1bab07a4a9173f75d_Z},
+ {0x65f07b6050a2fc6eebe2c29ffa62f764060f7f9d3c82d2cb5e4e368aaa442c9_Z,
+ 0x562c9654b646cb84a213b41de203c871b3eae0a05c9c105a66a53c319c06373_Z},
+ {0x284870f6181c43f3b01d94baa9c5b6ada0deb861145523ad9169580eb7bed35_Z,
+ 0x5e03e6c40c1cfa3cafb01fd0622349871832a9d35499d06408a83edc1b76d02_Z},
+ {0x32229810a52137f0e6c3d37595c46f6132822d4b05f42674b48d7a7ac3ad85_Z,
+ 0x7babde959a0cf2c53ee59fc52c77c3adf899453f077f441965629f9aead30cd_Z},
+ {0x1ea8b98a6b85e74e0a2fbc18b206e290f3ed94ce99ca665e8e2351dfade990a_Z,
+ 0x478e93c4724115fb1648c8d5347422adbc1a0bbf962b2312e14aec80e1be742_Z},
+ {0x270cbaa08c79140c85b864475a0bf569cc03ac785e57f543dc444f37ce746cf_Z,
+ 0x3a9b8d894016680ae9d1bf3deb931d8987d4d8d8bfed45b81ccc595ec79046b_Z},
+ {0x6943922708b8ae5b40dd7031ef2e487abc4ac39a3591368285e83d6c9c51f4d_Z,
+ 0x5f157c37d09634e8cbfbef90ea50af59815d011e419a691c67ca3402b5efc33_Z},
+ {0x48ac6a80979fab4912cf0cb557d917a0bd68825d8658ec100496eaae6ff62e1_Z,
+ 0x2b6931350ab183402e39476340eb1177b7006f7a552915581e29a79bd7203a0_Z},
+ {0xe3adf9517d92ef22d1e2a787740a292ba32d5ca69faa9e8675f63ed816dce5_Z,
+ 0x36bccf69bb12dadd610145a3399213248d193660d8dc90a2e206f23bf2c7997_Z},
+ {0x5e6c8ae5afb2fa470f767581f3d578cf6a49547e4b78665edfd45776948bef8_Z,
+ 0x6cbfc11953dd7e195d2ce74e52a60df524767b44c4608bdd755be4bc85eb74c_Z},
+ {0x15a576a1242d39300f0db3ad770983825988da0457718ecd596c63a0a0eb4a6_Z,
+ 0x69a42e5f6f5a63349b57683a4609bba90f556a1680fa1ec3b02ee7d3211f903_Z},
+ {0x274cd14e4fbf2ed07402e8ad8075b320c5f76b7ea45ea36af523e95ed63ab50_Z,
+ 0x6ca640f9557c5f2d8b27f6ce95b108880ff4e4816b26b70b6506114389ce656_Z},
+ {0x4d8284e132e2fe81c5f71be1e3c79ab51b229e2c56c323e207cda179999d123_Z,
+ 0x116cfc00e9fbee1cf16af6282123cdf20eed13021c2037ef4c86f94eb6e6cba_Z},
+ {0x4056194fb5643e97991942ef5b63cadd89080bf57a01489c4398aca03f0980a_Z,
+ 0x2e2cddb434fa6f6da7859c3d518f0ced8795eea043a6c9613fb3e020103339f_Z},
+ {0x5d119d5c5ce532afc0875e0ee9b026d878c8773d34237f90a0d0670da6f01b3_Z,
+ 0x4a79fc025ce076b6a4742fbcc8cad313d0a8220c58024a41a5a674c0947e64b_Z},
+ {0x11800ce4061d99b9d53fd4138802335258f7798c5a935c9979f5a949ce1d483_Z,
+ 0x36745a4741a5c7290eaa8f2a3f9ec955ccb7ca323272e5d35d35c2a724ffac8_Z},
+ {0x4302525bceb97fa642fd5560a4a39fba3d2c06f68e6aff3332ff1854439ebb3_Z,
+ 0xe31edfd081ce82f8177b2d7d96e69851d09e908c2517114ffb37ee12c0ac64_Z},
+ {0x2f5fcbb96f0a66fd3bdfbcc78bda361cb812570f50e7c476533d56eee01c0e3_Z,
+ 0x527428a34855b5695c479d8fb7e831a299f7897f36682a74169cc60d160df2d_Z},
+ {0x52167df045ad0dc999b98de3d035aced9da4434211149b8cf4bf20e774580cf_Z,
+ 0x19051d2a1ad3fab190c5dfaf45188b49b4e90cca22aae54f0a785562d3d3f41_Z},
+ {0x541b5332491dbdb2b6f6bccceb7634970c046963891fae936dd950f4432b961_Z,
+ 0x78fa54da996a51e3a9c06091d58c2405a806649da2bb1f323807c4eec50eda2_Z},
+ {0x5f11e973da659b7738f87ca5bd4f3bd02207dd3c8d978f0d3e83fe81030febd_Z,
+ 0x137aba7027069f62d25caed416e13537687bb1428e71e5f0a0c52d52f2e65bc_Z},
+ {0x15ec941ee6c2110b819b5541be52981c09d83484c9dc735c43f39f5778718b4_Z,
+ 0x4561826142dc5b56acfcf605a78a4090472bb61235bcd605a765e05d0a7e549_Z},
+ {0x68ba398736d659522f484406110b43c68158bf4992094acf797a38979c587a4_Z,
+ 0x7c1d9e1702e28afddf22fed7a7a79df4315c174d0c6c4f4c75bc77d9b56777f_Z},
+ {0x67889cea31c81a429fbae643a4fce0ecd690a5c32b99397e39ed6d7a08702df_Z,
+ 0x7ea277c80b671146c9e455b98f42f45b941ac95ca2d15c8fa9ea82ee9b45e01_Z},
+ {0x596f2c68390ac26505d3c2eca5c77d46f8f3acbed192a2649d8c525a58d2334_Z,
+ 0x49f3bd8c62c610d5c19c52d970bde24b270c4ff7ae900453b909e72483974a0_Z},
+ {0x567779fb8b0afe592cea284629e3621ccfae3c4d7d3dc559c9fed750591a395_Z,
+ 0x6010bdc33f1cdb374facefff537e7910b72a1120502f312a7ce41df0d552ddd_Z},
+ {0xcebed0233e810aa6a29a8b0829d28f1c92f303d14dd73d6b12da98117dfc7_Z,
+ 0x4bdd51e1192a00df23aa8d0673e4915877ca41ddb8c9eaf21d39dd167fde7b7_Z},
+ {0x4c7085f066adeb6781596771972b188177e63f2e2b3788d03e033cdd5af1f06_Z,
+ 0x2929ee89f525862b0cedb3ab9b5166e1680cb77fb4668f10a6a3d76b5434566_Z},
+ {0x760e341bd836899c226176f47685f69438270c150c6fe7744cd723cd1e72359_Z,
+ 0x1bf09f2f1aac1a10ce8bdf20d5d178db747f01a4aa0aa8a5e4bfeef562cd94e_Z},
+ {0x6016b94c00b54920027ef64902c61478244b1936337d2ad41d9a8d43dd6a4b2_Z,
+ 0x3bf3dd9bce7f6d6f120de87fcbce6219340b59c2c1d75ee0d45105d33aab1cd_Z},
+ {0x4929e44ff692eb944d1045bee96e750219cda3bda0500029f0df49a1db30b5b_Z,
+ 0x2e138dcbd092242699004b4ce98764ffe4e892841f56830af298581cd1e523f_Z},
+ {0x5972d0e526311bacb70a04e88969b6c63c7399b578f0dc28bbd00d65ef01da7_Z,
+ 0x76b22bca9ac12d26530e7b0757e646beb3bbc5680d0f3f82fb8ee57ed4b5e39_Z},
+ {0x2ca0a42a26e26934ca2d48db960b4719113d87c5e57fb437d557c5eb4e03ac7_Z,
+ 0x62778c02561d4ec5d83a132afd7763a8349207c6b5d01fba70b56ba660cba2e_Z},
+ {0x5137ee53f076e21a2c23da09f63c0d275408c31e4634a6b6373be5cf13e6c00_Z,
+ 0x14fb446c077beb78e04de3282a63bfde12f9af85caaca4ddfab506cee31c0c1_Z},
+ {0x7d944853d1627b63f560aeda33acf640d35a4ee4d23a744957a2dae9d5b7c6c_Z,
+ 0xbcb411a210710acbcb9ea12680d89e3e4e652228b6786d3886e95f4d9e6970_Z},
+ {0x37d412c2ffb173a728477446b60b2b702d07a5243cb5fc8963e623a5ee75843_Z,
+ 0x672c79968908f92cd0cb0b4c65ba86e8f359b015623a89441e1bf859bba84cb_Z},
+ {0x5b37f472aa80398bff12cc74c8ee784c4fc89757292580d3a498bff17e9f114_Z,
+ 0x7d79da1aab9cfef58a5f3d1c9ec466956a45f8d2af0c1da6dd4c93f720fae6e_Z},
+ {0x25c09b3f1188c562571536202eb0f5fc4b9a7590417b8ea58b4343685d88a63_Z,
+ 0x3d5b817c73b37e9a1d24ca923351359b42ced2f3cafbcac8c2d6322dc767bb_Z},
+ {0x32e60904e73f9756f71e0a918d302aeca17cad4acacc81bab15702ab5ff78f0_Z,
+ 0xbcf4c0204f8275072f98a65b09ac58b87cdc9c70c4edfe99fe18870a3a5459_Z},
+ {0x49c35575996c1517d2daed90d2fe4a58e674d6b4aaa7288d0642c8bf59e562f_Z,
+ 0x57eeee00adea4ca80eeabab57852cbf03f1a57e21872cd44221e0550b9193b8_Z},
+ {0x10e1776b4c2a867bf1b028c6edec224cc6616c747e272f49e69b67b02a893dd_Z,
+ 0x8d45d62ec8e627b56950f2f7622a0438647f9e9f28e723e4a37cebc039a1b0_Z},
+ {0x79a93a75ecbe943acc964fd39ecfc971dc6555b2bc335e7b53f52f4eb16cd36_Z,
+ 0x146132a68ce2ca8b48363612226771ac547eb3cf52b6eb7981718faac08aa3c_Z},
+ {0x6b22d32e0590e169504e7f19864fd646d0994e7ed3e578a5b88f6e095913439_Z,
+ 0x68c3b22d859fb85e5c8fa0a8aea932285945b230957e603394333e9ad5acd82_Z},
+ {0x71ce5ec8286eb8c93b8481c6d19cf0a288ef4da4397e9c80f65023e516bc097_Z,
+ 0x54470babc742780cd8a05499026e738ccbf81d4170d1731734de68a8e5b402c_Z},
+ {0x27beb13a43bc6a1f6ce046da438b0beac5899ff4d57962dcfb6476b563f74b_Z,
+ 0x14074e9e93ee45394dfbe833998b9d1691961f8ba3166224b36404448c61bb3_Z},
+ {0x6b1de6c8f161aa6509a1dcacf2c0aa1bcf6ee9d9b40e032a9d72f77a6fa298c_Z,
+ 0x5e9312eb5b59d6cbadd7d3dcbc39f1b5bd9a8346fdcfdf1107bada6f9cc048_Z},
+ {0x32670fc3fa43bf39974ba72ea51f0d045d92d084a81fe5282dfc8309aa900b9_Z,
+ 0x518fee521bf1af62356aac3b7e53fdbf57121e030c6e9572b3de69912ca4eb4_Z},
+ {0x4b9ca363eabed9c66091a347375f7065cd28f49f914447de7cc1461f1375f1e_Z,
+ 0x3a1a3a2e5e7e72476befe2571ece708052d740d02cbe6fed58740968ae609c4_Z},
+ {0x4cc6da42863a3deca62fa218b7a3b50e034eb4bafd393eccba3f4cbe192ef10_Z,
+ 0x20bfa683c884f203713953b26d2821287ecd305fa2cb70570474533fc07f918_Z},
+ {0x87705353c44a5ccec8de65cf5433be6b3d9bd21eea49b60e6c907cf1a67a6a_Z,
+ 0x112804b13eee56e3b01aff75fa08fa8374c44fc461aed8a30ad54acd09c24eb_Z},
+ {0x6cf6eeeb9d339c0a05f72fd5af73fc7588e6d957100ee8999109437bc126cae_Z,
+ 0x54fa257cea22032eac272fcd034dadf2e00d602ef9e519cf7072023c130aad1_Z},
+ {0x19b32925048c5519d929650c833661b452ef7be7963fab0b6b328ab7dd7a28a_Z,
+ 0x1bd0c14a10bf9b88ea61011c0b2e64d07da151c6203800d5a5d12063838a510_Z},
+ {0x12a5fc5559428bc3b4eff97b21b63668b866e0722807f1db1f19696bacd9b0d_Z,
+ 0x4c2eb07f0c24047a3d73b560144f3fd32c99d6dbd9fc7cd2fd2a72a6e4b24c7_Z},
+ {0x13662b7a7d390aa76eb86a7c3bff6d9913eb28db6bd1a7c42de5cdad2e35ce2_Z,
+ 0x40626aded7f56f82cc431ae30527b096f57fbfbc04d3e12a5abae3edf301cf1_Z},
+ {0x255825bd49b8a2cce114360bd9c8fe8c641af64c8e7710107213cfcb006f43d_Z,
+ 0x3619cce4482335232f9e76a1460be9d296f2d468d26e4f95a78c71524fe59cc_Z},
+ {0x7f83009eeed4f12f54d341bbf06066480cfcdf51dda103ac54d4bcecf6b3b31_Z,
+ 0x4269519d28faafd7fd68bebfd8404d71ba05d62c4bb6d65d24aa6802fb84ab6_Z},
+ {0x2f325650eb316646b4eec903fe44828fcb11054f1bd42ca3a77f7e734110b35_Z,
+ 0x44f976082271016f9048e22c507d97d628722bb431f8d5cc1890524e6c386bf_Z},
+ {0x750b166bb6edc0ee80fae39c7c106879036738df2d79fb2294e1c21e9a24d6b_Z,
+ 0x54f8aa297a1afafe2a17a3254f45861167414327e918d17003c6aad01d0b24c_Z},
+ {0x3aedb10db9cf3285cdeee375879396fac1fb50dd259e1716f8c01e66f67ca72_Z,
+ 0x7feb9400f621f58c21601f23b7ec7c94a9b6b193c1cd74a8a60846aedadd359_Z},
+ {0x4ab7151702de76faa493e7a0b1ac20ee4d10c33b83fec9477547cb1236973eb_Z,
+ 0x63f1f122e3ef3acc46b0915ac69c3f5772879799cad889a817f55f5853d1235_Z},
+ {0x1675ead0d20e5bc3a7a7331999a87ac4c916ae29669e54197bb02aa6364520f_Z,
+ 0x4d1122da90d49e491922d9b533a6a668e2f65a2737ebb391ebb29fb7c1f8a9d_Z},
+ {0x2f7148111ef53c613157aeec12e16a20f13481da4390b6ce18a85d1d8547087_Z,
+ 0x2eeda779ab395597651d2a0b833ccf53b10280750139916ae2baf4ec57c633d_Z},
+ {0x4439c7810e7b2ba772b701ec3acdca0b80c9df23047710b87f7dc3f13b337d3_Z,
+ 0x5029cfe704c602a8a4662af0a5860ec03fb88f046d0e3400f2ce7638014c621_Z},
+ {0x2248eec40b5732a6a488b681f093643af7937071bc73118acae295a32b51b05_Z,
+ 0x1577e4aec30a97b648de4d0b19cf8891151b4eb11f8de9c6d7312f091552e19_Z},
+ {0x4738424e558d4e0d87a3124ca02ea24f0adc6b7a9768b0d3945ed2a6104857c_Z,
+ 0x33576f92aca3f0c8ae689c3c274c2de6b918940d86a6852e02fc99e35d1614_Z},
+ {0x7829edd8b866ebf7baaf604ed13d19a9797578f44bbc51b1cd67ca53803e96b_Z,
+ 0x5559040a6083f2af1f9133ccaf5bc2ce06e56ddfc7dd410e9635c0116b62722_Z},
+ {0x7f927b881f2cdc05e1a69e40bb714af47b630d1425f08ab5d574ee698f33d51_Z,
+ 0x26a465288e96572de303203bd38f4a03031e8158da0591cb037c0a5111d1056_Z},
+ {0x36a65598552f8753580d1655417d645a140966e10a1e1663015f9fdfae44881_Z,
+ 0x33d5bbfaebf59eae72b89b1aea12ab2ba3c9617f8c3baed1ec16bdf668381b5_Z},
+ {0x403becfa545c826782026ff409cc16c9d4fe428f1b5b6e630c92439d2fa5fd_Z,
+ 0x47bd6f2bf5d74f710ecb479c79b01fb774fbdad590e683a415cdedf33f71dc5_Z},
+ {0x3a747826d241b877d3d56b16e0b810cf088eda4fd6048da174c9991a942a5eb_Z,
+ 0x2c7ba19b0a3486a2cdb84d4a388d34beb077a0e467ba44590166f93f6a09d2e_Z},
+ {0x3d60cd375842714b37bda89dd1f13a7e0f3ff133b522209617d031bce05a537_Z,
+ 0xf77f216451ab01ad5226844d2162a7f32744688bcb4325445539e2ce5cec4_Z},
+ {0x235bf66f67c9100e7f0e22bb299cdfaa603644b240e0770aec7e7fd163e2a65_Z,
+ 0x37110b3fa83ece3990afca2bea8d5ebb3c7aace60a0147f8e6ab733e2f2b4d5_Z},
+ {0x3b796d4eb69a55471fa86108f787b3604874e92b6887a7667a6c2bfbbd9a42b_Z,
+ 0x4912d6dc0419732ef82cb3278415851d4e2d7ca89e0f4d7128cc9de51b810fe_Z},
+ {0x48d53516dd51e49faa7ab46c8c10db1befd10f23c6a9d9bc3640a2f0da44518_Z,
+ 0x73a2fb3d064adadf21aa1362c04affc660598f38a9e069b3afb74d0a99ae9ee_Z},
+ {0x48c32cff161ed145da0d5b73084897647abb777adf65738559ceab6939cf3e0_Z,
+ 0x3d99308978e828f857c382df32b472bda81e8ec8e30c8844077ba6d6d2ba903_Z},
+ {0x2947ff091a8ec9684affbc9a62e09e598841c4a6dc638088492aa47dea57097_Z,
+ 0x19a2cc97975e547f97a4d02e42f89e6ced6f5a953cfccdec347867d26926541_Z},
+ {0x1960d85f30475615f82484eba0bdafb7ea7cac3809f0518a757d66f02b01676_Z,
+ 0x36c8f77baabf0cc8805d993bbe62041fcf4e3239cf9d53278a4fbd91e75eeb7_Z},
+ {0x2765f28074d21d5a055340b6d40092d2bbef807e02009fabfa08ec0b9bdf38b_Z,
+ 0x7fb189e0553d5df52b6843661814824b3f3cbebbd54988f042fb256c6bf30b_Z},
+ {0x348836cb2aaa00212f4b1a4e2d7fc5417f246bf2fe5c9a16ebabda449e2e08a_Z,
+ 0x3f7276fd7d69e0d55ce5ee1d2d830534a27227fe0b6d8a36c93f9a78b872969_Z},
+ {0x7afb9d34b6a42ea8c6d870e4b8191c274201dc1f93a1a2219a2392b7e345a31_Z,
+ 0x42bbc20dc7115e0758b364a110227b16b64ec58fc535ce5ff1a9ad8b8a09fdd_Z},
+ {0x2cae0c2afee1767fd4c66f52e1f176d217e92e89cc19eb36d5a6c1715f641a_Z,
+ 0x5335efe2d9bc3667d25ea88bf76438a4d6ab9ba5c512f9da7d0529b79b62d83_Z},
+ {0x1cc5fde334707723c3a06f00c106db88664284a2df47bb6b144d9f960aea3e2_Z,
+ 0xdbbf610d100316938bcd8bcd078513512ecb50d4579690dbefaa419c05980d_Z},
+ {0x54e90cb8f3a2998d2675c5780679e06c0556b1e618f8fdf07f9a4b2466fbf1e_Z,
+ 0x16248676b6f06ec5e34994bc3115f85c8147b54f34d8500928f2fdc051e2089_Z},
+ {0x525c70a2ba0dbdd68d75640f47f13d0d415ea595f7030f533f4625c2a46523b_Z,
+ 0x58292c8675e5e1a438f49e0c05648d9a7aa997f2f1fd77d5de1944afe5d7eea_Z},
+ {0x54726d78d099007393348787a03107ab492e59690a46c87fb02ec554f2353bd_Z,
+ 0x53b54b77184ba75a3391e0ebfa6d6974db028f3f8e34bbd5460759a5848dd76_Z},
+ {0x4ac81a66903537769d3aac6c483ccc08535cb767b6b5e1ec8017a7393ab70ae_Z,
+ 0x2cb22b77a8a05d26f11a4dec80eff292633aa05553a889c5ab16b6ac6e2ab17_Z},
+ {0x21d0175349e21114988a2930b9a607d43245783cb4a0c984ce27f4c4206708_Z,
+ 0x59f1f49342cc5496213d3329bf4ca7fb0044337449c579bf53147a1dac9e67c_Z},
+ {0x167f821b381f4c8adcc39789475fb55ba639e5124fe75f26dd61be396dd5e66_Z,
+ 0x22002c87d4cafb47ac9d27286d5cf5ff7a6715d69814118269b0729be9e4b3a_Z},
+ {0x31010666c6db83a9f9e4db4c48173afd405783ac53852a6e38a8ff925528843_Z,
+ 0x1f466dc9b5d9094107c741dbf380f9fd98d8549cd50f67169901516f8cce74c_Z},
+ {0x1ad3875769a5053388a86edc85dd80fdffbbda6a456aea497ff81a0f1f6707b_Z,
+ 0x2de7cdec5e2bad56a71bd2f33a4ae4c874e1ad4210a6ac32b443cfa34e85b1b_Z},
+ {0xc489650fb7f459ce09cd05a456fc5a46b849b38a671298ed645bcdaab168b0_Z,
+ 0x45610d092b8af1c43ceed474cd17f7bbee65120aa6fa4d37f949e7e41f25327_Z},
+ {0x394256a5ef4d7af5459587a0bd2edb8acaf5ecfef2563c9a04daf34a4abe4c6_Z,
+ 0x1ebee390dae1403c0c53994e1d064fa64e20fcb45392e209b2b99486a559ffd_Z},
+ {0x410a1511fead6151e9bedb089b9832d0fe01fab76d3f8459929f767525aeb27_Z,
+ 0x361f0a5ffe09fcc3ad4eff3f5e89508ac247af80267100b69de3c59df561cfa_Z},
+ {0x38cd437c9f659e110a869605c182ee9fdc26de36baf559d9229e258267bb734_Z,
+ 0x624b1128ea7739bf1cbd0e423af92a4884323c868d2ba0ee9d362946edee2d1_Z},
+ {0x78b126e50b7042d2a019f95cb87a3213c664ca1bafe345999b1e9e2dac1e608_Z,
+ 0x19e398196b22f4488cbe854c614ad8c353839abc5ab3a4f3f5c03c16ba8a198_Z},
+ {0x6d3a5ce91132f385a91823c5c8046c4b638f5fe63357424410d901457cdb867_Z,
+ 0x7b80bae16d2d487e122495174f7a70992bc5dafbed72bf84127ead7c57302bb_Z},
+ {0x32d053a904dc4d88fbe7d0b96e0cbeca22a00aa5c79c753d52b0b60abf31602_Z,
+ 0x3af6a02e5cae6d6490354ae51185149e3fdb6d0d9caab90e95ff58aa0c40377_Z},
+ {0x49b1fbff5bdb0aa6938b066dde0ed772c0d81f9eff52e7fe038b0ccbd78adb5_Z,
+ 0x1c6e57834eb14d507eed8b36c81ddf92fa91c242467061927a742fafa82b43d_Z},
+ {0x2f28b8994ca6f234d9293d26196b43b9d1d5306844348c4a638102c05de85f5_Z,
+ 0x759cfb172eab065d477248b3569f4ff5791055f01e95fe71b94b8e615d73c96_Z},
+ {0x3c2ee954ff534f856f59188fa0f29ed8a022aee0cac52d634f6dc58cd514d70_Z,
+ 0x22bd162e74925f0a876bd8a206b8767dfdd7c898576a73a490f138d9a7f99c6_Z},
+ {0x5763a7cab001e1aaeabf9ab5b9b2fffe6cc2b299ab04ec4933da74d960e1ab_Z,
+ 0x715ee4f8ee93ab5a1dba00f0a6abc4eec47d49b61254cc27fc36a031e32f0f8_Z},
+ {0x19976ad8d7b7f47c785408243a227401996b36e47c7a78a7bc7d4256233ba9a_Z,
+ 0x896b713c5d7777b0703821a73c1d9a4c3755501042120534ff13990975e1f5_Z},
+ {0x61674b992c29827186cab5ff454758dbbed8e89bc23d0bd33193afccc3a04bc_Z,
+ 0x38e1020744c13903809ea30a0662fdb5226ae760cdcf10800faabec452e00f8_Z},
+ {0x2ea2d48bcb83c0c9cda4efe11f07165cfcbc9ccd26526e5fb12556316d4b1df_Z,
+ 0x1d2d68b74ad384c5c4a9c85453104216357bfcdf635680b40215f0f800974cb_Z},
+ {0x7881212050264c40c336ed3a15dd2cd868ec9a558f5b728869eab66e8b8ed54_Z,
+ 0x21aaefcc8ad8a161b8971d6880321781dbd939570c540da4c330922b8c81e9b_Z},
+ {0xb6be88ce0461d20f59c5199573cda0170b61decf6e8e69a6d32f1695adc4ed_Z,
+ 0x5536e4808370716f2bb3423a9a49a38ddbfe91faf3b7a35eb53d3519238b6cf_Z},
+ {0xe5972af1655eb6dde2e8c77cc58044299922441b5ee41ceaf5cafedc765bcc_Z,
+ 0x550282f37a4783dd60801c237045992d6fbe82a5902e7d837ea25f6f98c7b3a_Z},
+ {0x7efc1aad1f580d8f50274f1c114c40056be19a8c96fa8c4cb5bf85e1e7f3e4_Z,
+ 0x2689f1c3898b114d668be6413643ee9f879913d40c262541fd0316264c60a4f_Z},
+ {0x7939db98037f59b0113e9d60051f75ac9c3cfd1a3eb535c73e2d945068c6c5c_Z,
+ 0x410914ca8bbf3c65cdf3e9772ca790c19131c50068d34b7346c10260a578a8e_Z},
+ {0x225b77ad00a2b83d26690190b74867326eca4f55bfbc3a13be036225ca3b1b5_Z,
+ 0x411faafef89042ce6beb64309fdaff70fa53e9d32d79a21e7f82f80e79ff05e_Z},
+ {0x1501e64c99c8b6658b0479f2c05c9142d246eaabfccf2fcec8dc4399539d8e1_Z,
+ 0x3bab1e3339e42c9ee66c65b0b20236fdd9362d3ce786ad3a9779ab578af50a8_Z},
+ {0x59b907b941f24fb8ea2458153e55f07534b388e835af7b69f3c9f54392a335_Z,
+ 0x1d5438c4f2f68a417f3d56f916d899a6ffe910f5f2989ca31687f1b10f60db8_Z},
+ {0x2887d08a26f484546f360e33abbf7a998b7170a5b30070938b84f072c676bf3_Z,
+ 0x62a78e8d00e5d3a59e2fc424ffa08961567ba1ef24c8531cd7bceee6074a535_Z},
+ {0x6e3cc8076b3d45377929033af35aab0c6d19ae4fd47c0daf844079ca04c46eb_Z,
+ 0x7b90f338e4d848aa8f19d0b5c3bca916a2a9024acbf14bddb278bca2aa39e5f_Z},
+ {0x34844dacdd3ec54a3af328bb9d67715ab33425e194ac9977ca02ef22e8f7a88_Z,
+ 0x3c1affc6372f32a1634748124f9e1a03c4f0c993971da0dc28888b0801279d_Z},
+ {0x436b192e03a49796cf9bc5e93c88268b71c9c24f9c3a85322bba634ebea309d_Z,
+ 0x67a8091ef69d62abcb28ce5df4dc7d53f8dc2b9690344f75ecd03a6d9386044_Z},
+ {0x592d25b68baff87a6d7fd41ff0dadbddc1bd1316683de3b2d677501c0eb14e4_Z,
+ 0x27ad1e1099683f54589010faeefb19e38569ace43653be8787a42b0591e7bc5_Z},
+ {0x89a5111ae911512ba62e87b97f643c0219702f235c70f62c6678a129302009_Z,
+ 0x557fa3d98e9ce7b83b47545013a4498f3de43787fb66b1a54521222242f7c1b_Z},
+ {0x1c9b5e53377e72da5066cb08566bbf9ec31ec1877f455d932cd9b1aa375d34e_Z,
+ 0x72f79555a8bc207863f32d482fca54692825449fd8963fcea3de3a8183a739a_Z},
+ {0x574a6e05eb14591729515be239ea8c1fa9e12d4049d42876f76c8ff37bca03_Z,
+ 0x5f99b3af43ca68c1c73e8190d5f73c8de162ba643d7d5f0cd73cfa8135db6d3_Z},
+ {0x513fc5c2e16505b2b25a2f284e167d5401194bcac0dc3ecf8b7c9acb560daa1_Z,
+ 0x687ee7a1a8954d08d3856e1a16ded808e419e789736d3f55f79f7693bad69f5_Z},
+ {0x53d48bd1205274b1c2b0a0ceb3d21c5fcd7c8892a784931603240b288a598b9_Z,
+ 0x35387abd7ea59c9b956de44d36533cad1f6668c438d666651695ff3862159be_Z},
+ {0x213eb1ea99e08825110dd61094eb6e8145119dc1c507636f068730b1e086d44_Z,
+ 0x744f6853f4f02f4f042468d0739e0c9f64df720b87ed77d1979547084ef7a89_Z},
+ {0x735ef017d091ca23264ad0aa7bb9b2be3309b4539605e79ed4a652ccb2fbe3c_Z,
+ 0x7f0ccc7a5747c4e921fff97d431169f690763427e2cfd1ad74d7a0308d7faa9_Z},
+ {0x3f36babc5a30070b610ed97db44997e6d9115c9c0579ad8f75d295a17130001_Z,
+ 0x79047908a2474e32d5c712a07bf5c4ad522590bb5d6cefda410d30528e12ca8_Z},
+ {0x51c04907ae88a5926b242fb2862cb1f2c651a94e6caad5bff8601c079fded74_Z,
+ 0x10a585a269f460aed43f54c7de13cdf623fc8de5957526997278be939ef32ad_Z},
+ {0xc1e1bd626a735aa2c065831317217ecce68e377eb1f67e54ce2e97bc2ef2dc_Z,
+ 0x53c5af23a9b482f420be6dfd37b6886154cfd130794098e1f51c1885ac2556a_Z},
+ {0x5aff3b30775ae4758e604a4a6262803a545f5ef4e7855fa245ac6a6431a9ece_Z,
+ 0x39a4799e5519047f29333bee9c86c99bfa8056d4aa381c396c4a44331fe795f_Z},
+ {0x3d753e9723701a8e9d99b91bb93dee2eda7ffa5072fb2cd5c5fd99aebcdb299_Z,
+ 0x15798bf5c17d6d5880fed1553af32dd8d8baf2888c715a886575448a24c7975_Z},
+ {0x6593e5078466b07a4222d2e544da826d2c583c9cc5f2eaea148b129b00d4aa0_Z,
+ 0x11b352b08a0a61d3cd67d1dc08069dec3bde907b3da0f56de5011b956bf8744_Z},
+ {0x7a6eb353c5be9ff03fe4a06c01fb71aad2b38144179a291ebcbb2c2417cca65_Z,
+ 0x3de3ecb12f2fa699b46a9d399abf77ca17bebc3e491bfb2542dd0fba991e2bb_Z},
+ {0x2c7ead583d6c32162091034a9eddfa775b4e84b8bdbea939edb2a80dcf64f6_Z,
+ 0x461790ce40d9c276d962b2a1e9a74d66e9d7335962e234e8a2fc6963d31722d_Z},
+ {0x34285af023d9b4c2c2b88e8704bf2c05a9b553b00b2e70ff05f8c2970cb134f_Z,
+ 0x33fe678e7671760a83836107428dbade68c3593fbe568f3f8f1b2c568099c44_Z},
+ {0x6222f720a24466263db6a11842f117fc4bb78da6705f140e48869db3e087441_Z,
+ 0x6eff5b9bf3aeedc962bc5a24b66e7bdad2153450ed53a058bf2c8dbf2907693_Z},
+ {0x17c6ec5ea206eb97cbf53851e37ce391080e0d2bf1e5395610f79ab0503f7ce_Z,
+ 0x3adb71ca3523d88ceb1e365f12dfb24895453c14daf0046b2626cddadfdf5f7_Z},
+ {0x70859f9771a713e54974ce11cdaf44b0dcc3e9befa0c0834908d877eeaafd27_Z,
+ 0xd18f794bf0cc0623b711e7450030424e52326c45ba9b03341883ae4828a5f8_Z},
+ {0x2a820cfd0fd4ab0871e7b303cd545a3086caf8fa818c087a4017197da74efbf_Z,
+ 0x5f992683ff37f6c041b84bfc01503d333ac9763505cc8f69473da01812969d1_Z},
+ {0x5b0526de2c07fe7cd73e3884f642d57a0ac5e13c68590ed03a14e530616e8c1_Z,
+ 0xeec69d0cbd92c9fca31ec967dba848bec368e792d6678797946a5e34fe3487_Z},
+ {0x6cf6b3efee707210cb3a72f1e885c3d0953aefb43e5e148c740aa1641725c61_Z,
+ 0x911cb630b898e2c1a9115f9e45bafe3b819edfb1eab6e15612d14289939984_Z},
+ {0x74e913de55f1e46143cb2ecfc580f8d3d3908f200281322b84e21c989cda293_Z,
+ 0x761d2736c9ac7670ba905bc2629c6c0dbe988820a4454ff415ba68710f7df92_Z},
+ {0x44084305e0c911a40b7cbefe5f13cffe9a99375d1a584c4a2200958050af7a9_Z,
+ 0x249c83877371564708ea525b64b1e7e12785460d83364446531c9adcacba5f0_Z},
+ {0x2bf71ad4d1bee1a67fb300477029f54bdb0e09f78bf2ac2e8afc7465a7adbcc_Z,
+ 0x6244dd6cad282539049be57487bfd9900bb0d5da805d02b535096368fcb4cd5_Z},
+ {0x3a62d8f763b62def36e4089458046a49c5ecb91b861549530773e0548ff2bb_Z,
+ 0x6a10a03ba61e6ac657270465c09aa9526cf1ebe96bdecdf0e7000476a47b9eb_Z},
+ {0x284eed3a17c51e0677d4fe897f056abe9def8af07a4630e6ca5723e2aa6677_Z,
+ 0x516a06ac1d5626ed03d2eee9de6f60f0311eca703a99b0fb31b9c66b01c27c7_Z},
+ {0x2a2c63b16cccd685f731d06fe93ce2cffb358d34d03dda9a7368185c1eb0c32_Z,
+ 0x7180baca0ba81284809f92eca1654cd76b925a9242e5d5e0f18d0a55d13c6ec_Z},
+ {0x5f9466017ec09769611389ea5370ad68dda936d3f5816c9e928ff9574abf9a7_Z,
+ 0x6619b5b145bb5f4f29deb7a4cd68ef4da3995312fa6537f0d01684da4267ece_Z},
+ {0x74f229babe01b4962b3307589c1a13019134b1db6822698388bebb55d21c30f_Z,
+ 0x156ae857ab3279f754facba0db36398dffec8c31e5e160473198f2f891b7531_Z},
+ {0x334b9fe3a5fd99bc966ddd1309698fd32afd1f235062f2c275b6616a185de45_Z,
+ 0x221a60053583cc0607f6f2e6966b62fc9dac00538bb7eb1148e007a92116d2_Z},
+ {0x7ad710ba002a67c731efbaba2149d16fec5d2f7aa3d126fd9886172e9f4ea30_Z,
+ 0x3a10f8e902a7a13aec94d66415347e1314f9bac83a7db176096b809b25ffb86_Z},
+ {0x4306dd0a184a3283c3097ff8f7434cec80912e9dc04b7df21ba73fda9f8e6d8_Z,
+ 0x6d42bd3d1a8dbddafd09e872e2aa3891ae79ec939dc1b382196bc21c4ab749_Z},
+ {0x1c3f2124e1135c32a426d1d14e471edd9e0f2c7bd703ee123cbbd608e8c4be7_Z,
+ 0x3cc607a3c3f1ab68dd5fa56c65996002721b8ad8ad4b0dd9e5b1467d316583_Z},
+ {0x294af33272ffcee0b56a436de1b73759cbddebef4c07888b42c2f92b0b68e1_Z,
+ 0xd837164311d5dca8d37b99ef9eb22708643c83d1cbdfe852f63ea07b06fbad_Z},
+ {0x753bdb5439a19bbffdfa02b1dc24e8368f22d0a8276b109c11e6feb26f56f39_Z,
+ 0x6ed396231af93647633eab467f1a034f38e76823eb85baf97cae56e2dcd9f75_Z},
+ {0x5674f0cb892b733fc0b50e121d8679afed0a925c32594cc65ffe83bebe7748e_Z,
+ 0x7fbf0325dd38dd94905adab2c52758552292a6a103d9edfcb11938828e828c8_Z},
+ {0x4a8f053573a0a74251059d0229d89b6660407ba0b491779fd10f87a5117c81f_Z,
+ 0x21b70112485398bf67ec9d733df24a1df30dea718a93b786f41ed04e3ae3c5e_Z},
+ {0x726c01ec4a08df8fc8de173311f50d4f3b97c5a9cf68c1536146f827db95ae8_Z,
+ 0x15013cafadefa7f1c4e4dfdd70bd4d3979dd18bd7f0332572ce2a3fd8773d12_Z},
+ {0x38ac0fbfa98937257460db7e6645d7e5112b6fce7234813fc8a704e8ade8da2_Z,
+ 0x73c0109f86048aad08c443f781ae60ad13b99f7b9cfdf3128fe6d6eeb799a7b_Z},
+ {0x6f6d3a38621582ace092eb50ecfe9eff265df141ebdcab8653299116fcea291_Z,
+ 0x4a1bf3f39bc919c8f1b720a0b1ce952cad17f2ba98308ee6b76dd9b6f3d7b75_Z},
+ {0x6a307fc28e1df8d9ad01766419e097797d65cb674436fa1c8f012d3de2c2a1f_Z,
+ 0x26911a635ba824db004875d79dd84834a97ac12643e42829015bf88c1fd6f05_Z},
+ {0x2a74860e3336d6db916555894cc8028f41508812925db1925457afe40257155_Z,
+ 0x5f8da573f4c39816ce2dba8a20224223a7cfec53117ec78973930c0e9b60244_Z},
+ {0x4d2b49e1ed0799f719b8269f092cb489a466a645bc0ccabafdc678864c176d7_Z,
+ 0x5410083df7d256f18cbf5697ae5e52c31e075d8a3b27e21d6f5177ca882f6c1_Z},
+ {0x110ecb9fbf6c333d168cee473cc5ad98809b6cb9eb5d1f6cd28ab5fab504fd3_Z,
+ 0x7e3c54d7533d9f8c3310f219dab0cc3ea4d39b418a748eeffd6bae2b8637a43_Z},
+ {0x5be4d711b80da70e6d3ac493250bbfd16f20b25f31919b3a91cf14ffbac1096_Z,
+ 0x7f55a0919f082e8885f1515e83c5b39b6022404503507498e1b4422d79c43e2_Z},
+ {0x2605125b95ca4ba93a21cbbba5762898a7cf9e988f07ab9e64cb3868e3b139d_Z,
+ 0x62f0ccf55b9fc0eaf9736fc8ee484e2acdbe259813af9803cf815829a5e9d3b_Z},
+ {0x1092bbbf206f2a3068167c3dd99a72de31e206f6c504c071c8214d105ff814d_Z,
+ 0x309f489f68a62089f53b96df5d4fbc3ecc5a1a42eb7ece0e49bad17ad490ff4_Z},
+ {0x2abdee9409d9c92559ca3f4e6bddd649c31aa09b90bfcb4a612af491241e18d_Z,
+ 0x3ffa8eac180a29de3f8a69efca84bac046f921f5725e96a6ff0530be1436aaf_Z},
+ {0x376313f27d00bb1aae7ec991745efe6ee28c6b50de0c6cd9845cc4bb4f83543_Z,
+ 0x6a8e0a9389ba528b156fa94ac090a895d7b795818d4941c29415d9e2984c547_Z},
+ {0xa80380c71bd466a696b3f0fbf02817c9459d9798f4f3899cf32edf647fe066_Z,
+ 0x6a09805e814e7cdfc76eba4b79f1df5ae559e0f0aba9f728d3cba4ea5c57471_Z},
+ {0x223694b921d247d989a79b9b2b2f07496036c40cb043eab074a9d6a2cd2ffed_Z,
+ 0xc247217f1b1df35e30d9e15fdaadf42d6fb0edd3a5a7e265d4cdc426c120aa_Z},
+ {0x102333620df278c6714bbc880fc087db58c1b9b4d77ed4d61b32a74bfc7c3e2_Z,
+ 0x6a77d37727ccf71c2caeb151faf4404d4b94e9047f9f0a7c3966367f3b53c65_Z},
+ {0x891626f466536929ee7eadcd18b41925706dedab7528ed5f0f7abf039eb9d2_Z,
+ 0x5f73d11c141c933a35b2d0d06e5cbae614a20d17dc3b439f8bcdc3413c5ea37_Z},
+ {0x215c23fd3f073f870e5e80303967391bf173f8adcdbeec72d131c557babc203_Z,
+ 0x10634332e9d9439a321597dc5b0fac9ff478834c3d6e281735f21a4a5e13266_Z},
+ {0x21ea0bdc1332bc36e6aeb43be9071651c27e4ea2eadec636c8d818d4af72a36_Z,
+ 0x3a523d9643dccc6bb9c7c58413312caa3e60ba9c7c7f0177e0f3f469a3241e3_Z},
+ {0x60deaed1bffb6190beed40caaf2bfab5e43d3707aff7ad3f278d571aa247eae_Z,
+ 0xe41f71ff254c1418e6a66992af307789fe04d6606fb2670900bb1a089fd879_Z},
+ {0x1e1fac4a1646253fb1332fadc21fbdd3e3a24a840d129400f520ae4116a4cf5_Z,
+ 0x69c406f9f46576afad68808de0ab7e8922b6226af748e721d9097e21f1800f3_Z},
+ {0x5db0ddcdf79ffe74d6454c12d2bc60b06776db03c75dc413f5be42ea9a91b5e_Z,
+ 0x134c3d6c699841f17306835bb193785228ffe7ab212a01a861c56b086a18cec_Z},
+ {0x626814e320fb5bea505b248fd1c1389ad586c1cfe04923fe2f83173e915f4f8_Z,
+ 0x7ae407a926e887206a8b85cf485f1f327c9bb8ccbb6897024e2d122877d8ee0_Z},
+ {0x23186237dc7d3b570cea645282ad4c359731bbfa54e7f036426bf6493812cd_Z,
+ 0x7d1fbab7e61a22d3b00993290d9f4cd5d820061573e787f66c2cff9a18e1eaf_Z},
+ {0x54302dcb0e6cc1c6e44cca8f61a63bb2ca65048d53fb325d36ff12c49a58202_Z,
+ 0x1b77b3e37d13504b348046268d8ae25ce98ad783c25561a879dcc77e99c2426_Z},
+ {0x13961b56b9fc0e412e468c385c22bd0680a25624ec211ffbb6bc877b2a6926c_Z,
+ 0x62f7f7792c77cd981fad13cb6863fe099c4d971c1374109185eae99943f16e9_Z},
+ {0x47abd7308c70659af3f00fafe6837298af3cb530b6c2ba710ffd07a6bc1ae98_Z,
+ 0x75d0c8a7377aa9f0663d0c124a5659750847afabc29e39893fd27534a4a03cb_Z},
+ {0x2c6276b764fb398fa555857dbe0ce0ec18fab7a233bf23851295739801f0585_Z,
+ 0x5d8f4897ce44007ec5bfcb9aeb78b8f6e1d40a514f72d213c9300d2770d2b8c_Z},
+ {0xbce48a9bf1ba2a868ccb5ec7a281b4adfb35f880a2217d3efc41fa17ec8430_Z,
+ 0x436e8dd6339b88207b24aeb39c4497e4cecb1508e26706bd72c5450d7e362d_Z},
+ {0x7335f3501c51509f0ff19e0996eb27993d2ed57511d741de1039fac608efae1_Z,
+ 0x3e0f9b7f92024f525bbe638105385ec8cadc3d9d0054c995d5f293c2ecaf2b_Z},
+ {0x2f00685b604089a426a0f8025bd4de158a3431d7a818f92a8d12ca3330cfbe4_Z,
+ 0x3b78bfdc9be254998ac5bf09faf4b3ef85e12cc8392950f069de8d750ce2a6a_Z},
+ {0x75164bdac839e799a01b2c97a3c70a063710cbaa60b965fc68e1b7fa9321887_Z,
+ 0x366a151b55dbbeb05372baa1b753340bab038b82a8457007519406fb005743d_Z},
+ {0x4cbad4f94c301d3110a57606374566457d6f2c089364e6636599a521cd52efb_Z,
+ 0x70926c2c5d53c80bcee63dbd1fda1258006196a5e371bd7508b5c65abfe6d40_Z},
+ {0x53fa2bb938fb756579e7496527c6e65c47c59a6dd10c119a55d6cdbad565ff6_Z,
+ 0x9eee73b8f85c216cc142fbb9ea7d9bbd7cb5c58d2ddcefc9e8a8bbfef55ed2_Z},
+ {0x313e19ce77eda23700db871d0a325e84f61ed923e4cf1882d745970a5c9f55a_Z,
+ 0x64560398fbb3f03b5275bb78db48a7a93890962a9310ad5db0d6310c405141f_Z},
+ {0x14d6e814f77b60e99db467e3e137124eb796cb075b2a12a9a06353194a70780_Z,
+ 0x7a56303bfe394ab06fd59708d58511c0dff923cc2a3f7c3a33e6bdae011ebd5_Z},
+ {0x4b98d86614db4ea0304ed019aeff950392b2c9a276f41143f48564138670bb9_Z,
+ 0x543f62bd0110123b347b89ce1d9fbd794380311adca3cc99dd371fd071b670d_Z},
+ {0x3aca36203db64aa6f09b811d1635afe815ddd7451d00145838ccddee9aac4e5_Z,
+ 0x4ae269ce42b4ccb03d1994aea01e15ba1b4d87709fd843c9dc9504074bb2b90_Z},
+ {0xefc778f6a5a796cd7469732da7cb16f8626ee1461a4c2dd62ec1ba0dcebaf_Z,
+ 0x720e57f989ef2bbfe2b165f1d37fb6643c8de78c736617aab046056b08c0a80_Z},
+ {0x7416789c54a831ceca6e04e4c370c4bf66f86230550ffcb3792e726baaee2f0_Z,
+ 0x7df1bc5ce8bdba2b3fdae7f786280186eb320e7e6f882c5079a155e641c4241_Z},
+ {0x376f7203f663be987ef0f2c2cba79b6c0034f42f425c0275540354b60899ac4_Z,
+ 0x5511b4813e7efa8e0a3eb586258275b9ba47e3d0186cb980e5adfa74a2e7364_Z},
+ {0x19913b2836c5f13169f955ac17d5d1f67db6b81e763feac08dec4d3fd3bdd8d_Z,
+ 0x1a76e77a6f09cdd668946bbabe23d99dd82a414cea788265d30d7c1a3fe1994_Z},
+ {0x246584d812cc7b30321272c346bb5a29fa29f923e293cca648986586e7b3a95_Z,
+ 0x7fe28cb7cf2f3c11573dd09f892b435e9329d173440909a777fff250fdc0771_Z},
+ {0x1b0bd9e66e77b8141a657358264c78a4672ee0eab767f8b8992a088fc57982b_Z,
+ 0x387fcfcb97824bf38cfe46106190a71b240999495d9d7caae0f9b8cf41188bf_Z},
+ {0x3f78596df9a080bbee9e98b7bf6c5b517afd962d47cf72b138aebaf656e3f70_Z,
+ 0x6969d5e25f5a7f3b229cc3bbd5a4367bddb94621ac470e546863970a275c28d_Z},
+ {0x32126b03e8781a20a44a92e05c16be70c501bc4e0ee8c09cf0c997ed628c3a0_Z,
+ 0x100904de59fcaf55c18b7e0866ec50715655793238fde686b9b8d8636fb80c1_Z},
+ {0x29bead2f77a4e4c744d4e83c7e439ecee03980a20006da9a7d3c57c7b714636_Z,
+ 0x44be13d072f7c4f2396dea08a19cbe4acef8a2e072e0c038dde69804ebdeb40_Z},
+ {0x4e912475957c58f8ae120592076e6ffa50a4405ff41f5bfb8d20b2c0a28efd6_Z,
+ 0x2be9f3c03d3696749096b85667fb5044bdc216474a9ad0d8cbd1eace70627ca_Z},
+ {0x15df84721fa5bb2994557ac6cab5444bb50d539f9627bd373e77b965c1d1690_Z,
+ 0x45179abdad31f112c1cf42ef6f17e641d9eb6d19b32b3246c1465f2665fa840_Z},
+ {0x795e85f1015d6f85ff303321b38dcf77452f6fd2b5669df41d715fa115ac938_Z,
+ 0x674da8ce723640f4aa81f3511a7d0a0d225997db7c581143bc009005b365d89_Z},
+ {0x2b4d941c72210bae832efc47665bae7cf783b4c1904f51bf5dd512d72bdf108_Z,
+ 0x309a8300fd432c05f8092778078c26d13719e0354eafc4dae1ec512993c9491_Z},
+ {0x5d297cc4ff962982a39ce89842cd87ae01875bc7710524f263eee3ff5ed498d_Z,
+ 0x36d5336a6f51e2ff5c6995bc8b87defc61a05251103eca8b32ede509374e9ec_Z},
+ {0x26815b43b017a41a5ce4f4971cb9fc9035c45f22703296a6996fb98adf65027_Z,
+ 0x9433f389903812b8399cc6740ae13abdd4ae8aa0d38b12d1b0a5a3f90ad2d4_Z},
+ {0x4b3ad725435dd69c69101b3bd073f8ce3e8559ea73f4d9d944c88ec0460285f_Z,
+ 0x19416e704d0a61305b5f1fefcf037438ae6872c409ed787af13f496eb5a94f9_Z},
+ {0x3c40dde269f0a840d857a2dba52023521e3889be597a843f062055fd79631c2_Z,
+ 0xd4a04943ec16198cef1f05de15ecd6f1bcfe33a41c1502d12487e5244963ba_Z},
+ {0x7d418df02fc1dae693c9de50932bf1f182dcf58d6ec0695c20726520fbaaa1a_Z,
+ 0x60a0699f233b5cb79e121ef4d060088e1262ccdcd6e471fc6d7ea0febc21c45_Z},
+ {0x1507ea3ce76d90dc3332dfea74d452b6fe76670eeff3ed208ab049c6ab12715_Z,
+ 0x6095740c9a874b6242246e6a98816a239d8ea4d35cd08219c2c2f1870d68ff3_Z},
+ {0x5458ca1221c99bb056c14a0ac7f77ac45de5416a8639abdc70e567df7ce6f49_Z,
+ 0x271b0470a1ae9fba5abd17a016e079684532a17c553cdfcd1c17dd07dbee098_Z},
+ {0x419375e9e0e03c5461ed4a1fb2a40b1c4f9ecb56947c44fd4ce47c69b2e6663_Z,
+ 0x3b4f29ba97afc4e1f691cb8e1f3bfdc074334d7f9b2a464e10dd647468102fe_Z},
+ {0x25380328908ad9f12905c311507d8125dd586607b232ac544adde8338c6e130_Z,
+ 0x6ecad58b36d5afdced17f889337a9cb1cffb919dd6dc03cde7500eb6197e4cd_Z},
+ {0x150fa7730e4ba5106eb903b351a0fe41fa5f0eca3b961ae4697c3946f5f111_Z,
+ 0x44c787122ab138ddd4c02e1a61e7ea4b6e24a145f1f4cf7022ffdad4db999b5_Z},
+ {0x40bd0f774627782f59bd79a92ba928cefffd2f703771552384c2da1e278ef2d_Z,
+ 0x134960affd67335df6e6ff910a4813fecab596aecff7a1e81a3a2e1ba838d2d_Z},
+ {0x5f254557c28f25ba86a7956aeb684c03ad4770d1e6e85b313ae414a200ef5d5_Z,
+ 0x40d767bdeffafb30d40abad22ff189d1ab122eaae73ba64dd5ee2b84abc007a_Z},
+ {0x103e4a12d4c51f5f724051a2834c64dc8fe1a9a6cfc860deecfbc525c3432d0_Z,
+ 0x3603dbdbd7e45020d7b5dfac3b7c0da26b10abbec47a771f7afe85e07da9f3a_Z},
+ {0x6129433d07b14f2ae1c71e332738f945c4d1536f9c89cf58eb9dd789578f8e0_Z,
+ 0x2640cd3fbbc579cd64f64a87f9c63e49e27289951bc3640dc6f4799c47667b4_Z},
+ {0x5e731a8c7eab559932b816b92deeb7cf012183e3012c15ee9adbcd172625e18_Z,
+ 0x1c9fc9522014434a5dc9513d5cefacd2f7eda4ba9cf46ca5f941db0fb9c6aea_Z},
+ {0x67cf7e4d99b15c3979a012ad4646d077b65507073c0dd0da901e3cc4039e6d1_Z,
+ 0x13fc08992a882b0aca6af476a597c3022fa91af7477bebe4554d11af7fb172d_Z},
+ {0x3d954d3bb9b7f8b10655d0e2b471e3e3fbcb479a061a7604b92c94b99640494_Z,
+ 0x54b9002f0e61354d6a9bee607f3aec9575b2fd227c20a131c92c3ebe847d461_Z},
+ {0x6761c711ab95f185943ca0dc50a11c00ee7d197fbe4b6f45d2a7dd81c641bec_Z,
+ 0x540db2e2b21c6e22a1e7d7daeb47551101c9993b517b88afbc2d0c54d491a60_Z},
+ {0xd835e09e0c1b11265e33a218d5a0736353ee48b534a9a3caf3379509b3fb80_Z,
+ 0x7a293b0f8e14d0e93822c446fd0d2cf2d6261ab61b187583b85456218ab354e_Z},
+ {0x5b2431a65cd0c76a94eff28d4d77257639c205b29e0372fc3613ec19650d46d_Z,
+ 0x5f5508dae26adfd5feb5ee120eca9e086ef696fb2fcc57ce897d408a9210bf9_Z},
+ {0x217a6c9739f11f41625c4b0702d7122ec1c432898d1b0501692e3c021e6cbda_Z,
+ 0x2d1712f78263d0b175c8743e7c77d8fb9d15469445055672d9be4cf259f76af_Z},
+ {0x70af5c202d525c1ca0af1db4105045874c30936850bd9590f3920bd135df01b_Z,
+ 0xcbc6b907b3c70ffa773cfc09a9a2a6ad8e6a0b566119a922c609385dc8f010_Z},
+ {0x4583e0265583b943fc4e26643d9dac77ec5e784790e140cdce0690d8457dc3d_Z,
+ 0x66761e99440da31940fcaf5c02e261c8512db629cd4ce83d5a6afedcbc6365d_Z},
+ {0x6fa066b6070a2e9bdad2e3c88f152af7d217cf14c4c0e2c0f0cb8fd748f2146_Z,
+ 0x2463c1ada175cc6e3356bb9c96844ff6a67182e4d5fc26f334fe007bc7a8644_Z},
+ {0x58158da3994a8e95cb6aa3f638b6981644d4fde0dd1badcdaf5626ba2d4ab6e_Z,
+ 0x3d3bbd4277c9793c45be600e8fedc66a8fe55becc2c65bcb7c11b49acf26a73_Z},
+ {0x2ca60069324a91e1a38d3663dfdb47a27f65c3b7e2d81de1f3f65905e842e09_Z,
+ 0x3f2acbe89b8a75cc67a049d53e09e291540f7899908f4ce92c293fb0af144a4_Z},
+ {0x71e44d03108b6fa350f2a644ab2d845dd04c410cfbb14f9f72b7b54a52dc76_Z,
+ 0x5a5ba61d27308a8ff7f0403c5c46e443960e7579622c75bd112299c54a434de_Z},
+ {0x7eb7d63173c028985094d2c4581b73ab150d0d3f2ba68203d8c639fc013758c_Z,
+ 0x3cd7c33bddc8fd4342cb1239846a4679cb2b670d88d2d7f75360f887fe93b6b_Z},
+ {0x4ad4f2d6ec049bd21d655c633843af4a8ea1d12eb9352aaa044419d91b26296_Z,
+ 0xbaa70201e2b2858a3508ebbb753617e4f49aab631ed2d18e8c3fe78dd29f4d_Z},
+ {0x3669acaf65b4422e1dea26d1dbd1b92f9ce238b61d12a29c3138a9dc5772048_Z,
+ 0x657c3a618e530c8c3f57a5d6383474fb3158e5c2cfbe1592fabb6f3a22469d4_Z},
+ {0x6d4c0ab00ba75074173d4bce4fd82f8623d7a60444157632d975c2bde0b4923_Z,
+ 0x140a60490c60f77b99ae87687a8025626bc8a61d084a546e0fe757e4c966c3d_Z},
+ {0x54681129618499a6409b1c52a7ff03ea56c95e4c5a8fa4d86f10358eb2e9707_Z,
+ 0x58d39d95a230a8323bee498fcea3f15ee089be54cbeca2e63e54b764b08e890_Z},
+ {0x2b4930b3d47d9cba98980ad642c8cb0baf7d1e09c13a2d715d072380bf09400_Z,
+ 0x6a46ab2f87f23b11d35a6ca43040c73543f53e972132277a76ef1eda32cdc34_Z},
+ {0x249c9c191d82d25b864e6debea7ccf5c39b4a355dc4761510253522ba8ace67_Z,
+ 0x114dbb1963bd906cb8965f149d1d2db7cb86fd8be03591ae5f2dd3489f9f3f6_Z},
+ {0x3cc5f417362f8bb2791e0494d6bd2fe339fbf33c83ee7f70c484b43986dcf10_Z,
+ 0x2c47a96a94993ed1cf1f07b8fc68b2ae45fe691e2ef4b2c4767b9027d645ec9_Z},
+ {0x4e55c8fde1b0dc2e2b6c5508f5718eb29edf0e97ae55c2470196dab114732bb_Z,
+ 0x2eefc04348a85bf96bed3dc61a709cdb0a83294ff004d6813335cbb7aee7d31_Z},
+ {0x6e9d46468f414ed3dec19473dc189d1a39640acf616234d88a964a0c32cd86c_Z,
+ 0x49dd61ffd5b0f96cb69d115649132f558256166a798b33c7545cc64882d3add_Z},
+ {0x78bc532b10fc07098403752011edcdb884ec456a2f8899edb88023bb5e43dbb_Z,
+ 0xf8676f26b26f97d1dae5737b46d0dacf32c3aecb9db9df41a860f85c6c59e8_Z},
+ {0x3a2903e3dc45f773bc3ac72728fcb09878e95a0f36e7b798fba2c77fd8f2abe_Z,
+ 0x369e5f8b59923476657a3d6767c6cc6645ea18a5853ce9705ac3c193b873d70_Z},
+ {0x5b715fe20bd24e17eaf74bc155e847a2a6e63e01374cc4853b469ed2304ed7a_Z,
+ 0x31af84b404359c352663e76a58e5c98f08fdcc4b59990a74a3b81154ed372d9_Z},
+ {0x3510d1233edfc535959069b71fd8c510fb94caae7607a83f17a0cad1e9c277f_Z,
+ 0x1eab9a957fa3a9df1f8d8afefcc98be4792a5bd61a55288dd1eae454ca66f6b_Z},
+ {0x87b84c4f139f80f8e52ce24393d895350a56383dce3aa6efbd9757efb70a4a_Z,
+ 0x1b5032dc6eb8173c4372ccab7300f1c6bff5d52934058f2d6899c28971ecab0_Z},
+ {0x3192072e020733ddc8312a7819740b72d8dacf90a9aaefbe5e0a629ef95a25c_Z,
+ 0x85fe2f9877c5f99fa170a6f2ca74c8f51fc5061f9817aa6c065bdc3834c9a0_Z},
+ {0x7e1240bfbfda43377c9c94a364fa768c95a1ace83f05641e30e7f55682d3bf2_Z,
+ 0x162fe3ec40839cc13f342b0efe8edf4f46ad5557a945db40cfe1d59c8e1abd5_Z},
+ {0x70debbaf395d56b5283cb2e340e04fa9e7b67cdc910f15ad679a5f9da45689d_Z,
+ 0x2dabf0bb233e012ed28c7fce1feee8182ca6b637f0ffd13388a26174e836e53_Z},
+ {0x7f1985327c7a51b43b0ecdc776e2af9c522546059309aacab74b3ddd51e65e3_Z,
+ 0x24cffe79e071b15fd98e33e0449aab000d39a9141bcc6891135fd27c1a255b6_Z},
+ {0x5963fe0e56c20ba85f32067681d283f9c6beac6fbde2c38dd88bc5094a045d7_Z,
+ 0x43b2a353e8dea35b097c7145efda102673de92d02c082511cbe905f5e2983d_Z},
+ {0x6b0765d38659e149a115c8e3b94c0287fc1ae2f191e03f08167825aae8b2499_Z,
+ 0x23aa97ce8127273c38fc9242ce987968e074d6dee8f109144566f39a0f82e76_Z},
+ {0x96b91d630f4bb0e7d6c99efe035b17c3b91864fe890efe16bce192194b430b_Z,
+ 0x2c35efe88ac0c3f0f75f0035fa2fdc84967c0768ad3ee4662572a2d2cbdcfc6_Z},
+ {0x32c8087df54545bf151c48a7561cc6626b5a8cee958159bd45bad5457799190_Z,
+ 0x3fd90e79f86b4d62dec76bd049e89ceb796cf5edbf7dc7a5f590ad17757257b_Z},
+ {0xc4443e69dc789f85c674bfa98f6220e107c89a2ed56f39b06a0af6a2e0ab1a_Z,
+ 0x17fda5fc4354a26a5dcae479028e62e361deee63f04fc42ce790481eebffd71_Z},
+ {0x6372a79060e52a49eb5b2b33a51bf4e9291656c70e6c8b86b970af8436f0cc0_Z,
+ 0x2f25f69b54b8c99dae634912fcd0e4662a696bd9e84bc8680d950074e816e59_Z},
+ {0x1d1212f13fb36271dcbfdb08f91a518c27878b599971b6698124960ed84abde_Z,
+ 0x6e882ce7bb6ec9a57af2adba1524ba41311b927a593c218a9b727965534bbc9_Z},
+ {0x411525f3c72367b57275e47a680853a1c7483f1cdeda5efa25792455c281941_Z,
+ 0xab0aebfc77d7acb1e82cb5facb68156e19468e6de71a32799afb0873eaa861_Z},
+ {0xca0ae3bb1d6b50f26d8f08dd29d7ae1a4167ded349d7ad73456e50cb1ad1b_Z,
+ 0x51ee2496bf3c6ef515c516fd82b4d248566c29142e4d7f6fa160a2c1427e354_Z},
+ {0x39f896b3cba0b8aeb8b7973b57adb975cf412acf51f4e2e0dba239d7ed3a131_Z,
+ 0x762df591bd79c8ee306797c4ad13734937a89517bb53c406398f65b40bdba6d_Z},
+ {0x1bf896da13165fa9c669ead6929c44f61361a24cbee12b1024be5a0cff19864_Z,
+ 0x6b411e8c0411077a328f621f8aa408bd4c9d73dad2b499402430795a7718b97_Z},
+ {0x9fda6699915b894342d50e584750703147ba4ed605ea5b2d1395b7111bdd7f_Z,
+ 0x6faeffb7106923808f12a0ca3a85c64cddaad806dd969c90900c218528cb21a_Z},
+ {0x30c257bfb5a3f4cfc2406a1cf0da757bc5ba1fa7a5cc27abef7bcbc36afc9a7_Z,
+ 0x275e30a10990c7273bc3e62bd3d9487c2d8ec436acfba6ba06a34d92761613e_Z},
+ {0x3c18d7011c060f09a93f833b195644cf118c046887385373a280bf82f4b70db_Z,
+ 0x77391b5f2d28b7ef75fd811aa9fc97b1523d3f2467ceee79df261d325c5f125_Z},
+ {0x52e32781eae622e13e96c778d7bf25e9edbcaa8de5c85da593ada0d4380e5fe_Z,
+ 0x171b3dfd6c8e1a3c2aa3765c38f9e1486e5baccb9451c228d8ea38f655cfb0b_Z},
+ {0x281545e88937786549efc352c25e2c994817d2b1aedcd003c79d0bd626514db_Z,
+ 0x5dd931d55fe2724f164a275baeba8c2e5d61d44675c6697fa72dad05f85045a_Z},
+ {0x6a454b70bfd375c1ff4a5e9d2a7b500c3bdf6121b16f56a0ccdbb4073e12b99_Z,
+ 0x5812b5a39850b16364cd61e949b5a02e285d96025a6376dce578bfb9700ce1b_Z},
+ {0x3432b1c8b9da9feb6098d296892842835854a61ba5934a0b78cd3b31c500a82_Z,
+ 0x5f3b1ead34dcb6e87890ef8fb349f09931c3900d380069c96f25ed9942d177b_Z},
+ {0x1a40e292116c4c08eb3f7239b5f40c5d45a5a1fe67bcb09cc41d37235efef5c_Z,
+ 0x71f125041481517ee5a8ceb9db852906657c2088a5bdb4ca4086931c9f17bdf_Z},
+ {0x157c1df7ea04cc62247a5c1f465e0dec62c985922997b12426742243b161e6d_Z,
+ 0x21af16b389c76b3d067f500b0c7ae96323ff244a30214b671f46fb0e8a2ab15_Z},
+ {0x9e72f8c84f2c94f304bd24d110a191119d476f849b69e24e42be641f8d84c3_Z,
+ 0x4181896441406cf25fdfd7ed916d280842ad6d2e93f703d28ba9d00284dec28_Z},
+ {0x4d21bc613726f30cd426926a49ae651a9a53b755bacb3f25c5442c22f91d07d_Z,
+ 0x206e4d57c48d63ba6a25b942fcc6c4abba62d304f3895977f095a5b1d8b859_Z},
+ {0x6889df1f9a1ffe35e43804beb85ace5150146fc7d2937c5cc3e311098896564_Z,
+ 0x3a2d46e249f860f3386710b3ed998198bc944fe1b1959e52afb53d991bd6063_Z},
+ {0x5f0c378443e572caff8eb4fd607752d171354ff9d025ab9ba0c3baafbcabba1_Z,
+ 0x31d4361df3799be7c0502c958acfe18e399286435efb97da6e4ffdf277853ef_Z},
+ {0x7071b515f67b24060d9a3de742a54d84e82735deec91dc9dfa04026f612bea0_Z,
+ 0x3a661a2dc55805508873ff0e5b6be5bf307f0a604630afdadf88c111cfc991b_Z},
+ {0x3827cdfa765694bd3b447bc1afef9c0c5594565c32497262013eb4b97f65ef0_Z,
+ 0x24f7148d111016d6d2ab9f54f10f75c56d2aff9ce5da6db6fa14f7da285b882_Z},
+ {0x7e941bc3fe6568fd5f0049d3a2edd4aa4a021eeddf1676535d3d242c17508eb_Z,
+ 0x2bc250721f54ec146071a7be83b55574f936a74e93b56e7341b0b927c6f37cf_Z},
+ {0x5a9bda630732c2f095a1f088b9acd7526f4d7657b588c57a34aca9eaef7b673_Z,
+ 0x1938ba9061f7830e7994b213e56c761bc7efdf0372d0036b5130cedcd9c5789_Z},
+ {0x26e4e120764c9164fe4224ece58a87c4a5331116d29f38a9a7df4ef4392cfe3_Z,
+ 0x79cb5a3f67a13051e0afc2fb22c816a02f6d728eb155f59e8d7685010e87ebb_Z},
+ {0x180a677de26e87c5b8c92a5f54615cef6a3e28ec45df72d2f56fd0848742c2e_Z,
+ 0x2ebf878e85a7264c270cedf44c12331827faf56274698fe81bcf27f4c3d5872_Z},
+ {0x692cbd1845010fc701f0fda041f6f74567230e8d42c6dac0d214278adfa0bed_Z,
+ 0x5ea8cd6361234bfb5551e9447b94b4926f928ab2f1b7a657d306ea79b67ed1b_Z},
+ {0x420e0fbd48935ac36a90a262c90affe020254a946fad0ea0a0642b61222070e_Z,
+ 0x700cfbc7fe76f5f62a125ec467ee0fc69ed175c0fef9accd14c673c483c0f98_Z},
+ {0x7f33c220700c354e9a50cf7f5afb4146d810b428bb1f948632706b0a485e88c_Z,
+ 0x507a3cb76d1d1d618e2d2a9e8683a6d81a87af36b32488997b941606fed9894_Z},
+ {0x1167220284cceaa7b11c56fb0395f0a55b628a5df7ec8dbfd7a103d9044c05f_Z,
+ 0x656bc6e80c63d246f42c79dc5f4436a48c35c77217810e4dc77149bf197bdbc_Z},
+ {0x72ed3cb1f85fa3eff8e0c6b6b5f76e55d880657ddb63b28cc306659c0f43960_Z,
+ 0x701677617ef5c6f78ccb3a2c5f05fb3fc01accaa1b0367a1fb988465507052d_Z},
+ {0x3c00590dc0c634acf60ca6b254243869e9d41beb438427538023956843cb827_Z,
+ 0x3bd96f1f05814232828baa730719213b8eb29f0cde1e5f3c127f8efb690e5e8_Z},
+ {0x56a995ef7f75038c5746fb3bd1b1a77a22a026a8f1dac3a73af306d116b434c_Z,
+ 0x48356a48c5d86011d80a2fd2981d4c1a745c504c8b881b423c31b8ef76aadb2_Z},
+ {0xfeadf0259835b42ce4853acda2db2a1e5d074ec7a9339b65490458697e9438_Z,
+ 0x3ba16e49dd440a953518673e9a23b80395cc44e6ff22c40ed821915065272b6_Z},
+ {0x418b173df47595f35548c5f95f338d753a39bb76d53b85a32edbc577f6fce01_Z,
+ 0x1110930e29fe4d70816a056e1eb93d2101615639878349bd516307bd290da3c_Z},
+ {0x3b58ab78f462084098ecf0af201b24835ff0a8abf189299be640a7fb5ffc16a_Z,
+ 0x74041d90572f93d9d70c4ff296803a3df8bc803d8a32ca570165d4c5fe44f8f_Z},
+ {0x447cf5b60081e6b469e1954ea3aa9eaa6def674afc3a54f307d35ae01738f2d_Z,
+ 0x82777c181b4e2b351ff0bed0674bfef8e40e68dd1622fed5a69d72f0446f7_Z},
+ {0x4099c98f120b2ca44314de29a169547abd2afc30f345a5c53cc8fd6b0c27019_Z,
+ 0x720148fb6b50c90e64c4bbdb9729a81f5e3b54b22b9e31de1507c8158e9fbf_Z},
+ {0x578dac23670640909fd263caa2a66e7022a8c0c3620f44f75088df308db177c_Z,
+ 0x4996ea791e650c820a2314d13775720332176e7af68118d017e6127c8ee01ce_Z},
+ {0x35dd513e684a32e7bc025663181e50083ddd2af8eb6e4ee0e6756b591d91d81_Z,
+ 0x3551f2271841091f685d73580d01cb3cc862a0ceacdf4b45b19f4acb75a1565_Z},
+ {0x7fa77248b4ce371d0a655269bccf89c20dc28b79b4c55fd6f7b19b636692b51_Z,
+ 0x2bc688eb8e85702b031d92951adeae37ff22ed8a8b23e1c89a145c5342adf2f_Z},
+ {0xe361deefc72b2a8c96070791093414ab741ff9a2791a45d8b58b6f06df4c3b_Z,
+ 0x9ae31cce210ed516b72d092d173080b63a3472b8d9fc02a0ee13ab51dcbdc7_Z},
+ {0x7118de6e756cb435def84c500635de5f7c96645237d47faf912d95b0ec1ebdd_Z,
+ 0xbbf8a8994c518fb18bfe2aa373b04b98520f2b9698cd43f5000b86ba2d52f0_Z},
+ {0x5e6292843027c1509017bd5aeb42ae42dbc4f57392fe45a44d35af3148698c6_Z,
+ 0x5d3f2803337620eb19233dfebcdc601ef2771b8d2a0af945eb92390ddf375e5_Z},
+ {0x4279d92757255e47e88796ca9ff2d61fea8537d491f764278bbca797fc5cd85_Z,
+ 0x3ffac2d366fd1b050d442098933117a309d766807f6e57dffbdacfd1957cdc6_Z},
+ {0x53d57cbf5be2ee853ac196d3e3869c8f3d099dc8de7e9f71372adbaf1febcd0_Z,
+ 0x3819bfec3da20ed253f15529f902b1ebbe2247806711a81dd9e014e86dcf8b1_Z},
+ {0x634755815094f9314d217b6b20d852f3a8144667f4576dc45d6b9c0b9a35553_Z,
+ 0x1951384679e12801076733678c5b3bf1bfb8d1242c36cf283a7d0fb662060a3_Z},
+ {0x29c37191896170f667d1a828e1ac4b4d0af93f4e99d881b331295a47945a174_Z,
+ 0x702b884cb87ce396d83635d1f00c6e689f86dfc21c2ec72b81e3d03881e5a7e_Z},
+ {0x3ed8641f61929a350166f1910277fce32b723829b3ed16749328da154fa01a9_Z,
+ 0x73258818d0a9589387e30d0f39eb636fd4c9511f1a0f7d14a83c148f991b834_Z},
+ {0x81db8474b1641f0be808e74e156c61ed82572138a2479f75aa012180b1ea9c_Z,
+ 0x6553c3d64eda74afb828670a377f5e7d5228cf43219b46af5046664f11d8da5_Z},
+ {0x593ad9a9342864f04c172d2fcead3160449a6aec2e41c83042f9a71d3a700a6_Z,
+ 0x2650f17d1c7f029f1d88f2cac113cc2874fcdf5ad907eeef82a48ab03d9f240_Z},
+ {0x19595dedbd11d08dd365dcffe37dfc0cc923f9fe702f6f92ea14588626f7277_Z,
+ 0x71657a8b7e6237c9c4476491a4d6d5ba67bcc946a05f681a863bc143bf97a2f_Z},
+ {0x4b95feff1d5b9ab1f0dba108419e9c7033bd9646fd5e61ee17087a1e110133d_Z,
+ 0x7689384f6146c35aa86b3224df6601fcd15cf8cb484cbc6bcb4c9ef9b694d00_Z},
+ {0x857e8722c42cb30c23e34af37fe4dc029d59d5258d817921c108dc6cfb6e1e_Z,
+ 0x3ff63c305b549f1cc06ea56a7b7c2797aab60f216a83a2578f1ce46ad075366_Z},
+ {0xc62e4e1f2bd1156ca104b98ae8ae66d836285ac082b34c654841908b051b8f_Z,
+ 0x5354c777759d8d740835bfe5a6fef8f1f68eca4e43bab151d52a74d673a9d12_Z},
+ {0x45048be3a993e349be630354c880c19edc59b40225c0fcf13813e95a5bd524e_Z,
+ 0x4d4ab0f6c8762ab881695b8fd52c6c2a62fd34b2dfaf6f794dc1428e8a18b02_Z},
+ {0x479ad16fb6e221c46f11eaf2238f7e7bc612a85859724c907c844794a590f52_Z,
+ 0x24a95fa796abf1adf48f3b0bc2a9805afad4f3597952bb6f600b7304e566c9f_Z},
+ {0x53c839c8a28f32af284a25b635f3c7664869e06fa937bc7562676468e7b7377_Z,
+ 0x3ecf21c45ffe4648d1a2370ece904a566c599dc461b0e55628932887bccb08b_Z},
+ {0x798f7e55463a870f0ed5d97072996da2cd71df97305ffadcc4ceb09e7101979_Z,
+ 0x614eacf5529226e69910a12204fbc916e8f4aaa7fd2dec7b41c51a7df8dc103_Z},
+ {0x7718564fda3ddbee0f34b8d830be5fcbbe21264d9682339ef48bdd65cea4c7b_Z,
+ 0x6e0b482d1bdb8b6065f05ae9541862a186f5908da3aa6f684cacf22a5537be1_Z},
+ {0xd7fd4e51ea5bf3d0b1814f4befc30bc4b7cb141dcaca85c8be6b840ba95ed1_Z,
+ 0x22d5096a0ae4950e437a69b82f83da0f0163806429fde5b8ec1eeb66ed0ab33_Z},
+ {0x449c1893267c5a83a8841c52dc13d6123d47db32b7e9c6b0dd713771301ce6f_Z,
+ 0x777bec62dc6e8bed4fc9736925e70220537949191bc07d27fb20580a69857c4_Z},
+ {0x42afcd8e0e8698f20e7f3b777c4c3ccb20d2634ff535cbdb3d8a1decb4bc142_Z,
+ 0x52b840f9b93f808ecf58274c15885952155ae91076106c027300d1afac2ca1a_Z},
+ {0x1f8bb9e993e0d60ca720cfed70a295c195da0abc1e7d8c22baf868995d5ecd3_Z,
+ 0x53a69e2327476f90a9261fda7cf6e53cebb659cb134c2c7964649a46f92aea7_Z},
+ {0x3a3782a50a764ea48da8f6e45cd5dc26a319af1ef3dd413604ad76e0757221b_Z,
+ 0x476ccc625b90b86f7539406327140e075d00e7ffd82585d8b2259582ea6e645_Z},
+ {0x37ec303f4008f11731a3ed8ea86d9dfc50d4531ebb9f2ba69a57ccc2f95a22e_Z,
+ 0x75f6511cdb286d472cd840a512d6bb600207c096b9d3bfa4862e2b7cc1e7507_Z},
+ {0xdeea487783b19deb09eadf31b1a67808a24fe73a3a2be3038c6e948eaa1f78_Z,
+ 0x39e784ede26db861e359134aa306c70ed30a8d3c205a2a9e3750958c1a1c393_Z},
+ {0x7d7cb0b35fbaf51b182cf49d446f6cec06c9956633e46b3d82a0e55e5e625ae_Z,
+ 0x7dc3b01a79de9e48b18e734f05895860215555788fe2308e68822500243d5f1_Z},
+ {0x633132ae736272a3b396bd7df7964ba2d796c2280394d7b6a2e5ccdfe167c27_Z,
+ 0x41184527848d33f60da4f25ceb955b6c33d8a254421ff1cb1db6efb9a2f62bf_Z},
+ {0x648ea258f205c068fffe4630b4138f2eef2ff7d7bd313828525f6384fde66c8_Z,
+ 0x7b4a3f7c56944e49655fef1e57e62ee44cfe65268eff9e040f0280f2b15866f_Z},
+ {0x2586aaaa22a4496b4584dde931c62f9f0f429b671ebc5700e90e5f3be3a3501_Z,
+ 0x266f5b481b199d162999fd5903ac43a60255ec3afe7125cb9a550b42a87eabb_Z},
+ {0x7b0fdc234e584225a99e972eb3234356302a5f38685491f135df05657e97fda_Z,
+ 0xc8fceeea95d693872c5174d4468bea78aa87de092cbc8623375a4b3b16c313_Z},
+ {0x5da435d80df1e3f43cfc2fafe57066b6f5cccee278b13a66b300eff7b7c7944_Z,
+ 0x231f2a772c0c9a50fb17ac251609022aa07881f988666d47daf1898f80c0829_Z},
+ {0xb9f6edadae537994f515178794ce527d8e836023a963807ea87d57a50efe41_Z,
+ 0x64f5e4604a82a441e9f1c9a8c2a2815c741c0e5e2d0cb79be30889b82fe4068_Z},
+ {0x63b3313fcfb3f7c45244a8d2ad18a3904a739c49e338e7a06f55202090ad67c_Z,
+ 0x45fb0e99c46700e4a89c80dcdb381a4a0499fc79c414ed7f839c6767a9fe588_Z},
+ {0x3901929f0caa74a6967e0ff50c8f45f8381b4db36682e87204a36ec9da2e63c_Z,
+ 0x73021b4c55cc3165f2c1cac93f298d95c7f6e5306be9b26a2c705371b6df736_Z},
+ {0x3b65dacb517b90cfbc26d382b6db2d91e6de306dcc6a18cb9ee7cb6b0827a48_Z,
+ 0x17eb8927906a696b313a4dd294db6ad80df7a9b88392408c0447c25ebc74361_Z},
+ {0x68194f5511068a47db0629a27160caae1cce6a881c6fed8cdead07cfc1a9425_Z,
+ 0x1f1420e9cb1c6c4922eb6b40f10f24f15dada3dfdcddaca674e73383ccd78c7_Z},
+ {0x5e83fe32f18c97e8ae49712056b314aa66d932d5a77dee8e1b9eddf339da897_Z,
+ 0x40f69dea65ce3a43675e75a263094f7a32364e08bf8a81991174c0e107f9d22_Z},
+ {0x3060eb23ac6a6caf9af47337f0b510f959b8bde88e9c61b0420ace71b7be022_Z,
+ 0x74b621d1a9acb7d981737684822dc4eb1554c26128ba6ec7a597ffc4d566412_Z},
+ {0xee062abe0510b74de3247e25ee93af0ef6b4b4548fd1720b9b96e26385b505_Z,
+ 0x9bf5b33c229c7c82513fc5cc572dc6e633fa53ee4b72c29387b3456a165395_Z},
+ {0x4059eee4397291986401d80515ab7a155c747981fe3a547f02eaf658d83315d_Z,
+ 0x583b5472ddafeb00fede29a202d079f0a1599e898874e4dc90244e697bc5604_Z},
+ {0x30df889a489a4ea1c39631247a2588b100736664d5d9b468c4aa46927fd9a01_Z,
+ 0x35f98a4e5569cda355997d7c05b92b1afebc0f0eb00c18d627644d4391f153b_Z},
+ {0x1b4a7b89426cad1f1139e4a95e424912b28e09ea4ef920f4290670557b340b4_Z,
+ 0x6deeec12bebac8eb5244cfc774122475a7bc94aefaccb4bd45b676a09dbd5af_Z},
+ {0x63f7772aa09ac5c0a88d93f3a8cc60b0df16bb180e99ef00412c38179166e0c_Z,
+ 0x33e86bf3634f4eca8c88beefdee250257c486ee48fd28e323f1394a5e1c1076_Z},
+ {0x247588c4f0c7b37c917b4f42e3b21658097ac7cd0e7644b086b912998d5af41_Z,
+ 0x5e1c543b5b8561a1c59cdc8a7395f665433989085ffbe84c2a5b4cff3b99c48_Z},
+ {0x4b9e758b286080b937b428cbe678628bc992c1e6a6ffc6e244792b82150c35_Z,
+ 0x53acf792531a89bedc300b80e5363744fd31af737d8dc04cb875af9a1f22cd7_Z},
+ {0x29acdc5609d77d4f6eaa984119918f30450143dc289571c0524816273eec65c_Z,
+ 0xb4cafbf77273cacbaf450c9ae651e2f3dfee4fbbb9067effe47ca82f12ee58_Z},
+ {0x2f293cb26feabaf6274a37aee59bccb9efdce19554328c077c21eabd5729ff8_Z,
+ 0xfd36c9fbad079f5bccc20eedda52b98fcbdcaf792face8d234d3d402542e04_Z},
+ {0x63f18e5b9e5ce0e8752aefda835704e5627f95f1b73790fbf815d33cd941acd_Z,
+ 0x3630acaa86d47055810a49071a854036627aea14444945c81aa120e8505c9ea_Z},
+ {0x51b6a4d7a48d4aabd8f6d1355e6694a96e8e0259b58a175d56ca4259d7c95ad_Z,
+ 0x4dd595ace960f4e10063d98a8476e4a2eec480d2ad2d80ba69196ad0279c470_Z},
+ {0x29f80454575d19baa77c9b505bc5f33f0577d8f2ce63780f9213fe7ab18f6cc_Z,
+ 0x6848ae259cfd050fd46b04b4acc14d7af20b2cf1b3d0c9973fe068658d043f1_Z},
+ {0x4c89b6510677f513d789833e9d41694b91aa009ef2d147b5c7caf0ec36128a1_Z,
+ 0x73d0c5696689fb0dd0a86e70fd2927929d60162d24ffedee43686534f3539fc_Z},
+ {0x3a249f87a20b1b5b4db2e26a256217abc47641b944ac4aa433098a5861e856e_Z,
+ 0x26be6d948015c7b9030c6713c38768e7632320a38493000721d29ab91089ecd_Z},
+ {0x754a7f5db51874bad620d86b01cf462e18011e0cac314c9310787c266fc92c5_Z,
+ 0x1b29e1cdca47382faf98f2588909a7dd593f878f766a6a7b109d4254b0ef8cf_Z},
+ {0x48d9fcc542ce46fd5ca669d9ef77747698b757918c6a0cf2d8282faf9ef09fe_Z,
+ 0x2d6b8804befebfd2f4b9c6bd2eced32bb15ed8585e919cb46ad74ff11c0595e_Z},
+ {0x721afb3823d5e2f754bbd6b7a90daccf2dde0c1f9f1bbdee8de9e64cb52b0db_Z,
+ 0x51e351ee5e73cc5e41be981183fa5a11527fdeed09e290531bfd83e574d534_Z},
+ {0x37a286f8bc5d0525c64f028710434d6f832c3e7057ee3f36fb5653ad6afebfd_Z,
+ 0x50cde5410faa07c33d30188d6cad656590014044895e2bd6a940998dc7516ae_Z},
+ {0x3ef6f601b2a1d947ad6a3c6eafb3f21db71d63fbc5ed57a1f2b5418454a3f48_Z,
+ 0x5783cbde55750bea5e6d93a2893df8fefe8f3491b359c35382883da0b5d5d8c_Z},
+ {0x65fa96cda15d5f3b10aac96ee6389477bafbabc0080f81fe336bab6421c733b_Z,
+ 0x27d19fd7cd38d60baf5f20b064cb941a5d31f48ca8c36e42931b0617db5d780_Z},
+ {0x61bc6c2e82eea33335735aee11a976c2830ce6ca853456e1f462eb1caa08c1c_Z,
+ 0x4251f89865abbd7b86f4f97c2e3c1592ca982a3e65a67d14baa391fd75746b2_Z},
+ {0xaade4a6822d0164045dd5f4787cff3a442b538d24f25fa1fc770f8f999c2f7_Z,
+ 0x1eab702b2009863e2fd3f922e67e13d80f7ee1ec2552ad56d16f3a5fce61c4c_Z},
+ {0x6ba561370d1765819e5e3f6ff8bab69ebacb11d9b09e7af00c5b2c1aadbe95b_Z,
+ 0x6fbb1a8372a5c79abd9a8e3c54365ffb95602eb47ec328f3664f0b90665d561_Z},
+ {0x5f6b00f02978e07dd51556c9045218a62ba6a9c5f758e8bd71f256c8c834a0e_Z,
+ 0x9cf6eb5b793062dda8d59cfe8af5c8409d45f2a10d30d1ff7476a9130df5fb_Z},
+ {0x1283cf7179b31667bcd6e95bd9653b34fda5f86ad4f2d7655528b828789442c_Z,
+ 0x6b40826a1923296e28e9fb1731fd38c0d33e3571836fcb4f44dd634f7515f3c_Z},
+ {0x1332b53eb582b13900572c6e7edc988477dc03710251c771b0bc03beb50602b_Z,
+ 0x5c54c2a2b78840824a1df6e8d68e49e81d14c1a53d2f94b1a8790728453252f_Z},
+ {0x6d9daeb3548e6e20760cabf269230bda6d31c863e906ac94becd9da74d60168_Z,
+ 0x1d7aa3f094beb180479b92a802021e13cb8a90d97e8275b04fc9a323f9b1d12_Z},
+ {0x2bb0eb48f444f53fbd8da7eb5c5d0679965557016596ceb1d122a81eec64ab1_Z,
+ 0x58b80ab0399b6086b2c4a9ca531a58fd205956a2743f47f412cd308e4d1bbbf_Z},
+ {0x27520779c7fa5ccc80bcb568ce0f129094c368059cd5786e1f059af0cb247e0_Z,
+ 0x70c52afde816e2838f6e9a9128b0ffc6f6170c115c5027469d3c42ea5a7613c_Z},
+ {0x6670ea7600b2696a3d79f2f537dd0dd654f35a9a400b78955f119a878e43c13_Z,
+ 0x34193e913f773cec4f3ef2b981b64efd29282748b1651ac116d91ee9b3b7ad0_Z},
+ {0x10bb1558dbfbb9fdecce1eea2ac58c5114290f232b031e039293549ae639205_Z,
+ 0x7540dd1fa8ebc532f8ffffa7eecd7c88f6733ae125d33a0757407d16f0bcdaa_Z},
+ {0x6a4a0272dbb1647b6b6b164926d08f58a5c5116e9241c15bc2734015ed33b70_Z,
+ 0x7aaf4235a6014579b23aac9cb747d96b43e5c56dc7042c5ac6733a1edaf76cf_Z},
+ {0x75903abfb45f1af7fdf465aef84feae7043a7304922c6c3461ca54204c211e2_Z,
+ 0x4ace274430c59d7c72b5c1c3ee701b44585462b0500b1640f0f126044ccd075_Z},
+ {0x772b1c338a673f3674f0fa675a6754335099558465d32f0b61d9fcc20a15768_Z,
+ 0x37b22715631dc08d4fdd4969015fd36da3e9708c2a3eb93f0f9531867eb2c7e_Z},
+ {0x289344a7505528b7d83d7eb1e329a37dc3ad3ee62ae0d5bd6bbb3d7364f9365_Z,
+ 0x464e8226cad85ab44f82297295682df6b7f29bcbfdccf77e33a63edab3482a2_Z},
+ {0x12ac4b94bdc9ba032fa6ef13ab4c3cd8f930aa3682a3e72b9b75ce521f17551_Z,
+ 0x6e20ed9110acaa5e1fe831095b1d73e266be3a42a97fa35657869ef82f5039e_Z},
+ {0x312fc9b8952dce8d7400d971a14b9318647af65ded97e8ddb78b02a9b9e48ca_Z,
+ 0x2509889cb2ae15e82a061e4a283d28825ba80cb04d4b396ae866b3d1f49cd4c_Z},
+ {0x4c3ebe4ba6e5943f2598e3d7fada73deef3eb04c1c0a33c6a30e7f9b0d57dd1_Z,
+ 0x64d65f309326dc2874610c5a32d14b47f2fa589661795cfb35728486696bb6f_Z},
+ {0x4a24c797a0d2bedc68e0440895ac2e278939604342d6f17f2fe6910ffa02ce_Z,
+ 0x65abf261f1f31499b309414385d835a922c639ed7abf111e7d9f9f5a93c8229_Z},
+ {0x1e32487c2390fc4e4ab2363c170987305bf649dd594f5fa2d665cdcb23b4b28_Z,
+ 0x709f0b4d0b4cfaceba4c30c4d4f90b51c68e617813500236339cc7730f1e7e8_Z},
+ {0x347647dc2d309bdcfa4ba5258087ca7d5f1494ba7460d444f7eabb2fd7876ad_Z,
+ 0x350048c6b8154ce3308fdefb5ca1a630b299eadee4e85f535e7eee2383b3b_Z},
+ {0x524c9c7da0674535f368ec82c959d01e183866eb1c2c39495a4b1585357bcdd_Z,
+ 0x6e4108a34f33b515fac2a304078d66b2da3847be3b803d719a2071ef831a94b_Z},
+ {0x731e6b87e0ae824e0803c070240d5c9b8f55e0816a4dea1b594116df1ffc41f_Z,
+ 0xe3d1241ad5cc299ca7bb8469cf5b03ef026e2fa82807874ea53a6c72ac5f94_Z},
+ {0x783802df2714b9606271b083c7fa045dc1136d9f3c04935c4e559f96d0b22b9_Z,
+ 0x6a7890e392b5221f806aeca5f1f5df45f57b3b7845a1d362597dbb50bfaeb63_Z},
+ {0x2dfa2e588ecd358f55acd6ff3c298a20b187711adad7fb1de4ee6ea08e0e529_Z,
+ 0x78098929df65ad32c914dae8edde692d2ea008d0d0a7c59dc46c2a408a98a34_Z},
+ {0x3469a5191de32126fdfae979621f81da116cdc49e4839b62371801bd951dbbf_Z,
+ 0x49863114da9745b7d9c465545cb80c7b8f7b7b04c1d131c8a933b8493d646c7_Z},
+ {0x46117b535b2cfebdce8543469874111d856f97f60b4981c37f0b5f2577a1ddd_Z,
+ 0x2d85b170f9888a4700df782518a878c091b1a308513ad1bda7138c82b8b6a02_Z},
+ {0x278dd235b1e06fe7b67e31b9067440ebaba3a7814b513575b8f7d77000f05a5_Z,
+ 0x66c5569f2d71b2d4bb96347c2f41ef302deb8853d96f8026735acbfe73e17fd_Z},
+ {0x1d08e6eda54a930f51e6dea44310a50f6b3cc55fed4efa28cab8dd77946b37f_Z,
+ 0x4ba19a92d5575b3332a98b831e82c10c54f94ff5895cad3d65d4e9d3d0b8e7f_Z},
+ {0x1c937fc16c5f636df5e5deeaa85854236e53eef17031e1f4f54709bbc1e31be_Z,
+ 0x6f0a94ff110a7e82499d68c0bce06497320d494c5bba8d574a032024e86fa36_Z},
+ {0x3ac10559c70ec64e45ac2e1175e6445107278b17e14e85ed5e8572fdb7cab72_Z,
+ 0x606f496889c861c343f0da76deab855ed401384ec82d789926e70372097a07b_Z},
+ {0x5360a9a6b226e690e25d3dea096a017d408f6c8d39f9040e1504e9ab43e2035_Z,
+ 0x67cd680eb8eab3304f52ea7d07886eee1eeb2ef8374a06ca629e9b774700ae3_Z},
+ {0x1a441f62d9824483d34fd6b4665336413a0db92545f82a4bca26ed357f79a99_Z,
+ 0x142c5f1d249874f407817cc3c59b22ef1b48a0ec2406a8dbbc421cca7504cef_Z},
+ {0x68a455849d29c68443021afd6cde779bc30df5cba50303c97913a7739ec4ce_Z,
+ 0x62bb4f4c55d86545a01aa12f8b7f101793cd0b9584003efd0956b72c19e62d5_Z},
+ {0x497f65ab2ce7519b2f7f1724df34a5ffb76c448606b0a7249d19dbc7da2ce47_Z,
+ 0x781bddc0bea5ae32cc9f80278d4bb069ac5766076ab340aa06d8d7448c4852_Z},
+ {0x39dbfce999a9f128d0ac6ee8a6e55976aa7ee7b85731bfd7f5dfd9af68f5034_Z,
+ 0x1255d957f4d8e03141862aa3092ebf33e793346e5758ef186dfc6b87dce6457_Z},
+ {0x7a455afdbe1c2ae3833eb39a771963c89b4fff5017c33d284459fe9b1e0dc17_Z,
+ 0x5b5caba4463a6cbeecdaa01569ce38e21b755ff5e18860e9fe9f79160c4d7e2_Z},
+ {0x52ac708678e9f0972df611994f5c4c31461f42d728c2140f388a658f03ec6a6_Z,
+ 0x44f1b9e6bdd8918fb058a24af838cd8733f46d99e9f2615d8fafc8c47699579_Z},
+ {0x3439b57886902e6ce95cd98c71441dc3971432190e644cf0c43ae17eac79332_Z,
+ 0x480e9a024b09e0895a82c2acefef7cfb4995146acb1ffce12cf971af0de8ce9_Z},
+ {0x4f1ca9d1c16157e8c1c1af8bca7407b9e7e7e461f920a82cfe61ee2ad0bf799_Z,
+ 0x642cebdb864f19821b0ac2109ef3fabb9a4ef46f6e0738b4a64c477e4bf8089_Z},
+ {0x3f8ce9bb8dc5bb555e253ac32e05ce897e8340d0a840d9d2969203edeaf1ff7_Z,
+ 0x3be0f06708673bdc03da6b1fdc98808ac48436dcaa72515d9a2961476571304_Z},
+ {0x75ce80956b7c379f47612153d071f529e1a532ab33add0fc30a12965110a936_Z,
+ 0x5afd4cc391bf1a1e9965f46b670d01c547e2a874cdfab4e4941d57d8c19568e_Z},
+ {0x43bbb28447d013e0d5f6b87537112987552b40bca59410ddba5792dde0b358c_Z,
+ 0x70ee7fcccba2a3a9c470f945dfe7f5f102f466528fa2e404279747188c56d3d_Z},
+ {0x437e18b727e23698b48cf6b2e4db9d0acf9c485afa02d6b4ef244f0db1ecae1_Z,
+ 0x253a749c48b8712244991dd8f5e3b12c3e488acee188564c23ed8a629cc4308_Z},
+ {0x57dc4b3be50eb9f2c4c7f24e42f2a285685c7d7fdad973dad18e4a53c889cdf_Z,
+ 0x199aa8211afe736e4a3405fe1c700db2d63f04e0f2b05540d5c034888d67e39_Z},
+ {0x6dc56b368eae5439ebacd1e8def5161a79d3ec2f77c7e4706911765e9e8e7fa_Z,
+ 0x5078cdafd1ded955dc10f28b009bd501bf41451a771cc86c9fbc93f7f4cbb66_Z},
+ {0x36f4ddba0c7ebbf1845d87e7e0fda7a60c4d90157063ea2781745baefd1fc64_Z,
+ 0x329d0d91cd2047ef0ed3166217375c9d8604c21c973ffb694c8c8c8da1007bd_Z},
+ {0x4ba56b88372f1fa0ac4e15dba3cdb987f04089a470266e79d9e00304c5a20a6_Z,
+ 0x247c5879d9e9424639655f590b6cd7d9cc44e73c504554b44d1a5896db1a0cf_Z},
+ {0x51ee6486f9ef97868475986dc107f23b41c203210ccdb953221ee8be360ab99_Z,
+ 0x4a9f8279ed1da580fe7c9234205bb0b15d74e5729fa5b8a182e33e501e33475_Z},
+ {0x122aaa53eec4eab1dabf34abb82f01e8e9ac01b013d7f868371d8fcee46fb02_Z,
+ 0x5391b0ee19a008fd81c9a402be1026da730f345d359a5aeaabca7f68804f257_Z},
+ {0x5a2d0d278efb0d0db252e01f60e01d18ad9933676acfabafa79aac3613b6308_Z,
+ 0x385cda0105caff7d04e511b18aa93573c9314d4669d377151828a8f3c523b09_Z},
+ {0x1ef156e976d5f3c956ec8abd7ef747c6e697d0ac7e17163980eef8cc92cd6b4_Z,
+ 0x1e51351d6a5fd1c39510bad0dd0f46b4f0780a6d0cba3af716d5b4dcc3079bd_Z},
+ {0x6c9088ac52f0610f60c1f513d5d99e7d02583f7d3a95170bfe28604de6f633f_Z,
+ 0x203b4475bb155b559ec3143ee76afbaa4ec9a76b20c33239f2f62102670844f_Z},
+ {0x6eb6a8059bbbad74a94ac5cf54260de510c38278eb9572c0c0afbe4ce99807f_Z,
+ 0x1fb5f7bb2b258544443824d0c61edb7e74942c4d0c796583187726c4e2f63b0_Z},
+ {0x4aeb7c141dd8346a668da775b07cab6338f9f62540c559c5058328d1b839e23_Z,
+ 0x41201d9c711f200cfc9ddf2767a664f3243449d05f9c68a6da6ab3d70f036ce_Z},
+ {0x5028392ea52427082e2e47a1178909cff59351ac67af08d2f53d98d7d7480d7_Z,
+ 0x75223ee95a4c4769bd767f237408f101cf5a2fb7a311d583d4f5bcd00c6e9cd_Z},
+ {0x4009b3f66daf34172ce1c0dd7fc721dc84bd953b2805002ca34b31ff6848565_Z,
+ 0xa9499f1d05eb4da0df4781861171e3f874497276c5d3861e3780a20d357392_Z},
+ {0x7da6f0ab777574d4edf4062659aa801800b1f1864e8bdd6aa0bc43cbff6f02c_Z,
+ 0x67dcd8629e7d2fbcb15864cedd3797a58fbbf9cf83de3558345392c9309d123_Z},
+ {0x54f05827a66a3ac1a16dd144f5e6a725a46bdb619100d64af7b08ba9c769e5f_Z,
+ 0x40abe38d69f62d7b4668599eca7f65c9757bae3110a1d92c7de5f8e9236a0d7_Z},
+ {0x1601b23cb99bae74cd4cc275c2101fc12130f1793c47d3db36db11c4c0ab7cd_Z,
+ 0x508bbb0f401bae72aae1b9541518d7d42ddb4952a3f0da31ba2f8c7fbfa5a1a_Z},
+ {0x481ca7759003a055b135f503b007bec121ceff1c5cf8c7b85d778a739ebc37_Z,
+ 0x46e13df3a4c1a837d617d92450f24445086eec02e7927e894b9ea6059b6954b_Z},
+ {0x781196d3ef74e292b0509add3bfc55c47cbff4fe914ff1ebefa385b25ec0ae1_Z,
+ 0x5c711d611deec110e1afbbeccc4d1bbd2138a81f058802078e517503aa96d2f_Z},
+ {0x4c454d46137d2a8c17f2ae17089134c0f0fca5b5350a7281d1ee36d332086b_Z,
+ 0x2f8e59da4ce16de7339f9cc726ac823ef12968e8af41955afc5e68114d85e6d_Z},
+ {0x7b4cfe1aad3fb7cc50e7d7abee6bd944287262c7cd3ec521f3e2f7409591778_Z,
+ 0x41e80cc0d2c46450a68027881f55d08a78e3e8acbc85755cb42ac245469490_Z},
+ {0x5b121a76e83791be7a8c769e4a738e66176652e7b0f28e9421ae3e28632cd1b_Z,
+ 0x3be93fcd4f5e5ec270f62748600bb8320dcc8d9d057deb5a09c641d59419c85_Z},
+ {0x38aba78d7303de1bb786cd90c16c35c069794a981f14501cdff81f04809ef36_Z,
+ 0x61c93ae1d29af35ce83e04166275c460004b4fa6a44a2943892877c5a0b9e95_Z},
+ {0x238ff975534b39d4328d2fb09434828202953f0c8508ca1762e22b831db84f1_Z,
+ 0x77b3f83a3515336fcefc9327b79e6bbd3bebff04b882aff661efca85059037_Z},
+ {0x4af8fa59b6d118b188b73af5bc8fe6c622c949ad176c8858570d9ce9c801a03_Z,
+ 0x11f6bebd56701bb03d0c4d09abba3e9798919f4c91af2ab3e86cc290b4b97d7_Z},
+ {0x19ff31fffe94c26623b202805feef4c6b278c653f828bef4fe87026f79f99cc_Z,
+ 0x3c89d9a61dbdda9993feca6ec58daa403cd9aba21579c336c9b911b2f54a4c5_Z},
+ {0x176663c53442e46b1a9ad40e37dd689f03f5f752dd97a7d61aa82b4d582a425_Z,
+ 0xbe033ae1aaae81a4fb295051274d53690fdc1661e04f6a658594c00812549b_Z},
+ {0x4644b372ddfa2aec55326546728f3c2a5d9d8fdaa6df6ff5034116bdc440453_Z,
+ 0x37cc7dc4842d88e252fe3f7bf177b95deb3b11d097f258ce5522a4fb476741d_Z},
+ {0x121aec6903ef3f52a533931b61f5eaa14891b693f37079e95f2da60c5078d23_Z,
+ 0x1b11a440d1937267596491ec1668715d6efdf1a029e56449dff7d1d065fdac9_Z},
+ {0x47c71454919af69a1aa1a798fac0d600a848dcb6eb769e89e498700e0c5db84_Z,
+ 0x7dce06b0b64fd23cc4b68f13aa5aa7cbba22d2e0d4c09d3008fcf7dd376b7ee_Z},
+ {0xbf74a7cf05fd18b3b7902c21563b3ff81b19f41edfb7a5fb06a3f041849950_Z,
+ 0x3d9dadb167c7ad778af9dfbcadf0135d8df8ec6d9c1a7592c02783b061db55f_Z},
+ {0x535b5029d6e00b1afe0b8edce3a426615b188e6afec56a2a0eb0d6cb60d31bd_Z,
+ 0xdf1105c3c9fc6d5e5710811c1d949d8e41d42f31e782c988658732b29cb4ae_Z},
+ {0x6005e128fd5a27b749a30fa56766ec34958d2116a479828ffdf30ffa22a8991_Z,
+ 0x12158816d08f33bf1c2895eec0b5df2a4bdd2281349af68184834092e41af8f_Z},
+ {0x2545ba00a30adb19d1fb7bab616b1ea53c8765bd06c4b6f8f312b01b3284eef_Z,
+ 0x79fd72dc8a0c42556398d471a3b98d4eca7cae3c0a3d2e26b4f69b63b0c5b1f_Z},
+ {0x5388a5709476b0627b3d828eae4fef9c57231da3db781c7324bc98df5484b3_Z,
+ 0x6843447ef99c9e235f9e74983aec2d7e028ce8d5020d506e2d2a16dc8788bf_Z},
+ {0x4e9885148da09a82508eef0a58cddcdfaaa9bb5cb96d23785e00eca2bd26796_Z,
+ 0x3d3232efde10a157594085ad96354015cd1184f55b739a7c5b576d7cd781221_Z},
+ {0x1c06a16f6f297e8d315f6b7ce5ed8b6cc3591b302d4563be99f26f78ce8280c_Z,
+ 0x3db714410aebfd11faca0a7575258d78b8f1c546666c923aa420e75af637975_Z},
+ {0x55134f976dc28ac8268a63aa842b47c94c3be6bc0e36a0b1ed74f58d36f1097_Z,
+ 0x677dbb3c83300c7b21b9d293c335be073f8e813c13b438b5cacb1ceead1917e_Z},
+ {0x3776fa5079139641d326fbc230447e84af274de750b8d727e25970e5f857eef_Z,
+ 0x7fc740c13f075f45f818f7e68b2aca1b3e5a80bb77ed7b8f2ed92d82543208c_Z},
+ {0x3b7c92306b73facb18c97266c726d6441e635056d3a3b7782d85d31afa4fd0c_Z,
+ 0x5d4446c08c5540b302094fabb709fda15d95732be4ea893bb3f7ddc6eee29b7_Z},
+ {0x4953cb98125af9e57906a09b7928162317342a136673b2ff747212b6d74c702_Z,
+ 0x2089bc2bd5b2b24e7e1da8836b4d35d5360ad9122fdf6c23f1c56ee893c7aa9_Z},
+ {0x47a3cac8d25eae7c132aba000d01ab14bca53382552165354b14756124a1e11_Z,
+ 0x2a82e196de1bdc1a659c98d009510feede1514fe5b7c21b76ede052faa0475d_Z},
+ {0x5dfbd7d85c400e3c79e39b72930bbb1e9bf0567904882419908dd84291ae507_Z,
+ 0x2fb5d589ac6b7579b8478272e44dfa97194ad3b23c3ed9ae7919917ccaf8a36_Z},
+ {0x1fb14c5b1ce91a7e899431b2e48ca21721147bcb7b215270e0b5406e35429fa_Z,
+ 0x56bdd8820fb76adb9c6557da4b38516beadfca90bd77032fe45bb4bfec8942c_Z},
+ {0x156783d5ae22b963b7ecc392d06e41d54d4c20041fb68f17dd06d36a3fab20_Z,
+ 0x43bab810f9fe1c85ce9cb74a990382dc70038b668bc4ccb5519469edfc1c3a5_Z},
+ {0x67d7557635a26c4be097d53efbf464a5722b981fb433f4855d58c22378571bc_Z,
+ 0x38dc41b7789665e99286d57b8e010b92c0414af4f8af86088c956bc11a0e4f8_Z},
+ {0x54e64e59b4791d1db806edbd0797a62ba57143ca694e5b530eee284ee51cec7_Z,
+ 0x61529214c3f319a14ecb3cde2fde41169a8659aaf9c1c5edfabd1fae284f5c4_Z},
+ {0x5b5e045838c2b1155a097eb6550e1b81ea3b0f18f0a84ff2b6b5b16a6062704_Z,
+ 0x5d8e957dfb2c1e4b6f59351280e914da35dac3aa95e13e3935ea9da73b5d1ac_Z},
+ {0xe73175a1afb0252c98cd90c75f4e7378159fa9348e1fc83d06b4728bc4f042_Z,
+ 0x12607d43cb7b7e01676abf0d6187360f8e3d408927cbd1014037a201ce18ea_Z},
+ {0x5a79cd6ef96c3a08c214c903604353638f0cda3e6c3ed663feb21a197bbc55a_Z,
+ 0x7dd8fdacc5e7ed304c925276381f56822e841285aee5db0295b0c11129020b2_Z},
+ {0x6ad9f0942d93041b7dd804a617994ef551afc9298488abe9efd96b1d4da2b1f_Z,
+ 0xc2866244139576ad4e0e22b2bf6c4a84c921d3a924f5e27b92f2cd93d12593_Z},
+ {0x7906827c4573bc96eb6cffec41d0a946723f49ba033911f676cc103b81a7cbb_Z,
+ 0xfcba35fc191a10da4263653e47a09e5209c21164c1367fdae933a9a2ee8eaf_Z},
+ {0x7ed61595b843e7563ea1187bd3038e1e597ecfc18366ccf656c81296cfaa8e8_Z,
+ 0x15cc11c29d87fa4c5c03fb4b04b6cf5415582e50a2a40d5a4d3b37f4c3b8c80_Z},
+ {0x43338c31a36188b524713b623f4a9888010da4ca7df7f342115c31fa3e55ba_Z,
+ 0xae5a37445464fa52b81d29120e50f58812089d1870bc52aa5a986421e57396_Z},
+ {0x3f5e18f1626b4f7b5f3b375597906f0ad7680a328ab4290a5f624a35581529e_Z,
+ 0x71535e5adbd009ef6cfc0988265f48d7d1d504f008b072ba68fd78897c4946e_Z},
+ {0x6197cd3f3840e02889ca392e74f029f1274860d40c1e735978dcf9720394efc_Z,
+ 0x4e7f7c332e85462a9b6c41475d8b680e3d325feee64e8c9fa5b97ea0514bde6_Z},
+ {0x45f006cb94c77c4b3ab7e05678bd5408cbd34e766a236b6a21b0bd86b5678d5_Z,
+ 0x41b2369ec30edfd367cbee13eb4f637970007a83a20661e3df51726e585f88e_Z},
+ {0x4bb2055b575cb2cfc5cac5d1063583ae9ca5e8bdb2a3833aedd785ca390eba8_Z,
+ 0x22ce4f112ef89b8a2d60d8f9e39a4a2c0c67ca68e689563ef2b350d0f2fcb40_Z},
+ {0xf62c995c3ad71d588164d287f09e869092108afa19ef1002554973d864f43a_Z,
+ 0x2b40450b8e008bb7baf8010c404f64b1c6c637253ff9f3640b3652de2f9d757_Z},
+ {0x248d90bd9d5b7fb5ac15a1d9ac615da4e117205276bc057809493726104c738_Z,
+ 0x42591f77ea99e056b4108d8d912abf78792d2f9858344d359a7b3a71f26bf9c_Z},
+ {0x7b7f301a2e9cc8c9c2f9033a688385b29a3f542e055e0c813658965deb5a465_Z,
+ 0x707f0a147ae182e5dedaaf01ccc3704a9b86d99f76e0df26dad86cd2495dd70_Z},
+ {0x6cf31ce7646ccdbbb072d09042e930f2d3a2955d113340336bb83130a62d99e_Z,
+ 0x5a675ebe7793cb8aa3b1e8f0cda248f2dda63860138e3f736f42f685d3fad17_Z},
+ {0x1b1cde48a059c857916e3dab95b4da00f65d574e461860a08e5345893b099_Z,
+ 0x2134b26f97e85b83d73d54944b53782a54a5bfb296a01aa3b8021cb1b8a3a8e_Z},
+ {0x35a0f5b87c68d5f818f1b4248af2095afdcdee1671465d29c3ef20bd2b37f49_Z,
+ 0x4d59435ac0c522f4488c0288e81436e781aaea592e69793d04056cb85353ec_Z},
+ {0x2a457e5ee4452f7b1c444902a6e641278d313a00651c96d226b9f80be534b89_Z,
+ 0x7034d817fbfa5e537977caa1336183fcf638e3a76d23af14eeeada586af7e54_Z},
+ {0x18ef354603641b7fcb0e0c0e0bad8c80d8e4df58ed2b550faba9a04609c65a6_Z,
+ 0x3185dbf54d0054b5341be6f46f91a86ee6cb5bc30c8e475d3bf6dc30146c421_Z},
+ {0x2fb25b811dc4c03317d6166cdb4336e874c39b7b4832e52efe03ba510b52ede_Z,
+ 0x79143c126dbc695e975b12ece1edbbea18596275f3bc041251702a3c0fd44ba_Z},
+ {0x70d5652ca45eb6548b9461659eb3499f9e352755619a86749ff37c8437f1c5_Z,
+ 0x524add209462b7887f152cd410779aef09420f2154614051d2bd5fbfd0f874b_Z},
+ {0x4988b924d574364981cc5c420a137b5f3aa3519e79b20e349299782e5a04c71_Z,
+ 0x299559fd60b20e8f566901a324a6712e8de539231992e7ff0e0ef3a2305af7_Z},
+ {0x72fbce3530a5f33c615f6cab16ed5069e637a670350f46d42124713aba6804a_Z,
+ 0x6c9f81497cef871b11e46c54b0f56fec69f20c39e1f7e6456c1d557846deea4_Z},
+ {0x6b94c5fd6397e833ccfecd5c0b0dd003e55513ed795e25d7525333b4249bc76_Z,
+ 0x1c629a33bf6eb58df451dde2004227caae839acb61da0a84f6351c9b2b49e58_Z},
+ {0x1577b1d4c4f3b7eb2ef3fb779d0834d1cb3ac01f92a5c64f1f895a2e8c7809_Z,
+ 0x11907f1bee9535ff99a928da4074e14032ced9ffc4bc7f22acf50ce5fc6b571_Z},
+ {0x29e423af260a41c87ef68f4a47fd60816a4fb834ac716b76d70d827ce2c60ec_Z,
+ 0x19d9b631d1211cb4ac798954bd01c5c660e4581ddd091c2f87837a07576090c_Z},
+ {0x595c10636e5aca55664add69b7da7c1aee69e240449a3bc426f5d232f444118_Z,
+ 0x6fd321bfaa65a91ff58c1e556ac3962d7626745a179374b8daa431f92391d27_Z},
+ {0x401fca5438170be38b64644e04a4b5e0eed041637d2a5d25db9d21c302e0405_Z,
+ 0x640d4cf3fdb37827f13bbbd1a1d3e2aeed73fe3af7371cc0b390064e0fe807c_Z},
+ {0x7d27918e4da91411a3eff411949953c1d1ea92d3daae31985d13700cfbef388_Z,
+ 0x6dca5b7aadd9e79bb7c80de3d16e1fbd089eae88f025c30e1a8e82b4d06ae98_Z},
+ {0x78cb92cceed4be55590b246b75e98edd40af4a08861695a5f8049153851fdc7_Z,
+ 0x3e8fdf005c92b5bdc12b1934fe33e2ca8997bfdc447763a4ae585ed6c5ebe64_Z},
+ {0x72b7d90edd1b88ed7eff953047e0b35ef3ee69037e72cc14070e545d0efc62c_Z,
+ 0x22d8c9dbba7421033477ad598d905e66dcf28cb298a31102c591aeb716a2be9_Z},
+ {0x62501d2a6e7547426e2422f9049468bfde28900cf35bc70496e834f08bcff48_Z,
+ 0x40f46f6dc3c898ed57c445f42614c48933eefccb9478fe28f03725c3b73a7c5_Z},
+ {0x10769e00be1f52801db350f9cfa217f70052fe231a9e4e5f89a8e2826277357_Z,
+ 0x3c1f18a367f91e6bc742e345fcdb21aeca8d3308f97cc9455375ae64bac8968_Z},
+ {0x6ac2a0514f082cb871d745fbaded0acd2b32173f9f305fda3e1263b3394a48f_Z,
+ 0x6401676cbe781e0672e5db5e3247715e81c5d10be5c17eb15772ef20321d5fa_Z},
+ {0x2646f5231bd48c0a60711393f4638696a9fbfd26409efb7b9c3ca19a9c4a947_Z,
+ 0x708863ee0a3316ff8c37f9ebdb998653dfe1694f2bcdd21725d923a03f6db9c_Z},
+ {0x652f896e06dba85a862034d63aa853f3e5f6680e2df8af0cf768687aaaefba6_Z,
+ 0x58824fee6af2a79b5d147356523b9ff3b7c8b160025c3908878f81e70464777_Z},
+ {0x3d87f4f906d142cad9216a5d454498d176ac9130dd8ca93336c5b36d93810d6_Z,
+ 0x52eb2a2d289c45e7f989dacd22004fb7c4c6c7c80b0ef1affe5860ecf545581_Z},
+ {0x75da6243ee773fc35a2fb8e71ede5e7e15804b9ae958d1dfdb83ae347dca5d4_Z,
+ 0x67aa31147f05550d463b8e8a96bd3124dd64cf1f758fa87c6f60828ffdf3562_Z},
+ {0x2e959802e69a1ea0733783f4c8663596f1d9b364d5cc0429645280f271f5d65_Z,
+ 0x6fae879ebe85bd36d1ffb4daeb1974bfc5fd0da26eb970828691fe879e9920a_Z},
+ {0x29a1ae1d2bbe2925e1228a9033b0aab3395fd5659a4d363119bdc26b743663_Z,
+ 0x69cc6091e847aa3bf9c8675feb8982b556fd2653c5105318261e0d65398c567_Z},
+ {0x5c61f16dd6f8f4f319953544c48fa14725198552bebdc23381bb45ac57cb84_Z,
+ 0x1122ee056b2b0c014c6d13e48d085232fb90714fc4c8529ac0f307dd07ba7b2_Z},
+ {0x311f7160881284a32cd3d898fa7506029e9700f89c13cb9f44683c5cf8ed5e2_Z,
+ 0x304527cd8290a5efc46bb7f14f9a1424f9c9b66a3dcbfeabe46fec6d51f5db0_Z},
+ {0xdb90fad7ccfed0fc75cf97b35b6f1d22d7d510ab72c493967bc5a725e365a0_Z,
+ 0x2c1246aafaa83af43709585fb58fe9268b835d2b3ba422d1810e1ce3b35b4fa_Z},
+ {0x5ef446efc4dd2342b4af2039d198e4fadffc6cc9a9df30c16344174754947ef_Z,
+ 0x5c96520a785a3db8578eb44d6fec9b54301528843c6282fea2872abde592df_Z},
+ {0x520027a1b8ae39638dea6298cc3679c1353b9eda5621b741b6ef917129e3fae_Z,
+ 0xb2a9db68052221e0b4960ceca79a04730f3fcdb4f8f1d3f8a0ebba35305677_Z},
+ {0x1faac1fca9a1be6419d7bbb22da16a1b01d85f879015ca263e3644e14ccdac3_Z,
+ 0x4d8472d5216839936f2f625f8352974d5b1a56caa509697507a7faba9b34589_Z},
+ {0x17af7912a3dcfb148596af539bcf834bc610c107c70e8388ca29896304698ca_Z,
+ 0x44af6671d3d058e2702d211a48367656da00712d2e860743fbd3d67069699b1_Z},
+ {0xddfcfdc8a516c64dacd20dcc6f1ad1eb71bb176ce3c1dfba6526b58be8055_Z,
+ 0x5a7dae83d200cb51e2f05571275e9a97cf496dcf95ec8363224d397ce9598be_Z},
+ {0x416b64c265bf0fb88bbe0e228283f126fafd3a4366aa70d12f006e83830c8eb_Z,
+ 0x40af9648af11d1531c953b5656bba6133589ed89aaad7a644ca8420a16a8b9a_Z},
+ {0x773d6a63ee6b87112dc6481856ab103e41db911af31106f1e85ff970d11ad6e_Z,
+ 0x6cba5fdafee6f73fd4fd628c8e6bcaf912c54529f2e168ed4cb3a4f3234f09d_Z},
+ {0xb7b580595f4197afa3e47746aef12836ea3feb91f5866af0fc067c900e9437_Z,
+ 0xb6657c903612318ad7fc133bae8db9c98d48400d5c293cd69205e3e2ca630e_Z},
+ {0x44c7b00a8ae7a0e009777433a04967afc00eda9ea58a5c1b606195a481dd34b_Z,
+ 0x5c53bfc6507c4da87cc24c956c3d0c54e0c38408cece82015172dfddb917ffa_Z},
+ {0x5b808f27c93a36db266495a42568ae7cfe2b904db3ae024852d02b44c5f34b6_Z,
+ 0x6664c42dd34e331f8f9d7e89b05b75062d0e0cf2f91c5b2dc72b87c663d1823_Z},
+ {0x6240cc3bf7700a871631e6c95823ed0ef8b523fec3a28821efd7a79315ff3a7_Z,
+ 0x1a4d78dae58cc15e38e9b748cf74f209ff5870994efaacfec29b09a91e2928a_Z},
+ {0x396a6b34236c379c017fd6f81f1f17815279e3ec8528d485ade529c474dfbfb_Z,
+ 0x7987bffa88967c22b2af6f83a282199d978c4f32dd71747e6e9422c36bc61ea_Z},
+ {0x77ac38b38b75d8105da36832a916ab51394f6050ab8a147855c8e66eb6f9a90_Z,
+ 0xc896bd901b4cb1877e7f0aeb214b289a2f8d2a7a74fae464203556c6faa9b1_Z},
+ {0x33eea635a3989f5f0eb89acf4e61e8fafb5897e537833ec817cf5d82347f054_Z,
+ 0xbace338a110dbf629ed80cc43b6a410a52bece49ad8c0916c0e811be5120fe_Z},
+ {0x1dbcddebb6192166a07411e8693136505b4ae35f9b398f6889b127137adf529_Z,
+ 0x19f813783936268e2f4797ffcc5ca4d85e0ccfc61b8895872f03ed83aa2fcd7_Z},
+ {0x167c5342be653bcf694b5247d16268a55e7be3921e3495177a040a15a421502_Z,
+ 0x5b9374664465a776833724c88e16cf3bfa0962e01907024d8ea572c62a7dc4f_Z},
+ {0x14c48695bda1ffb71ddf882b153d4495a254bbb952e957de36b1f4000d1b884_Z,
+ 0x2ed2ba0d852244bd6b8b681b5040c02bc6f5df65e810f01330794314d6a4010_Z},
+ {0x43cdb4a3040d9a5bc6852a4fd61aff473be2f4309fac4f6d0015c76feeefb30_Z,
+ 0x3b37352dbf40b38abdc8ed7531b9c8bd9e89cd7bb2cd4bda3496073617164a5_Z},
+ {0x5844e3cf59e3b6fd492e3ba8a2581040d75192f02e441f3eb0c18fc35bd541d_Z,
+ 0x2605a965f9a10275737b2bfa16b1c73d80edfe7d307f197fbb3519a0499e298_Z},
+ {0x12c8679fe1aef0913290d120085aed4da0a9a7dfc7d98e78c2ef01ed9b2a8e0_Z,
+ 0x38e54d038cfa5cf480d69caf84823a8e5f440ad04c276a29543f18fc2284918_Z},
+ {0x13dcaf8d1d3473ab2a5b66e98a69a7648adeb50fd55de898adf3b63e673cb3_Z,
+ 0x4550b4af97821ee2a5736dac408b49735ef9a983798a460e2fc25bd73f12a75_Z},
+ {0x4e8b700ea960b81c264226fdff2067b1a6d3d0dafec18f4e61db56459bad40a_Z,
+ 0x642b4d25e8eab539ccafc71c685de9f769c4d2d8696b28ffaa3c4d05faa530f_Z},
+ {0x205699b1c8ce7d29f7f65e821b03629aa31e02170d03bd700ebc8e56f02e879_Z,
+ 0x5c8a942796ceb27cb13ccd2f25524b6942d0ac49bf881284e0461b91367e664_Z},
+ {0x2b89d0330449e735fd402d441796c70543793a112818ce0d4c811fedf044ad1_Z,
+ 0x6b1d279bb22ba8e19a0d8f453e4e27ff19d5ee5d3c4662b35e9d8df0624cb22_Z},
+ {0x22a0d2c8a2d994324287433af77894c6a4aad150dce5641a194b6bf747eab01_Z,
+ 0x14e3ffbb4560466a7e6ce69ff3920bcb6ecd41ef71e0c429b0c1d534b85f7bf_Z},
+ {0x218af58a5eab24e51876f288b7a8eb31669339af0af26c11723aa8689728df_Z,
+ 0x550dd4c1e5c207eb53c545e7f44f39215eeb422fff565e2ea10d73349333a74_Z},
+ {0x690c6eea2e0b8fc38c21c3b124ab94a52831bddc8960d9c0261f28c4ddbee99_Z,
+ 0x28bb0b5b9de86dc8d4ebef7a47af0bd3b8812c0a930687d6e416858a11bc6d6_Z},
+ {0x4644e2f3361876e6ed3ef41b51c81b4f1f2deebcd8de09e27cd790476980f17_Z,
+ 0x1e396662f2fd58c2e7f318cf3a7a4876734b5ff482df84d05007bf96d3fdd3_Z},
+ {0x2adca4e762c7abc025cd2357383915677649764891e64bec49d0430d10ec3cb_Z,
+ 0xe172edbdf1613cfa320c3038cb39dbb5d2a7b40a38b683c40b5bf4c1bd4051_Z},
+ {0x2ae5ec6b394e2f023595f4dc0f683857f9e9f182d4728481f91225f2e568100_Z,
+ 0x1f3f7828e27b25d71157d4584bc3d05c13213ca6f7d3cb52b007dc899215e27_Z},
+ {0x9715519f4d9f5bc79485dc92f62ac47c75b0cf0ab8ab17699f02769fff0b76_Z,
+ 0x6310eea2c61a3d20150098131f2d3a1a05764d11091caf226757a54f5c443d0_Z},
+ {0x7abe13e018a0d5be2e74b1b222458416e6e647299be3a40cba9faf989183546_Z,
+ 0x5deada7defe77635dc940f02490dde55a5ee4a4f19245f7efc8229d58b23696_Z},
+ {0x7d1a26bc6666500dcf8584959afbf92590855d78f280368b23500c739d71677_Z,
+ 0x109742f7ebc46dfd0518316589e9a83ea714b1d88a99cafa036e8693bd633af_Z},
+ {0x22d6a0611db5b8980d9c8aef66fa1f2397f2964b6212c03f345f5582376b836_Z,
+ 0x62af598c1de730393503f75fb87e760ebcb36469d213db862036fe4bb5f4de5_Z},
+ {0x78638c51b590d5978f8a0771aff1bcb291e4c9e61baa69c69b916b3eeeda5cf_Z,
+ 0x7ff81b9f75926a38b1a7ef12ebb34b86cb2d67d0a1a6f2aa01ca86387a5966_Z},
+ {0x3f7e43d034d544eba6cbbb64e2a10005fd78ecddc028a2e1af80216125d7953_Z,
+ 0x2fc4847abb15e0177af2e9f81eb2ad392076752808db891008850e4a52e9c37_Z},
+ {0x7e3280ab174481222150f6aee632713060042cb4231e53fd4128826b1b778f9_Z,
+ 0x502002e7fddff85e05e5c791870493cd47a718fbe6a3fdc451f72557dec3f27_Z},
+ {0x7cc4202b6e4606b49fe165dd8596933b0d6fac15915d3a88df3ab5751ce333e_Z,
+ 0x72cb41fcb7022234c0c45ed291816d9f2005387404d333f0a0627448cdd59bc_Z},
+ {0x47bda44a5e33c31b73f816dd860c6097b8f4c68b09f161a227a30e0625861ea_Z,
+ 0x6195fe7a34738e4c771e8aee6b21846204bdbcd043686554e2d9bf2ad7abcdc_Z},
+ {0x32f186deb230ffac000b2d025de7d3ec876e3148ac806ebbb9935934ec6f81c_Z,
+ 0x3c636044487012dc195ae8bacfe987e1640099e2cbddf9e07f5f0d613a7571e_Z},
+ {0x1e756fa128be69cf8a5ad404fe3c8d194907157d5f3fa097c83c43ef72c95e0_Z,
+ 0x75d6c9ec9c79ef2fff8ac6de42d4100a1d0b0601136ac9b367a1205c41d8698_Z},
+ {0x115b03bf41d9f5e106fdb98d94cc420e7b2251418a2150d94c2584bff93cfec_Z,
+ 0x3bd42bd645d1bf03018cb9144fd6623137897536d95d73ca9129ed9e7dd87c7_Z},
+ {0x34f0578072bc7cfcfed2f8e201144ebbbff86718c9d22f5a4334c7ac002ef85_Z,
+ 0x2c1c4b5d8189ff2fc6feb4a94a82841c8278f9374343da34e6bf0c74e1762d2_Z},
+ {0x461c569e453285487ea3588d59c8480ba0086d6af3d1eff56a27943b4010dba_Z,
+ 0x4ceda82983567153544f2095b010f64fc1ac641afb1420cb44465d59a05560a_Z},
+ {0x4d1bb0fdb75923e420d527a9a3737c65c10d488de81382f19b11aff90bd802e_Z,
+ 0x56ad20760a145eab94442c7400f033cd429d9d1af68187b6eb1b4491399eb2c_Z},
+ {0x50061acb65190ac9f20cf550c030978e30cca4d5c2d15932a61faa6fa509b6e_Z,
+ 0x4249b6b48c871a77d0239eee6c17178d0a69c5e42e6550c4bc4f4a20e7fc5e6_Z},
+ {0x5353c52c6ff6ecb5b11a7819244b474ee7228d3656b4408441496c1993dedc1_Z,
+ 0xbd2428698558a277a503c5692394c7fe71a6cd2aca0acddd33f5d87b7ac502_Z},
+ {0x54a7538c3d59d708cd58a3839d7c8df709bc4cf6d976e04e3fcc039cfeb31eb_Z,
+ 0x325d079b25b64823f0a2879663988e51cf390b2f0bb1ed844f543a59e58a9ec_Z},
+ {0x22e5dd548e9122e72265570aae734e842ddca313b5a5bc7f8da4b64bd029d91_Z,
+ 0x352e9b98bed95bb719cf77a57e161995a7765ae41f9883ca5deafba652d0865_Z},
+ {0x4b261a11ed1754c844bd355425017be06a2fb702b9998858fa81679e758da00_Z,
+ 0x1791915ff61cb6e051e39095b2d2d2df5a6de6ea2fa93910274cb34ed0fa9c7_Z},
+ {0x6ec6fd1c5772bf015f6db5c6c1167cf6132c35ca7ca8df68f9538134b5ab86_Z,
+ 0x79c6edb2e8ff788110d8b3012f5ab4663ebb72ce79c621629d4e05cf8be9a3d_Z},
+ {0x6bda9f06cc36cc49c4a922da29f2c6e8330682542422a463d403c774a99b8d6_Z,
+ 0x32fe43ca15fe53fa3f1fcac2dfcd5b82a5516f21c19ec1c686fcf12df99c897_Z},
+ {0x5acf9cec77a323e4aed7e6babc95eb73efd608e06c7a68b71db2bd1b71acd52_Z,
+ 0x2a55469622cb952c3184e08e95fee471d8d4dd7a2234f89f77de73f9a5f6f31_Z},
+ {0x675252f7625aad5a604622e19e6f78bfe282595cc4784b7e870e298e851afd7_Z,
+ 0x7e39366c6ee08fb09420b32326837943e90c19710b190e77bb55e148f487c4d_Z},
+ {0x18d498f8062ddf66f61798007c5fefc456af2a92eccbaad4f5742f5c7bf9171_Z,
+ 0x5142a6c4a107e043ba3d2d77a14533d427449773e978b3564753f4a782003c8_Z},
+ {0x143c4ed078ca1283342616bca92feaa5f318751d53cd6779dbc1ff9c225620b_Z,
+ 0x487a1a075d2c7b8db2abf6f2800b45dc611075b04cb727e264e540e60b0b8d3_Z},
+ {0x7458a5863bd1ae4d3afba58528a7aa5d21d1584bb210bab466ebd7073e3a50c_Z,
+ 0x297c1c02aa6f8621c8f9cf51470d743e0f94c1c57f57a513ad4acf8f059a89d_Z},
+ {0x79a0299f479ed1db5b4e6f5323ce86c95aa0eb02b01d61841390f502d1fc7f1_Z,
+ 0x316b33e86fcb5bade46d50256ff2aecdea3706d47ad446a392a1a24c6bd5e6f_Z},
+ {0x4c8f3d7ae0535a3c1c5f8648705b0808de4a5b5189779bada9af3047ae38cf2_Z,
+ 0xb2e1aafaf978fe082cdc8c577a0823d7fc21cd55611835d5a463d47b40809_Z},
+ {0x11bdf0480f6fa704690f4f746c72d55b59401840142f39a173163cfe3274ec7_Z,
+ 0x17f675a570f644a665f47cdcfc639830bd29818a6c1c15eca156ad896b3ab0d_Z},
+ {0x2634ebbcd752b223e625e790776730ca3010d49b0aaacdc08bcf481ca2d8502_Z,
+ 0x1d811c7554197eb1d67ed471ebe37445a9befcc8393bc6a96f91960dffec11f_Z},
+ {0x2e623c7397d52c754a883ba540c75161c518ec299fbf430fc919e4b63867c2e_Z,
+ 0x5aa31e0574e2e083e7a02e779b834f115da926ea737ed5e33b0da7b6907f88_Z},
+ {0x1b59ad6781fcc6b8288071b4710c3eababc630114027967a5c8ab65a83e04f9_Z,
+ 0x6ac976d1bc47507e5a1e5d09d27cb6fdb9d02ee724f623e4ed4869605a05bad_Z},
+ {0x1b09750a3898808f9c9e8b1e89d8b54249076976cccad25d4f567b4bcc1658b_Z,
+ 0x6583e9acbc47da277e255860a25d3b014bcde160cb67ac1dc097dc785888025_Z},
+ {0x7b28e913a5287a629620edb25fa48688fc6f04fd4a2b1639968a27b61f0895c_Z,
+ 0x6e9e1e2a4c436614edefefc208b5d5c83d47525fac35f4676ef561ccac03ea8_Z},
+ {0x7c8cc11b3836e0e77006b15a10d58baaa128b2849669b1e97cab277f5f974a2_Z,
+ 0x2367dc364d06203dd257e35ef8503297ed976964ed35f5a0f17446181d979b0_Z},
+ {0x5d4b98d271e027e8580da0a9a2cf007a0481a73b2e59f79642a0b5c4f88bf26_Z,
+ 0x594d75e358662af89b3d0bb908e8bc7b0f34a356ca449a762a1414b1449ee72_Z},
+ {0x4257b6082b933da2f30728a8713ba0d14503eb3fbf3b0364cdfa3a41a3d04fc_Z,
+ 0x2f611777737d62bb56d77f090091ea03a6f9a6a92f9d78ca4e7c49b8e082255_Z},
+ {0x4ce5b109167f77ac01e4ac63c96fa2b22bf6c041993da3ce1b485a5e4eceb64_Z,
+ 0x7c5003d57f3e1a744c9d866297d2ba0b9ac6901114b750a1bdff662a5f30078_Z},
+ {0x15124e3e13a9e69d52ca0a4141190e74d9671e8fe770e1183c03822b44193a7_Z,
+ 0x7f74309a374574e6f4f732e81b06c714acf36b1d5dde258734a5267f658d3a5_Z},
+ {0x42333f4b49089cfee86175af162e66919755819b63dbfc7875639fc5ffef56f_Z,
+ 0x20639625bb0d6cdc4fd6f386e949b3291fc6c4061c566bad953e96fabf606a6_Z},
+ {0x694efd01f4ebc915688c2398849b58e2dacf85b57e941eb24b9fa0c799bb561_Z,
+ 0x6d2fcca13be65c1db7058e01d8be935834428aa365144b36e4e1d4bee169f97_Z},
+ {0x51beffd36160081e4a06fe0ceab50275a776fc3ec3e243eb356127cdd71e9b4_Z,
+ 0x54f4df7f430434ad0f6022ee4960f57651fa3a14aaaa2a845886c3b9e3f6473_Z},
+ {0x34302a16edb497ddc1a13127f4afecf1b67e2efc3a30aa2571e0e367551943c_Z,
+ 0x5d62cd1e97d96332c1ef1ae66200079e2c34dc0bf2322a8f8db0c9bc1bd9650_Z},
+ {0x3b7d7c38eb68b50855b50548b0961bb991668dd27434a98680b2e81e4761a97_Z,
+ 0x3e4a6944456a6dca9e7cfdde738857d0e8db99526af3de35dcb6e02679349d4_Z},
+ {0x6b15707f3694fe19521d42eccbbc57e988d79d1ac65604c241c1fc0bd20b11e_Z,
+ 0x1b5c76f58eb298bf99a6f916f658210b8034dd9a69143545ea2462b90c3a26b_Z},
+ {0x21de44099f98028bdf0363c4d6439902103f8f842a972b3cfa672312c074d2b_Z,
+ 0x450b08f46348ab45f577aca3e90c5eed4b7190cda97d60b642a487b517af033_Z},
+ {0x6e8a30f52f23f6ed7eb4ea3d8b7752449d6df7e84486a89a286543b153b89f0_Z,
+ 0x7704713a39e8601f7dfa4be91b9c63a47b148153e8eeb2bf54b8c70b67f1a33_Z},
+ {0x353dd999e948cfa9acb61f4582ebeab16e775606b5be03d29c3d2a13b4d5e29_Z,
+ 0x1563a3e89c6b27baad5f2bd1e3bb864c1b59fcaa3cbf6700c3841383084ecc3_Z},
+ {0x35fdf8ab25827a3472f22e6fdef7b8e8d01d6d6c8ca2ec2e5d090981af65daf_Z,
+ 0x6a42399f7f05b4bf5adf84e8331e28215173f372226ca710cd18b2b40f79454_Z},
+ {0x67f02aceb9907cee72e549d558c27d30a3776f7e46b6749b3c159d8b97be20b_Z,
+ 0x8be1ac6d79d57cdaad49aa159fd36b80cba8572d1f5f765cb50985398155d0_Z},
+ {0x5554c980c9e9c1009f66adf554ec73b52eb25ae087afa6adf450514b84eacce_Z,
+ 0x4c4275a2932733d1345d64981b4daab26e65190b613a27cf4e6034cf06c0fb3_Z},
+ {0x1eb0f99a2cb77c9fa69860b68a357b48577126d6c8da4a8f9bf84401cb6f81f_Z,
+ 0x578a0d5f5fdbef207cbec740c5ab162fce0e07261e48a9ca6bf7613f8752604_Z},
+ {0x13d7bdd9e39aba8d49f8fc374b2abede20a1bc29fb671a054f45387f1a0e02d_Z,
+ 0x70c6ad4bdc7277c1d72d9f11fec7df8658762f67fdfe1afcbd8f6e72900ec5a_Z},
+ {0x1d980c2825bd694db28240e0069db1e0f9dfd1c151a5668d9b5f42d4351903b_Z,
+ 0x399db4c47fff7fc6c84708689e72c4edc2dfb29489397652e02f22a2c6babe7_Z},
+ {0x73230486ed47e678e76dec21f0ffe22dbcff88071d90f8914febe3cbf1ae6ae_Z,
+ 0x6c3c1abd07106c220985829a2468088ab1bb4fb0a07b6ffe759f471eda00b8c_Z},
+ {0x5e3652d6926d6643ed7810aefc1b40e136d2c8f50d580bb806b1ac106370f97_Z,
+ 0x7b508bed86cfc1ca3fab9f62b43ab426ae8d421fe2416bef38bdd992f06d946_Z},
+ {0x78b5dcc2feb7a50bf0615c6bc52183d462dd4fbf8f8fc8612894d7505ccf7ac_Z,
+ 0x3281ce9088b825e7a58981bf3b12b6dd6067b298008601fe792f1075a47ab01_Z},
+ {0x4705cad2e383b00582da583006b49332f233c581aa9ce35648810e68eaa9c3b_Z,
+ 0x129085212c231925bafb53736050b6cb6476bf6863c6ac3dec94566ffce3a4a_Z},
+ {0x5089efe433fe56d92c8efd50437eac928f1509ed6915db457b18188d439eaf3_Z,
+ 0x610fb11cfed4a7a6f916853941be547cd2c14542dd6da253c7df5a39f6b4969_Z},
+ {0x71c33ba83d090327fcb4469784591ffe7d41909719b13aea7322369d9f4d5e2_Z,
+ 0x58b3f6d6e61a8ccc48fc0ec43194be0925067b8414ef581c85005c678214bf1_Z},
+ {0x28fe0bb11238665014b97bf4c21e2df94bcd67bd5dbf07cbda6e13e1b1b3913_Z,
+ 0x2b4afa2fedd517b2bd5b30779be6d5d1f0382ee535ae0b73b14130d13291746_Z},
+ {0x6913e3ac7ff9eb0f10c39dc746831fe4baaddf6c57b406e0de32f206c802680_Z,
+ 0x600ca47989675ce328c6bf27813cb426273fc87708258447a721a3b953b9f9_Z},
+ {0x42cdfada4a07ff2256e2b7aa5798a6f711c564816d1befa774f8c0f8e8b3f87_Z,
+ 0x6cb39531e0c7ca77ffad3f7d9c10c26788798baaf1052788008d7465594f1be_Z},
+ {0x4a895cb3ae6df92f81b9537c5ca87fa2fe3eccdce51eca7305d39cf94cc0fd_Z,
+ 0x3076e7f66ef42a3a46488b80cba0c9b5b755ffb3acf50e99a53afe43ddc38df_Z},
+ {0x6ba92487422b7717e63c8d9ffced6b1057f0dd4b5d8581f8289fb79bff8f616_Z,
+ 0x3c31c300acaac2f095416426e587c08fb4e9011df852471a2b4b54c139211f0_Z},
+ {0x3873c32471651ef319bef80fc7f885fda79d45341fb88ccf5ba26f542a57535_Z,
+ 0x55dd960b1f5da0979beb7cfdc3581ff166c52c9dcbbe2ba34b98ade54336e45_Z},
+ {0x645f2e1629ef655ad9c6a502f8a13f01d0bbb8167238c3f2ad1814d34a0e863_Z,
+ 0x7551e3bfef3c322cf0929a92ced1e8e5b13f10d36c656ab5c7480015ee2853c_Z},
+ {0x437684dae956f29f97ab427a0cf480800c5dabb074a068e96eae7e9513b7eca_Z,
+ 0x75b8ff513a7fa3316575c1439f690f06542efac5d8fcccb67cc4b389db29c22_Z},
+ {0xb4294b9cfb659b568f79ef5f9384e837e476e406cab18ed7ad43773e616d6c_Z,
+ 0x343970fccbb7f2678b79a96f9720891cafe6ed4cee2f718add8508bac06df55_Z},
+ {0x41558a0e4603aad44213cbcaeedc9da7710f5a069b563f8e4cb493ef701fb36_Z,
+ 0x75c61fb3a1b07dd94fe1d876e9ee10f7b03cd662e3c89331042a855627d83c_Z},
+ {0x6ac9b8bda77b16bc042e45efcdeadbe70eb4ff0f12c06b02f76374661cb4c11_Z,
+ 0x161d60e17f0e5e70a35437110d5c2f79cfa9722c04e4e92b14f70fdd1c18f6e_Z},
+ {0x43aa2b70ef660de80ad8f4c0d5d965633d8686c030b06b6bb6312c35e2ce2b_Z,
+ 0x517f38a92e44f43eff094ba0b6f1df836ed5b30f23bf4975c2a0f8e19c6d04a_Z},
+ {0x5f82db6d6eab266e07179a877351dfbf7ab180ffb42767a9f0cf59aa4bcde05_Z,
+ 0x67059d8ad9d65a4658381aa549495b1f7f24d167f8b3649656962ba3c56aded_Z},
+ {0x40ede50e36ba94099023deb88750cec92bf290a15e9db556f6ccee6d120e51f_Z,
+ 0x74d312ac4c763ddeab678a5d2b7ab12ac3d0578af6366ce1c5297a881accc3e_Z},
+ {0x38f42922f6d1bf2c88834b6ebe4d87089934f4e0fabf22151ce144b72b5caf0_Z,
+ 0x453d82bf1347c15912f3664c1c3ae8e9266ff67234de5d8e60bab927625dcb_Z},
+ {0xfd73c943851baea1781dc61f68b2d004e01d3bfc89af50cb5126095ba9f17e_Z,
+ 0xfcebfaad59794aaea0e40e0a6d44f0a088374b09ffb4f73c0944bf1d1a0e23_Z},
+ {0x60317a7a06a2df8b3f58136510030a8808fdb9cd0c601e3c068af7a53712452_Z,
+ 0x753e564871eca54c6101c9584a40851c18ae7058e239139cc069fb95bd34574_Z},
+ {0x29a3aae0e6484543a413f9013b571659adff12260e6bf0e778d41f0fd754106_Z,
+ 0x885b9a319dc7b61d64a25bafaa231fa2908e2095dbdcc9bb80aa67cf731b04_Z},
+ {0x8bc9095b18a8f7b6154f2adb31b5b0adc38cf2168cd71d04fdd541d27ea768_Z,
+ 0xdc8b1e50d421768d0be16796fb98a43e0c8cd3aad2131640cd7f6f67131c02_Z},
+ {0x2df1cd3162ad44b18fd0055f8e6f19de30856b911b1ee607550dc8a1343c60c_Z,
+ 0x2417c31469a6572273f77cbbb41602e5028ff783fa12b5da8848ec5e638f21c_Z},
+ {0x5c643bc7516efe14436d1c8a5a25ff9f24641dc30b6a79469b922ed076fe9b0_Z,
+ 0x60b3846dc2d887485b47250903c5bfd5958cfcad24c57b1ff057bc18ebe092d_Z},
+ {0x1f0b53787768fd5876ed1ad1b94b6ecce73ad7c75c5586bc7ba1e7463bc0f09_Z,
+ 0x7850e21d3d691e46a7c1b7e2b5e4af4839cc6bd741ebf5f31311ce577a7384f_Z},
+ {0x7881e33114033f21627646bbd2d69a0d10a54f077e8ccf8ad4c95590b8624e4_Z,
+ 0x6926c34d1fc46982a04d30d466d1cf38de7a2bc1783829bacd42cf27b88a062_Z},
+ {0x7e30b347b103a146d5eb9362f33bbec85c820a7a3344c31cc30d422248b8425_Z,
+ 0x68e087a29ffcd414e29ce5f49b1b511edddcc1093546d2fd6ee1575dc88d941_Z},
+ {0x3c1e0130dbb356d5ea89ee6556e151cf22423c1157fbb19e2e055842754b7fe_Z,
+ 0x6e5ee957943e6eaa0de7dcf400cdd7b131d147e560d1abdfb68acac457787fe_Z},
+ {0x4e9db25d49e5a62a5051bc80532fd6d2ca012aef53b21a733870cf854b78337_Z,
+ 0x45d6d3a33c0afc00f1c7f05ad61c36192f2a2306b44ee8d92e813e99303f87e_Z},
+ {0x5c576f52989f45ab512736502917d4468535542437aca24245e786540034dc4_Z,
+ 0xfeabcc9818622afb386fb06445a38e20ac703ae6c096cfaae8080850d2c24d_Z},
+ {0x66a7fe3667d0490bd3ab9a673d13f26f8ba7fcefa2efceb23ed6ec880df9e3a_Z,
+ 0x1ba405adc3dbc009b17c991388ece076bca24c4fccf5c15d263ca929297a17b_Z},
+ {0x49e337be850275fa6ebcb7ce8c265e1569824521dae3bb2b51233ec625337fe_Z,
+ 0x673d5bdbee6e8cc16a839993143d86d78ab9a93b6b1df3dff15272d58433e83_Z},
+ {0x1baa881896903dccfa0490e4b1721d265e6d135089a321f79c3643d89256745_Z,
+ 0x2e2309caa98f46a21834f3777fae48dcdbfbc7b65c162704f824c4805396d6_Z},
+ {0x515eba84e1130dbe07c7f6e8ec6464efcc26981bf740f7c4507c6cddb261ddb_Z,
+ 0x154955581b77b779b7b0fcbdcb5da3b33a8d23ca28c81051c9d7e7da9dda716_Z},
+ {0xf9f5456acc172c31c7333ef38617ad2279a4748968966948f1815ec258a509_Z,
+ 0x5b54c6c39f90c4941bef128cd8aa62a831b0c4828de5f7c97d0968c16668ced_Z},
+ {0x1d77d149acb3f96e20e6613917d13b71f5cfd75761047b3ffac67241a20e66a_Z,
+ 0x7e573d7b6b93fc261e86df335a8711c5ad678cf3a38485b833d9641457a3cdf_Z},
+ {0x2a0f076fa7bb20c9c201d3ef4632b05065a2cb2c0f77a1dc2c793f433c377a2_Z,
+ 0x79d83b5b8839396eb9a8861d38f37de06443e3a4464f4c3018b3679bdb2a56b_Z},
+ {0x369185209d9ca7406512f7edc18de2edb3940553369b8fcbc63fe7cd1b89f56_Z,
+ 0x469ab82138d8ab813ef58b834f651d4d941d586d3da412ae9bf23f153b18f51_Z},
+ {0x4e8f2d5232ee9352e49586f3850488661f3341f40044888661286ceee512429_Z,
+ 0x619a5fb3c7d289bbf7ccf6142d3726458eddf6208f14d83ba02f59acb451def_Z},
+ {0x2747f91c91ab961494d5f5255c6f72103d785be6e272d73fe0a625229b3568e_Z,
+ 0x2ac74dd2884255b1e3dac3451da96c6b83e284395735a6c642120a5c617b02f_Z},
+ {0x34fee10767e32de09259054204496bb26d0f341853778b17295622ef674971e_Z,
+ 0x1a3587acf23d34eef9dbe6a6f90001080390c20e923d34f052ad9e680d27347_Z},
+ {0x4ad8e1a9e2e5b3e3227eca0d75a74043b7f74d232a792cd8f2357c55c930a2_Z,
+ 0x1ed316966e5e3161367c6e140110fb27fc64061a91abb294b745f12bc540801_Z},
+ {0x57e7cfaf0f333c11a0ff343a0cdda6329608ca5d3c8eb9b34f96beb3747249c_Z,
+ 0x560cc3daeb81fefe7d050522dd12e72300e12991669ed760c59f2f033f5b295_Z},
+ {0x51718e417055b3909d776460afa5af6ca100f4b5f27129f9a9cc6c584cb88c_Z,
+ 0x42ce6a1d312ef5ef72370b6e472561f662d3fe28c07272682401dc14b506ae8_Z},
+ {0x2a3101e3602f98739bfda54eb5de107009925f9e736919ad817bd777ef23e89_Z,
+ 0x4a4fdbecb1ed19b46976519dec4bb0ef6f89ce26bdb6297db1949294c8b48e4_Z},
+ {0x179cf026a61750887ec5c12fb9373f53cf50b783e548c0d30ce8f4f8aeb2657_Z,
+ 0x324f92a1490079175ee248b860306b8abda8d67df16e6b8154171f0b7d2c7e1_Z},
+ {0x717d6a597afae02078d0fbb1f5a7d890912f7ae57b3d97daf7955dd4e2ec9e1_Z,
+ 0x412011fc5eda080ebeb84a90c0b7d80a08d368c60cabed804af3a055c4f9b37_Z},
+ {0x4c16c7b9bd26ad227b063bf78565cceb6edb5057a30d281ed359a32610724e2_Z,
+ 0x4a134c96a461137149e69f9d8834bdbddd9efed68ba1784776c7021e264586c_Z},
+ {0xe96fc44c79139addc82377706e7af3c82baf7a8a043907b98c43439e5d2e30_Z,
+ 0x1032b759c03b49fad3799975ef781336b68da2f3f9a86b10c75d35f4c5b9516_Z},
+ {0x40ab909efdd9c9d929ec933ebb9de36bc388b5dc701a2aeca6e9e302151393a_Z,
+ 0x510cb53bc0aaf2d49db23ae16a0c7653dedeec23dd196a61c00bdf18f32b8fd_Z},
+ {0x1f14f576df12a693e41f1e0df40316e80f267aaf9b73a6fc8b86a625b2c6b4e_Z,
+ 0x5d77054a0d97fca6371c3741de544f7a1b03813b63cedac50b5123a4dfa7c55_Z},
+ {0x2dc023f3b323cbcd0702a1aac431498d65e689e27ca4dba0a12a31ccf49f2d0_Z,
+ 0x5e4d739e15b2c7d031eec12fb308b588da6616cd99c1f7dec783b456684a0d3_Z},
+ {0x3080fc9f579d884e3d31482a7b38d3986012ca383d2eb1bc6ec237f4037a680_Z,
+ 0x6976f6f475ee34406b50e0625deea7e57c1c94ac4abbd62c9e52028be3a1ee2_Z},
+ {0x500c83f78817833f09c4aa54e7559f1782b73343888980600ba92280f1400d_Z,
+ 0x869e9196270ce2f9d7ad3fc51f4ff81245ab66bcfb1f6123a7ed78d8b01da8_Z},
+ {0x70ea074c1b2bbc9c17433068ab041eb9fc888e1e257b0e70e7fc1ef5c4f8992_Z,
+ 0x27b443b728d2ff2765d414a4b63fb23e273b219da7cb37967804d9317e560a6_Z},
+ {0x5481f2e4803b7f2db575a267fe20694ba058b93cd6a7beb5cd33284a72dafbd_Z,
+ 0x247b51d83ae103241196cd7fd9d0312ec3b49a9a6e32fbfdd03a066b6012dd2_Z},
+ {0x6a743026c6075fdacf0da6d8d717c577816fa9a78768c74e4ae0a289b394df1_Z,
+ 0x67ea02eb85f1ceb26d8b90ec1efdc54c1fcb5724d9917280b282beeb6f1692b_Z},
+ {0x9e71414e336b991f37d0cd405fc4acc56e6f8f1e28dce6da5f3f114543e3cd_Z,
+ 0x455ce2dba80c456118ddfc021b8548d12942689a4c49e0620d1ae57163f05db_Z},
+ {0x2ac6f01421c7dc90742bcf279bf4bf3413cfe57136ae224c3cae77c39499494_Z,
+ 0x4a53485a82876c8cf990a685a856e55d7b44eb0c9e663c9c7ba6a58b3f0d164_Z},
+ {0x50fe102ffb5664c5da29704a2b23286150182da218db47eefec241fd6b5359a_Z,
+ 0x82d2461ea8e50308c307be5050614f4ceede8cd5be93b9df8a12c25b46a919_Z},
+ {0x2516219050db1e69f5ea2c656ab38b07cb19df02b7313ecdd056ec4c9f4440_Z,
+ 0x19ec20c066f9341555b616f4f3b2462afd57d5b78ebb4864d341545b819907d_Z},
+ {0xcefeb541bf9a77bc74fca9200354e3e8afe3df7fa483984dd539a7f9d6b040_Z,
+ 0xada3483ddcb274d5c12c5d629dff3c8d61a00be2a478fd6d3e09eae061ffc5_Z},
+ {0x4937a380a265c8863f7efebc41a0b3d512861969efb537cf376bb92b383f0e1_Z,
+ 0x12395864741ab7d8cddc37b3e9b28959110f8a90fe6f5088091528c28f0d722_Z},
+ {0x680f583ed47615783be15c08760edcaa27f1b5b605eaef85a48661f98cba480_Z,
+ 0xb3aa31c239a881b465eef30a48c69d57a119b73c67a6921b4cc3d1a90da941_Z},
+ {0x25a56f1c0062bf748c6472fa11ccded695f2b7542562331ad36c4bc1cc755dd_Z,
+ 0x6e73d0bb790978887d8381596a5f7c614094ed5aa8c4f196d44f32e12f2d9be_Z},
+ {0x3a1633fc2ca3a54f96ed0494e9859c4b0b81707b813fe02d6f80a6a2b2ed6af_Z,
+ 0xaa91ff4b0d449b7b5d73741bc8da8611def01cb042d6147274a6a921b66ece_Z},
+ {0x13e2d5b2e6137be89d60b6220bb1f1b4a1f1b2f2768bbdf6ef2137f6b739cf8_Z,
+ 0x3c87c29f436d2cc941e7f260be058c8fa0ce134446082a80076609d95bc8514_Z},
+ {0x27e3dddf634a4efd734b78e74395714143a3fc10c6390b3b89e4df15a573046_Z,
+ 0xab8ef979cd7ca06d6fa4e670f855109dd70b628150736c627637fcf1689447_Z},
+ {0x49928876747da068fc49ec448101c0f65a3346c42555e81d4e11d1d057cd37b_Z,
+ 0x37eb03ac2b0d0391da35aa19727b6f7ea045ff216a97b7a4bdb84998e0e4135_Z},
+ {0xa4e729b05c165101187cc49bf3a27ce854833c11eb8545a3f2f7f71df646c_Z,
+ 0x442608af3c2dd023ef2773e79ee339be89e36a8ae94c60a9f587ce37bfabfb0_Z},
+ {0x5c20a597f2b94ee836aa31de6e21bc642747ee17b6c8c77615f9c29fce19f53_Z,
+ 0x64a6fe6580cf7373d1f39a5846fc4f92beb636689f86533bee2a62caabfc465_Z},
+ {0x792c81a8c4eeac3fe4630bf2daa6a5e3045130fb1e392ec52489296adb614fa_Z,
+ 0x3efd54cac4852e39d5a970407ecdaea6d6e5b5bcc21ddcc6a5349733d496cd6_Z},
+ {0x59748898c332e2208424b81e916f15c18136cc027cbec672197fcaf299debe3_Z,
+ 0xd8c78205bc6868bd2822ccfc80a8f24301baa635bb34fcf1b4b635ad63639c_Z},
+ {0x7a9963851201f75a329351770c2ba63891d075538336574fdaf3ccb4fc7fcef_Z,
+ 0x65c9b2560a9cfcc16784a2e12dd1fac05333122f660defadef3531796942aef_Z},
+ {0x56705874ce12c9cc1d57afc81f15015f3874322f60444f9de57a65fe12458da_Z,
+ 0x1a82d5cf99ef79261a4e3d906073eb4e38a071ed5c67e8cbb64849e7eab5d95_Z},
+ {0x62f84bf5674eb90a848f0ef991f7ac988a3bbc2e217e7b3d6aefd9fd6a9c8f0_Z,
+ 0xde054544753eeee1aaa29b381b24cac20b5856853d23d35df2091ce1dc610e_Z},
+ {0x75be937986547604c1030e45a493fb975e16d43b467586ad72a3fcafb306246_Z,
+ 0x207163c590b95f3b0a6563bc6c07fc658c1b95319e70647a1bf5d3ec5410494_Z},
+ {0x735dbe43a0c9df1c0b85aceca32ed9a1bf6e3c56ea1731cd8e04b54b6777dbd_Z,
+ 0x4d42712ca8cf1a95ada153bb272830bca5048268f95b3c959a25f5a28ac5783_Z},
+ {0x6363de5a4832ee73691d847c0eaf3204f0110a8f67870406f74480468ea6d78_Z,
+ 0x3f6485799da1943c68602caca14c5bf4caff3958eb1c0e2342ab28e89aa85fc_Z},
+ {0x277c51c5f931f01c0893e8d3ae5bb8b5c91feeeab3dc4fa8bfdec413228ddd7_Z,
+ 0x4230784805147c15e6b01297342b741365ceebb9bd3da1c45bb3f68958a9438_Z},
+ {0x331043b70bacaf8b38ecadc7e16a4417280ee97643c18194d401b12544267fd_Z,
+ 0x53f15698992fdaefccf2f44d5c5393b62e1f511cc51f62c776c27cf89d34089_Z},
+ {0x60ea9a8a3246c8f369ba91d224517759f54549fb596aecb81dd47e27a6ddd0_Z,
+ 0x69ab37615d78e8f00ac111dfe4f72fc02a55c464c72d4fe7e3e26d5b15b2f7a_Z},
+ {0x4ffb11a7884dd3c292109d84afb836c377a98c0a74da6bbdf6b42bb4eab237b_Z,
+ 0x75e7280d170d7830b9207af8dcca064691fb7a99c42a8ed6743624d1794a9b8_Z},
+ {0x3f317f7931e8ae4617d1203ba1b6989fc1d938acfaddd1d09c878431e4249d6_Z,
+ 0x3b2c6b25367c29a30c40b7e343205337d10bf2e003b8c35e7ed721b2580f492_Z},
+ {0x5a06c81351ed13de5c28ff1254efd779d2bd617d1387d7b22f7940e822942a8_Z,
+ 0x58ea74de916e355ef7d1ff94c14a70d921245ced2c9b4e8132375d8146bc2fc_Z},
+ {0x57d2114ec52af5d112ca499de7564baa97ba04563655c46a838ea9ff4c73002_Z,
+ 0x20b994314ce1b34309e3034d51ae32a615ffa13948a0e918a329de7c1dee9c8_Z},
+ {0x200f3266af4e0c2a0b3bec3a88ad5c6f138955d20cd6bcb906c337c95eb4415_Z,
+ 0x176edb1ece81ecf7ef88ccdfdf8f964e64c961660d868d2c4ada454205dad50_Z},
+ {0x6a85478cf0ec78b3531dafd9b6c7c24644cc95908192ca4d8982f4c08e8aef4_Z,
+ 0x4725edc324c9cbb414345b440a078bbb7962c2dab05ec91c766fee37d4857c0_Z},
+ {0x7bf511a54b0e0245f20a5824bb6e020c60ce0c7db030d5b955f5b337620f219_Z,
+ 0x60e54196e1bd4d80a5ff5533f81d89d1221c0061f9d92916afea61367531710_Z},
+ {0x377e8b5155730171b79339476f3e23291613a6d4cd24449da37f2dcb18f0061_Z,
+ 0x6c57475bdd1188891a56d0977185be9974bbb7435ee0c1f514f78080536810f_Z},
+ {0x1e6cd71a6ac437a2e610484f03c252581fbcc0382976c4f8d0b1945f9706ba7_Z,
+ 0x62b49dbb88390c644bea307d87fbfa08ad6d0a81bab414a1de1c47e9d2febb3_Z},
+ {0x1f560c6ab420a1a0d2be2d1ea0de11964f6cdd97020a88143ec1e7bdb4c60d4_Z,
+ 0x5659e102331b3004a5f79011419261b96a0013e0ca88dad953e41a49152b73c_Z},
+ {0x3fc7f5d294fa7ec108ba4b88eab8dd875307f2e59a1915c64e5319467494bb8_Z,
+ 0x3c80befeea7821ac786292f123c616dc8fc321d61751911babaf404a13804b4_Z},
+ {0x444fc960b6685225ad8fe92d306aa50622fd3be5414281f16ef62ce799c7237_Z,
+ 0xa14e5d2cf6089b7c38731e91dc8fb74c3effff69771934b1aecf37817d7921_Z},
+ {0x4037b6fe202f20beaa8166ad91d2645e50858982d9bf5dca5293825532f4d85_Z,
+ 0x45fd440141e4db02d44981ccb9812e255c564cd7e8d2b1955e736fe06eb6f13_Z},
+ {0x762168996c3e77b068348dff3a79866a5363a768e58ac534f3f1679343705e5_Z,
+ 0x5c10dcfbc98b71d73e6ae7c93c90eb4c3cdbf70c1894bc8bdf135fd4d30ae65_Z},
+ {0x76d7cd37e8ea31d72a62285defcb8363a39ff88a3d6867c3e8b8be5feeb154c_Z,
+ 0x20968e6145313a493d459bb568db6d2af09dd1430afc9c4da5f3fcaaaa08223_Z},
+ {0x14e13a565d5bffad665aa6e1d93092a486fac7a707f39dbb3ac245355b0e34_Z,
+ 0x11546bc481a890b7d88273e5722a00ec976ccb4c51c0090342cf0db1979f3b7_Z},
+ {0x75f4250929e028054217e764dc22e8eaf9beb8a0027e1f4beaae0a8b4da7caa_Z,
+ 0x54e9097a3339ab63665e18e15c7f54763c5076d078c3197d7ace9994b286198_Z},
+ {0x2e00405441af4832b78d0aa70ea5bb0050ccfcfc819a7a2105f4c63a3bdaaa0_Z,
+ 0x5a77448f7388085ea6978f40cf5ba8f18d0b0f2be350f4ec0b9d9a1bd6ea500_Z},
+ {0x5a420c4d98617112300faa59c23196531dbe77a5083ee6b413da88b103aedd4_Z,
+ 0x2f5e1e16645838a4198bd8bb12f3461734841e6e591c3be6e849a2bbbe6e7ff_Z},
+ {0x4a2d8f8615824de98efcf0b77ea95315eb0bb163a713a19b816a81527dd7edc_Z,
+ 0x1c337c2ffa49be75e7110d80dfb37866649aaa4f482a9e5d2af71beffa95ce0_Z},
+ {0x6c00424c9d851c2e2a922fac4ad52961457dadab9835254473a6a37eeb24ece_Z,
+ 0x344c85b81b709fe6d701b269004a7b564f673ed8504a605652f30e5a0c9d47a_Z},
+ {0x3bff7590d4154411fb768b2a00fb2b5d17f07338c5fbcff9ad63d4247ba16d_Z,
+ 0x16afe52d9340315d5a2fe2d9f0493fee0921bf37bebf11e4e03bdcdd902157c_Z},
+ {0xd0fe726a7c5057c891e955a42df5a33a1b5479a270ea075c8cde350c42b0b2_Z,
+ 0x239cd33847e8eef3fba2e9a0eadc7f8c5f6a2c6a24aab87152231e24479da5d_Z},
+ {0x7d7d66bbbba8cb54a0ecfc732a6df87622c82cfdf9a244263fe3d554f8a0b51_Z,
+ 0x592167da1bbe7d08362b9ef71bd5fa69f11d27434442db15ab507929103fdd2_Z},
+ {0x7bcee06abaa11251a294b041fc21102b1b60bb2dde33a164e9c246dec7de24c_Z,
+ 0x16003366f985bef69b3b7ef9112f0dd7256f1501f8aa3e525d0f72de5885a32_Z},
+ {0x73b43dc6999f3c2c456934226cb7fae52e96c920e193c3c4aebabf38c31149a_Z,
+ 0x2ee9846999a7809f6a26ba7c0e3ceece2a02e02e03be122fec782a424223e4f_Z},
+ {0x5080a7e441e7bd443c39cca3977e3ee001c3e81274c6ab02ab5d36cdeb896a6_Z,
+ 0x596f2d72a412c6c6d42ae6812d591c89e490dd4e9e7a96315f5c04c3b65c65_Z},
+ {0x48c801de150490894a982f8a86f71af65f7c6b6e6cbbb2aef7df35d13c66de4_Z,
+ 0x6d080bffe4f2214156a8acc6301d9df24dab03e692157ee170e7c6171b600d6_Z},
+ {0x2ccd344949a471541296e01e27a2b1edae8d3b8ace42a6e4e2a49ef62ffb9b4_Z,
+ 0x30e28f3a7b4c218e473fed409187b45f884b6f35cef64188bfb6c22ffac852a_Z},
+ {0x6f56805519932fdce386a179a3d011ee48049d58731098235d3b950ef3ad786_Z,
+ 0x35a1a2360c3fef160d3d2669e5fa01c86714e6a1ce94d9b8042d186b10d48a6_Z},
+ {0x3ac5cc607a7ee069c47eda5d753685455b6663fc35787c26933ccef7d3328bb_Z,
+ 0x16def6807a4794bcf88e3426d4c5f17a5d2e171493e64541a0a4825c0257d41_Z},
+ {0x2ba89dfa5b15fe506a0afe4582ba05bfa6c9347e2e95c3089daf79e5518795_Z,
+ 0x633c5714af166068799767833a573a498a4bef12137d14cde354eb5701b6de3_Z},
+ {0x421442d61cbceeae384d3cc37d6960d41388e03a620df5f1cb14c4f241b5600_Z,
+ 0x2afd64270905442c3bcd12f362f93420ddce90c7e7c8381c2e74beb87a435e9_Z},
+ {0x5ce4e2101e5ab063bb01aebed2cca69a58417a139b569377c963f7dddad5cd0_Z,
+ 0x364ff9b91882f05516eb4fee2f447447aa31af32dedb47b0b8c4c21d27cfbb8_Z},
+ {0x147b22050e955f58b85fa97f2d7e256aed0b5cc536f9628f1060acc48292e14_Z,
+ 0x60e64217e98a5bf2b85a824796b9e45ec204a1e83a3e057b47238d43bc80e0f_Z},
+ {0x5a017a8af829b0f1d239e5cf1b5a55ca869c1ab4b3cf8a2f78ae4e560b97415_Z,
+ 0x79e870c71f05b1dc05ef9f479d3a8c8afcc18574c453a9fc9a05b83894b6661_Z},
+ {0x38df2817db7c7d755fb1ad252c858d41ad39f0baadd850d8f0aa009c85a482d_Z,
+ 0x6841085a5f5b3511fab46413461480cd08ec0fce43619994c795c23b2a64bf8_Z},
+ {0x35ab3a2b0db6b2e036da4f2bafec962ee8f967da06d79b11db052d7403ae03c_Z,
+ 0x78bedea30ea8a832a3376e7502e319243f5df7e628b50a55bf3f17e7d03b60d_Z},
+ {0x1b357fa2259b7cbcc36dfdaf49340268d53ea4e0114018dd6c5dace59b5e852_Z,
+ 0x39a5b269a391cf6b68110df5f9e8f1e0710e36064391bdf429ba82333124cbd_Z},
+ {0x4f7148152567c9af0149719bcd7bf7ff7fedb187e0526e93ed3cef310cd32a7_Z,
+ 0x26a1a51aa8e013de698a0f644530dca83d3a4fc2daefd6e6c71c5e6c074ea64_Z},
+ {0x159a8c98e7616b8806a273a8e149e08f297ba52ec397a8e66329e5dfd004d9f_Z,
+ 0x2bce0a59ddbeb0576ed6db48e65c72aa760cd04840edc07d9cbaceed05d47f1_Z},
+ {0x62bba8eb18225b139e96da547e0a4ff135a6b180d2ceff985165ad4c8aa3720_Z,
+ 0x4c20a20866faf537239f82474cd0ea7dd91aed5e0086355acf285109ad54ae4_Z},
+ {0x186f0fe9cf69c8e320d168f98c49bde2074ca75d178c91e6b6e5216374a0bad_Z,
+ 0xdb0224b7764586b26e8d08e000e18aee5f0d4f1c513c57ee6ef927d37c53fe_Z},
+ {0xef766c060dca040a1eb958f8cbf0a183972dc26ef35a1d42ee7745609a5a97_Z,
+ 0x56ba478b6f35ef829dacc2e1bfd4f85a279d342c302563a5674856a57b79a11_Z},
+ {0x40027058e2663824cd5d692c1b6caa1208c70393f072a6b0df34ef90ff9cc7d_Z,
+ 0x335e52580a582d146938ce928f2ba8e3ba9881bbf4d365289b0c75175f4f0b8_Z},
+ {0x50769d9e1b2e7f2a1e7fc679d3cc0831914fae2e0f00ed54b2850b3cfe737d7_Z,
+ 0x1da30aec069c80bf1a33280d265d28664bc9ef996ce4cf7525217c6bb43083b_Z},
+ {0x1cf8fa795bcbe70f8e176dc4e0cbb82f108905a32280cc733a10c35df4c4c51_Z,
+ 0x3bccecf7ee6d3faf0e50672ca19100b66bed3e43144b7356bf6fb752b8e400a_Z},
+ {0x3a1cf7b86267a04c9d018f1bd11976218b8a8097b5ee87a2a20297be723c7f9_Z,
+ 0x3675846c670987be8b0463f4b745954fe7f38709d7318fd7c9300b350b857c6_Z},
+ {0x347ee661c8048c097a546f7a02ab089ad325960ebe3e4dd85143fdbe7ec6fdc_Z,
+ 0x48602f8a71e65d83a2ef74e6b77e57e517a8baa38516ac58eeab5e6f78d411d_Z},
+ {0x701d24b02d5022d842ea70f164c208fc687706f6c2ec8eea58b2241232953ff_Z,
+ 0x2a3642e63ae5cfdc5db90219332e058cb7437815fe7e5368cbcef039cc955ce_Z},
+ {0x24c1279e991ed23a43466d27cb77ea8c69117ae41a310e2d938b647213287c9_Z,
+ 0x5c38e3df29e5c861334404ee136709bbc08b192f2fa65699afa1f7f91c61d06_Z},
+ {0xa9a3513a16a0ec1962072ad81fff8bc3c09e6272b07c2815725592e9dea7f8_Z,
+ 0x7501eec6647e29f9210dca9862df6ac9a5ceeb49006cf500175cfb81334de02_Z},
+ {0xf92f5d803e091914e1633e9ddbe2c2287a6559342e2f1eb1c19ab33fa1a695_Z,
+ 0x18d236ef77bc4a7e758c9a2084506f7f9f52284805827f411112f34c1518b09_Z},
+ {0x12a1d4f4461db0fcac6e15129c514627f3a3f2dac2c4a3b82899c5f02becf72_Z,
+ 0x2987a7633228ccfb0675890d0f87f65657bb330807597b32958df87d0ce39ce_Z},
+ {0x2016475846c25a7e90a12a1a8df7ed4966f4f4eca3d824cb5e6c781b0530e6_Z,
+ 0x601fdb808d4739bd398315b0d71ecac2569e84667242202a30d97b17ca6f489_Z},
+ {0x4254e6cd8e16ea49b9dc718ae9c2ad2de0205c6418e0b6214e55bb1a75f306_Z,
+ 0x799b2f89383f0c99ac5ac061daaa400871833617852a3369d72a67c6bff860c_Z},
+ {0x76acede608b8a2dcea601766dd4bd9b7236703c2b144a4940b05e937fd536d5_Z,
+ 0x3ed889fb9e591f92f8db79abe60598fadd30bb249bdb2c0c5789c69136cc81f_Z},
+ {0x1d176bef7f2a5d61c3a96d2b7115800054679b832c7f28855e56a594d7cfba6_Z,
+ 0x1d0bcd12c94b5059c5a55bdce5c726563e21b2be58168974deaf4af1a7238bf_Z},
+ {0x2042f7df7e0a74d850b9f6aabe748df6dcb61dc747d353f4bf1c5e8e42f86dc_Z,
+ 0x42069b11d90283d36bc9f0266bb6e436bb6377104751f74e10ec8d89249a8ff_Z},
+ {0x2df366d0ca086a2d963e72f3b4cf01de571e19175fa46f70543e37b18751a59_Z,
+ 0x7565346573c0683e47796cbbc3482333abda0f3e25b0b26ddfa588f85225ef9_Z},
+ {0x58121cf9b2562db3e56d78cdfee787f75e09470406eee04ee1c987a84855f74_Z,
+ 0x7aff5ab2427a9cb50cef8d4fd90826d7b1b85a2634d45dc8c79abacc847adf8_Z},
+ {0x349295e173993145173b5082fe5cb5786051fd00db330695b41966e3190ccad_Z,
+ 0x6b0257ca8e49c67ff1a6208ef609d36aa6846f3637747d32fc8f31b9d288261_Z},
+ {0x4dcbab28ad63e729be5462418e8664954a75b67e1c57dd374721d90b72943e5_Z,
+ 0x14aafbbfd8ea06c270ab90eb57a57c7247ee9350f68412dbeb85797451a111b_Z},
+ {0x1a5f2db1e215fb967f56bedc77daaeb192136f11042bdb812ca44423a5b77f7_Z,
+ 0x2cf8b2742451ab857f642dd36c6c7ae02ff5f8544a683501598ce0f3bb6cb27_Z},
+ {0x35f6e6e50c4ba71bc912f6b6da6f71ae00b49ba69a0f4476f7b4bf62e0dd71a_Z,
+ 0x36d494277dd4b4d74b325ff65510df72d64c333f5ef0a83805b9ce49a67b2eb_Z},
+ {0x48d962f990930bd54902ca5ec4e80f263b52a7bd95d70a9142c9534fdb52f78_Z,
+ 0x6af3fb454883130df9f894527a738df0f188c353302389b317709796ccfe274_Z},
+ {0x398a3272127a5bbfad04ac9e9f0c82869c22a2ac486f76f20dad37601552c89_Z,
+ 0x1987f05fe86d12fe6c57dddd4fb31093fb3fae8a9df1c876960eac2a157674f_Z},
+ {0x62c1c29ef8925e3f6f413d40a8e9a4b538f099520673f462dc30119c7554d04_Z,
+ 0x756d90321c37d49b2f08182ac351eb9ad8d0220167dec9f901f6eaaba801edc_Z},
+ {0x3823eac94d69cc1d66b502b9ef270585c46285773a404124c6126c8dc515fe6_Z,
+ 0x6a1e01483b55d86d3acef79a5977c5c47586758698e28c4a76066cc23c9e302_Z},
+ {0xe5cfb8d64ed97c741399d871ec03e6f3d61a4b3028506d36981c5e43ce1d7a_Z,
+ 0x5a6c21c75aca6bef70da755d4fe14b0e094d51d24716427ac2e2831ce4cfd2e_Z},
+ {0xbcc984d2448acf454c19e80aa7e11ebba2b6676831b49c6b622d590ae2005a_Z,
+ 0x4f730fffbceafeba9576ef2b5da0203a058781dcc0611b37cd6f598a1918b5b_Z},
+ {0x7425fc03e2322bd1a85bedf059eabb73ef1ab8980c071acdde4327f9104fb19_Z,
+ 0x6d37d62b1caf99ebcb5ff7d7b4550edaf6f66b314275783e91458e568f7385e_Z},
+ {0xbe03b736e4ad5a955b009e5806e4914910a96c52ac210b0603f0eee2821b9d_Z,
+ 0x1706994a79713b41bff06c3cfc555b50e232f4eed4e247e1fcf59404c674c86_Z},
+ {0x779a94655d788e4ce933ca5695e49d98f6d9a049b29b1f7ca926b7f08ce6931_Z,
+ 0x1c77a3559d02027a390a03f7147507819b109f3f18c69e9d4924693b96e6984_Z},
+ {0x42cb005e2184594ebb065002e9949dc13ee0bb6177cd6fc6403c9ebe90f54ad_Z,
+ 0x6f65f30bc85222993fb679f53031065c078a76630758007f19be35ef3212558_Z},
+ {0x1876c2ecd5c04c4b29e67c41d51ade19291a1775758e2029776c744868aa725_Z,
+ 0x3bd48ed05be3b33d43bd692ecee9033b213d36df5473180422ab2fffa0578af_Z},
+ {0x541da15409bcc67208c9c276b76ac862d25319982816c0ffda209ed97c48a45_Z,
+ 0x342df7468f749b8a32567aeed07cfd240a3c8d043856f85591b8aaecb78eef5_Z},
+ {0x491819c6c4c53f44f8863db3fd75b04ee897cd4955a16bc81c0c49f68d8ee9e_Z,
+ 0x7c3f86f79dbef3ad22c5ce5a39b4ee40997e6733d96492b6ca145c9d5a737da_Z},
+ {0x20624d393ac86f79e72b4416a8bb2581cb7d553ea3873140ff72134354c6fb4_Z,
+ 0x65149b1ca41b5eb78f3af456293548233940b12c1208ca2fce406a63893f1ce_Z},
+ {0x7ed55abc1380f3b690162beefe8561b7418a1bda08b463c6b4ed55488050f0b_Z,
+ 0x4e7a36b0264aa4b436741539acf618dfe7f99f70893060a6f2bfc25ed77f9f5_Z},
+ {0x7da5d4f5154dfa51e72eaa41a8fc3cdc1eee525b93b7a4359071621a72e1979_Z,
+ 0x4442f5778b290ba0a23497bccfe18f4a42322ba05f4c23ab19da40b5fa1193_Z},
+ {0x36f89f3fcf545e54af1f08cb2000dbf1e98f524084945d1f078832b588541ec_Z,
+ 0x5af9ee0b3fd6ecedcbd753692448229c28cb97de6829f99f5a14b8449b32513_Z},
+ {0xd180132945f796820cd975a707b38ed7d01bd846855dd4b1acef48f1f0bbdc_Z,
+ 0x12a72003fb075f6ab134168aa467066cdce8ef92c996757c959c56609b28e46_Z},
+ {0x482276846cd8ff9060dc5b085493f612c3855949102539a239a4d7f509256d4_Z,
+ 0x98670c286bd93349c04f13ef5915748f6f6295273b23f6147f1a745c7af8f3_Z},
+ {0x77f923e5644fa39bd7be79cc3b6b8a1150c8bd14f8db07fa4c79ab7d362fe3a_Z,
+ 0x720a694b788cd0d9a6f2680eba5f450d242af19797320b8c6279fc5b69a4b85_Z},
+ {0x232efd4818f50a538e938473722bcaf379111fe853625a7e52e44090f5927d1_Z,
+ 0x73dce67bae268037507f4ed844b07b7c0be075650a0e4b31750fb7fa5ab6673_Z},
+ {0x51689112de7fe180e0880d5e6ebfbc243518b739b885484ad033895c5363063_Z,
+ 0x19c4098654bb316cccc3208985f3d5e567a0c0c1bb5e2d504beee3e2fc9faf6_Z},
+ {0x5ca31e901478c60cf4bc836598068e107a74b32f6db923742a3322b031c5a1d_Z,
+ 0x46fc9f1287e2304536d466ef830f0f2375907b0e00204bfe17d676793e91aae_Z},
+ {0x162a13e8319007f7b69aa99f36be6988375ba3676ab49c5e132fe5208666211_Z,
+ 0x1440f2d022c68736e327e762daca9a6b2ba9c684e5b997b56a6cc8d1effb9ae_Z},
+ {0x38c47ae4c21bae11a3d83d7b29c174e1491f15914bb9fbc644718f8ef2b5209_Z,
+ 0x47475f462b6f4f20e9ee9d5ec086dcd4c80d26b8d265b66c1bc637e974f1ec0_Z},
+ {0x63f536a51884d99b986637c4bbaf6a0c17303953eeb2cc4e4008b22f43bdc2e_Z,
+ 0x1ac4127e35c3cd2c3d6e4100a352e6e02832e224df9219516361e9fbf06fc7b_Z},
+ {0x34b9f20d0ee806f8020473285b2704c7381d4cbed6145593bf2fa3c7e69ba16_Z,
+ 0x1fdb8f07562a2e5839fe156c40777f47ca6113377a311e7add9f148f952a2e6_Z},
+ {0x444bb42fbfc8273fb814bad9a80e03bd4d6b98258083c4bc17f1f0dae282964_Z,
+ 0x559fad63173159790b5f96c8991b9f596c756cea51825b452283579feadea4a_Z},
+ {0x2d4562ac577393eccf5a379eb70f975436a6e0f50b05b92a2bd2bf549a5cb69_Z,
+ 0x626d6e2b7312b47a4a1e95728b84fe641220c54f0930ff61b3411795b652018_Z},
+ {0x2a10f944742ea5c61f24e03b650b854757e2484754cf83aa66bb6f0ef6055c3_Z,
+ 0x4b5caaabfc838464fdea5cffdc8450398e7edb21f053a9c72cc6e0b45b3c867_Z},
+ {0x33177e04a73480472793505560b593a772b3d22b81b3609d405f685a9afc2d4_Z,
+ 0x1dc382097614e03e2b55aed5989910c528f18bf6b0ab8288e2c01c6b5222fc_Z},
+ {0x528d5aeb02d7fd69c9871cc31d75244fe9cbd103ad7afe490f7aff089ac6555_Z,
+ 0x375b90398c081c923aa132994fc1eedb206a207beabe523ab2cce1dd7c60083_Z},
+ {0x7e807d0ee79f3a0af94b62551827587b7595bafa990884da54675e246f4fe7f_Z,
+ 0x451184fa0754e7ccae1329719bfd8ba2b5cfe13bec2a26c81e972da41fff646_Z},
+ {0x397c0aaa8a7e3f4e13f85ee36543dd873c08a0fe9577343a872a1aca2b445d1_Z,
+ 0x643eaa4dccd4e4d8ea861c1eeb66d4c998752e5c7f107cca006060991010c2f_Z},
+ {0x430f5f9af135bb66cc330088037ff29690d6ee97ea1331a5227783aaa42e880_Z,
+ 0x647f1dfb86fa9af7b6235b0db991d19d5866407db9868abb8863cc5f3a85203_Z},
+ {0x6a52081b70e1477c2078399c473c0073aff03fd2974262d87da6c74e809a98e_Z,
+ 0x2f90ec03db8694e0b3038f0b96e32df6b265f8f0e426572b26ff9e9d300288c_Z},
+ {0x7e94a46e12bea7d5ca3a4dbccbe69b2c0c4cb3af07a814bbb52a46370ab8312_Z,
+ 0x56ddff1d93bf5cfe557fc6351d0d0d3f52a8404aa46b6c27e5bca698e033790_Z},
+ {0x69e703d6a694784466c996c290a4b323b69ce5d84ce27d4509d4c627024904c_Z,
+ 0x7f987c78c83280ed6a7b7365d5ed8668afd3f36950fac41cc1e807a5d29fd40_Z},
+ {0x2b9c6cc778e98fc04d1ee21bcc8a732aedde2870cbd674bd50c4c3a1ab5ba2e_Z,
+ 0x6f1a2f2c705914c8b0f61baaef1acac04941084da5fc6eb6b2d1a066bf218a5_Z},
+ {0x550bd5cb197e0c90f383756968a0399a48557cbf248ccfb5b66983326c463d9_Z,
+ 0xedd8ca110237e520274fa3bc3e86d9b325805120af8e48e2225ca99288d5a_Z},
+ {0xeb6131dcd4a57fd981ecde8d1b29cb19770e6de447967265540dc269b3519a_Z,
+ 0x2e15b563260ba4d8522c0b5200c5ebfc24b9d0cabad0f659949f105c8ab4331_Z},
+ {0x154af063d5646e3b324c70059be76cf52152329cf971ab27bfdf4d6858bdc73_Z,
+ 0x472e84e7c14a3d6eb7371f0d3f4d370c1803d5c60c8845e8c8a1e18e7b26c86_Z},
+ {0x2503fc9f91fb17e1626f5a8b806e55119210cc13abf76a566ef7741a948899f_Z,
+ 0x117c2b932de38b7473c38397f3869cf11f915da065bba547e7929bda73a0b00_Z},
+ {0x704444a259a3a6a08cca78a53893f5764aa0ae7da9d4a330b625e5045c4a6ae_Z,
+ 0x77c7ee65438d0e23bd2ed761cded7108f37e2f923c99e84221edb4179a2782d_Z},
+ {0x1f8965279e84378bcacfe78a3bf8c3ad47d20118c6c08bf36bde36078bc8d1e_Z,
+ 0xaeed7b7a773cabb4288168f5ce9b5bf3054d0980a11c216a7c9ff130ca6aea_Z},
+ {0x6c95617f6cc3ca96850c0ba2a1c4cde0b34f9cec5ae355ba0384d52b2810da9_Z,
+ 0x13f5984a200ab872f3013c5222640e2aa4ce12f0b99f2fb57406fa860b6621d_Z},
+ {0x5bc1f49337b7ae9ef7b1067abba63f36aa3afcc2b718204a7b511ec3984a96d_Z,
+ 0x2e040a0b79ed82af190b0ad9455c40e10e0882c8fd46ceb9fd96707c7f28212_Z},
+ {0x1e365499420c6d9eaebc0469d0a27d1bcd23fb96512f382c37650431d19f1a8_Z,
+ 0x2a428fce3a585e06168c24bd14f994f6648a07042646819a482d32c1b43b0de_Z},
+ {0x7b21d486b6d49a5b5a891e9522b08bac0cebd78dbb9b102d9cf3dd5ab2f9058_Z,
+ 0x23e478948d098303c865d00d7ba42bc58a9285fd24b7519bcfbc5d0c1c209a8_Z},
+ {0x4287b5164f53147c0d83fc49de129b17e9543ffb9c2926b79c16118a2f6d3fc_Z,
+ 0x91e17d6a5b2adba9a172fb90fc2002eb75d93f589123cbbf5a0a3d060ab4b5_Z},
+ {0x71d7c7b174a9ccbab5d191170bd16bbfa3728be318d20a3d0ebbcee7b3fa1c7_Z,
+ 0x1af36cf8fb35e3a6b27bcab4338fc087cf869b661fa111cc3ba83a2f2bec253_Z},
+ {0x341aac79bb5d020e18b9bd79f411be7f53b99d3c37cb88d0ef77b0d884d9bb6_Z,
+ 0x489cf69c1f5fb8e4c9808ac8a2c5b20dcb90eabef74069585e66533f1e7b21b_Z},
+ {0x2d6b4ea44f14119ee276f6810a315ab152503a14d5742cf299e2421c6d87ce2_Z,
+ 0x62a7155036fac616bf0ec17622cf2160be27f94fc04b6a4d692b6def748f1ed_Z},
+ {0x5f623d2d57ac36a4f386468f07f67e82af1ca9a93b093f553a24971612c7ceb_Z,
+ 0x44309b5973eabdc829d8105b317164d8bbb64d5a567c49fa9aab34f721ee215_Z},
+ {0x3ac65082cac12f934d07001e3fe633d43b332f1f14eeecece807679c7e539b_Z,
+ 0x76f45e08a50db6a706198ab174658c86bdee5a0ce55d4087d542e6c8bcf15ad_Z},
+ {0x26e1c9c2abc417b9c095aaccf553892731a469edac08715c80bc03ddd230f9_Z,
+ 0x4e62b15eba5b911b9ba6b5977addc0b1df71fbe3251d478f069230fc34b5018_Z},
+ {0x3e2c8b5fe7574adfaa9f252f847faf9137eebddda126d62964b707aae911a8f_Z,
+ 0x606490867c30ef7ed9c3c85c50a648803f3a36a512ad89c082489da8dc9f5d1_Z},
+ {0x54f62dee981af130c2d2342ea48ae1c2176d762091e83359853b88c4b2baf4e_Z,
+ 0x34ab726c86b8bdd2605f1a7a37a9deb90a4cecd8da6d73c7625579530a6d913_Z},
+ {0x6899430a29798bde6b02dc8a93a365c6248003510670ccda7bfa7708e4f5ffb_Z,
+ 0x21919db4d4ec664a59b2f77659b8fd307f390464f509e13b751e12fa62da297_Z},
+ {0x9f33e056e4bdee7156c2ae18c936210d6b7ef3c452d17b5424c132b9a7f7c9_Z,
+ 0x2aa8af6bb9a63adea95d46275f9693a4a697a45d6d1540aa3f3322cc600c4ca_Z},
+ {0x1263b93a9d44aa4fd1d384dca15b31182a4144ae3586fa3547c2f88c9d2cb98_Z,
+ 0x2849e96fea308b36c677afd999c1b5b63c4d407dd7b7dddc45b5bff824fbd5a_Z},
+ {0x5b69efb0b59d7c3cb3e000f83bf5eee119c26d01784d0e9ca610ea257ad2bdb_Z,
+ 0x45fd04cabf84be493dd9a47a90182ec4ae643e358db3a14b22053792464d67e_Z},
+ {0x2a1253a76225044b7570d52a0b01af8a40009566232463889d19fd5d635efc6_Z,
+ 0x4355a45710a74ec1851ab2596e92a4c81faf0681fbd322f3d0f8f18856a6f0b_Z},
+ {0x7e296fae037055854c61cf8141f72ad1f62af7293190d2d0f7fe6737cd2629c_Z,
+ 0x29ecab578a1dda484f2893b77efcc70702e6753b8c2cd40044b4015a7175f89_Z},
+ {0x7bfa8216ce89a4c360722735f13d484fd20fb221c2f6e34be9e6708ab5592ac_Z,
+ 0x6df2d64f1aacc72a06fe75383d52e3328efc464716a96c47e47f69d7d5835cf_Z},
+ {0x571c6be7385e9acea1a8eedd70270086738480819d5a2ebb91bde58be06ddc1_Z,
+ 0x2ba3d227d8ded5f4a98826cfa268d1abfbe97a7072e83382653de10e6d44378_Z},
+ {0x190a23a7480e1d4833e3662ce87ff618e0c437bdb54d7d82c8193f8819a3255_Z,
+ 0x57f72e7e6713ff0ce96a1faa5dcd4c7c2412fedb49de63961d81b5377af33d3_Z},
+ {0x32d8e8b25dcd84d1cc223e0b7bdf99c04db3fb3955055499d4d91a9545ec5fb_Z,
+ 0x2177c769e4c84ab8f053498527300bdc5611999945cf1ffc78a7d4cad593d3d_Z},
+ {0x3df1cfeb003596452518dadb133fb4449f49063e27bb0e3569e1a20b0a70c11_Z,
+ 0x758c616eb3074b2f85a915a7696c5ba683fc0150e7fee84487fd566599a9971_Z},
+ {0x645dd6a836468b29829869e7f2359c8e95fe08e30f660523c850531356c0d8d_Z,
+ 0x7b61e97bc7ca81e535fe795a750ce2a7ba792ffc1aa85fd7945ee2b3222eeef_Z},
+ {0x3b2b262b7568f12cc4291bdba92ac2fdfe21c3e8b89090d12b8f6bcffe04d89_Z,
+ 0x340e826874ca13f76a71e60ed1f281839b53bca9e47eb88dae8f8ef9d8265b1_Z},
+ {0x432d29695922222abfb1d117951ce2c9dcc108e0a2fde6bab65fe82d9b673f3_Z,
+ 0x541f4bb81ec0b86779953cc031f2d6753328d660cbfd0ac95f4ec0763895e62_Z},
+ {0x1559f1ff03020f8bd0f7704945f9653fa653d874b416e5aeab649c545ff928f_Z,
+ 0x6c5e9520a2a5743f9ed939094214177c01ae54a766aec3ac0b2a2bfb150e1d6_Z},
+ {0x70285a537c5743ad18c507c9fc7b36f7c89d92f1f4b8838b14a3fcc674e3287_Z,
+ 0x46044c8e9b3f2c3997eada3c6f2ab7850c5521250cd5c8be58b250e09d03cbf_Z},
+ {0x19e89768f95f21153f7d5761ea30e8c1e423b688dc7ad57cef583b8d9ba038b_Z,
+ 0x5568d4089ec4493d5498b434e4c359b6172448d22d70948fa0aae37503abb1f_Z},
+ {0x3ca52895174255e5c604353552d0add7ac0383066c7a52a5a6e628bc71e3d73_Z,
+ 0x4b7113d92fe45b2dba693c72125588d29841e283d53d3471f6aac16fdd92afb_Z},
+ {0x72b02492dc9313f4c685484b1a4e9e3a9d66d4d4048512956c8e4d2972268d0_Z,
+ 0xda3d0bcaf753c75ed7434e0bf15db062e65a1b2ab9c8075ff7f0584f786db3_Z},
+ {0x6b65039005a452e7655b2191392c45bc4de40f54c8b0fea343ee8dc5e1ad2e8_Z,
+ 0x345e91a226e39a6456460bf1d41a8e8aaacec83f7414ba4f37974d0d9e4ec96_Z},
+ {0x1912e5ee42b5ca2f8b5424a98202980753341047e9bb4552134f37fac14f676_Z,
+ 0x7c6e7bfa7feb9171b895c0b1ce6f666a5739009e7a8474210b6c4bdb1bbd4b2_Z},
+ {0x48502deb034583aba6b2561fff8b79fe0575f6fdbea905b6aaa8534824adcaa_Z,
+ 0x78c7d1400fa78bd65fe0228b0a9d39ef52a03ae0be17b3041ab8d62ea74bb62_Z},
+ {0x432403d50ea249943adac5035feb01ecb01ffc7afa15f57c4eef8a99eab1fc7_Z,
+ 0x6e6585f10c8eeee417e39b428573beb81dfc18d21f51cf6bdbc4c96779d8eb4_Z},
+ {0x329e2012285ce7853b795c9b49391d5c8f840fa272b4b41ea31c0424d6873e6_Z,
+ 0x64b7c9332ced6ebca8e286375f0a0a82042e7d18566560939345c14782b19ee_Z},
+ {0x6018f13d059ce660df1cf0e39848f7f98f6862122eca13f6d2a4894e4eb0c96_Z,
+ 0x1351307604eea32271d80d7de43bd14d9193b8bb206721beff30b77cece1efe_Z},
+ {0x27d98d968d65a5e8499c93a8c07bd7d06015b7474f50a2da8c3fcb52172ef25_Z,
+ 0x1b08032d41cc31370de64b7190e2786d8e2fef0089d2cd22dc625f027500f93_Z},
+ {0x75abce30534a0fa9dd1e47dec066129c1ed0eb7da1a158af6676e15899450d7_Z,
+ 0x4b0d6f9ebd604381cd31e0e1aeca8c3eda68d9d0e8070fb461998727b628e8f_Z},
+ {0x5171c08605159938775bc44986710deb27da9d9b8c13be44363d64fa9332203_Z,
+ 0xb8501685ff5a81abca64f511b26e781abd7654cb2d848595cd1eee0d4169e9_Z},
+ {0x628e25d795dc2f9e22cd26976c19d5a9426abfd0b32af5ee8e57d3356e6fd76_Z,
+ 0x17a2b6c94f22377d3b90f9c7073916c98792037e020cb282e910d22ae0ec6c7_Z},
+ {0x2d830e65cf45fbff86a69510d1f81f9a25701a85233fa943ddb3f048ed760c4_Z,
+ 0xe40ee450b343be503d6e0221dfff8300b86db28d3951cc8b92627444d78728_Z},
+ {0x33db16f15031a95e038b84aab7c3d811caaf60cdc86c2ac059f3392d9d82f2a_Z,
+ 0x3e1c6ea96fc16cf45f964042085ba7ffd3939920bb2a8b63477a300e467b82_Z},
+ {0x478156135addad99f4d9836ae3679b88198b9618b245ca439ec6922952ddbb8_Z,
+ 0x74be4449e9995725e3fd329631914837447a6994b5c0fd92a015d1016bb3ea3_Z},
+ {0xb078248c627348d14be00d67098ff502824ed0d000f01bc6137f53d57108ce_Z,
+ 0x3af46e874248384d75b833aa7d6f96153fd1cd68a20be14ba1528e8e27e2b1d_Z},
+ {0x43436a0ac6e38ef524622233e962e010e4c3ea8f65d8937b9d0d19368e75f22_Z,
+ 0x1dcb4bce500942e5e5a1255da1278063123568e5979f746fc09831b57c3c391_Z},
+ {0x5fd652881aa2693c6c3c5ce8297f39b1e9df6b1f76a8e7fa37fae4e42c67ac9_Z,
+ 0x2d7ddb2b3a990fc2a84b239fb5b60e2589fa205bc5527caa7f85693ebd82a2c_Z},
+ {0x131bbdc5a18e78e7668cece6358454b59409018242abf5010d4a278489378df_Z,
+ 0x596732c7866fbb1dae935d243ff370c00c713e7cf227ca08719ef57d418c6e3_Z},
+ {0x87eb4a6f86ce8db30cb357d6b312748849534e96fb2064e37bf0e23a2461dd_Z,
+ 0x6efddd20c82c7bcc41ffd27c33ac2ddd323af83ab126a8b8de0523b76f71a0_Z},
+ {0x28036905e8ad4ad24082c3e6187cd37b978e60549ff033f852a2bf39b58d347_Z,
+ 0x4f55aa9486996ecc55539161f0c00ca0bf1aaf63a3ca065bbd39fd56d0bf1c7_Z},
+ {0x2e9b202d9f3add0cc5ee7cf6e539640a2d8c021e0661f9f61fa8b2c0f3025c8_Z,
+ 0x24fbf10584140f45ccd97b7bdd7e26f4cdc3830f4f1076eacb9c5368f9a1141_Z},
+ {0x4fe1d9cd818f16803ff8518096b357e98775d84cd55cef5f6561048a212ccf6_Z,
+ 0x782ff1acacefb956bbb4db36b53e0abad7eb94c28f8a979e9bc64ee58be6e87_Z},
+ {0x5949ccb93fabcd906657018bc59bc7724c7806b02c16067260493e9ed0dfeb4_Z,
+ 0x11df2be72526fb2c7998995aec224df748f1b01189b4f5cb751a9befffaa62d_Z},
+ {0x11bcaaa03c63fa34f4babbcfadcbef45b607ca99fee6cdc206c85772079d23a_Z,
+ 0xcc0cab40822a3f803d733c1a538791c500143791d3dc95c47cec83aeabe878_Z},
+ {0x6a3b80b9f3f77053cfe7607d2a94526d7409361a20eab4601e723ac557400c4_Z,
+ 0x50fe612b5cdb308da922f8b98426041407f799321544e2303355bb1490f5c43_Z},
+ {0x31880b42036960b5650a848ed9e86a67d9e947b8ef5c389a012e3536f679ff1_Z,
+ 0x3502e2fbf039026aea3779a4d6b452f999520d915829a415e495c19f00b40eb_Z},
+ {0x3ba2f7e3ea919e1aeb00afe10bca5a7d940ed15e32877744f5afb7dd3307f72_Z,
+ 0x7402cb75a1784097932623d7f5dc67aabe5e202fb5683bf144f97f25a1a75c0_Z},
+ {0x152a0d759e3d99c4ee1bae5ccb3add41cab2f0a915ebc7994fbe17e8c16c2a6_Z,
+ 0x2546344519f6b35e94d41ac75d8fce5c48363ef744a849bd947b60dc4da531a_Z},
+ {0x6c67f02177664b56fb032c8296f54c738e020bb4a968d7fc74ce255aa2d76c2_Z,
+ 0x160494da260ef06969ddd1069214d399c25a0cb8e9e796d73df73fd808bc55b_Z},
+ {0xca20f58bdd8b2cebf93f9373bc62d94a53325175dfdebcdf259d8df64e505e_Z,
+ 0x376ad50a444a37554edb7733e9fcdce3f3f5b10e01954c3db1716d049a4586c_Z},
+ {0x7069deb975d5c2b160c9f943c403ac0be6df00ffe23606a6708e7abbbbc2110_Z,
+ 0x3374f89a706fdb690c1e38fdc1e8a91ce2de01c8f4a2f96b748bc7dd7d44c5d_Z},
+ {0x7e3b2f6c449e105727a55b7875869acad18b05ef45646baef683c4017b60c24_Z,
+ 0x3c25cdddf8f7904118a8ff9c81036706a4f39edc1d77ba770309c230a9d0c65_Z},
+ {0x16fba8459862e9deadaf846c4b5b92126485fff525e7486e75f2270bcf27dac_Z,
+ 0x78568c1cfd5541cddb3b78bfae372d71d4094c83a4da2454b0830c88192295_Z},
+ {0x7b045f1eccd4828b79ae9f4dda9080f891d1d25dc1d1b7daf82325651880ed5_Z,
+ 0x38beeee5e7e5339f21a5cc335fc1da4b5e2f265f7c36e541714fe9c328a0dac_Z},
+ {0xb0edf652c470bc9e555aa4d3eb66fa5aef80ba9daaf10abccdef3c7a784c1c_Z,
+ 0x30592c9aaa613a965d224c5d80856cf63bef9a9ce9bb5f2e0ff55e10b4ca630_Z},
+ {0x2532a2b4b24dc6b0595c4b9d55e90ddcad124ccf37abe317caa293f233e7e3a_Z,
+ 0x147ae8b445c1335d4b883d0dfaa3afc3f42d970777cdbd4af027236590e892c_Z},
+ {0x2d61d3a719d78ce751eef76cf84e1ed4f144d549cd6e82833e2dfab989787bc_Z,
+ 0x292efb15f851b384dbaffdf9f5f392bc146f041da01546a60a9b0241251f5bc_Z},
+ {0x5c8853d75a7171147709bf2b2a61254bb4253283ce6c31567d0b54a7d4fd614_Z,
+ 0x3844f4a9c9542c454f21426ce923317ca638536b0c8e21831be64a5f0673187_Z},
+ {0x7ed6bb72c544e32d39002f7f42ab053d5d3b4fd30efdc1714b8316e7371447a_Z,
+ 0x78a39ae4fd507e9e1239559d08e63c2c643f66fa975079c773fe42432de1e9d_Z},
+ {0x5b0fb1cd21a15c1adcd1de4181ae9df92beaa790a70f8fe1dd4bb3dcccf0a56_Z,
+ 0x72b1c419a1769015ebb4ec7574725e56b324adfac6b270c130bf992fc757d54_Z},
+ {0x6fcd2d13d76c14b93891bee2f29e41c05eea71bfad6d1a3bb41ae374c442d2e_Z,
+ 0x6461615f730e6cd45acf5b9731389a6ad6f920aa9e00b5fb9771cddf3c6dcbc_Z},
+ {0x34689327933520a0c196345fb520e6357021ff285dbbdc27d011a45dba63c7e_Z,
+ 0x326dbae561cc9a6e240beff736766d48b929c52951a1d17c378248bcb449bf9_Z},
+ {0x5d01c051059f7731f3516f148c6d8f748cd9dd27294e89d1dbfbb7d8719ddf8_Z,
+ 0x51fc4755e0cb57d1a20ba0da40c1644aeb7e21fe133ae8c48d3a7189e6c4e62_Z},
+ {0xd669677be87d87a2aec188a0734ba8b36822d2050ea3ac9c08476332730f63_Z,
+ 0x747f037ede3d72e0b8a8a61a33470436d2fe57a01e5366d6920abdb3c4241a0_Z},
+ {0xf2fcc3e4c01b6f88cec60a180ab39971031fe40037870f1231abbab46774a7_Z,
+ 0x30a29e5032d74800d2bb64bbff2a7464b09faf3c1163dd60a5b956bf7d37e0_Z},
+ {0x30d5fa5a7deb5473bfdf96f48c62e2e74f1e20d725a8b8024e098754500937a_Z,
+ 0x77ee292c9e674b67b02cb656fb5e5ed797963cde261077826569020f948a400_Z},
+ {0x353422847452595eeb42491eef7ff046a489ba204cc7713972d13985adc4799_Z,
+ 0x43eaa6aee689e456fda5472c7842ae0ffdf34c69e016261329549215930b722_Z},
+ {0xff9c8bd9da0f9bba69339ffe26fa4f31d1c8deb61bfc2a0e53f542efa9894f_Z,
+ 0x6e2956683dd63a6d43c6a63099c9f528d82a2d7a417e5559d95a51ee32741c9_Z},
+ {0x25519ad4974d3c51ea09edf3f4ad4992307453b513cac412185b6922190db0f_Z,
+ 0x5f6e31f8f5218ddb86e47e3585e345a0e4e819d0b33aa3656b160e8ab54e2df_Z},
+ {0x534c696772e1677d412797c4ce9848f4940ec47a791868022c08a202afd80e4_Z,
+ 0x48a1fd856ca958bb2306e6cb4ae2634b307723d58588579fede2f44836bfd4a_Z},
+ {0x28f36c7c4993063c0e3556219b0ba1d22a41614983b88734770ab52b86293ea_Z,
+ 0x57fa579a22d122924386b1b2709ae82e44af5283523408a21854353a4fb7db3_Z},
+ {0x7a6bf283d8cb8ee332ccde05cfe39b79f9256e9b67c815731616995f04b37dc_Z,
+ 0x52f2cc7e99f18aa74055fe27174dd98b13eb62b58820101d490cca785811d19_Z},
+ {0x43a3915601111e4b65875d627651162e35ada3ff800a7cd82b45401b437b7b0_Z,
+ 0x514ea8c6d3d81de7d2f7da64b99ea4b3d513ee09cd0b0b35966c361943ab767_Z},
+ {0x6acf2d8696603cf4bd69ea84ceb8965bda071ead5fea9130c2f351af8805775_Z,
+ 0x6bff1e20909266f70a8093392f06f90b752f0d4dfb3f0251cc197f3813230ec_Z},
+ {0xd730ffffba62a7cdd01cfc234a5bb6f0c9e103cff5e186ff21072666220008_Z,
+ 0x1410f2db1f1b173876015ac4e82d5a7fc2dbbd89c572bdaff4843359c522a_Z},
+ {0x4e9176d3f7d8611e5b0dad5358c36c20b07f8f22161b9861fbe8580c2b94725_Z,
+ 0x5f392ef87f97cf8ba8d16992cd44b5923d96bcf60d299bd0fde7d0c79bee558_Z},
+ {0x63ad76e0daf34503a99f189b720b741334846ae2eac1361510b04481635c8f_Z,
+ 0x3aa055ceec2860c6da9f820a5cfb26ea5bec859c92a8ce79f3c9db82b8b2bdc_Z},
+ {0x755956b5bfaf8c29438a06974b51462abdff7ef87fea3cccf64da2d12ca5fc0_Z,
+ 0xa38380c3c87baa2c6a9bd006ab41dee5e20fe429e2797fb3fccf700b526ade_Z},
+ {0x67d11353c4a408c5bc238f22eb8a587e4e45d9ccc0fa368eb58d983bba31b9_Z,
+ 0x226df7784553b5b581e4b154b695f17f0aba64ed2576baac010deb26209a7d7_Z},
+ {0x74699ea52e3a3ddccc266e5103801fdf8f5244dd90a6e956a873e722d41000e_Z,
+ 0x7bb47c7d17b634f63b4d03f708b213718be0a616a4328a1c2bcbf319d585fa1_Z},
+ {0x1aff3c42435c9a2d586951e484b1d99945ea9fdcf4ea4fe886caed837a4a781_Z,
+ 0x74e136a3ff17c06c7d80636474bba9a3b69efc37d168da3005b9e0cf6d3a32f_Z},
+ {0x2af5cdc5d24d0ec821436b7c8d101b86680895e481342867a2f513f65acd588_Z,
+ 0x5cdc0540c7f914cc74a8d780ae71588bf607ad8678740732483f6d75097383d_Z},
+ {0x12e55895598fed285a076a0b67be9a75a18e9782fba04bc3c4596f039d3f91_Z,
+ 0x415e108362e4bfa184cb1217a86a10ef75a9c1735bd035705d132e135d1487f_Z},
+ {0x5b9bb03217304d6e21cb120a29b6b1a7c032fecfdb4267dd972865dd802bb2e_Z,
+ 0x6744d60e969b91f7bc23023f081405bb630deeb5e7d5f891b6c4a16ce74fd24_Z},
+ {0xa19b50cdb68f3ba25914e9e0c256153de8a784983477e666fb9e35dc324b58_Z,
+ 0x6c3742c8d889c6d78a4457eaf676891b4bb78d31d331556f1e1eb422e00e997_Z},
+ {0x1c41af23dd2d880d2cd9e00da34652ededa32d86abda28220d443a6d6d9a526_Z,
+ 0x32bb392277d245a518ef1b1378dbf475e1971621e8d0b9296ec17e66c796e8f_Z},
+ {0x5696362d1996d3ab9590fe6efa5681402218ebfdb62b3823014ca6cb964500_Z,
+ 0x41369fb45470626fcb91bcf2bb36b37072c099f6e0c02f1186714f3cb43a67e_Z},
+ {0x5de0528a7857369e377b7e3cc6a0242c37487fa8498bfe1d5cfbd23a173cef7_Z,
+ 0x3ba72911f5f741e260b0dcfde93b2e9efa819135847083170911b8a0cc14457_Z},
+ {0x2b5f39b9680e1f3455f934ac5f82236c3bef8fbe2d82bb004f1a63ea9d0a078_Z,
+ 0x7e8c9deb4ab5c7c49cca9b852d9ab01833455a0044be1ea2acd01e35f1d459c_Z},
+ {0x5a0fb4a941ea9fc92fada62a9bb70853b436c3bd40a6170703b05198c36ce99_Z,
+ 0x72d3e0a41851c425a83a413448414214a35d9e785a870d1f9b5555105322bfd_Z},
+ {0x7b263af26400af6523cb5371808c2d9e328a8d64f8d64d658d01bc3a55f5896_Z,
+ 0x1c87ef17d66a92ae07c55592cd75c474698348898dc778545c395a1931fd220_Z},
+ {0x3c6987eb0db2455329e2e1bbd3f4fe489d611e34949f18fae1159db5ac90a2c_Z,
+ 0x7dfeade17a4de80690e4858d0bd2c2ed509a3cc24abf61adbfb1392b9259628_Z},
+ {0x41b0908f1b22462f50bea2821a41dfb63bb9c99f0b46a9ddb7e2d3c6e42624f_Z,
+ 0x4ef9a0000b31d4e4fc6e5333ea3f75cc8c109dae8884ed0842d66bdab98c181_Z},
+ {0x5a97c5c22fe6126e616600b9290ca79bb1559c24d17d9be7e2897912627bebc_Z,
+ 0x290aa7175f9a88bb521229156d95131962a4b9f97019d25cb4e035979b6b83e_Z},
+ {0x72fa1ac62d78ffd626c58700286869e0d83f2e654337ca0aba531f4d6c55fe9_Z,
+ 0x28fdcd3b2f8a05df0b6472d2893f88f063847c558b32484f7a526aa176cde61_Z},
+ {0x6288b393a5ee49a2e681441a0000a6bbd93862f5014f8d6118da65b1cec7c3e_Z,
+ 0x79a757687561f7d3e28c38ac52a5eb9abbad434d5602944f170973400a530b_Z},
+ {0x296c0885871770d82e4936ab13ff095106fcff5a6f6a717768404092d27438d_Z,
+ 0x232357bef69ec966a35d68fb02c4d13a5041b91f97d8bddcb66ebbe8cdc92c0_Z},
+ {0x22b74938bc3c0f01bab7d8014f82ed15c2c9e3502584d68b23cc49b061cfeb4_Z,
+ 0x1bd68eaf1fd0d25b20673cf2beefa40e719219e28027daafb4676186429bfe5_Z},
+ {0x6035419487a4c44584b35960f28314998775f92744d2950f12c4a09d850c35e_Z,
+ 0x5c1dcff4d003601b869fd57606e879906d6f3c5ecc5dd131bf19e23f00c87dc_Z},
+ {0x3a8031bb37a07d1fc901f3899542835164d474bd0bbda3515a9927ac2099879_Z,
+ 0x2975ee8fd8b03850d29b4a5300af7e8a6e6ced3aa92c8572448df80f1330b0_Z},
+ {0x3145991f434db6b85e7fe8e6c8b2f4bb86f6269f78bdac2ada40e9ecf3b1023_Z,
+ 0x2a3456594fa0abdf683853ffcf5e076d0507946a66e8679bb2b975e0fc8c308_Z},
+ {0x18d88e5a10ca642a49f4ef6e0800306cda1e3fbdd6b0685252ed95d2214ea80_Z,
+ 0x1e37cb95beed80d95d37e636b1b8ff544ed46fc2e1f31af943e1c22faeef286_Z},
+ {0x46ed465d5a716a91dc4e33e29571823fd3016199daf4ee4ef91dacfe146003b_Z,
+ 0x7ead4cf3c80595dcf09e52ee7e20076f4dd83a923cc333294acae9434bec0d3_Z},
+ {0x7be5fb85294422ff899decf255f7cb2ab92badfda88becdd2c066f6f6f89318_Z,
+ 0xdecf0e2f89110caf7821d23f74185775e2c083d65edfa14aa9267b18894640_Z},
+ {0x2a2370efa4c63b6645cb146952ab856a9043545ab10f48074e74e5b8b7a34ea_Z,
+ 0x2ad8bda41768f3ec3d9f99e103a36fe49e54ca6c0484fcabfdaf0bd6e39ca1e_Z},
+ {0x7c6ae8e5927161976a566d4ded4c185cbf5cb8757f9fdba18ec50491bad075d_Z,
+ 0x28cc01dbdbf255ba95cffee404953368e2b3dbe2966345ee8852d50eaf90fb1_Z},
+ {0x5518b2308ae8e88b9153c1b053ed3ce8ca53e726a45c507ba6fe400f8f1a443_Z,
+ 0x1e3e5d6aecc7b847e5148b651199fcec653fa4684b2de5a6f0f84579e656a8_Z},
+ {0x677e92701efe79e503d601a86ac16316e5e54b8db6866335497d8558047b9e6_Z,
+ 0x7602c024046aff921dfc9cfba35d4a5b063fbf5a814d02e5e4aa8c64f30a346_Z},
+ {0x7affb3f80cc5748494f05cd59e69f777df86443bd4ac7c64ad5c4e304998294_Z,
+ 0x145eab080128dd81e6ca3937a8b175355ee65aec816237f55035c0859c717a0_Z},
+ {0x1feb81cdf33fa17f9cc8b7cab0d5ff3f8eda819f9533c1f96a8c6721ab8273b_Z,
+ 0x1fc0267c7de843cf4571d69ee16b7e100c1aa5d58fea96014920976727f73e0_Z},
+ {0x30695dc5dc203e6dd0b86746db008fe3de796a36815952509bacefb43a969ae_Z,
+ 0x7c14905354cb90fb75417ea8894ade13b72039df5effd2b1c7a549d75ef5e39_Z},
+ {0x17ac9e8bb6fc520b4b181ecb2a763490fb107f6825307fb6de962d1b96ecb15_Z,
+ 0x5ccf93f17a1c0446f06d0b3e6c6e951a3bbc3754787dad6b13db72a970b24cd_Z},
+ {0x79fd139d6123dcc50a56e9bb79782170e660a959ae910195dabb342583ab994_Z,
+ 0x20ca3d4e48b7091a687070e13840827f44ae3ba0d2cf57d3363cc6c4a105a71_Z},
+ {0x5901cc324168796096d09cb91154c574f7fcb2724604b534dee5f5705ff5ee3_Z,
+ 0x16f8db7dd1294ef466e7cc75e5ade8c4db6bfdc8d3ca5375c3171c5b5602be5_Z},
+ {0x774991bfded5676421c6634b0708b57ad795284fdd9b35b2f3eebc9d825de76_Z,
+ 0x4083e65888bdf72522753ecb6dbebfe515b9046188cdafe1baabb3c6acd1502_Z},
+ {0x44117de27481477452862c161e2ebf2f25643c2b153b46ed1cb5b06aa91d75d_Z,
+ 0x4c82003c7b2c06d3d03ef38c85da2025d6c792008464835f89911d7b629eb79_Z},
+ {0x31af6e7496b9fda1af3d921f7c6bde1cc8b44ffd54e7c0bdea631669149029e_Z,
+ 0x27f265f734c0748b6f27caf14f42b720fa10d3b0a01a9fc61c52b4bbd7db5f7_Z},
+ {0x58b80a14442bbca0a0c23e4441a0ff88402d0d8811c20d840590dd172f80f56_Z,
+ 0x614965f7d864d6bcac190ba8386d45850a53c43d759ff595aa25fd47d819de7_Z},
+ {0x71948fef9cf0e2caf918883104ea7153009c305322870d504d44a1868bcb8db_Z,
+ 0x68a65ec9bea022e156726f8e1c51e36b4a6b5465ebba54ddef0d2f114987886_Z},
+ {0x6f790944c01ec08aea6c0a8b8498e6fdebe2f723332c5d1734bd558f1e440_Z,
+ 0x5e08f859e593bcc5654c918a96248f58e46471ff51ed4cc9a69598cf47d124a_Z},
+ {0x7ac66e970a3f7df1a59b6b46936891575d5452fa90f4b813917d644abda6e91_Z,
+ 0x554090e6f5dd27f78e7c149565ebbe8afe1fc1ca7128e52dc7e2da1f5cf8ba7_Z},
+ {0x52c5264bbd83d671820dd2f9221d057ba050a501b6c721ec77404424f74ae7c_Z,
+ 0x5a407e6a1e1e52609914a468d510f3ffa45be931b881b6fabf3154b86a3724d_Z},
+ {0x3e4e94af698b03793a6e0975fc6d4afff3204e1ad5d02f66602f1939942783a_Z,
+ 0x3ae8774152d576d2c12a94fc347b788853568d4ad49946bc1d6745fbf5f735d_Z},
+ {0x3a23b51ef56eda14f929ae715832083da8d8ff1b2b1f40ecef539e60f04167b_Z,
+ 0x494c7016ea479def3ccfa4c47e551215f29e8905723a69a96b93c0e496efc0e_Z},
+ {0x6d45acc5bbe1ccb6cdb703063c74cfa21c1f3a21a59f9cecf716831c54a28f9_Z,
+ 0x70d4998663cb3042a7f3a278a14741185d97528c79f2583d6397d45125c71d5_Z},
+ {0x59d7bf8bf6be9a3e792a86d3b1693b79365354ccd66e3238a3d837e297e5e4c_Z,
+ 0x6abab3fad87153ca6568efa386c94d7470ac8dc89abf726c0f8b72616009bf5_Z},
+ {0x6dac169a2b256442e21d8af8a5fb0db74c48aeabc7ccffed182486156c1a986_Z,
+ 0x67aec17c79453728a3be44e0a1806d15e433d85bc5eb4eac0edc9153397d542_Z},
+ {0x699546888289aebe1795af7cc0d2c83b30327d019e4c9e01665d3450ea4f756_Z,
+ 0xbdbf4295455701be1e648d03729c26b0bb8a5b4b60f726184bf858be24e46b_Z},
+ {0x7286faddf0403fed8fb16840e60f624e88d66dac90c8c8dc7536fcc8f682ab9_Z,
+ 0x6aef44260d4dc77b5cfc05cab4005631b213939a69835f3523494367a03468c_Z},
+ {0x1859dcecf9b2ee187134d76caa6b40e55e699bf822cdbfe0db9c9a1db7f4db4_Z,
+ 0x20dca845a62654643495067b8613045f3bdc8af69799eb888cc5fbe8e8a998d_Z},
+ {0x31d61264dc643821bcb0a6467500933a9418a5ad5d291156c8ab3b796175ab_Z,
+ 0x33eba79d2c508bbf71b7f5f01903f4488e5e79b1de04ef0f3f9e21adc8ae950_Z},
+ {0x58d0811300bf3740fbf522d7d91b524da1fe6eb69bc4478340dc6c25c977f98_Z,
+ 0x1fcb528cc898516f3ba25c56b9075af57b6ba921c04133df2f45b0f071e8a79_Z},
+ {0x6431f995f6050cb9bf592688e3334be4a15d36aa2d4293ad9d8951a0f6522f1_Z,
+ 0x20d2c1654e7872bc764ae3932ed3e48913f363e95500b04b5213902825854ef_Z},
+ {0x7998d1e4b9df95112cde2774571ad4eb5d85b8c994d5cde7ff854f66d446a5b_Z,
+ 0x65bd3baeec8420de17fb60c14c22f26d13102ae2ab4b40779289c89a3ef8af4_Z},
+ {0x6a10729d3426d0e7d4e4ba17ea5e662555b29f177ce8ce76360fe1162e61f4c_Z,
+ 0x5ad6e4c49bc693224f497a35e29a1cd25d60744c4e072941e8364c9b6db0ba3_Z},
+ {0x5bb440cbb07b78c7e161d49302605d9a39c9df08b29866bd1f9460fc865693e_Z,
+ 0x4f301f2f284b4dedc845dd0233ed2f0e10a831bf44949ce009d81ceeae5d3ad_Z},
+ {0x16e671569ddd634b235207a1dd7534fbaeeb1b9ff258666734e8fe854d5b0f0_Z,
+ 0x29b9716f059c5f50f47f295d51ee9be2bc86781adb29c4777696e0c04515933_Z},
+ {0xf7ed43c69ae47d3fb1b003ea46662f99a40c00c3aabc5fc18ed19812e9ace_Z,
+ 0x6b27b3bd78d428a39b17d14bb67f14eee9f7a9e4f47178fb13b3771b59920e4_Z},
+ {0x646b693faac93d996ec8073fdf9872db19164d3b429db0fea51a85f271e3b25_Z,
+ 0x6c32faae574b9dfdf89c42cfcda3b29b77841928d9f6fd865282b5892a751ad_Z},
+ {0x46340adb23ac9d2854acd649d33a369cd0f3d44cf72df48f8d1a54c4ce7ba01_Z,
+ 0x643607d669dd430b7a89d82a77a3f07c968a6c8ec46bc73fe45f9bce267008e_Z},
+ {0x577c3ceb96a6a0d000039fd9172bf1592cde479d6c7027dc3b734f2d93456a3_Z,
+ 0x3ab8b4194e01015872812892f02ff2c57e1c5447bc974c0dc7e5f23f8ebaeab_Z},
+ {0x7e3a579884f8ebbcb037b4dce6fa7c8144abf4150aacf2c92baed923a4539bf_Z,
+ 0x3b6ab45393c6905097f10a1ca2a2f15e4543654f0b729b6388430a87186bc08_Z},
+ {0x57278ccce2f700b98559d59ea2b24d9b710b28c0be45c9ad2b52f18759f4992_Z,
+ 0x74e9c1347aa9ae6a574ddbeb4808bae7ebdd6d05d74b820dc811b2d3d4e1de3_Z},
+ {0x293851117a3c62e9ad3d584fc0271882ce9212c3550e18c0a3dc6435a08f804_Z,
+ 0x487d363b24ea945d11b948be7c6d431b0f222a42a1643662bfa565b704d4b12_Z},
+ {0x2bb40c08016a28a012ada59f8326af6459b32c931369ff5eb0e66e2bfa91839_Z,
+ 0x6d3c13cfe4442c3cf334bdea918421f0304f84fee0503b37fa254fa9695a7fd_Z},
+ {0x41b60152c072939d03382d16008e225296ef34ac904b2ca1b53117ac42a9fc0_Z,
+ 0x2395c3c6b29c382e472f8a8cdfe115387ee92f0e9effb0950884d447729378c_Z},
+ {0x49c91a07842eb149de925554f5cc911d0622477f621af42dc661b6254d92149_Z,
+ 0x6f6df3d40a3257d8cb1030182345ee33275a9729736559ae2b9493e71b13f66_Z},
+ {0x5327c310ff48d8828030be496b732a5c8d4b54d6d33359a55bf86f73338648c_Z,
+ 0xc5496b84211e0e6c6bfca22b6fe4a90c9ab4b74f38f5aef24063e19f33807f_Z},
+ {0xc29502e40237c3cdfb3df12402e00b10ecd34196deb16b9847d16f6a722ea6_Z,
+ 0x39aac26bf68523e8d0f71f4fe3b57370abb5e93a88b0be25d51933b2ce00ef_Z},
+ {0x29e6ff0160618de648e497c1bd71e29fa85e64c5de6384131a45a680106899e_Z,
+ 0x44489c972cd0206777be0e3d538a50dc41129e9ef6a573ffca463817c4b56b8_Z},
+ {0x7abdf663a32846ce3589e251644a0392ae0b2331e2ae9ceee2a8121bbe2f377_Z,
+ 0x1a6887de903db96dba6ea4ed4da968bf62b9b7cfc0794caf3fbfd8cd2b4e79f_Z},
+ {0x57bdd25e214e6d5ac0fcff6d136d883b26a58357a80c2292e734f989568bfcd_Z,
+ 0x21a4ba863a96efdde95da050be9d1fea4696bd9b9b9c658f2f89a6335aebffc_Z},
+ {0x639c23ba45096922137df71352eb293c33b69bb8cab211560fd74337dff135e_Z,
+ 0x52b4abdfb9884ec8a5e36501042d7bae523ea25ad9cf727b68acb34ed30f9bd_Z},
+ {0x296a39feb0d8f12f0338d21bf5d4e41879a2f0bb4e1115cfdf302b0c193efb9_Z,
+ 0x44111a6871525d015a8953659c0a25c843e73b6c831f3ce0fd61bacebd8645a_Z},
+ {0xe0a5fbd6a3040de8fd18a69512df864c9f9a6bac714dd89f63c16dc98d0edc_Z,
+ 0x5cd6903683197c35e29a53fa9dc7b2dc108d6d5d4c4c1b61bf0a36dc28281f1_Z},
+ {0x35b6b70b29b725bdd4abfaa9016424455fef75cf5db8ac105349c22a6cf0c79_Z,
+ 0x26257545e6c40ce862da7092d0078574b354960625f899bb0459c5fb457b040_Z},
+ {0x4ad230e998d6327668c6efb8da326c3cccfaab8318068ede5c35b785f466972_Z,
+ 0x71d065349d2a37db9c8da8696d6537c83ec5ad9da66d445adc85ad6d14ad4b7_Z},
+ {0x4b623eea766641c914601bd0e0dcb05118b99b5be46fcf4016d43270fcbb75b_Z,
+ 0x4f279640e187ad4a28be5ff7c3f3ae0784c65242f9d8dd8c075f441f0ce6931_Z},
+ {0x1113b3c7239bd09c41d279ad7152ccff0846d8205fa9e0d15bf61d18346f4f7_Z,
+ 0x2724c2af72e534450ed6eba9441674693f54bd82b3f205ae4fbb27bc4fecb7_Z},
+ });
+ return *prime_field_ec0;
+}
+
+} // namespace starkware
diff --git a/erigon-lib/pedersen_hash/elliptic_curve_constants.h b/erigon-lib/pedersen_hash/elliptic_curve_constants.h
new file mode 100644
index 00000000000..a3bee807a10
--- /dev/null
+++ b/erigon-lib/pedersen_hash/elliptic_curve_constants.h
@@ -0,0 +1,119 @@
+#ifndef STARKWARE_CRYPTO_ELLIPTIC_CURVE_CONSTANTS_H_
+#define STARKWARE_CRYPTO_ELLIPTIC_CURVE_CONSTANTS_H_
+
+#include
+#include
+#include
+
+#include "big_int.h"
+#include "elliptic_curve.h"
+#include "prime_field_element.h"
+
+namespace starkware {
+
+/*
+ Contains a set of constants that go along with an elliptic curve.
+
+ FieldElementT is the underlying field of the curve.
+ The equation of the elliptic curve is y^2 = x^3 + k_alpha * x + k_beta.
+ k_order is the size of the group.
+ k_points are points on the curve that were generated independently in a "nothing up my sleeve"
+ manner to ensure that no one knows their discrete log.
+*/
+template
+struct EllipticCurveConstants {
+ public:
+ using ValueType = typename FieldElementT::ValueType;
+
+ const FieldElementT k_alpha;
+ const FieldElementT k_beta;
+ const ValueType k_order;
+ const std::vector> k_points;
+
+ constexpr EllipticCurveConstants(
+ const FieldElementT& k_alpha, const FieldElementT& k_beta, const ValueType& k_order,
+ std::vector> k_points) noexcept
+ : k_alpha(k_alpha), k_beta(k_beta), k_order(k_order), k_points(std::move(k_points)) {}
+
+ constexpr EllipticCurveConstants(
+ const ValueType& k_alpha, const ValueType& k_beta, const ValueType& k_order,
+ std::initializer_list> k_points) noexcept
+ : EllipticCurveConstants(
+ FieldElementT::FromBigInt(k_alpha), FieldElementT::FromBigInt(k_beta), k_order,
+ ECPointsVectorFromPairs(std::move(k_points))) {}
+
+ private:
+ static std::vector> ECPointsVectorFromPairs(
+ std::initializer_list> k_points) {
+ std::vector> res;
+ res.reserve(k_points.size());
+
+ for (const auto& p : k_points) {
+ res.emplace_back(FieldElementT::FromBigInt(p.first), FieldElementT::FromBigInt(p.second));
+ }
+ return res;
+ }
+};
+
+/*
+ This elliptic curve over the prime field PrimeFieldElement was chosen in a "nothing up my sleeve"
+ manner to show that we don't know any special properties of this curve (other than being of prime
+ order).
+
+ alpha was chosen to be 1 because any elliptic curve has an isomorphic curve with a small alpha,
+ but we didn't want a zero alpha because then the discriminant is small.
+
+ beta was generated in the following way:
+ 1) Take beta to be the integer whose digits are the first 76 decimal digits of pi (76 is the
+ number of digits required to represent a field element).
+ 2) While [y^2 = x^3 + alpha * x + beta] is not a curve of prime order, increase beta by 1.
+
+ The points were generated by the following steps:
+ 1) Take the decimal digits of pi and split them into chunks of 76 digits (the number of decimal
+ digits of the modulus).
+
+ 2) Each chunk of 76 digits is the seed for generating a point, except for the first chunk
+ which was used for generating the curve.
+
+ 3) For each such seed x:
+
+ 3.1) while (x^3 + alpha * x + beta) is not a square in the prime field:
+ increase x by 1.
+
+ 3.2) (x, square_root(x^3 + alpha * x + beta)) is a point on the elliptic curve (for square_root
+ the smaller root).
+
+
+ 4) The first two points are taken as-is, as they will be used as the shift point and the
+ ECDSA generator point.
+
+ 5) Each subsequent point P is expanded to 248 or 4 points alternatingly, by taking the set
+ {2^i P : 0 <= i < 248} or {2^i P : 0 <= i < 3}. 248 is chosen to be the largest multiple of 8
+ lower than 251.
+
+ This is a sage code that implements these steps:
+
+ R = RealField(400000)
+ long_pi_string = '3' + str(R(pi))[2:]
+ p = 2^251 + 17 * 2^192 + 1
+ beta = GF(p)(long_pi_string[:76]) + 379
+ ec = EllipticCurve(GF(p), [1, beta])
+ points = []
+ for i in range(1, 13):
+ x = GF(p)(int(long_pi_string[i * 76 : (i+1) * 76]))
+ while not is_square(x^3 + x + beta):
+ x += 1
+ P = ec((x, sqrt(x^3 + x + beta)))
+ if i <= 2:
+ points.append(P.xy())
+ continue
+ for j in range(248 if i%2==1 else 4):
+ points.append(P.xy())
+ P *= 2
+ print "".join("{0x%x_Z,0x%x_Z},\n" % p for p in points)
+*/
+const EllipticCurveConstants& GetEcConstants();
+
+} // namespace starkware
+
+#endif // STARKWARE_CRYPTO_ELLIPTIC_CURVE_CONSTANTS_H_
diff --git a/erigon-lib/pedersen_hash/error_handling.h b/erigon-lib/pedersen_hash/error_handling.h
new file mode 100644
index 00000000000..424420c919c
--- /dev/null
+++ b/erigon-lib/pedersen_hash/error_handling.h
@@ -0,0 +1,31 @@
+#ifndef STARKWARE_UTILS_ERROR_HANDLING_H_
+#define STARKWARE_UTILS_ERROR_HANDLING_H_
+
+#include
+#include
+#include
+
+namespace starkware {
+
+class StarkwareException : public std::exception {
+ public:
+ explicit StarkwareException(std::string message) : message_(std::move(message)) {}
+ const char* what() const noexcept { return message_.c_str(); } // NOLINT
+
+ private:
+ std::string message_;
+};
+
+/*
+ We use "do {} while(false);" pattern to force the user to use ; after the macro.
+*/
+#define ASSERT(cond, msg) \
+ do { \
+ if (!(cond)) { \
+ throw StarkwareException(msg); \
+ } \
+ } while (false)
+
+} // namespace starkware
+
+#endif // STARKWARE_UTILS_ERROR_HANDLING_H_
diff --git a/erigon-lib/pedersen_hash/ffi_pedersen_hash.cc b/erigon-lib/pedersen_hash/ffi_pedersen_hash.cc
new file mode 100644
index 00000000000..fccdfd22bec
--- /dev/null
+++ b/erigon-lib/pedersen_hash/ffi_pedersen_hash.cc
@@ -0,0 +1,56 @@
+#include "ffi_pedersen_hash.h"
+#include "pedersen_hash.h"
+
+#include
+
+#include "prime_field_element.h"
+#include "ffi_utils.h"
+
+#include "gsl-lite.hpp"
+
+namespace starkware {
+
+namespace {
+
+using ValueType = PrimeFieldElement::ValueType;
+
+constexpr size_t kElementSize = sizeof(ValueType);
+constexpr size_t kOutBufferSize = 1024;
+static_assert(kOutBufferSize >= kElementSize, "kOutBufferSize is not big enough");
+
+} // namespace
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int Hash(
+ const gsl::byte in1[kElementSize], const gsl::byte in2[kElementSize],
+ gsl::byte out[kOutBufferSize]) {
+ try {
+ auto hash = PedersenHash(
+ PrimeFieldElement::FromBigInt(Deserialize(gsl::make_span(in1, kElementSize))),
+ PrimeFieldElement::FromBigInt(Deserialize(gsl::make_span(in2, kElementSize))));
+ Serialize(hash.ToStandardForm(), gsl::make_span(out, kElementSize));
+ } catch (const std::exception& e) {
+ return HandleError(e.what(), gsl::make_span(out, kOutBufferSize));
+ } catch (...) {
+ return HandleError("Unknown c++ exception.", gsl::make_span(out, kOutBufferSize));
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern C
+#endif
+} // namespace starkware
+
+
+
+int GoHash(const char* in1, const char* in2, char* out) {
+ return starkware::Hash(
+ reinterpret_cast(in1),
+ reinterpret_cast(in2),
+ reinterpret_cast(out));
+}
+
diff --git a/erigon-lib/pedersen_hash/ffi_pedersen_hash.h b/erigon-lib/pedersen_hash/ffi_pedersen_hash.h
new file mode 100644
index 00000000000..2dd79d45b30
--- /dev/null
+++ b/erigon-lib/pedersen_hash/ffi_pedersen_hash.h
@@ -0,0 +1,7 @@
+#ifndef STARKWARE_CRYPTO_FFI_PEDERSEN_HASH_H_
+#define STARKWARE_CRYPTO_FFI_PEDERSEN_HASH_H_
+
+int Hash(const char* in1, const char* in2, char* out);
+int GoHash(const char* in1, const char* in2, char* out);
+
+#endif // STARKWARE_CRYPTO_FFI_PEDERSEN_HASH_H_
diff --git a/erigon-lib/pedersen_hash/ffi_utils.cc b/erigon-lib/pedersen_hash/ffi_utils.cc
new file mode 100644
index 00000000000..e3fd95c1d47
--- /dev/null
+++ b/erigon-lib/pedersen_hash/ffi_utils.cc
@@ -0,0 +1,38 @@
+#include
+#include
+#include
+
+#include "ffi_utils.h"
+
+namespace starkware {
+
+using ValueType = PrimeFieldElement::ValueType;
+
+int HandleError(const char* msg, gsl::span out) {
+ const size_t copy_len = std::min(strlen(msg), out.size() - 1);
+ memcpy(out.data(), msg, copy_len);
+ memset(out.data() + copy_len, 0, out.size() - copy_len);
+ return 1;
+}
+
+ValueType Deserialize(const gsl::span span) {
+ const size_t N = ValueType::LimbCount();
+ ASSERT(span.size() == N * sizeof(uint64_t), "Source span size mismatches BigInt size.");
+ std::array value{};
+ gsl::copy(span, gsl::byte_span(value));
+ for (uint64_t& x : value) {
+ x = le64toh(x);
+ }
+ return ValueType(value);
+}
+
+void Serialize(const ValueType& val, const gsl::span span_out) {
+ const size_t N = ValueType::LimbCount();
+ ASSERT(span_out.size() == N * sizeof(uint64_t), "Span size mismatches BigInt size.");
+ for (size_t i = 0; i < N; ++i) {
+ uint64_t limb = htole64(val[i]);
+ gsl::copy(gsl::byte_span(limb), span_out.subspan(i * sizeof(uint64_t), sizeof(uint64_t)));
+ }
+}
+
+} // namespace starkware
diff --git a/erigon-lib/pedersen_hash/ffi_utils.h b/erigon-lib/pedersen_hash/ffi_utils.h
new file mode 100644
index 00000000000..cde1b9ca5ab
--- /dev/null
+++ b/erigon-lib/pedersen_hash/ffi_utils.h
@@ -0,0 +1,31 @@
+#ifndef STARKWARE_CRYPTO_FFI_UTILS_H_
+#define STARKWARE_CRYPTO_FFI_UTILS_H_
+
+#include
+
+#include "pedersen_hash.h"
+
+#include "gsl-lite.hpp"
+
+namespace starkware {
+
+using ValueType = PrimeFieldElement::ValueType;
+
+/*
+ Handles an error, and outputs a relevant error message as a C string to out.
+*/
+int HandleError(const char* msg, gsl::span out);
+
+/*
+ Deserializes a BigInt (PrimeFieldElement::ValueType) from a byte span.
+*/
+ValueType Deserialize(const gsl::span span);
+
+/*
+ Serializes a BigInt (PrimeFieldElement::ValueType) to a byte span.
+*/
+void Serialize(const ValueType& val, const gsl::span span_out);
+
+} // namespace starkware
+
+#endif // STARKWARE_CRYPTO_FFI_UTILS_H_
diff --git a/erigon-lib/pedersen_hash/fraction_field_element.h b/erigon-lib/pedersen_hash/fraction_field_element.h
new file mode 100644
index 00000000000..5800086ae1b
--- /dev/null
+++ b/erigon-lib/pedersen_hash/fraction_field_element.h
@@ -0,0 +1,75 @@
+#ifndef STARKWARE_ALGEBRA_FRACTION_FIELD_ELEMENT_H_
+#define STARKWARE_ALGEBRA_FRACTION_FIELD_ELEMENT_H_
+
+#include "error_handling.h"
+#include "prng.h"
+
+namespace starkware {
+
+/*
+ Represents a field element as an element of the fraction field of the original field. The elements
+ of the fraction field are a/b for a,b in the original field, and b != 0. The representation of
+ a FieldElementT b is b/1. Addition and multiplication for the fraction field are defined naturally
+ (see operator+ and operator*). The resulting field is isomorphic to the original field. This
+ fractional representation of the original field enables to perform an inverse cheaply: the inverse
+ of a/b is simply b/a.
+*/
+template
+class FractionFieldElement {
+ public:
+ explicit constexpr FractionFieldElement(const FieldElementT& num_val)
+ : numerator_(num_val), denominator_(FieldElementT::One()) {}
+
+ /*
+ Creates a FractionFieldElement with the value num_val/denom_val.
+ denom_val can't be zero.
+ */
+ constexpr FractionFieldElement(const FieldElementT& num_val, const FieldElementT& denom_val)
+ : numerator_(num_val), denominator_(denom_val) {
+ ASSERT(denominator_ != FieldElementT::Zero(), "Denominator can't be zero.");
+ }
+
+ FractionFieldElement operator+(const FractionFieldElement& rhs) const;
+
+ FractionFieldElement operator-(const FractionFieldElement& rhs) const;
+
+ FractionFieldElement operator-() const { return FractionFieldElement(-numerator_, denominator_); }
+
+ FractionFieldElement operator*(const FractionFieldElement& rhs) const;
+ FractionFieldElement operator/(const FractionFieldElement& rhs) const {
+ return *this * rhs.Inverse();
+ }
+
+ bool operator==(const FractionFieldElement& rhs) const;
+ bool operator!=(const FractionFieldElement& rhs) const { return !(*this == rhs); }
+
+ FractionFieldElement Inverse() const;
+
+ static constexpr FractionFieldElement Zero() {
+ return FractionFieldElement(FieldElementT::Zero());
+ }
+
+ static constexpr FractionFieldElement One() { return FractionFieldElement(FieldElementT::One()); }
+
+ /*
+ Returns a fraction field element: its numerator is a random FieldElementT generated by
+ FieldElementT::RandomElement of and its denominator is FieldElementT::One().
+ */
+ static FractionFieldElement RandomElement(Prng* prng) {
+ return FractionFieldElement(FieldElementT::RandomElement(prng));
+ }
+
+ FieldElementT ToBaseFieldElement() const { return this->numerator_ * denominator_.Inverse(); }
+
+ explicit operator FieldElementT() const { return ToBaseFieldElement(); }
+
+ private:
+ FieldElementT numerator_;
+ FieldElementT denominator_;
+};
+
+} // namespace starkware
+
+#include "fraction_field_element.inl"
+
+#endif // STARKWARE_ALGEBRA_FRACTION_FIELD_ELEMENT_H_
diff --git a/erigon-lib/pedersen_hash/fraction_field_element.inl b/erigon-lib/pedersen_hash/fraction_field_element.inl
new file mode 100644
index 00000000000..710bf35e8d2
--- /dev/null
+++ b/erigon-lib/pedersen_hash/fraction_field_element.inl
@@ -0,0 +1,42 @@
+#include "fraction_field_element.h"
+
+#include "error_handling.h"
+
+namespace starkware {
+
+template
+FractionFieldElement FractionFieldElement::operator+(
+ const FractionFieldElement& rhs) const {
+ const auto num_value = this->numerator_ * rhs.denominator_ + this->denominator_ * rhs.numerator_;
+ const auto denom_value = this->denominator_ * rhs.denominator_;
+ return FractionFieldElement(num_value, denom_value);
+}
+
+template
+FractionFieldElement FractionFieldElement::operator-(
+ const FractionFieldElement& rhs) const {
+ const auto num_value = this->numerator_ * rhs.denominator_ - this->denominator_ * rhs.numerator_;
+ const auto denom_value = this->denominator_ * rhs.denominator_;
+ return FractionFieldElement(num_value, denom_value);
+}
+
+template
+FractionFieldElement FractionFieldElement::operator*(
+ const FractionFieldElement& rhs) const {
+ return FractionFieldElement(
+ this->numerator_ * rhs.numerator_, this->denominator_ * rhs.denominator_);
+}
+
+template
+bool FractionFieldElement